// SPDX-License-Identifier: GPL-2.0-or-later
/* mpihelp-div.c  -  MPI helper functions
 *	Copyright (C) 1994, 1996 Free Software Foundation, Inc.
 *	Copyright (C) 1998, 1999 Free Software Foundation, Inc.
 *
 * This file is part of GnuPG.
 *
 * Note: This code is heavily based on the GNU MP Library.
 *	 Actually it's the same code with only minor changes in the
 *	 way the data is stored; this is to support the abstraction
 *	 of an optional secure memory allocation which may be used
 *	 to avoid revealing of sensitive data due to paging etc.
 *	 The GNU MP Library itself is published under the LGPL;
 *	 however I decided to publish this code under the plain GPL.
 */

#include "mpi-internal.h"
#include "longlong.h"

#ifndef UMUL_TIME
#define UMUL_TIME 1
#endif
#ifndef UDIV_TIME
#define UDIV_TIME UMUL_TIME
#endif
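
/*
 * UMUL_TIME and UDIV_TIME are rough relative costs of umul_ppmm() and
 * udiv_qrnnd() (in multiply-time units).  They only steer the heuristic
 * below that decides whether pre-inverting the divisor pays off; the
 * values above are conservative fallbacks, not measured timings.
 */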
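
/* Return the single-limb remainder of (DIVIDEND_PTR,DIVIDEND_SIZE)
 * divided by DIVISOR_LIMB, without storing a quotient.  DIVISOR_LIMB
 * may be any non-zero limb value.
 */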
mpi_limb_t
mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
	      mpi_limb_t divisor_limb)
{
	mpi_size_t i;
	mpi_limb_t n1, n0, r;
	mpi_limb_t dummy __maybe_unused;

	/* Botch: Should this be handled at all?  Rely on callers? */
	if (!dividend_size)
		return 0;

	/* If multiplication is much faster than division, and the
	 * dividend is large, pre-invert the divisor, and use
	 * only multiplications in the inner loop.
	 *
	 * This test should be read:
	 *	Does it ever help to use udiv_qrnnd_preinv?
	 *	  && Does what we save compensate for the inversion overhead?
	 */
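	/* Note (added for clarity): with the fallback UMUL_TIME/UDIV_TIME
	 * values defined above (both 1), this condition is always false and
	 * the plain udiv_qrnnd path below is taken; the pre-inversion path
	 * is only used when an architecture supplies realistic timings
	 * (typically in longlong.h).
	 */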
	if (UDIV_TIME > (2 * UMUL_TIME + 6)
			&& (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
		int normalization_steps;

		normalization_steps = count_leading_zeros(divisor_limb);
		if (normalization_steps) {
			mpi_limb_t divisor_limb_inverted;

			divisor_limb <<= normalization_steps;

			/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB.  The
			 * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
			 * most significant bit (with weight 2**N) implicit.
			 *
			 * Special case for DIVISOR_LIMB == 100...000.
			 */
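			/* Illustration (not from the original source): with 8-bit
			 * limbs (N == 8) and a normalized divisor 0xC0, the stored
			 * value is (2**16 - 2**8 * 0xC0) / 0xC0 = 0x55, i.e. the
			 * full 9-bit approximation 0x155 ~= 2**16 / 0xC0.
			 */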
			if (!(divisor_limb << 1))
				divisor_limb_inverted = ~(mpi_limb_t)0;
			else
				udiv_qrnnd(divisor_limb_inverted, dummy,
						-divisor_limb, 0, divisor_limb);

			n1 = dividend_ptr[dividend_size - 1];
			r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);

			/* Possible optimization:
			 * if (r == 0
			 * && divisor_limb > ((n1 << normalization_steps)
			 *		 | (dividend_ptr[dividend_size - 2] >> ...)))
			 * ...one division less...
			 */
			for (i = dividend_size - 2; i >= 0; i--) {
				n0 = dividend_ptr[i];
				UDIV_QRNND_PREINV(dummy, r, r,
						((n1 << normalization_steps)
						 | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
						divisor_limb, divisor_limb_inverted);
				n1 = n0;
			}
			UDIV_QRNND_PREINV(dummy, r, r,
					n1 << normalization_steps,
					divisor_limb, divisor_limb_inverted);
			return r >> normalization_steps;
		} else {
			mpi_limb_t divisor_limb_inverted;

			/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB.  The
			 * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
			 * most significant bit (with weight 2**N) implicit.
			 *
			 * Special case for DIVISOR_LIMB == 100...000.
			 */
			if (!(divisor_limb << 1))
				divisor_limb_inverted = ~(mpi_limb_t)0;
			else
				udiv_qrnnd(divisor_limb_inverted, dummy,
						-divisor_limb, 0, divisor_limb);

			i = dividend_size - 1;
			r = dividend_ptr[i];

			if (r >= divisor_limb)
				r = 0;
			else
				i--;

			for (; i >= 0; i--) {
				n0 = dividend_ptr[i];
				UDIV_QRNND_PREINV(dummy, r, r,
						n0, divisor_limb, divisor_limb_inverted);
			}
			return r;
		}
	} else {
		if (UDIV_NEEDS_NORMALIZATION) {
			int normalization_steps;

			normalization_steps = count_leading_zeros(divisor_limb);
			if (normalization_steps) {
				divisor_limb <<= normalization_steps;

				n1 = dividend_ptr[dividend_size - 1];
				r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);

				/* Possible optimization:
				 * if (r == 0
				 * && divisor_limb > ((n1 << normalization_steps)
				 *		 | (dividend_ptr[dividend_size - 2] >> ...)))
				 * ...one division less...
				 */
				for (i = dividend_size - 2; i >= 0; i--) {
					n0 = dividend_ptr[i];
					udiv_qrnnd(dummy, r, r,
						((n1 << normalization_steps)
						 | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
						divisor_limb);
					n1 = n0;
				}
				udiv_qrnnd(dummy, r, r,
					n1 << normalization_steps,
					divisor_limb);
				return r >> normalization_steps;
			}
		}
		/* No normalization needed, either because udiv_qrnnd doesn't require
		 * it, or because DIVISOR_LIMB is already normalized.
		 */
		i = dividend_size - 1;
		r = dividend_ptr[i];

		if (r >= divisor_limb)
			r = 0;
		else
			i--;

		for (; i >= 0; i--) {
			n0 = dividend_ptr[i];
			udiv_qrnnd(dummy, r, r, n0, divisor_limb);
		}
		return r;
	}
}

/* Divide num (NP/NSIZE) by den (DP/DSIZE) and write
 * the NSIZE-DSIZE least significant quotient limbs at QP
 * and the DSIZE long remainder at NP.  If QEXTRA_LIMBS is
 * non-zero, generate that many fraction bits and append them after the
 * other quotient limbs.
 * Return the most significant limb of the quotient; this is always 0 or 1.
 *
 * Preconditions:
 * 0. NSIZE >= DSIZE.
 * 1. The most significant bit of the divisor must be set.
 * 2. QP must either not overlap with the input operands at all, or
 *    QP + DSIZE >= NP must hold true.  (This means that it's
 *    possible to put the quotient in the high part of NUM, right after the
 *    remainder in NUM.)
 * 3. NSIZE >= DSIZE, even if QEXTRA_LIMBS is non-zero.
 */
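
/*
 * Usage sketch (added for illustration; the local variable names are
 * hypothetical and the divisor is assumed to be pre-normalized so that
 * its most significant bit is set, per precondition 1 above):
 *
 *	mpi_limb_t q_high;
 *
 *	q_high = mpihelp_divrem(qp, 0, np, nsize, dp, dsize);
 *	// qp[0 .. nsize-dsize-1] now holds the low quotient limbs,
 *	// q_high is the top quotient limb (0 or 1), and
 *	// np[0 .. dsize-1] holds the remainder.
 */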

mpi_limb_t
mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
	       mpi_ptr_t np, mpi_size_t nsize, mpi_ptr_t dp, mpi_size_t dsize)
{
	mpi_limb_t most_significant_q_limb = 0;

	switch (dsize) {
	case 0:
		/* We are asked to divide by zero, so go ahead and do it!  (To make
		   the compiler not remove this statement, return the value.)  */
		/*
		 * existing clients of this function have been modified
		 * not to call it with dsize == 0, so this should not happen
		 */
		return 1 / dsize;

	case 1:
		{
			mpi_size_t i;
			mpi_limb_t n1;
			mpi_limb_t d;

			d = dp[0];
			n1 = np[nsize - 1];

			if (n1 >= d) {
				n1 -= d;
				most_significant_q_limb = 1;
			}

			qp += qextra_limbs;
			for (i = nsize - 2; i >= 0; i--)
				udiv_qrnnd(qp[i], n1, n1, np[i], d);
			qp -= qextra_limbs;

			for (i = qextra_limbs - 1; i >= 0; i--)
				udiv_qrnnd(qp[i], n1, n1, 0, d);

			np[0] = n1;
		}
		break;

	case 2:
		{
			mpi_size_t i;
			mpi_limb_t n1, n0, n2;
			mpi_limb_t d1, d0;

			np += nsize - 2;
			d1 = dp[1];
			d0 = dp[0];
			n1 = np[1];
			n0 = np[0];

			if (n1 >= d1 && (n1 > d1 || n0 >= d0)) {
				sub_ddmmss(n1, n0, n1, n0, d1, d0);
				most_significant_q_limb = 1;
			}

			for (i = qextra_limbs + nsize - 2 - 1; i >= 0; i--) {
				mpi_limb_t q;
				mpi_limb_t r;

				if (i >= qextra_limbs)
					np--;
				else
					np[0] = 0;

				if (n1 == d1) {
					/* Q should be either 111..111 or 111..110.  Need special
					 * treatment of this rare case as normal division would
					 * give overflow. */
					q = ~(mpi_limb_t) 0;

					r = n0 + d1;
					if (r < d1) {	/* Carry in the addition? */
						add_ssaaaa(n1, n0, r - d0,
							   np[0], 0, d0);
						qp[i] = q;
						continue;
					}
					n1 = d0 - (d0 != 0 ? 1 : 0);
					n0 = -d0;
				} else {
					udiv_qrnnd(q, r, n1, n0, d1);
					umul_ppmm(n1, n0, d0, q);
				}

				n2 = np[0];
q_test:
				if (n1 > r || (n1 == r && n0 > n2)) {
					/* The estimated Q was too large. */
					q--;
					sub_ddmmss(n1, n0, n1, n0, 0, d0);
					r += d1;
					if (r >= d1)	/* If not carry, test Q again. */
						goto q_test;
				}

				qp[i] = q;
				sub_ddmmss(n1, n0, r, n2, n1, n0);
			}
			np[1] = n1;
			np[0] = n0;
		}
		break;

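	/* General case (dsize > 2): classical schoolbook long division.
	 * Each pass estimates one quotient limb from the top dividend limbs,
	 * subtracts q * divisor with mpihelp_submul_1(), and corrects q
	 * downward when the estimate was too large.  (Descriptive note added
	 * for clarity; see the adjustment steps below.)
	 */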
	default:
		{
			mpi_size_t i;
			mpi_limb_t dX, d1, n0;

			np += nsize - dsize;
			dX = dp[dsize - 1];
			d1 = dp[dsize - 2];
			n0 = np[dsize - 1];

			if (n0 >= dX) {
				if (n0 > dX
				    || mpihelp_cmp(np, dp, dsize - 1) >= 0) {
					mpihelp_sub_n(np, np, dp, dsize);
					n0 = np[dsize - 1];
					most_significant_q_limb = 1;
				}
			}

			for (i = qextra_limbs + nsize - dsize - 1; i >= 0; i--) {
				mpi_limb_t q;
				mpi_limb_t n1, n2;
				mpi_limb_t cy_limb;

				if (i >= qextra_limbs) {
					np--;
					n2 = np[dsize];
				} else {
					n2 = np[dsize - 1];
					MPN_COPY_DECR(np + 1, np, dsize - 1);
					np[0] = 0;
				}

				if (n0 == dX) {
					/* This might over-estimate q, but it's probably not worth
					 * the extra code here to find out. */
					q = ~(mpi_limb_t) 0;
				} else {
					mpi_limb_t r;

					udiv_qrnnd(q, r, n0, np[dsize - 1], dX);
					umul_ppmm(n1, n0, d1, q);

					while (n1 > r
					       || (n1 == r
						   && n0 > np[dsize - 2])) {
						q--;
						r += dX;
						if (r < dX)	/* I.e. "carry in previous addition?" */
							break;
						n1 -= n0 < d1;
						n0 -= d1;
					}
				}

				/* Possible optimization: we already have (q * n0) and (1 * n1)
				 * after the calculation of q above; taking advantage of that
				 * would save two iterations of the loop inside
				 * mpihelp_submul_1() below. */
				cy_limb = mpihelp_submul_1(np, dp, dsize, q);

				if (n2 != cy_limb) {
					mpihelp_add_n(np, np, dp, dsize);
					q--;
				}

				qp[i] = q;
				n0 = np[dsize - 1];
			}
		}
	}

	return most_significant_q_limb;
}

/****************
 * Divide (DIVIDEND_PTR, DIVIDEND_SIZE) by DIVISOR_LIMB.
 * Write DIVIDEND_SIZE limbs of quotient at QUOT_PTR.
 * Return the single-limb remainder.
 * There are no constraints on the value of the divisor, other than
 * that it must be non-zero.
 *
 * QUOT_PTR and DIVIDEND_PTR might point to the same limb.
 */
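
/*
 * Usage sketch (added for illustration; variable names are hypothetical):
 *
 *	mpi_limb_t rem;
 *
 *	rem = mpihelp_divmod_1(qp, np, nsize, 10);
 *	// qp[0 .. nsize-1] now holds the quotient limbs and rem the
 *	// remainder of the division by 10.  qp may alias np, in which
 *	// case the quotient overwrites the dividend in place.
 */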

mpi_limb_t
mpihelp_divmod_1(mpi_ptr_t quot_ptr,
		mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
		mpi_limb_t divisor_limb)
{
	mpi_size_t i;
	mpi_limb_t n1, n0, r;
	mpi_limb_t dummy __maybe_unused;

	if (!dividend_size)
		return 0;

	/* If multiplication is much faster than division, and the
	 * dividend is large, pre-invert the divisor, and use
	 * only multiplications in the inner loop.
	 *
	 * This test should be read:
	 *	Does it ever help to use udiv_qrnnd_preinv?
	 *	  && Does what we save compensate for the inversion overhead?
	 */
	if (UDIV_TIME > (2 * UMUL_TIME + 6)
			&& (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
		int normalization_steps;

		normalization_steps = count_leading_zeros(divisor_limb);
		if (normalization_steps) {
			mpi_limb_t divisor_limb_inverted;

			divisor_limb <<= normalization_steps;

			/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB.  The
			 * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
			 * most significant bit (with weight 2**N) implicit.
			 */
			/* Special case for DIVISOR_LIMB == 100...000. */
			if (!(divisor_limb << 1))
				divisor_limb_inverted = ~(mpi_limb_t)0;
			else
				udiv_qrnnd(divisor_limb_inverted, dummy,
						-divisor_limb, 0, divisor_limb);

			n1 = dividend_ptr[dividend_size - 1];
			r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);

			/* Possible optimization:
			 * if (r == 0
			 * && divisor_limb > ((n1 << normalization_steps)
			 *		 | (dividend_ptr[dividend_size - 2] >> ...)))
			 * ...one division less...
			 */
			for (i = dividend_size - 2; i >= 0; i--) {
				n0 = dividend_ptr[i];
				UDIV_QRNND_PREINV(quot_ptr[i + 1], r, r,
						((n1 << normalization_steps)
						 | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
						divisor_limb, divisor_limb_inverted);
				n1 = n0;
			}
			UDIV_QRNND_PREINV(quot_ptr[0], r, r,
					n1 << normalization_steps,
					divisor_limb, divisor_limb_inverted);
			return r >> normalization_steps;
		} else {
			mpi_limb_t divisor_limb_inverted;

			/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB.  The
			 * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
			 * most significant bit (with weight 2**N) implicit.
			 */
			/* Special case for DIVISOR_LIMB == 100...000. */
			if (!(divisor_limb << 1))
				divisor_limb_inverted = ~(mpi_limb_t) 0;
			else
				udiv_qrnnd(divisor_limb_inverted, dummy,
						-divisor_limb, 0, divisor_limb);

			i = dividend_size - 1;
			r = dividend_ptr[i];

			if (r >= divisor_limb)
				r = 0;
			else
				quot_ptr[i--] = 0;

			for (; i >= 0; i--) {
				n0 = dividend_ptr[i];
				UDIV_QRNND_PREINV(quot_ptr[i], r, r,
						n0, divisor_limb, divisor_limb_inverted);
			}
			return r;
		}
	} else {
		if (UDIV_NEEDS_NORMALIZATION) {
			int normalization_steps;

			normalization_steps = count_leading_zeros(divisor_limb);
			if (normalization_steps) {
				divisor_limb <<= normalization_steps;

				n1 = dividend_ptr[dividend_size - 1];
				r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);

				/* Possible optimization:
				 * if (r == 0
				 * && divisor_limb > ((n1 << normalization_steps)
				 *		 | (dividend_ptr[dividend_size - 2] >> ...)))
				 * ...one division less...
				 */
				for (i = dividend_size - 2; i >= 0; i--) {
					n0 = dividend_ptr[i];
					udiv_qrnnd(quot_ptr[i + 1], r, r,
						((n1 << normalization_steps)
						 | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
						divisor_limb);
					n1 = n0;
				}
				udiv_qrnnd(quot_ptr[0], r, r,
					n1 << normalization_steps,
					divisor_limb);
				return r >> normalization_steps;
			}
		}
		/* No normalization needed, either because udiv_qrnnd doesn't require
		 * it, or because DIVISOR_LIMB is already normalized.
		 */
		i = dividend_size - 1;
		r = dividend_ptr[i];

		if (r >= divisor_limb)
			r = 0;
		else
			quot_ptr[i--] = 0;

		for (; i >= 0; i--) {
			n0 = dividend_ptr[i];
			udiv_qrnnd(quot_ptr[i], r, r, n0, divisor_limb);
		}
		return r;
	}
}