/* SPDX-License-Identifier: GPL-2.0-or-later */
/* multi_arith.h: multi-precision integer arithmetic functions, needed
   to do extended-precision floating point.

   (c) 1998 David Huggins-Daines.

   Somewhat based on arch/alpha/math-emu/ieee-math.c, which is (c)
   David Mosberger-Tang.

 */

/* Note:

   These are not general multi-precision math routines.  Rather, they
   implement the subset of integer arithmetic that we need in order to
   multiply, divide, and normalize 128-bit unsigned mantissae. */

#ifndef MULTI_ARITH_H
#define MULTI_ARITH_H

static inline void fp_denormalize(struct fp_ext *reg, unsigned int cnt)
{
	reg->exp += cnt;

	switch (cnt) {
	case 0 ... 8:
		reg->lowmant = reg->mant.m32[1] << (8 - cnt);
		reg->mant.m32[1] = (reg->mant.m32[1] >> cnt) |
				   (reg->mant.m32[0] << (32 - cnt));
		reg->mant.m32[0] = reg->mant.m32[0] >> cnt;
		break;
	case 9 ... 32:
		reg->lowmant = reg->mant.m32[1] >> (cnt - 8);
		if (reg->mant.m32[1] << (40 - cnt))
			reg->lowmant |= 1;
		reg->mant.m32[1] = (reg->mant.m32[1] >> cnt) |
				   (reg->mant.m32[0] << (32 - cnt));
		reg->mant.m32[0] = reg->mant.m32[0] >> cnt;
		break;
	case 33 ... 39:
		asm volatile ("bfextu %1{%2,#8},%0" : "=d" (reg->lowmant)
			: "m" (reg->mant.m32[0]), "d" (64 - cnt));
		if (reg->mant.m32[1] << (40 - cnt))
			reg->lowmant |= 1;
		reg->mant.m32[1] = reg->mant.m32[0] >> (cnt - 32);
		reg->mant.m32[0] = 0;
		break;
	case 40 ... 71:
		reg->lowmant = reg->mant.m32[0] >> (cnt - 40);
		if ((reg->mant.m32[0] << (72 - cnt)) || reg->mant.m32[1])
			reg->lowmant |= 1;
		reg->mant.m32[1] = reg->mant.m32[0] >> (cnt - 32);
		reg->mant.m32[0] = 0;
		break;
	default:
		reg->lowmant = reg->mant.m32[0] || reg->mant.m32[1];
		reg->mant.m32[0] = 0;
		reg->mant.m32[1] = 0;
		break;
	}
}
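
/*
 * Illustrative sketch only (not used by the emulator): the same
 * shift-right-with-sticky operation in portable C, assuming the 64-bit
 * mantissa is held in a single u64 instead of two 32-bit words, and
 * that the kernel's u8/u32/u64 types (<linux/types.h>) are visible
 * here.  sketch_denormalize64 is a hypothetical name; the real code
 * above keeps the split-word layout so it can use bfextu for the
 * cross-word extract.
 */
static inline u8 sketch_denormalize64(u64 *mant, unsigned int cnt)
{
	u8 guard;

	if (cnt >= 72) {			/* everything shifted out */
		guard = *mant ? 1 : 0;		/* all bits become sticky */
		*mant = 0;
		return guard;
	}
	if (cnt > 8) {
		/* the top 8 discarded bits become the guard byte ... */
		guard = (u8)(*mant >> (cnt - 8));
		/* ... every bit below them collapses into the sticky bit */
		if (*mant << (64 - (cnt - 8)))
			guard |= 1;
	} else {
		guard = (u8)(*mant << (8 - cnt));
	}
	*mant = cnt < 64 ? *mant >> cnt : 0;
	return guard;
}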

static inline int fp_overnormalize(struct fp_ext *reg)
{
	int shift;

	if (reg->mant.m32[0]) {
		asm ("bfffo %1{#0,#32},%0" : "=d" (shift) : "dm" (reg->mant.m32[0]));
		reg->mant.m32[0] = (reg->mant.m32[0] << shift) | (reg->mant.m32[1] >> (32 - shift));
		reg->mant.m32[1] = (reg->mant.m32[1] << shift);
	} else {
		asm ("bfffo %1{#0,#32},%0" : "=d" (shift) : "dm" (reg->mant.m32[1]));
		reg->mant.m32[0] = (reg->mant.m32[1] << shift);
		reg->mant.m32[1] = 0;
		shift += 32;
	}

	return shift;
}
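
/*
 * Illustrative sketch only: fp_overnormalize() expressed with the GCC
 * builtin instead of the m68k bfffo instruction, again assuming a
 * single u64 mantissa.  sketch_normalize64 is a hypothetical name.
 * Like the real code, it relies on the mantissa being non-zero
 * (__builtin_clzll(0) is undefined).
 */
static inline int sketch_normalize64(u64 *mant)
{
	int shift = __builtin_clzll(*mant);	/* count leading zeros */

	*mant <<= shift;			/* bring the MSB to bit 63 */
	return shift;				/* caller adjusts the exponent */
}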

static inline int fp_addmant(struct fp_ext *dest, struct fp_ext *src)
{
	int carry;

	/* we assume here that gcc inserts only a move and a clr instruction */
	asm volatile ("add.b %1,%0" : "=d,g" (dest->lowmant)
		: "g,d" (src->lowmant), "0,0" (dest->lowmant));
	asm volatile ("addx.l %1,%0" : "=d" (dest->mant.m32[1])
		: "d" (src->mant.m32[1]), "0" (dest->mant.m32[1]));
	asm volatile ("addx.l %1,%0" : "=d" (dest->mant.m32[0])
		: "d" (src->mant.m32[0]), "0" (dest->mant.m32[0]));
	asm volatile ("addx.l %0,%0" : "=d" (carry) : "0" (0));

	return carry;
}
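
/*
 * Illustrative sketch only: the carry chain of fp_addmant() in portable
 * C, again assuming a u8 guard byte and a u64 mantissa.  The real code
 * needs addx.l because the X (extend) flag must thread through all
 * three additions; here the carry is propagated by hand.
 */
static inline int sketch_addmant(u64 *dmant, u8 *dlow, u64 smant, u8 slow)
{
	unsigned int byte = (unsigned int)*dlow + slow;
	u64 cin = byte >> 8;		/* carry out of the guard byte */
	u64 sum = *dmant + smant + cin;

	*dlow = (u8)byte;
	*dmant = sum;
	/* carry out of the 64-bit add */
	return sum < smant || (sum == smant && cin);
}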

static inline int fp_addcarry(struct fp_ext *reg)
{
	if (++reg->exp == 0x7fff) {
		if (reg->mant.m64)
			fp_set_sr(FPSR_EXC_INEX2);
		reg->mant.m64 = 0;
		fp_set_sr(FPSR_EXC_OVFL);
		return 0;
	}
	reg->lowmant = (reg->mant.m32[1] << 7) | (reg->lowmant ? 1 : 0);
	reg->mant.m32[1] = (reg->mant.m32[1] >> 1) |
			   (reg->mant.m32[0] << 31);
	reg->mant.m32[0] = (reg->mant.m32[0] >> 1) | 0x80000000;

	return 1;
}
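
/*
 * Illustrative sketch only: what fp_addcarry() does when fp_addmant()
 * reported a carry, in u64 form.  The FPSR exception bookkeeping is
 * elided; 0x7fff is the extended-precision exponent of infinity/NaN,
 * exactly as tested above.
 */
static inline int sketch_addcarry(u64 *mant, u8 *low, int *exp)
{
	if (++*exp == 0x7fff)
		return 0;			/* overflowed to infinity */
	/* shift right one, re-insert the carry as the new MSB,
	   and fold the old guard byte into a sticky bit */
	*low = (u8)((*mant << 7) | (*low ? 1 : 0));
	*mant = (*mant >> 1) | (1ULL << 63);
	return 1;
}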

static inline void fp_submant(struct fp_ext *dest, struct fp_ext *src1,
			      struct fp_ext *src2)
{
	/* we assume here that gcc inserts only a move and a clr instruction */
	asm volatile ("sub.b %1,%0" : "=d,g" (dest->lowmant)
		: "g,d" (src2->lowmant), "0,0" (src1->lowmant));
	asm volatile ("subx.l %1,%0" : "=d" (dest->mant.m32[1])
		: "d" (src2->mant.m32[1]), "0" (src1->mant.m32[1]));
	asm volatile ("subx.l %1,%0" : "=d" (dest->mant.m32[0])
		: "d" (src2->mant.m32[0]), "0" (src1->mant.m32[0]));
}

#define fp_mul64(desth, destl, src1, src2) ({			\
	asm ("mulu.l %2,%1:%0" : "=d" (destl), "=d" (desth)	\
		: "dm" (src1), "0" (src2));			\
})
#define fp_div64(quot, rem, srch, srcl, div)			\
	asm ("divu.l %2,%1:%0" : "=d" (quot), "=d" (rem)	\
		: "dm" (div), "1" (srch), "0" (srcl))
#define fp_add64(dest1, dest2, src1, src2) ({			\
	asm ("add.l %1,%0" : "=d,dm" (dest2)			\
		: "dm,d" (src2), "0,0" (dest2));		\
	asm ("addx.l %1,%0" : "=d" (dest1)			\
		: "d" (src1), "0" (dest1));			\
})
#define fp_addx96(dest, src) ({					\
	/* we assume here that gcc inserts only a move and a clr instruction */ \
	asm volatile ("add.l %1,%0" : "=d,g" (dest->m32[2])	\
		: "g,d" (src.m32[1]), "0,0" (dest->m32[2]));	\
	asm volatile ("addx.l %1,%0" : "=d" (dest->m32[1])	\
		: "d" (src.m32[0]), "0" (dest->m32[1]));	\
	asm volatile ("addx.l %1,%0" : "=d" (dest->m32[0])	\
		: "d" (0), "0" (dest->m32[0]));			\
})
#define fp_sub64(dest, src) ({					\
	asm ("sub.l %1,%0" : "=d,dm" (dest.m32[1])		\
		: "dm,d" (src.m32[1]), "0,0" (dest.m32[1]));	\
	asm ("subx.l %1,%0" : "=d" (dest.m32[0])		\
		: "d" (src.m32[0]), "0" (dest.m32[0]));		\
})
#define fp_sub96c(dest, srch, srcm, srcl) ({			\
	char carry;						\
	asm ("sub.l %1,%0" : "=d,dm" (dest.m32[2])		\
		: "dm,d" (srcl), "0,0" (dest.m32[2]));		\
	asm ("subx.l %1,%0" : "=d" (dest.m32[1])		\
		: "d" (srcm), "0" (dest.m32[1]));		\
	asm ("subx.l %2,%1; scs %0" : "=d" (carry), "=d" (dest.m32[0]) \
		: "d" (srch), "1" (dest.m32[0]));		\
	carry;							\
})
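
/*
 * Illustrative sketch only: portable equivalents of the fp_mul64() and
 * fp_div64() primitives, assuming a 64-bit intermediate type.  On the
 * 68020 and later, mulu.l and divu.l provide exactly this 32x32->64
 * multiply and 64/32->32:32 divide; the sketch_ names are ours.
 */
static inline void sketch_mul64(u32 *hi, u32 *lo, u32 a, u32 b)
{
	u64 p = (u64)a * b;

	*hi = (u32)(p >> 32);
	*lo = (u32)p;
}

static inline void sketch_div64(u32 *quot, u32 *rem, u32 nhi, u32 nlo, u32 d)
{
	u64 n = ((u64)nhi << 32) | nlo;

	/* callers keep nhi < d so the quotient fits in 32 bits;
	   divu.l would flag an overflow otherwise */
	*quot = (u32)(n / d);
	*rem = (u32)(n % d);
}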

static inline void fp_multiplymant(union fp_mant128 *dest, struct fp_ext *src1,
				   struct fp_ext *src2)
{
	union fp_mant64 temp;

	fp_mul64(dest->m32[0], dest->m32[1], src1->mant.m32[0], src2->mant.m32[0]);
	fp_mul64(dest->m32[2], dest->m32[3], src1->mant.m32[1], src2->mant.m32[1]);

	fp_mul64(temp.m32[0], temp.m32[1], src1->mant.m32[0], src2->mant.m32[1]);
	fp_addx96(dest, temp);

	fp_mul64(temp.m32[0], temp.m32[1], src1->mant.m32[1], src2->mant.m32[0]);
	fp_addx96(dest, temp);
}
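
/*
 * The four partial products above implement the schoolbook identity
 *
 *	(aH*2^32 + aL) * (bH*2^32 + bL)
 *		= aH*bH*2^64 + (aH*bL + aL*bH)*2^32 + aL*bL
 *
 * where aH/aL are src1's high/low mantissa words and bH/bL are src2's:
 * aH*bH lands in m32[0..1], aL*bL in m32[2..3], and each cross term is
 * added in at the 2^32 position by fp_addx96(), whose carry ripples
 * into the top word.
 */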

static inline void fp_dividemant(union fp_mant128 *dest, struct fp_ext *src,
				 struct fp_ext *div)
{
	union fp_mant128 tmp;
	union fp_mant64 tmp64;
	unsigned long *mantp = dest->m32;
	unsigned long fix, rem, first, dummy;
	int i;

	/* the algorithm below requires src to be smaller than div,
	   but both have the high bit set */
	if (src->mant.m64 >= div->mant.m64) {
		fp_sub64(src->mant, div->mant);
		*mantp = 1;
	} else
		*mantp = 0;
	mantp++;

	/* basic idea behind this algorithm: we can't divide two 64-bit
	   numbers (AB/CD) directly, but we can calculate AB/C0, whose
	   quotient is too large by a factor of CD/C0.  So we multiply
	   the first result by a precomputed approximation of C0/CD;
	   after that the result is nearly correct and only a few
	   corrections are needed. */

	/* C0/CD could be precalculated, but that is again a 64-bit
	   division.  We make it a bit easier by first dividing through
	   C, giving 10/1D; after a single shift the value fits into
	   32 bits. */
	fix = 0x80000000;
	dummy = div->mant.m32[1] / div->mant.m32[0] + 1;
	dummy = (dummy >> 1) | fix;
	fp_div64(fix, dummy, fix, 0, dummy);
	fix--;

	for (i = 0; i < 3; i++, mantp++) {
		if (src->mant.m32[0] == div->mant.m32[0]) {
			fp_div64(first, rem, 0, src->mant.m32[1], div->mant.m32[0]);

			fp_mul64(*mantp, dummy, first, fix);
			*mantp += fix;
		} else {
			fp_div64(first, rem, src->mant.m32[0], src->mant.m32[1], div->mant.m32[0]);

			fp_mul64(*mantp, dummy, first, fix);
		}

		fp_mul64(tmp.m32[0], tmp.m32[1], div->mant.m32[0], first - *mantp);
		fp_add64(tmp.m32[0], tmp.m32[1], 0, rem);
		tmp.m32[2] = 0;

		fp_mul64(tmp64.m32[0], tmp64.m32[1], *mantp, div->mant.m32[1]);
		fp_sub96c(tmp, 0, tmp64.m32[0], tmp64.m32[1]);

		src->mant.m32[0] = tmp.m32[1];
		src->mant.m32[1] = tmp.m32[2];

		while (!fp_sub96c(tmp, 0, div->mant.m32[0], div->mant.m32[1])) {
			src->mant.m32[0] = tmp.m32[1];
			src->mant.m32[1] = tmp.m32[2];
			*mantp += 1;
		}
	}
}
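
/*
 * Illustrative sketch only: what fp_dividemant() computes, restated
 * with GCC's unsigned __int128 (not available on 32-bit m68k, hence
 * the long-hand loop above).  src and div are normalized mantissae
 * with the high bit set; dest receives the four 32-bit digits of
 * floor(src * 2^96 / div), laid out as in the real code.
 * sketch_dividemant is a hypothetical name.
 */
static inline void sketch_dividemant(u32 dest[4], u64 src, u64 div)
{
	unsigned __int128 r;
	u64 q;

	dest[0] = src >= div;		/* integer bit: src/div < 2 */
	r = src - (dest[0] ? div : 0);

	r <<= 64;			/* extract 64 fraction bits ... */
	q = (u64)(r / div);
	dest[1] = (u32)(q >> 32);
	dest[2] = (u32)q;

	r = (r % div) << 32;		/* ... then 32 more */
	dest[3] = (u32)(r / div);
}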

static inline void fp_putmant128(struct fp_ext *dest, union fp_mant128 *src,
				 int shift)
{
	unsigned long tmp;

	switch (shift) {
	case 0:
		dest->mant.m64 = src->m64[0];
		dest->lowmant = src->m32[2] >> 24;
		if (src->m32[3] || (src->m32[2] << 8))
			dest->lowmant |= 1;
		break;
	case 1:
		asm volatile ("lsl.l #1,%0"
			: "=d" (tmp) : "0" (src->m32[2]));
		asm volatile ("roxl.l #1,%0"
			: "=d" (dest->mant.m32[1]) : "0" (src->m32[1]));
		asm volatile ("roxl.l #1,%0"
			: "=d" (dest->mant.m32[0]) : "0" (src->m32[0]));
		dest->lowmant = tmp >> 24;
		if (src->m32[3] || (tmp << 8))
			dest->lowmant |= 1;
		break;
	case 31:
		asm volatile ("lsr.l #1,%1; roxr.l #1,%0"
			: "=d" (dest->mant.m32[0])
			: "d" (src->m32[0]), "0" (src->m32[1]));
		asm volatile ("roxr.l #1,%0"
			: "=d" (dest->mant.m32[1]) : "0" (src->m32[2]));
		asm volatile ("roxr.l #1,%0"
			: "=d" (tmp) : "0" (src->m32[3]));
		dest->lowmant = tmp >> 24;
		if (src->m32[3] << 7)
			dest->lowmant |= 1;
		break;
	case 32:
		dest->mant.m32[0] = src->m32[1];
		dest->mant.m32[1] = src->m32[2];
		dest->lowmant = src->m32[3] >> 24;
		if (src->m32[3] << 8)
			dest->lowmant |= 1;
		break;
	}
}
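
/*
 * Illustrative sketch only: fp_putmant128() collapsed into portable C
 * with one unsigned __int128, covering the same four shift counts the
 * callers use (0 and 1 after a multiply, 31 and 32 after a divide).
 * As in the real code, the shifted value is assumed not to lose
 * significant bits off the top.
 */
static inline void sketch_putmant128(u64 *mant, u8 *low,
				     unsigned __int128 src, int shift)
{
	unsigned __int128 v = src << shift;

	*mant = (u64)(v >> 64);		/* top 64 bits become the mantissa */
	*low = (u8)(v >> 56);		/* the next 8 become the guard byte */
	if ((u64)v << 8)		/* anything below them is sticky */
		*low |= 1;
}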

#endif	/* MULTI_ARITH_H */