/* xref: /OK3568_Linux_fs/u-boot/include/linux/math64.h (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <div64.h>
#include <linux/bitops.h>
#include <linux/types.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y)   div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
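
/*
 * Usage sketch (illustrative, not part of the upstream header): split a
 * nanosecond count into whole seconds plus a leftover remainder.  The
 * variable names below are hypothetical caller-side names.
 *
 *	u64 ns = 3000000001ULL;
 *	u32 rem;
 *	u64 sec = div_u64_rem(ns, 1000000000U, &rem);	// sec == 3, rem == 1
 */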

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y)   div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif
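
/*
 * Note (added for clarity): do_div() from <div64.h> is a macro that divides
 * the 64-bit lvalue in place, leaving the quotient in its first argument and
 * returning the 32-bit remainder, which is why the body above can simply
 * return "dividend" after the call.  A minimal caller-side sketch with
 * hypothetical names:
 *
 *	u64 bytes = 10000000000ULL;
 *	u32 rem;
 *	u64 mib = div_u64_rem(bytes, 1U << 20, &rem);
 */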

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif
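
/*
 * Usage sketch (illustrative): prefer div_u64() over a plain "/" whenever the
 * divisor fits in 32 bits, e.g. converting nanoseconds to microseconds.  The
 * variable names are hypothetical.
 *
 *	u64 ns = 1234567ULL;
 *	u64 us = div_u64(ns, 1000);	// 1234
 */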

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* The following asm() prevents the compiler from
		   optimising this loop into a modulo operation.  */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}
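
/*
 * Note (added for clarity): the subtraction loop above runs once per unit of
 * quotient, so it is only a sensible choice when the quotient is expected to
 * be small (dividend not much larger than divisor); for general values use
 * div_u64_rem() instead.  A hypothetical sketch:
 *
 *	u64 rem;
 *	u32 secs = __iter_div_u64_rem(elapsed_ns, 1000000000U, &rem);
 */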

#ifndef mul_u32_u32
/*
 * Many a GCC version messes this up and generates a 64x64 mult :-(
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#endif
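
/*
 * Note (added for clarity): the cast widens one operand *before* the multiply
 * so the product is computed in 64 bits; without it, "a * b" would be a
 * 32x32->32 multiplication and the high bits would be lost.  For example,
 * with a = b = 0x10000 the plain 32-bit product wraps to 0, while
 * mul_u32_u32(0x10000, 0x10000) yields 0x100000000ULL.
 */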

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */
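
/*
 * Usage sketch (illustrative): these helpers implement the usual
 * multiply-then-shift pattern for fixed-point scaling without overflowing
 * 64 bits, e.g. scaling a cycle count by a fractional multiplier.  The
 * constant below is hypothetical.
 *
 *	u64 scaled = mul_u64_u32_shr(cycles, 0x80000000U, 32);	// cycles / 2
 */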

#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 ah, al;
	u64 ret;

	al = a;
	ah = a >> 32;

	ret = mul_u32_u32(al, mul) >> shift;
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_shr */
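
/*
 * Note (added for clarity): this fallback splits a into 32-bit halves, so
 * a * mul = al * mul + (ah * mul << 32), and applies the shift to each
 * partial product separately.  The "<< (32 - shift)" term is only well
 * defined for shift <= 32, which appears to be an implicit assumption of
 * this variant.
 */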

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} rl, rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

	rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
	rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
	rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
	rh.ll = mul_u32_u32(a0.l.high, b0.l.high);

	/*
	 * Each of these lines computes a 64-bit intermediate result into "c",
	 * starting at bits 32-95.  The low 32-bits go into the result of the
	 * multiplication, the high 32-bits are carried into the next step.
	 */
	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	/*
	 * The 128-bit result of the multiplication is in rl.ll and rh.ll,
	 * shift it right and throw away the high part of the result.
	 */
	if (shift == 0)
		return rl.ll;
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
	return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */
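
/*
 * Note (added for clarity): the four partial products above are the standard
 * schoolbook decomposition of a 64x64->128 multiply,
 *
 *	a * b = (ah*bh << 64) + ((ah*bl + al*bh) << 32) + al*bl
 *
 * where ah/al (bh/bl) are the high/low 32-bit halves of a (b); rh.ll:rl.ll
 * then holds the full 128-bit product before the right shift is applied.
 */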

#endif

#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} u, rl, rh;

	u.ll = a;
	rl.ll = mul_u32_u32(u.l.low, mul);
	rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;

	/* Bits 32-63 of the result will be in rh.l.low. */
	rl.l.high = do_div(rh.ll, divisor);

	/* Bits 0-31 of the result will be in rl.l.low. */
	do_div(rl.ll, divisor);

	rl.l.high = rh.l.low;
	return rl.ll;
}
#endif /* mul_u64_u32_div */
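
/*
 * Usage sketch (illustrative): mul_u64_u32_div() computes a * mul / divisor
 * through a 96-bit intermediate product, so it stays exact even when a * mul
 * would not fit in 64 bits, as long as the final quotient does.  A typical
 * caller-side pattern with hypothetical names:
 *
 *	u64 child_rate = mul_u64_u32_div(parent_rate, duty_num, duty_den);
 */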

#endif /* _LINUX_MATH64_H */