#ifndef _ASM_GENERIC_DIV64_H
#define _ASM_GENERIC_DIV64_H
/*
 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
 * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
 *
 * Optimization for constant divisors on 32-bit machines:
 * Copyright (C) 2006-2015 Nicolas Pitre
 *
 * The semantics of do_div() are:
 *
 * uint32_t do_div(uint64_t *n, uint32_t base)
 * {
 *	uint32_t remainder = *n % base;
 *	*n = *n / base;
 *	return remainder;
 * }
 *
 * NOTE: macro parameter n is evaluated multiple times,
 * beware of side effects!
 */
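
/*
 * Usage sketch (illustrative, not part of the original header):
 * do_div() modifies the dividend in place and returns the remainder.
 *
 *	uint64_t ns = 1000000123;
 *	uint32_t rem = do_div(ns, 1000000);
 *	// now ns == 1000 and rem == 123
 */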

#include <linux/types.h>
#include <linux/compiler.h>

#if BITS_PER_LONG == 64

# define do_div(n,base) ({					\
	uint32_t __base = (base);				\
	uint32_t __rem;						\
	__rem = ((uint64_t)(n)) % __base;			\
	(n) = ((uint64_t)(n)) / __base;				\
	__rem;							\
 })

#elif BITS_PER_LONG == 32

#include <linux/log2.h>

/*
 * If the divisor happens to be constant, we determine the appropriate
 * inverse at compile time to turn the division into a few inline
 * multiplications which ought to be much faster. This only pays off,
 * however, when compiling with a gcc version recent enough to perform
 * proper 64-bit constant propagation.
 *
 * (It is unfortunate that gcc doesn't perform all this internally.)
 */
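
/*
 * A worked illustration (not part of the original source): with a
 * constant divisor b = 7, ilog2(7) = 2 gives ___p = 4, and ___m is
 * computed as ceil((4 << 64) / 7).  The division n / 7 then reduces
 * to ((n * m) >> 64) / 4, using only multiplies and shifts, plus a
 * bias term when the exactness test below requires one.
 */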

#ifndef __div64_const32_is_OK
#define __div64_const32_is_OK (__GNUC__ >= 4)
#endif

#define __div64_const32(n, ___b)					\
({									\
	/*								\
	 * Multiplication by reciprocal of b: n / b = n * (p / b) / p	\
	 *								\
	 * We rely on the fact that most of this code gets optimized	\
	 * away at compile time due to constant propagation and only	\
	 * a few multiplication instructions should remain.		\
	 * Hence this monstrous macro (static inline doesn't always	\
	 * do the trick here).						\
	 */								\
	uint64_t ___res, ___x, ___t, ___m, ___n = (n);			\
	uint32_t ___p, ___bias;						\
									\
	/* determine MSB of b */					\
	___p = 1 << ilog2(___b);					\
									\
	/* compute m = ((p << 64) + b - 1) / b */			\
	___m = (~0ULL / ___b) * ___p;					\
	___m += (((~0ULL % ___b + 1) * ___p) + ___b - 1) / ___b;	\
									\
	/* one less than the dividend with highest result */		\
	___x = ~0ULL / ___b * ___b - 1;					\
									\
	/* test our ___m with res = m * x / (p << 64) */		\
	___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32;	\
	___t = ___res += (___m & 0xffffffff) * (___x >> 32);		\
	___res += (___x & 0xffffffff) * (___m >> 32);			\
	___t = (___res < ___t) ? (1ULL << 32) : 0;			\
	___res = (___res >> 32) + ___t;					\
	___res += (___m >> 32) * (___x >> 32);				\
	___res /= ___p;							\
									\
	/* Now sanitize and optimize what we've got. */			\
	if (~0ULL % (___b / (___b & -___b)) == 0) {			\
		/* special case, can be simplified to ... */		\
		___n /= (___b & -___b);					\
		___m = ~0ULL / (___b / (___b & -___b));			\
		___p = 1;						\
		___bias = 1;						\
	} else if (___res != ___x / ___b) {				\
		/*							\
		 * We can't get away without a bias to compensate	\
		 * for bit truncation errors. To avoid it we'd need an	\
		 * additional bit to represent m which would overflow	\
		 * a 64-bit variable.					\
		 *							\
		 * Instead we do m = p / b and n / b = (n * m + m) / p.	\
		 */							\
		___bias = 1;						\
		/* Compute m = (p << 64) / b */				\
		___m = (~0ULL / ___b) * ___p;				\
		___m += ((~0ULL % ___b + 1) * ___p) / ___b;		\
	} else {							\
		/*							\
		 * Reduce m / p, and try to clear bit 31 of m when	\
		 * possible, otherwise that'll need extra overflow	\
		 * handling later.					\
		 */							\
		uint32_t ___bits = -(___m & -___m);			\
		___bits |= ___m >> 32;					\
		___bits = (~___bits) << 1;				\
		/*							\
		 * If ___bits == 0 then setting bit 31 is unavoidable.	\
		 * Simply apply the maximum possible reduction in that	\
		 * case. Otherwise the MSB of ___bits indicates the	\
		 * best reduction we should apply.			\
		 */							\
		if (!___bits) {						\
			___p /= (___m & -___m);				\
			___m /= (___m & -___m);				\
		} else {						\
			___p >>= ilog2(___bits);			\
			___m >>= ilog2(___bits);			\
		}							\
		/* No bias needed. */					\
		___bias = 0;						\
	}								\
									\
	/*								\
	 * Now we have a combination of 2 conditions:			\
	 *								\
	 * 1) whether or not we need to apply a bias, and		\
	 *								\
	 * 2) whether or not there might be an overflow in the cross	\
	 *    product determined by (___m & ((1 << 63) | (1 << 31))).	\
	 *								\
	 * Select the best way to do (m_bias + m * n) / (1 << 64).	\
	 * From now on there will be actual runtime code generated.	\
	 */								\
	___res = __arch_xprod_64(___m, ___n, ___bias);			\
									\
	___res /= ___p;							\
})

#ifndef __arch_xprod_64
/*
 * Default C implementation for __arch_xprod_64()
 *
 * Prototype: uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
 * Semantic:  retval = ((bias ? m : 0) + m * n) >> 64
 *
 * The product is a 128-bit value, scaled down to 64 bits.
 * Assuming constant propagation to optimize away unused conditional code.
 * Architectures may provide their own optimized assembly implementation.
 */
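/*
 * Illustrative note (not in the original): the decomposition used below
 * splits m = m_hi * 2^32 + m_lo and n = n_hi * 2^32 + n_lo, so that
 *
 *	m * n = (m_hi * n_hi << 64)
 *		+ ((m_hi * n_lo + m_lo * n_hi) << 32)
 *		+ m_lo * n_lo
 *
 * and the >> 64 result is m_hi * n_hi plus the carries out of the
 * cross products and the low product.
 */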
static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
{
	uint32_t m_lo = m;
	uint32_t m_hi = m >> 32;
	uint32_t n_lo = n;
	uint32_t n_hi = n >> 32;
	uint64_t res, tmp;

	if (!bias) {
		res = ((uint64_t)m_lo * n_lo) >> 32;
	} else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
		/* there can't be any overflow here */
		res = (m + (uint64_t)m_lo * n_lo) >> 32;
	} else {
		res = m + (uint64_t)m_lo * n_lo;
		tmp = (res < m) ? (1ULL << 32) : 0;
		res = (res >> 32) + tmp;
	}

	if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
		/* there can't be any overflow here */
		res += (uint64_t)m_lo * n_hi;
		res += (uint64_t)m_hi * n_lo;
		res >>= 32;
	} else {
		tmp = res += (uint64_t)m_lo * n_hi;
		res += (uint64_t)m_hi * n_lo;
		tmp = (res < tmp) ? (1ULL << 32) : 0;
		res = (res >> 32) + tmp;
	}

	res += (uint64_t)m_hi * n_hi;

	return res;
}
#endif

#ifndef __div64_32
extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
#endif

/* The unnecessary pointer compare is there
 * to check for type safety (n must be 64-bit)
 */
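
/*
 * For instance (illustrative, hypothetical snippet), passing a 32-bit
 * variable makes the pointer compare trigger a compiler warning about
 * comparison of distinct pointer types:
 *
 *	uint32_t x = 100;
 *	do_div(x, 7);		// warns: x is not 64-bit
 */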
# define do_div(n,base) ({					\
	uint32_t __base = (base);				\
	uint32_t __rem;						\
	(void)(((typeof((n)) *)0) == ((uint64_t *)0));		\
	if (__builtin_constant_p(__base) &&			\
	    is_power_of_2(__base)) {				\
		__rem = (n) & (__base - 1);			\
		(n) >>= ilog2(__base);				\
	} else if (__div64_const32_is_OK &&			\
		   __builtin_constant_p(__base) &&		\
		   __base != 0) {				\
		uint32_t __res_lo, __n_lo = (n);		\
		(n) = __div64_const32(n, __base);		\
		/* the remainder can be computed with 32-bit regs */ \
		__res_lo = (n);					\
		__rem = __n_lo - __res_lo * __base;		\
	} else if (likely(((n) >> 32) == 0)) {			\
		__rem = (uint32_t)(n) % __base;			\
		(n) = (uint32_t)(n) / __base;			\
	} else							\
		__rem = __div64_32(&(n), __base);		\
	__rem;							\
 })

#else /* BITS_PER_LONG == ?? */

# error do_div() does not yet support the C64

#endif /* BITS_PER_LONG */

/* Wrapper for do_div(). Doesn't modify the dividend and returns
 * the quotient, not the remainder.
 */
static inline uint64_t lldiv(uint64_t dividend, uint32_t divisor)
{
	uint64_t __res = dividend;
	do_div(__res, divisor);
	return __res;
}
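
/*
 * Usage sketch (illustrative, not part of the original header):
 *
 *	uint64_t q = lldiv(1000000123ULL, 1000000);
 *	// q == 1000; the remainder (123) is discarded
 */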

#endif /* _ASM_GENERIC_DIV64_H */