Lines matching "+full:bit +full:-shift" in include/linux/math64.h
 * div_u64_rem - unsigned 64bit divide with 32bit divisor and remainder
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide.
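On 64bit architectures this reduces to plain C division. A minimal user-space sketch of the documented contract (quotient returned, remainder written through the pointer), with stand-in typedefs for the kernel's u64/u32:

#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

/* Quotient is returned; the remainder comes back through *remainder. */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}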
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
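The signed variant follows C's truncating division, so the remainder takes the sign of the dividend. A sketch under the same stand-in typedef assumptions:

#include <stdint.h>

typedef int64_t s64;
typedef int32_t s32;

/* Truncating division: div_s64_rem(-7, 2, &r) returns -3 with r == -1. */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}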
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
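With a full 64bit divisor, the 64bit-arch version is again plain division, while 32bit archs need a genuine software fallback. A sketch of the contract only:

#include <stdint.h>

typedef uint64_t u64;

static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}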
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * div64_s64 - signed 64bit divide with 64bit divisor
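These two return only the quotient. They exist because an open-coded u64-by-u64 division on a 32bit kernel would pull in libgcc's __udivdi3, which the kernel does not link against, so callers go through the helpers instead. A usage sketch; the wrapper names here are illustrative, not kernel API:

/* Quotient-only division on full 64bit operands. */
static u64 mean_u64(u64 total, u64 count)
{
	return div64_u64(total, count);		/* unsigned u64 / u64 */
}

static s64 mean_s64(s64 total, s64 count)
{
	return div64_s64(total, count);		/* signed s64 / s64 */
}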
 * div_u64 - unsigned 64bit divide with 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 * div_s64 - signed 64bit divide with 32bit divisor
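A sketch of the intended pattern: when the divisor provably fits in 32 bits, these let 32bit archs run a cheaper 64-by-32 division instead of a full 64-by-64 one. The conversion helpers and constants below are illustrative:

/* Nanoseconds to milliseconds: the divisor 1000000 fits in a u32. */
static inline u64 ns_to_ms(u64 ns)
{
	return div_u64(ns, 1000000);
}

/* Signed variant for deltas that may be negative. */
static inline s64 delta_ns_to_ms(s64 delta_ns)
{
	return div_s64(delta_ns, 1000000);
}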
In __iter_div_u64_rem():
	dividend -= divisor;
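That subtraction is the core of an iterative divider meant for callers that expect a small quotient. A reconstruction of the surrounding loop as a sketch; the in-kernel version additionally uses an empty asm() barrier so the compiler cannot collapse the loop back into a division instruction:

#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

/* Division by repeated subtraction; only sane when the quotient is small. */
static inline u32 __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		dividend -= divisor;
		ret++;
	}
	*remainder = dividend;
	return ret;
}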
 * Many a GCC version messes this up and generates a 64x64 mult :-(
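That comment refers to the widening 32x32->64 multiply: written as (u64)a * b, some GCC versions extend both operands first and emit a full 64x64 multiplication. The generic helper is exactly the naive expression, and architectures can override it with inline asm. A self-contained sketch:

#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

/* Widening multiply: both operands are 32bit, the product is 64bit. */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}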
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
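Both one-liners rely on a compiler-provided 128-bit type, so they sit behind a feature check in the header. A typical use, sketched here with an illustrative wrapper name: clocksource-style cycles-to-nanoseconds scaling, where mult and shift together encode a fixed-point factor:

/* ns = (cycles * mult) >> shift, as in clocksource mult/shift pairs. */
static inline u64 cycles_to_ns(u64 cycles, u32 mult, u32 shift)
{
	return mul_u64_u32_shr(cycles, mult, shift);
}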
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	...
	ret = mul_u32_u32(al, mul) >> shift;
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);
	...
}
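Filling in the elided declarations, a self-contained reconstruction of this fallback: a is split into 32bit halves al/ah, each half is multiplied by mul via the widening helper, and the partial products are recombined around the shift. The shift <= 32 restriction is my reading of the (32 - shift) term, not something the listing states:

#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}

/* (a * mul) >> shift without a 64x64 multiply; assumes shift <= 32. */
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 al = a, ah = a >> 32;
	u64 ret;

	ret = mul_u32_u32(al, mul) >> shift;	/* low partial product */
	if (ah)					/* skip if the high half is zero */
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}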
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
	...
	/*
	 * Each of these lines computes a 64-bit intermediate result into "c",
	 * starting at bits 32-95. The low 32 bits go into the result of the
	 * multiplication, the high 32 bits are carried into the next step.
	 */
	...
	/*
	 * The 128-bit result of the multiplication is in rl.ll and rh.ll;
	 * shift it right and throw away the high part of the result.
	 */
	if (shift == 0)
		return rl.ll;
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
	return rh.ll >> (shift & 63);
}
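Putting the fragments together, a compact sketch of the whole fallback technique: four 32x32->64 partial products form the 128-bit product in two u64 halves, then the three return statements above apply the shift (shift == 0 is special-cased because shifting a u64 left by 64 would be undefined). Variable names here are mine, not the kernel's union-based ones:

#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}

/* Full 128-bit product of a and b in (hi:lo), then (a * b) >> shift. */
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
	u64 rl = mul_u32_u32((u32)a, (u32)b);			/* al*bl: bits 0..63   */
	u64 rm = mul_u32_u32((u32)a, (u32)(b >> 32));		/* al*bh: bits 32..95  */
	u64 rn = mul_u32_u32((u32)(a >> 32), (u32)b);		/* ah*bl: bits 32..95  */
	u64 rh = mul_u32_u32((u32)(a >> 32), (u32)(b >> 32));	/* ah*bh: bits 64..127 */
	u64 c, lo, hi;

	/* Fold the two middle products into the low and high halves. */
	c  = (rl >> 32) + (u32)rm + (u32)rn;	/* bits 32..63 plus carry */
	lo = (u32)rl | (c << 32);
	hi = rh + (rm >> 32) + (rn >> 32) + (c >> 32);

	if (shift == 0)
		return lo;
	if (shift < 64)
		return (lo >> shift) | (hi << (64 - shift));
	return hi >> (shift & 63);
}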
In mul_u64_u32_div():
	/* Bits 32-63 of the result will be in rh.l.low. */
	/* Bits 0-31 of the result will be in rl.l.low. */
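Those two comments come from mul_u64_u32_div assembling its quotient from 32bit pieces; the overall contract is (a * mul) / divisor evaluated over the full intermediate product. Where a 128-bit type is available the whole function collapses to one line. A sketch of that shortcut, not of the piecewise fallback the comments describe:

#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

/* (a * mul) / divisor with a full-width intermediate product.
 * Caller must ensure the quotient itself fits in 64 bits. */
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
	return (u64)(((unsigned __int128)a * mul) / divisor);
}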