/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __VDSO_MATH64_H
#define __VDSO_MATH64_H

/*
 * Iterative u64/u32 division: computes dividend / divisor by repeated
 * subtraction, storing the remainder via @remainder and returning the
 * quotient. Only efficient when the quotient is expected to be small.
 */
static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* The following asm() prevents the compiler from
		   optimising this loop into a modulo operation. */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}

#endif /* __VDSO_MATH64_H */