/*
 * Copyright (C) 2000, 2004, 2021 Maciej W. Rozycki
 * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org)
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_DIV64_H
#define __ASM_DIV64_H

#include <asm/bitsperlong.h>

#if BITS_PER_LONG == 32

/*
 * No traps on overflows for any of these...
 */

#define do_div64_32(res, high, low, base) ({				\
	unsigned long __cf, __tmp, __tmp2, __i;				\
	unsigned long __quot32, __mod32;				\
									\
	__asm__(							\
	"	.set	push					\n"	\
	"	.set	noat					\n"	\
	"	.set	noreorder				\n"	\
	"	move	%2, $0					\n"	\
	"	move	%3, $0					\n"	\
	"	b	1f					\n"	\
	"	 li	%4, 0x21				\n"	\
	"0:							\n"	\
	"	sll	$1, %0, 0x1				\n"	\
	"	srl	%3, %0, 0x1f				\n"	\
	"	or	%0, $1, %5				\n"	\
	"	sll	%1, %1, 0x1				\n"	\
	"	sll	%2, %2, 0x1				\n"	\
	"1:							\n"	\
	"	bnez	%3, 2f					\n"	\
	"	 sltu	%5, %0, %z6				\n"	\
	"	bnez	%5, 3f					\n"	\
	"2:							\n"	\
	"	addiu	%4, %4, -1				\n"	\
	"	subu	%0, %0, %z6				\n"	\
	"	addiu	%2, %2, 1				\n"	\
	"3:							\n"	\
	"	bnez	%4, 0b					\n"	\
	"	 srl	%5, %1, 0x1f				\n"	\
	"	.set	pop"						\
	: "=&r" (__mod32), "=&r" (__tmp),				\
	  "=&r" (__quot32), "=&r" (__cf),				\
	  "=&r" (__i), "=&r" (__tmp2)					\
	: "Jr" (base), "0" (high), "1" (low));				\
									\
	(res) = __quot32;						\
	__mod32;							\
})

#define __div64_32(n, base) ({						\
	unsigned long __upper, __low, __high, __radix;			\
	unsigned long long __quot;					\
	unsigned long long __div;					\
	unsigned long __mod;						\
									\
	__div = (*n);							\
	__radix = (base);						\
									\
	__high = __div >> 32;						\
	__low = __div;							\
									\
	if (__high < __radix) {						\
		__upper = __high;					\
		__high = 0;						\
	} else {							\
		__upper = __high % __radix;				\
		__high /= __radix;					\
	}								\
									\
	__mod = do_div64_32(__low, __upper, __low, __radix);		\
									\
	__quot = __high;						\
	__quot = __quot << 32 | __low;					\
	(*n) = __quot;							\
	__mod;								\
})

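/*
 * Usage sketch (illustrative, assuming only the generic do_div()
 * wrapper from <asm-generic/div64.h>, included below): callers do not
 * normally invoke do_div64_32() or __div64_32() directly; do_div()
 * expands to __div64_32() here for the general 64-by-32 case, e.g.:
 *
 *	u64 n = 123456789ULL;		// hypothetical dividend
 *	u32 rem = do_div(n, 1000);	// n becomes the quotient,
 *					// rem the 32-bit remainder
 *
 * do_div64_32(res, high, low, base) divides the 64-bit value
 * (high << 32 | low) by the 32-bit base with a shift-and-subtract
 * loop; the caller must keep high < base so the quotient fits in
 * "res", and the macro itself evaluates to the remainder.
 */
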
#endif /* BITS_PER_LONG == 32 */

#include <asm-generic/div64.h>

#endif /* __ASM_DIV64_H */