/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_DIV64
#define __ASM_ARM_DIV64

#include <linux/types.h>
#include <asm/compiler.h>

/*
 * The semantics of __div64_32() are:
 *
 * uint32_t __div64_32(uint64_t *n, uint32_t base)
 * {
 *	uint32_t remainder = *n % base;
 *	*n = *n / base;
 *	return remainder;
 * }
 *
 * In other words, a 64-bit dividend with a 32-bit divisor producing
 * a 64-bit result and a 32-bit remainder. To accomplish this optimally
 * we override the generic version in lib/div64.c to call our __do_div64
 * assembly implementation with a completely non-standard calling
 * convention for arguments and results (beware).
 */

/*
 * __xh/__xl name the registers holding the high and low halves of the
 * 64-bit value in r0:r1; which is which depends on endianness.
 */
#ifdef __ARMEB__
#define __xh "r0"
#define __xl "r1"
#else
#define __xl "r0"
#define __xh "r1"
#endif

static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
{
	register unsigned int __base asm("r4") = base;
	register unsigned long long __n asm("r0") = *n;
	register unsigned long long __res asm("r2");
	register unsigned int __rem asm(__xh);
	asm(	__asmeq("%0", __xh)
		__asmeq("%1", "r2")
		__asmeq("%2", "r0")
		__asmeq("%3", "r4")
		"bl	__do_div64"
		: "=r" (__rem), "=r" (__res)
		: "r" (__n), "r" (__base)
		: "ip", "lr", "cc");
	*n = __res;
	return __rem;
}
#define __div64_32 __div64_32
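
/*
 * For illustration only: callers do not invoke __div64_32() directly.
 * The do_div() macro (defined below for OABI kernels, otherwise in
 * asm-generic/div64.h) ends up calling it for non-constant divisors:
 *
 *	uint64_t ns = ...;
 *	uint32_t rem = do_div(ns, NSEC_PER_SEC);
 *
 * after which ns holds the quotient and rem the remainder.
 */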

#if !defined(CONFIG_AEABI)

/*
 * In OABI configurations, some uses of the do_div function
 * cause gcc to run out of registers. To work around that, we
 * can force the use of the out-of-line version for
 * configurations that build an OABI kernel.
 */
#define do_div(n, base) __div64_32(&(n), base)

#else

/*
 * gcc versions earlier than 4.0 are simply too problematic for the
 * __div64_const32() code in asm-generic/div64.h. First there is gcc
 * PR 15089, which tends to trigger on the more complex constructs:
 * spurious .global __udivsi3 directives are inserted even if none of
 * those symbols are referenced in the generated code. And those gcc
 * versions are not able to do constant propagation on long long
 * values anyway.
 */

#define __div64_const32_is_OK (__GNUC__ >= 4)
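
/*
 * Background, as a rough sketch: when __div64_const32_is_OK, the
 * constant-divisor path in asm-generic/div64.h turns the division into
 * a multiplication by a precomputed scaled reciprocal m of base,
 * conceptually
 *
 *	n / base  ==  high 64 bits of (m * n + optional bias),
 *
 * possibly followed by a small right shift. The hard part is the top
 * half of that 64x64->128 bit product, which is exactly what
 * __arch_xprod_64() below computes.
 */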

static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
{
	unsigned long long res;
	register unsigned int tmp asm("ip") = 0;

	if (!bias) {
		asm (	"umull	%Q0, %R0, %Q1, %Q2\n\t"
			"mov	%Q0, #0"
			: "=&r" (res)
			: "r" (m), "r" (n)
			: "cc");
	} else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
		/* here the bias can be added with no risk of overflow */
		res = m;
		asm (	"umlal	%Q0, %R0, %Q1, %Q2\n\t"
			"mov	%Q0, #0"
			: "+&r" (res)
			: "r" (m), "r" (n)
			: "cc");
	} else {
		asm (	"umull	%Q0, %R0, %Q2, %Q3\n\t"
			"cmn	%Q0, %Q2\n\t"
			"adcs	%R0, %R0, %R2\n\t"
			"adc	%Q0, %1, #0"
			: "=&r" (res), "+&r" (tmp)
			: "r" (m), "r" (n)
			: "cc");
	}

	if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
		/* no carry is possible out of these accumulations */
		asm (	"umlal	%R0, %Q0, %R1, %Q2\n\t"
			"umlal	%R0, %Q0, %Q1, %R2\n\t"
			"mov	%R0, #0\n\t"
			"umlal	%Q0, %R0, %R1, %R2"
			: "+&r" (res)
			: "r" (m), "r" (n)
			: "cc");
	} else {
		asm (	"umlal	%R0, %Q0, %R2, %Q3\n\t"
			"umlal	%R0, %1, %Q2, %R3\n\t"
			"mov	%R0, #0\n\t"
			"adds	%Q0, %1, %Q0\n\t"
			"adc	%R0, %R0, #0\n\t"
			"umlal	%Q0, %R0, %R2, %R3"
			: "+&r" (res), "+&r" (tmp)
			: "r" (m), "r" (n)
			: "cc");
	}

	return res;
}
#define __arch_xprod_64 __arch_xprod_64
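
/*
 * A portable sketch of what the assembly above computes, using a
 * hypothetical 128-bit type (which 32-bit ARM compilers do not offer,
 * hence the hand-written umull/umlal sequences):
 *
 *	uint64_t xprod_64_ref(uint64_t m, uint64_t n, bool bias)
 *	{
 *		unsigned __int128 p = (unsigned __int128)m * n;
 *		if (bias)
 *			p += m;
 *		return p >> 64;
 *	}
 *
 * i.e. the high 64 bits of m * n, with m added in first when bias is
 * set. The branches on bits 31 and 63 of m select cheaper sequences
 * whenever intermediate carries are provably impossible.
 */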

#include <asm-generic/div64.h>

#endif /* !CONFIG_AEABI */

#endif /* __ASM_ARM_DIV64 */