/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_DELAY_H
#define _M68K_DELAY_H

#include <asm/param.h>

/*
 * Copyright (C) 1994 Hamish Macdonald
 * Copyright (C) 2004 Greg Ungerer <gerg@uclinux.com>
 *
 * Delay routines, using a pre-computed "loops_per_jiffy" value.
 */

#if defined(CONFIG_COLDFIRE)
/*
 * The ColdFire runs the delay loop at significantly different speeds
 * depending upon long word alignment or not.  We'll pad it to
 * long word alignment which is the faster version.
 * The 0x4a8e is of course a 'tstl %fp' instruction. This is better
 * than using a NOP (0x4e71) instruction because it executes in one
 * cycle not three and doesn't allow for an arbitrary delay waiting
 * for bus cycles to finish. Also fp/a6 isn't likely to cause a
 * stall waiting for the register to become valid if such is added
 * to the coldfire at some stage.
 */
#define	DELAY_ALIGN	".balignw 4, 0x4a8e\n\t"
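/*
 * Note: .balignw pads to the requested boundary using the 16-bit fill
 * value given as its second operand, so any padding the assembler
 * inserts decodes as the 'tstl %fp' instruction described above.
 */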
#else
/*
 * No instruction alignment required for other m68k types.
 */
#define	DELAY_ALIGN
#endif

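/*
 * "jcc 1b" branches while the carry is clear, i.e. until subql borrows
 * past zero, so the loop body (one subql plus one jcc) runs loops + 1
 * times.
 */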
static inline void __delay(unsigned long loops)
{
	__asm__ __volatile__ (
		DELAY_ALIGN
		"1: subql #1,%0\n\t"
		"jcc 1b"
		: "=d" (loops)
		: "0" (loops));
}

extern void __bad_udelay(void);


#ifdef CONFIG_CPU_HAS_NO_MULDIV64
/*
 * The simpler m68k and ColdFire processors do not have a 32*32->64
 * multiply instruction. So we need to handle them a little differently.
 * We use a bit of shifting and a single 32*32->32 multiply to get close.
 */
#define	HZSCALE		(268435456 / (1000000 / HZ))

#define	__const_udelay(u) \
	__delay(((((u) * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6)
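
/*
 * Illustrative arithmetic (assuming HZ = 100): HZSCALE is then
 * 268435456 / 10000 = 26843, i.e. roughly 2^28 * HZ / 1000000. The
 * three shifts above discard 11 + 11 + 6 = 28 bits, cancelling that
 * 2^28 factor, so the result approximates
 * u * HZ * loops_per_jiffy / 1000000, the number of delay loops in
 * u microseconds.
 */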

#else

static inline void __xdelay(unsigned long xloops)
{
	unsigned long tmp;

	__asm__ ("mulul %2,%0:%1"
		: "=d" (xloops), "=d" (tmp)
		: "d" (xloops), "1" (loops_per_jiffy));
	__delay(xloops * HZ);
}
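
/*
 * "mulul %2,%0:%1" is the 32*32->64 multiply: the high 32 bits of
 * xloops * loops_per_jiffy land in %0 (xloops), i.e. the product
 * divided by 2^32. With xloops = n * 4295, roughly n * 2^32 / 1000000
 * from __const_udelay() below, that high word is about
 * n * loops_per_jiffy / 1000000, and scaling by HZ turns
 * loops-per-jiffy into the loop count for n microseconds.
 */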

/*
 * The definition of __const_udelay is specifically made a macro so that
 * the const factor (4295 = 2**32 / 1000000) can be optimized out when
 * the delay is a const.
 */
#define	__const_udelay(n)	(__xdelay((n) * 4295))

#endif

static inline void __udelay(unsigned long usecs)
{
	__const_udelay(usecs);
}

/*
 * Use only for very small delays ( < 1 msec).  Should probably use a
 * lookup table, really, as the multiplications take much too long with
 * short delays.  This is a "reasonable" implementation, though (and the
 * first constant multiplication gets optimized away if the delay is
 * a constant)
 */
#define udelay(n) (__builtin_constant_p(n) ? \
	((n) > 20000 ? __bad_udelay() : __const_udelay(n)) : __udelay(n))
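
/*
 * Illustrative usage (not from this file): udelay(10) in a driver has
 * a compile-time-constant argument, so it expands to __const_udelay(10);
 * udelay(x) with a non-constant x goes through __udelay() instead. A
 * constant argument above 20000 expands to a call to the deliberately
 * undefined __bad_udelay(), turning an over-long busy-wait into a
 * link-time error.
 */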

/*
 * nanosecond delay:
 *
 * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6) is the number of loops
 * per microsecond
 *
 * 1000 / ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6) is the number of
 * nanoseconds per loop
 *
 * So n / ( 1000 / ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6) ) would
 * be the number of loops for n nanoseconds
 */

/*
 * The simpler m68k and ColdFire processors do not have a 32*32->64
 * multiply instruction. So we need to handle them a little differently.
 * We use a bit of shifting and a single 32*32->32 multiply to get close.
 * This is a macro so that the const version can factor out the first
 * multiply and shift.
 */
#define	HZSCALE		(268435456 / (1000000 / HZ))

static inline void ndelay(unsigned long nsec)
{
	__delay(DIV_ROUND_UP(nsec *
			     ((((HZSCALE) >> 11) *
			       (loops_per_jiffy >> 11)) >> 6),
			     1000));
}
#define ndelay(n) ndelay(n)
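
/*
 * Worked numbers (illustrative, assuming HZ = 100 and a loops_per_jiffy
 * of 2097152, i.e. 2^21, chosen for easy arithmetic): loops per
 * microsecond is ((26843 >> 11) * (2097152 >> 11)) >> 6
 * = (13 * 1024) >> 6 = 208, so ndelay(500) spins for
 * DIV_ROUND_UP(500 * 208, 1000) = 104 loops.
 */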

#endif /* defined(_M68K_DELAY_H) */