xref: /OK3568_Linux_fs/kernel/arch/powerpc/include/asm/delay.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-or-later */
2*4882a593Smuzhiyun #ifndef _ASM_POWERPC_DELAY_H
3*4882a593Smuzhiyun #define _ASM_POWERPC_DELAY_H
4*4882a593Smuzhiyun #ifdef __KERNEL__
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #include <linux/processor.h>
7*4882a593Smuzhiyun #include <asm/time.h>
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun /*
10*4882a593Smuzhiyun  * Copyright 1996, Paul Mackerras.
11*4882a593Smuzhiyun  * Copyright (C) 2009 Freescale Semiconductor, Inc. All rights reserved.
12*4882a593Smuzhiyun  *
13*4882a593Smuzhiyun  * PPC64 Support added by Dave Engebretsen, Todd Inglett, Mike Corrigan,
14*4882a593Smuzhiyun  * Anton Blanchard.
15*4882a593Smuzhiyun  */
16*4882a593Smuzhiyun 
/* Spin for @loops iterations of the arch delay loop (defined in arch code). */
extern void __delay(unsigned long loops);
/* Busy-wait for at least @usecs microseconds (defined in arch code). */
extern void udelay(unsigned long usecs);
19*4882a593Smuzhiyun 
/*
 * On shared processor machines the generic implementation of mdelay can
 * result in large errors. While each iteration of the loop inside mdelay
 * is supposed to take 1ms, the hypervisor could sleep our partition for
 * longer (eg 10ms). With the right timing these errors can add up.
 *
 * Since there is no 32bit overflow issue on 64bit kernels, just call
 * udelay directly.
 */
#ifdef CONFIG_PPC64
/* (n) is parenthesized so expression arguments (eg a + b) expand correctly. */
#define mdelay(n)	udelay((n) * 1000)
#endif
32*4882a593Smuzhiyun 
/**
 * spin_event_timeout - spin until a condition gets true or a timeout elapses
 * @condition: a C expression to evaluate
 * @timeout: timeout, in microseconds
 * @delay: the number of microseconds to delay between each evaluation of
 *         @condition
 *
 * The process spins until the condition evaluates to true (non-zero) or the
 * timeout elapses.  The return value of this macro is the value of
 * @condition when the loop terminates.  This allows you to determine why the
 * loop terminated.  If the return value is zero, then you know a timeout has
 * occurred.
 *
 * The primary purpose of this macro is to poll on a hardware register
 * until a status bit changes.  The timeout ensures that the loop still
 * terminates even if the bit never changes.  The delay is for devices that
 * need a delay in between successive reads.
 *
 * gcc will optimize out the if-statement if @delay is a constant.
 */
#define spin_event_timeout(condition, timeout, delay)                          \
({                                                                             \
	typeof(condition) __ret;                                               \
	/* (timeout) is parenthesized: without the parens an expression       \
	 * argument such as "a + b" would bind wrongly to the multiply. */    \
	unsigned long __loops = tb_ticks_per_usec * (timeout);                 \
	unsigned long __start = mftb();                                        \
                                                                               \
	if (delay) {                                                           \
		while (!(__ret = (condition)) &&                               \
				(tb_ticks_since(__start) <= __loops))          \
			udelay(delay);                                         \
	} else {                                                               \
		spin_begin();                                                  \
		while (!(__ret = (condition)) &&                               \
				(tb_ticks_since(__start) <= __loops))          \
			spin_cpu_relax();                                      \
		spin_end();                                                    \
	}                                                                      \
	/* Re-check once after the loop so a success that lands exactly at    \
	 * the timeout boundary is not misreported as a timeout. */           \
	if (!__ret)                                                            \
		__ret = (condition);                                           \
	__ret;                                                                 \
})
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun #endif /* __KERNEL__ */
76*4882a593Smuzhiyun #endif /* _ASM_POWERPC_DELAY_H */
77