/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic barrier definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but they serve more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <asm/rwonce.h>

#ifndef nop
#define nop()	asm volatile ("nop")
#endif

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb()	barrier()
#endif

#ifndef rmb
#define rmb()	mb()
#endif

#ifndef wmb
#define wmb()	mb()
#endif

#ifndef dma_rmb
#define dma_rmb()	rmb()
#endif

#ifndef dma_wmb
#define dma_wmb()	wmb()
#endif
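
/*
 * Illustrative sketch (not part of this header): a typical use of the
 * dma_*() barriers when sharing a descriptor ring in coherent memory
 * with a device. The structure and field names (desc, addr, len, ready)
 * are hypothetical and only serve to show the intended ordering.
 *
 *	// Producer: publish a buffer to the device.
 *	desc->addr = cpu_to_le64(buf_dma);
 *	desc->len  = cpu_to_le32(buf_len);
 *	dma_wmb();			// descriptor fields visible before...
 *	desc->ready = cpu_to_le32(1);	// ...the device sees the ready flag
 *
 *	// Consumer: only read the payload after observing the flag.
 *	if (le32_to_cpu(desc->ready)) {
 *		dma_rmb();		// order the flag read before data reads
 *		process(le64_to_cpu(desc->addr), le32_to_cpu(desc->len));
 *	}
 */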

#ifndef __smp_mb
#define __smp_mb()	mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb()	rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb()	wmb()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb()	__smp_mb()
#endif

#ifndef smp_rmb
#define smp_rmb()	__smp_rmb()
#endif

#ifndef smp_wmb
#define smp_wmb()	__smp_wmb()
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb()	barrier()
#endif

#ifndef smp_rmb
#define smp_rmb()	barrier()
#endif

#ifndef smp_wmb
#define smp_wmb()	barrier()
#endif

#endif	/* CONFIG_SMP */

#ifndef __smp_store_mb
#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic()	__smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic()	__smp_mb()
#endif
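
/*
 * Illustrative sketch (not part of this header): the public
 * smp_mb__before_atomic()/smp_mb__after_atomic() wrappers defined below
 * are meant to be paired with atomic RMW operations that do not return
 * a value (and hence are not fully ordered on their own), such as
 * atomic_dec() or clear_bit(). The work/dev structures and the
 * wake_up_waiters() helper are hypothetical.
 *
 *	WRITE_ONCE(work->done, 1);
 *	smp_mb__before_atomic();	// order the store above before...
 *	atomic_dec(&work->pending);	// ...the (unordered) atomic RMW
 *
 *	clear_bit(FLAG_BUSY, &dev->flags);
 *	smp_mb__after_atomic();		// order the bit clear before later accesses
 *	wake_up_waiters(dev);		// hypothetical helper
 */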

#ifndef __smp_store_release
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	(typeof(*p))___p1;						\
})
#endif

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value)  __smp_store_mb(var, value)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	__smp_mb__before_atomic()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	__smp_mb__after_atomic()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) __smp_store_release(p, v)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	(typeof(*p))___p1;						\
})
#endif

#endif	/* CONFIG_SMP */
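
/*
 * Illustrative sketch (not part of this header): the classic
 * message-passing pattern built from smp_store_release() and
 * smp_load_acquire(). The msg structure and use() consumer are
 * hypothetical.
 *
 *	// Writer: make the payload visible before publishing the flag.
 *	msg.data = 42;
 *	smp_store_release(&msg.ready, 1);
 *
 *	// Reader: if the flag is observed set, the payload written
 *	// before the release is guaranteed to be visible.
 *	if (smp_load_acquire(&msg.ready))
 *		use(msg.data);		// hypothetical consumer
 */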

/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() __smp_mb()
#define virt_rmb() __smp_rmb()
#define virt_wmb() __smp_wmb()
#define virt_store_mb(var, value) __smp_store_mb(var, value)
#define virt_mb__before_atomic() __smp_mb__before_atomic()
#define virt_mb__after_atomic()	__smp_mb__after_atomic()
#define virt_store_release(p, v) __smp_store_release(p, v)
#define virt_load_acquire(p) __smp_load_acquire(p)
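
/*
 * Illustrative sketch (not part of this header): a guest publishing a
 * producer index in a ring shared with the host. virt_wmb() orders the
 * descriptor write before the index update even if the guest itself is
 * built !CONFIG_SMP, because the host may observe the ring from another
 * CPU. The ring layout below is hypothetical.
 *
 *	ring->desc[idx] = entry;
 *	virt_wmb();			// descriptor visible before the index
 *	WRITE_ONCE(ring->prod_idx, idx + 1);
 */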

/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides LOAD->STORE ordering; the additional RMB
 * provides LOAD->LOAD ordering. Together they provide LOAD->{LOAD,STORE}
 * ordering, i.e. (load)-ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep()		smp_rmb()
#endif
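
/*
 * Illustrative sketch (not part of this header): upgrading a control
 * dependency to ACQUIRE. The wait loop below only orders later stores
 * after the load of the flag; adding smp_acquire__after_ctrl_dep() also
 * orders later loads, giving full ACQUIRE semantics. The flag and data
 * variables are hypothetical.
 *
 *	while (!READ_ONCE(flag))
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();	// later loads now ordered too
 *	val = data;			// safe: ordered after the flag load
 */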

/**
 * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Because C lacks lambda expressions, we load the value of *ptr into a
 * pre-named variable @VAL to be used in @cond_expr.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	__unqual_scalar_typeof(*ptr) VAL;			\
	for (;;) {						\
		VAL = READ_ONCE(*__PTR);			\
		if (cond_expr)					\
			break;					\
		cpu_relax();					\
	}							\
	(typeof(*ptr))VAL;					\
})
#endif
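
/*
 * Illustrative sketch (not part of this header): waiting for a value
 * with no ordering requirement, e.g. spinning until a hypothetical
 * sequence counter becomes even. VAL names the freshly loaded value
 * inside the condition expression.
 *
 *	seq = smp_cond_load_relaxed(&s->sequence, !(VAL & 1));
 */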

/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({		\
	__unqual_scalar_typeof(*ptr) _val;			\
	_val = smp_cond_load_relaxed(ptr, cond_expr);		\
	smp_acquire__after_ctrl_dep();				\
	(typeof(*ptr))_val;					\
})
#endif
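
/*
 * Illustrative sketch (not part of this header): waiting for an owner
 * field to clear and then accessing the data it protects. The ACQUIRE
 * ordering guarantees the subsequent reads see everything published
 * before the owner was cleared. The lock layout is hypothetical.
 *
 *	smp_cond_load_acquire(&lock->owner, VAL == 0);
 *	val = lock->protected_data;	// ordered after the successful load
 */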

/*
 * pmem_wmb() ensures that all stores for which the modifications are
 * written to persistent storage by preceding instructions have updated
 * persistent storage before any data access or data transfer caused by
 * subsequent instructions is initiated.
 */
#ifndef pmem_wmb
#define pmem_wmb()	wmb()
#endif
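
/*
 * Illustrative sketch (not part of this header): draining stores to
 * persistent memory before telling another agent the data is durable.
 * memcpy_flushcache() is an existing kernel helper; the journal
 * structure and its committed flag are hypothetical.
 *
 *	memcpy_flushcache(pmem_dst, src, len);	// write and flush to pmem
 *	pmem_wmb();				// flushed stores reach persistent storage first
 *	WRITE_ONCE(journal->committed, 1);	// only then publish the commit record
 */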

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */