xref: /OK3568_Linux_fs/kernel/tools/arch/s390/include/asm/barrier.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copied from the kernel sources:
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright IBM Corp. 1999, 2009
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #ifndef __TOOLS_LINUX_ASM_BARRIER_H
11*4882a593Smuzhiyun #define __TOOLS_LINUX_ASM_BARRIER_H
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun /*
14*4882a593Smuzhiyun  * Force strict CPU ordering.
15*4882a593Smuzhiyun  * And yes, this is required on UP too when we're talking
16*4882a593Smuzhiyun  * to devices.
17*4882a593Smuzhiyun  */
18*4882a593Smuzhiyun 
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define __ASM_BARRIER "bcr 14,0\n"
#else
#define __ASM_BARRIER "bcr 15,0\n"
#endif

/*
 * mb(): full memory barrier.  Emits the BCR form selected above; the
 * "memory" clobber additionally forbids the compiler from reordering
 * memory accesses across the barrier.
 */
#define mb() do {  asm volatile(__ASM_BARRIER : : : "memory"); } while (0)

/*
 * Read and write barriers simply reuse the full barrier; no weaker
 * (cheaper) form is provided for s390 here.
 */
#define rmb()				mb()
#define wmb()				mb()
30*4882a593Smuzhiyun 
/*
 * smp_store_release(): store *p = v with release ordering.  Only a
 * compiler barrier() precedes the store — no hardware fence is emitted.
 * NOTE(review): presumably the s390 hardware memory model already
 * orders stores strongly enough that a compiler barrier suffices;
 * confirm against the in-kernel arch/s390 asm/barrier.h this was
 * copied from.
 */
#define smp_store_release(p, v)			\
do {						\
	barrier();				\
	WRITE_ONCE(*p, v);			\
} while (0)
36*4882a593Smuzhiyun 
/*
 * smp_load_acquire(): load *p with acquire ordering.  The load is
 * performed via READ_ONCE() and followed only by a compiler barrier();
 * as with smp_store_release() above, no hardware fence is used.
 * Evaluates to the loaded value (statement-expression).
 */
#define smp_load_acquire(p)			\
({						\
	typeof(*p) ___p1 = READ_ONCE(*p);	\
	barrier();				\
	___p1;					\
})
43*4882a593Smuzhiyun 
#endif /* __TOOLS_LINUX_ASM_BARRIER_H */
45