/OK3568_Linux_fs/kernel/tools/arch/powerpc/include/asm/barrier.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copied from the kernel sources:
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _TOOLS_LINUX_ASM_POWERPC_BARRIER_H
#define _TOOLS_LINUX_ASM_POWERPC_BARRIER_H

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 *
 * *mb() variants without smp_ prefix must order all types of memory
 * operations with one another. sync is the only instruction sufficient
 * to do this.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
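
/*
 * Illustrative sketch (not part of the original header): one way the full
 * barriers above might be paired in a producer/consumer exchange.  The
 * names used here (data, ready, compute(), consume()) are hypothetical and
 * exist only for this example.
 *
 *	int data;
 *	int ready;
 *
 *	producer:
 *		data = compute();
 *		wmb();			// order the data store before the flag store
 *		ready = 1;
 *
 *	consumer:
 *		while (!ready)
 *			;
 *		rmb();			// order the flag load before the data load
 *		consume(data);
 */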

#if defined(__powerpc64__)
#define smp_lwsync()	__asm__ __volatile__ ("lwsync" : : : "memory")

#define smp_store_release(p, v)			\
do {						\
	smp_lwsync();				\
	WRITE_ONCE(*p, v);			\
} while (0)

#define smp_load_acquire(p)			\
({						\
	typeof(*p) ___p1 = READ_ONCE(*p);	\
	smp_lwsync();				\
	___p1;					\
})
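
/*
 * Illustrative sketch (not part of the original header): how
 * smp_store_release() and smp_load_acquire() are typically paired for
 * message passing.  The writer's lwsync orders the payload store before
 * the flag store; the reader's lwsync orders the flag load before any
 * later access to the payload, so a reader that observes flag == 1 also
 * observes msg == 42.  The variables msg and flag are hypothetical and
 * exist only for this example.
 *
 *	int msg;
 *	int flag;
 *
 *	writer:
 *		msg = 42;
 *		smp_store_release(&flag, 1);
 *
 *	reader:
 *		if (smp_load_acquire(&flag))
 *			assert(msg == 42);
 */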
#endif /* defined(__powerpc64__) */
#endif /* _TOOLS_LINUX_ASM_POWERPC_BARRIER_H */