xref: /OK3568_Linux_fs/kernel/arch/x86/um/asm/barrier.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_UM_BARRIER_H_
#define _ASM_UM_BARRIER_H_

#include <asm/alternative.h>

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32

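/*
 * Older 32-bit CPUs lack the SSE2 mfence/lfence instructions (and
 * pre-SSE parts lack sfence), so alternative() patches in the real
 * fence only when the CPU advertises the matching X86_FEATURE_* bit,
 * and otherwise falls back to a locked add to the top of the stack,
 * a serializing no-op that acts as a full barrier.
 */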
#define mb()	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb()	alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb()	alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)

#else /* CONFIG_X86_32 */

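/*
 * x86-64 guarantees SSE2, so the fence instructions are always
 * available; the "memory" clobber additionally keeps the compiler
 * from reordering memory accesses across the barrier.
 */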
#define mb()	asm volatile("mfence" : : : "memory")
#define rmb()	asm volatile("lfence" : : : "memory")
#define wmb()	asm volatile("sfence" : : : "memory")

#endif /* CONFIG_X86_32 */
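
/*
 * asm-generic/barrier.h below fills in default definitions for the
 * remaining barrier macros (smp_mb() and friends) in terms of the
 * mb()/rmb()/wmb() primitives defined above.
 */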
#include <asm-generic/barrier.h>

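/*
 * A minimal usage sketch under the usual barrier-pairing rules; the
 * hyp_* names are hypothetical and not part of this header. The
 * producer orders its data store before the flag store with wmb();
 * the consumer pairs that with rmb() so the data load cannot be
 * satisfied before the flag load:
 *
 *	static int hyp_data, hyp_ready;
 *
 *	void hyp_producer(void)
 *	{
 *		hyp_data = 42;
 *		wmb();		(data store ordered before flag store)
 *		hyp_ready = 1;
 *	}
 *
 *	int hyp_consumer(void)
 *	{
 *		while (!hyp_ready)
 *			cpu_relax();
 *		rmb();		(flag load ordered before data load)
 *		return hyp_data;
 *	}
 *
 * Real kernel code would also use READ_ONCE()/WRITE_ONCE() on the
 * shared variables to constrain the compiler.
 */
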
#endif /* _ASM_UM_BARRIER_H_ */