xref: /OK3568_Linux_fs/kernel/arch/ia64/include/asm/barrier.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Memory barrier definitions.  This is based on information published
 * in the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _ASM_IA64_BARRIER_H
#define _ASM_IA64_BARRIER_H

#include <linux/compiler.h>

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()		ia64_mf()
#define rmb()		mb()
#define wmb()		mb()
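/*
 * Illustrative sketch only (not part of the original header), showing
 * how a writer and a reader might pair wmb() with rmb().  The names
 * "buf", "ready", "compute_data()" and "consume()" are made up for the
 * example; for plain CPU-to-CPU ordering the smp_wmb()/smp_rmb()
 * variants from <asm-generic/barrier.h> are the usual choice, with the
 * mandatory forms above being the stronger hammer.
 *
 *	writer:
 *		buf = compute_data();
 *		wmb();			// data store visible before flag store
 *		ready = 1;
 *
 *	reader:
 *		if (ready) {
 *			rmb();		// flag load ordered before data load
 *			consume(buf);
 *		}
 */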

#define dma_rmb()	mb()
#define dma_wmb()	mb()

# define __smp_mb()	mb()

#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()
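/*
 * Illustrative sketch only (not part of the original header): the
 * smp_mb__before_atomic()/smp_mb__after_atomic() wrappers that
 * <asm-generic/barrier.h> builds on the __smp_* versions above are
 * typically used to make a non-value-returning atomic op fully
 * ordered against surrounding accesses, e.g. (with a made-up "obj"):
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic();
 *	atomic_dec(&obj->ref_count);
 */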

/*
 * IA64 GCC turns volatile stores into st.rel and volatile loads into
 * ld.acq, so there is no need for asm trickery!
 */

#define __smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
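/*
 * Illustrative sketch only (not part of the original header): the
 * smp_store_release()/smp_load_acquire() wrappers that
 * <asm-generic/barrier.h> builds on the macros above give the classic
 * message-passing pattern.  "msg" and "do_work()" are made-up names:
 *
 *	writer:
 *		msg->payload = val;
 *		smp_store_release(&msg->ready, 1);
 *
 *	reader:
 *		if (smp_load_acquire(&msg->ready))
 *			do_work(msg->payload);
 *
 * As the comment above notes, the release/acquire semantics rely on
 * the compiler emitting st.rel/ld.acq for the volatile access, so the
 * macros only need a compiler barrier().
 */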

/*
 * The group barriers in front of the rsm & ssm are necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */

#include <asm-generic/barrier.h>

#endif /* _ASM_IA64_BARRIER_H */