/* SPDX-License-Identifier: GPL-2.0 */
/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <linux/types.h>

#include <asm/atomic.h>
#include <asm/barrier.h>

/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */
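
/*
 * For illustration only (not part of this header): a sketch of how the
 * suffixed variants might be used by a hypothetical producer/consumer pair.
 * The names 'seq', 'data', 'stats' and use() are made up for the example.
 *
 *	// Producer: fill in the payload, then publish with RELEASE so the
 *	// stores to 'data' cannot be reordered after the counter update.
 *	data->ready = 1;
 *	atomic_inc_return_release(&seq);
 *
 *	// Consumer: observe the counter with ACQUIRE so later loads from
 *	// 'data' cannot be reordered before the counter read.
 *	if (atomic_read_acquire(&seq) > old)
 *		use(data);
 *
 *	// A pure statistics counter needs no ordering at all:
 *	atomic_fetch_add_relaxed(1, &stats);
 */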

#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))

#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))

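/*
 * Illustrative example (not part of this header): these wrappers spin until
 * the condition expression, which sees the current counter value as VAL,
 * becomes true, exactly as with smp_cond_load_acquire(). For instance, to
 * wait for another CPU to clear a hypothetical LOCKED_BIT in 'lock_var':
 *
 *	atomic_cond_read_acquire(&lock_var, !(VAL & LOCKED_BIT));
 */
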
/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 *
 * If an architecture overrides __atomic_acquire_fence() it will probably
 * want to define smp_mb__after_spinlock().
 */
#ifndef __atomic_acquire_fence
#define __atomic_acquire_fence		smp_mb__after_atomic
#endif

#ifndef __atomic_release_fence
#define __atomic_release_fence		smp_mb__before_atomic
#endif

#ifndef __atomic_pre_full_fence
#define __atomic_pre_full_fence		smp_mb__before_atomic
#endif

#ifndef __atomic_post_full_fence
#define __atomic_post_full_fence	smp_mb__after_atomic
#endif

#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);		\
	__atomic_acquire_fence();					\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__atomic_release_fence();					\
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	__atomic_pre_full_fence();					\
	__ret = op##_relaxed(args);					\
	__atomic_post_full_fence();					\
	__ret;								\
})
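
/*
 * Illustrative sketch (not the exact generated fallback code): given only a
 * relaxed primitive such as atomic_add_return_relaxed(), the ordered
 * variants can be composed from the helpers above, roughly like so:
 *
 *	#define atomic_add_return_acquire(...)				\
 *		__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
 *
 *	#define atomic_add_return_release(...)				\
 *		__atomic_op_release(atomic_add_return, __VA_ARGS__)
 *
 *	#define atomic_add_return(...)					\
 *		__atomic_op_fence(atomic_add_return, __VA_ARGS__)
 *
 * Each wrapper expands to atomic_add_return_relaxed() surrounded by the
 * appropriate fence(s), which collapse to no-ops on architectures whose
 * relaxed form is already fully ordered.
 */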

#ifdef ARCH_ATOMIC
#include <linux/atomic-arch-fallback.h>
#include <asm-generic/atomic-instrumented.h>
#else
#include <linux/atomic-fallback.h>
#endif

#include <asm-generic/atomic-long.h>

#endif /* _LINUX_ATOMIC_H */