/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Low level functions for atomic operations
 *
 * Copyright IBM Corp. 1999, 2016
 */

#ifndef __ARCH_S390_ATOMIC_OPS__
#define __ARCH_S390_ATOMIC_OPS__

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
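
/*
 * With the z196 interlocked-access facility the operations below are
 * single instructions (laa, lan, lao, lax and their 64-bit "g" forms)
 * that update *ptr atomically and return the old value. The *_barrier
 * variants append a serializing "bcr 14,0" so the update also acts as
 * a full memory barrier.
 */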
#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
static inline op_type op_name(op_type val, op_type *ptr) \
{ \
	op_type old; \
\
	asm volatile( \
		op_string " %[old],%[val],%[ptr]\n" \
		op_barrier \
		: [old] "=d" (old), [ptr] "+Q" (*ptr) \
		: [val] "d" (val) : "cc", "memory"); \
	return old; \
} \

#define __ATOMIC_OPS(op_name, op_type, op_string) \
	__ATOMIC_OP(op_name, op_type, op_string, "\n") \
	__ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")

__ATOMIC_OPS(__atomic_add, int, "laa")
__ATOMIC_OPS(__atomic_and, int, "lan")
__ATOMIC_OPS(__atomic_or, int, "lao")
__ATOMIC_OPS(__atomic_xor, int, "lax")

__ATOMIC_OPS(__atomic64_add, long, "laag")
__ATOMIC_OPS(__atomic64_and, long, "lang")
__ATOMIC_OPS(__atomic64_or, long, "laog")
__ATOMIC_OPS(__atomic64_xor, long, "laxg")

#undef __ATOMIC_OPS
#undef __ATOMIC_OP
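
/*
 * asi/agsi add a signed 8-bit immediate directly to the storage operand;
 * with the interlocked-access facility the update is atomic. No old value
 * is returned, hence the helpers are void.
 */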
#define __ATOMIC_CONST_OP(op_name, op_type, op_string, op_barrier) \
static __always_inline void op_name(op_type val, op_type *ptr) \
{ \
	asm volatile( \
		op_string " %[ptr],%[val]\n" \
		op_barrier \
		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc", "memory"); \
}

#define __ATOMIC_CONST_OPS(op_name, op_type, op_string) \
	__ATOMIC_CONST_OP(op_name, op_type, op_string, "\n") \
	__ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")

__ATOMIC_CONST_OPS(__atomic_add_const, int, "asi")
__ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")

#undef __ATOMIC_CONST_OPS
#undef __ATOMIC_CONST_OP

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
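
/*
 * Pre-z196 fallback: each operation is a compare-and-swap retry loop.
 * Load the old value, compute the new one, and retry the cs/csg until
 * no other CPU has changed *ptr in between. cs/csg itself serializes,
 * so the *_barrier variants need no extra barrier instruction.
 */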
#define __ATOMIC_OP(op_name, op_string) \
static inline int op_name(int val, int *ptr) \
{ \
	int old, new; \
\
	asm volatile( \
		"0: lr %[new],%[old]\n" \
		op_string " %[new],%[val]\n" \
		" cs %[old],%[new],%[ptr]\n" \
		" jl 0b" \
		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr) \
		: [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
	return old; \
}

#define __ATOMIC_OPS(op_name, op_string) \
	__ATOMIC_OP(op_name, op_string) \
	__ATOMIC_OP(op_name##_barrier, op_string)

__ATOMIC_OPS(__atomic_add, "ar")
__ATOMIC_OPS(__atomic_and, "nr")
__ATOMIC_OPS(__atomic_or, "or")
__ATOMIC_OPS(__atomic_xor, "xr")

#undef __ATOMIC_OPS
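
/* The same compare-and-swap loop for the 64-bit types, using lgr/csg. */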
#define __ATOMIC64_OP(op_name, op_string) \
static inline long op_name(long val, long *ptr) \
{ \
	long old, new; \
\
	asm volatile( \
		"0: lgr %[new],%[old]\n" \
		op_string " %[new],%[val]\n" \
		" csg %[old],%[new],%[ptr]\n" \
		" jl 0b" \
		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr) \
		: [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
	return old; \
}

#define __ATOMIC64_OPS(op_name, op_string) \
	__ATOMIC64_OP(op_name, op_string) \
	__ATOMIC64_OP(op_name##_barrier, op_string)

__ATOMIC64_OPS(__atomic64_add, "agr")
__ATOMIC64_OPS(__atomic64_and, "ngr")
__ATOMIC64_OPS(__atomic64_or, "ogr")
__ATOMIC64_OPS(__atomic64_xor, "xgr")

#undef __ATOMIC64_OPS
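
/*
 * Without asi/agsi there is no benefit in a separate immediate-operand
 * form, so the *_add_const helpers simply map to the generic add helpers.
 */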
#define __atomic_add_const(val, ptr)		__atomic_add(val, ptr)
#define __atomic_add_const_barrier(val, ptr)	__atomic_add(val, ptr)
#define __atomic64_add_const(val, ptr)		__atomic64_add(val, ptr)
#define __atomic64_add_const_barrier(val, ptr)	__atomic64_add(val, ptr)

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
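
/*
 * The cmpxchg helpers use the compiler's __sync compare-and-swap builtins,
 * which are implemented with cs/csg on s390. The *_bool variants return
 * whether the swap was actually performed.
 */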
static inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
	return __sync_val_compare_and_swap(ptr, old, new);
}

static inline int __atomic_cmpxchg_bool(int *ptr, int old, int new)
{
	return __sync_bool_compare_and_swap(ptr, old, new);
}

static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
{
	return __sync_val_compare_and_swap(ptr, old, new);
}

static inline long __atomic64_cmpxchg_bool(long *ptr, long old, long new)
{
	return __sync_bool_compare_and_swap(ptr, old, new);
}

#endif /* __ARCH_S390_ATOMIC_OPS__ */