xref: /OK3568_Linux_fs/kernel/arch/hexagon/include/asm/atomic.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Normal (non-locked) writes on this architecture don't clear lock
 * reservations, so atomic_set() must use a locked store of its own to
 * interoperate with concurrent load-locked/store-conditional sequences.
 */

static inline void atomic_set(atomic_t *v, int new)
{
	asm volatile(
		"1:	r6 = memw_locked(%0);\n"
		"	memw_locked(%0,p0) = %1;\n"
		"	if (!P0) jump 1b;\n"
		:
		: "r" (&v->counter), "r" (new)
		: "memory", "p0", "r6"
	);
}
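
/*
 * Why the loop above matters, sketched as an interleaving (this comment
 * is illustrative, not part of the original header): with a plain
 * store, a concurrent LL/SC sequence would not notice the update,
 * because the reservation it holds survives normal writes:
 *
 *	CPU0				CPU1
 *	old = memw_locked(&v->counter)
 *					v->counter = new  (plain store)
 *	memw_locked(&v->counter) = x	succeeds; CPU1's store is lost
 *
 * atomic_set()'s locked store clears CPU0's reservation instead, so
 * CPU0's conditional store fails and its sequence retries.
 */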

#define atomic_set_release(v, i)	atomic_set((v), (i))

/**
 * atomic_read - reads a word, atomically
 * @v: pointer to atomic value
 *
 * Assumes all word reads on our architecture are atomic.
 */
#define atomic_read(v)		READ_ONCE((v)->counter)

/**
 * atomic_xchg - atomically exchange a value
 * @v: pointer to memory to change
 * @new: new value (technically passed in a register -- see xchg)
 *
 * Returns the old value.
 */
#define atomic_xchg(v, new)	(xchg(&((v)->counter), (new)))
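
/*
 * Example usage (a sketch; pending_work and do_work() are hypothetical
 * names, not part of this header): atomically claim a flag so that
 * exactly one caller acts on it.
 *
 *	static atomic_t pending_work = ATOMIC_INIT(1);
 *
 *	if (atomic_xchg(&pending_work, 0) == 1)
 *		do_work();	// only the caller that observed 1 runs this
 */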

/**
 * atomic_cmpxchg - atomic compare-and-exchange values
 * @v: pointer to value to change
 * @old:  desired old value to match
 * @new:  new value to put in
 *
 * Parameters are then pointer, value-in-register, value-in-register,
 * and the output is the old value.
 *
 * This is the linchpin of the rest of the generically defined atomic
 * routines; architectures that lack a memw_locked-style primitive have
 * to provide this operation some other way.
 *
 * Remember that Hexagon V2 had a bug with the dotnew predicate set by
 * memw_locked.
 *
 * "old" is the "expected" old value, __oldval is the actual old value
 */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int __oldval;

	asm volatile(
		"1:	%0 = memw_locked(%1);\n"
		"	{ P0 = cmp.eq(%0,%2);\n"
		"	  if (!P0.new) jump:nt 2f; }\n"
		"	memw_locked(%1,P0) = %3;\n"
		"	if (!P0) jump 1b;\n"
		"2:\n"
		: "=&r" (__oldval)
		: "r" (&v->counter), "r" (old), "r" (new)
		: "memory", "p0"
	);

	return __oldval;
}
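
/*
 * Example (a sketch, not part of this header): the classic
 * compare-and-exchange retry loop that generic routines build on this
 * primitive. atomic_clamp_add() is a hypothetical illustration; min()
 * is the kernel's linux/minmax.h helper:
 *
 *	static inline int atomic_clamp_add(atomic_t *v, int a, int max)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(v);
 *			new = min(old + a, max);
 *		} while (atomic_cmpxchg(v, old, new) != old);
 *
 *		return old;	// fetch-style: return the pre-add value
 *	}
 */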

#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
}

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int output, val;						\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%2);\n"			\
		"	%1 = "#op "(%0,%3);\n"				\
		"	memw_locked(%2,P3)=%1;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output), "=&r" (val)				\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
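
/*
 * The expansions above generate atomic_add(), atomic_sub(),
 * atomic_add_return(), atomic_sub_return(), atomic_fetch_add() and
 * atomic_fetch_sub(), plus atomic_and(), atomic_or(), atomic_xor() and
 * their atomic_fetch_*() variants. As in the generic atomic API, the
 * bitwise ops have no *_return forms.
 */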

/**
 * atomic_fetch_add_unless - add unless the value equals a given value
 * @v: pointer to value
 * @a: amount to add
 * @u: don't perform the add if the value is equal to this
 *
 * Returns the old value.
 */
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int __oldval;
	register int tmp;

	asm volatile(
		"1:	%0 = memw_locked(%2);"
		"	{"
		"		p3 = cmp.eq(%0, %4);"
		"		if (p3.new) jump:nt 2f;"
		"		%1 = add(%0, %3);"
		"	}"
		"	memw_locked(%2, p3) = %1;"
		"	{"
		"		if (!p3) jump 1b;"
		"	}"
		"2:"
		: "=&r" (__oldval), "=&r" (tmp)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return __oldval;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
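
/*
 * Example usage (a sketch; obj and refcount are hypothetical names, not
 * part of this header): the atomic_inc_not_zero() pattern, taking a
 * reference only while the count is still nonzero:
 *
 *	if (atomic_fetch_add_unless(&obj->refcount, 1, 0) != 0)
 *		return obj;	// reference taken
 *	return NULL;		// object already being freed
 */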

#endif	/* _ASM_ATOMIC_H */