/* xref: /OK3568_Linux_fs/kernel/arch/openrisc/include/asm/atomic.h (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#ifndef __ASM_OPENRISC_ATOMIC_H
#define __ASM_OPENRISC_ATOMIC_H

#include <linux/types.h>

/* Atomically perform op with v->counter and i */
#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	int tmp;							\
									\
	__asm__ __volatile__(						\
		"1:	l.lwa	%0,0(%1)	\n"			\
		"	l." #op " %0,%0,%2	\n"			\
		"	l.swa	0(%1),%0	\n"			\
		"	l.bnf	1b		\n"			\
		"	 l.nop			\n"			\
		: "=&r"(tmp)						\
		: "r"(&v->counter), "r"(i)				\
		: "cc", "memory");					\
}
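
/*
 * Illustrative note: ATOMIC_OP(and), for example, expands to roughly the
 * following (a sketch, not verbatim preprocessor output):
 *
 *	static inline void atomic_and(int i, atomic_t *v)
 *	{
 *		int tmp;
 *
 *		__asm__ __volatile__(
 *			"1:	l.lwa	%0,0(%1)	\n"	// load-linked v->counter
 *			"	l.and	%0,%0,%2	\n"	// tmp = tmp & i
 *			"	l.swa	0(%1),%0	\n"	// store-conditional
 *			"	l.bnf	1b		\n"	// retry if the store failed
 *			"	 l.nop			\n"	// branch delay slot
 *			: "=&r"(tmp)
 *			: "r"(&v->counter), "r"(i)
 *			: "cc", "memory");
 *	}
 *
 * l.lwa/l.swa act as a load-linked/store-conditional pair: l.swa sets the
 * flag only if the reservation taken by l.lwa is still intact, and l.bnf
 * (branch if no flag) loops back to retry until the update lands atomically.
 */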

/* Atomically perform op with v->counter and i, return the result */
#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int tmp;							\
									\
	__asm__ __volatile__(						\
		"1:	l.lwa	%0,0(%1)	\n"			\
		"	l." #op " %0,%0,%2	\n"			\
		"	l.swa	0(%1),%0	\n"			\
		"	l.bnf	1b		\n"			\
		"	 l.nop			\n"			\
		: "=&r"(tmp)						\
		: "r"(&v->counter), "r"(i)				\
		: "cc", "memory");					\
									\
	return tmp;							\
}
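
/*
 * Usage sketch (hypothetical caller, not from this file): the *_return
 * variants hand back the new value of v->counter, which is what reference
 * counting patterns typically want, e.g.
 *
 *	atomic_t refs = ATOMIC_INIT(1);
 *	...
 *	if (atomic_sub_return(1, &refs) == 0)
 *		free_the_object();	// hypothetical helper
 *
 * The loop is the same l.lwa/l.swa retry as in ATOMIC_OP(); the only
 * difference is that tmp is returned to the caller.
 */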

/* Atomically perform op with v->counter and i, return orig v->counter */
#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	int tmp, old;							\
									\
	__asm__ __volatile__(						\
		"1:	l.lwa	%0,0(%2)	\n"			\
		"	l." #op " %1,%0,%3	\n"			\
		"	l.swa	0(%2),%1	\n"			\
		"	l.bnf	1b		\n"			\
		"	 l.nop			\n"			\
		: "=&r"(old), "=&r"(tmp)				\
		: "r"(&v->counter), "r"(i)				\
		: "cc", "memory");					\
									\
	return old;							\
}
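
/*
 * Unlike ATOMIC_OP_RETURN(), the fetch variants return the value that was
 * in v->counter *before* the operation: %0 (old) captures the l.lwa result
 * and %1 (tmp) holds the new value handed to l.swa.  A typical use is
 * claiming a flag atomically, e.g. (illustrative, the names are made up):
 *
 *	if (atomic_fetch_or(MY_BUSY_BIT, &my_state) & MY_BUSY_BIT)
 *		return -EBUSY;	// somebody else set it first
 */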

ATOMIC_OP_RETURN(add)
ATOMIC_OP_RETURN(sub)

ATOMIC_FETCH_OP(add)
ATOMIC_FETCH_OP(sub)
ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(xor)

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_add_return	atomic_add_return
#define atomic_sub_return	atomic_sub_return
#define atomic_fetch_add	atomic_fetch_add
#define atomic_fetch_sub	atomic_fetch_sub
#define atomic_fetch_and	atomic_fetch_and
#define atomic_fetch_or		atomic_fetch_or
#define atomic_fetch_xor	atomic_fetch_xor
#define atomic_and	atomic_and
#define atomic_or	atomic_or
#define atomic_xor	atomic_xor
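
/*
 * These self-referencing #defines advertise which operations this
 * architecture implements; <asm-generic/atomic.h>, included at the bottom
 * of this file, only supplies its generic fallback for an operation whose
 * macro is not already defined (a note on the generic header's #ifndef
 * guards, not something spelled out in this file).
 */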

/*
 * Atomically add a to v->counter as long as v is not already u.
 * Returns the original value at v->counter.
 *
 * This is often used through atomic_inc_not_zero()
 */
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int old, tmp;

	__asm__ __volatile__(
		"1:	l.lwa %0, 0(%2)		\n"
		"	l.sfeq %0, %4		\n"
		"	l.bf 2f			\n"
		"	 l.add %1, %0, %3	\n"
		"	l.swa 0(%2), %1		\n"
		"	l.bnf 1b		\n"
		"	 l.nop			\n"
		"2:				\n"
		: "=&r"(old), "=&r" (tmp)
		: "r"(&v->counter), "r"(a), "r"(u)
		: "cc", "memory");

	return old;
}
#define atomic_fetch_add_unless	atomic_fetch_add_unless
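
/*
 * Reading note: l.add sits in the delay slot of l.bf, so the new value is
 * computed whether or not the branch to 2: is taken; when old == u the
 * l.swa is skipped and nothing is written back.  atomic_inc_not_zero(v)
 * is typically built on top of this as atomic_fetch_add_unless(v, 1, 0) != 0
 * by the generic atomic fallbacks (behaviour of the common headers, not
 * defined here).
 */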

#include <asm-generic/atomic.h>
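
/*
 * The operations not defined above - e.g. atomic_read(), atomic_set() and
 * the plain atomic_add()/atomic_sub() - are expected to come from
 * <asm-generic/atomic.h>, included just above (an assumption about the
 * generic header rather than something this file states).
 */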

#endif /* __ASM_OPENRISC_ATOMIC_H */