/*
 * Atomic xchg and cmpxchg operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_CMPXCHG_H
#define _XTENSA_CMPXCHG_H

#ifndef __ASSEMBLY__

#include <linux/bits.h>
#include <linux/compiler.h>	/* READ_ONCE() used by xchg_small() */
#include <linux/stringify.h>
#include <linux/types.h>	/* u32 used by xchg_small() */

/*
 * cmpxchg
 */
static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
#if XCHAL_HAVE_EXCLUSIVE
	unsigned long tmp, result;

	__asm__ __volatile__(
			"1:     l32ex   %[result], %[addr]\n"
			"       bne     %[result], %[cmp], 2f\n"
			"       mov     %[tmp], %[new]\n"
			"       s32ex   %[tmp], %[addr]\n"
			"       getex   %[tmp]\n"
			"       beqz    %[tmp], 1b\n"
			"2:\n"
			: [result] "=&a" (result), [tmp] "=&a" (tmp)
			: [new] "a" (new), [addr] "a" (p), [cmp] "a" (old)
			: "memory"
			);

	return result;
#elif XCHAL_HAVE_S32C1I
	__asm__ __volatile__(
			"       wsr     %[cmp], scompare1\n"
			"       s32c1i  %[new], %[mem]\n"
			: [new] "+a" (new), [mem] "+m" (*p)
			: [cmp] "a" (old)
			: "memory"
			);

	return new;
#else
	__asm__ __volatile__(
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"
			"       l32i    %[old], %[mem]\n"
			"       bne     %[old], %[cmp], 1f\n"
			"       s32i    %[new], %[mem]\n"
			"1:\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: [old] "=&a" (old), [mem] "+m" (*p)
			: [cmp] "a" (old), [new] "r" (new)
			: "a15", "memory");
	return old;
#endif
}
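/*
 * Illustrative sketch only (not part of this header): all three variants
 * above implement an atomic version of the following C model, where the
 * function name is purely for exposition:
 *
 *	int cmpxchg_u32_model(int *p, int old, int new)
 *	{
 *		int val = *p;
 *
 *		if (val == old)
 *			*p = new;
 *		return val;
 *	}
 *
 * The L32EX/S32EX variant retries until its exclusive store succeeds,
 * the S32C1I variant lets hardware compare against SCOMPARE1, and the
 * fallback masks interrupts around the load-compare-store sequence
 * (which is sufficient on uniprocessor configurations only).
 */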
/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */

extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:  return __cmpxchg_u32(ptr, old, new);
	default: __cmpxchg_called_with_bad_pointer();
		 return old;
	}
}

#define cmpxchg(ptr, o, n)						       \
	({ __typeof__(*(ptr)) _o_ = (o);				       \
	   __typeof__(*(ptr)) _n_ = (n);				       \
	   (__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)_o_,	       \
					 (unsigned long)_n_, sizeof(*(ptr))); \
	})
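/*
 * Usage sketch (hypothetical caller): cmpxchg() returns the value that
 * was actually observed in *ptr, so the caller learns whether the store
 * happened by comparing the return value against the expected old value:
 *
 *	static int lock_word;
 *
 *	static bool try_take(void)
 *	{
 *		return cmpxchg(&lock_word, 0, 1) == 0;
 *	}
 */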

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),       \
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
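/*
 * Usage sketch (hypothetical, current-CPU-only data): because
 * cmpxchg_local() is not atomic against other CPUs, it is only suitable
 * for data no other CPU can touch, e.g. a current-CPU-private counter:
 *
 *	old = cmpxchg_local(counter, old, old + 1);
 *
 * where the caller supplies both the retry loop and the pointer to its
 * per-CPU word.
 */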

/*
 * xchg_u32
 *
 * Note that a15 is used here because the register allocation
 * done by the compiler is not guaranteed and a window overflow
 * must not occur between the rsil and wsr instructions. By using
 * a15 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */

static inline unsigned long xchg_u32(volatile int *m, unsigned long val)
{
#if XCHAL_HAVE_EXCLUSIVE
	unsigned long tmp, result;

	__asm__ __volatile__(
			"1:     l32ex   %[result], %[addr]\n"
			"       mov     %[tmp], %[val]\n"
			"       s32ex   %[tmp], %[addr]\n"
			"       getex   %[tmp]\n"
			"       beqz    %[tmp], 1b\n"
			: [result] "=&a" (result), [tmp] "=&a" (tmp)
			: [val] "a" (val), [addr] "a" (m)
			: "memory"
			);

	return result;
#elif XCHAL_HAVE_S32C1I
	unsigned long tmp, result;

	__asm__ __volatile__(
			"1:     l32i    %[tmp], %[mem]\n"
			"       mov     %[result], %[val]\n"
			"       wsr     %[tmp], scompare1\n"
			"       s32c1i  %[result], %[mem]\n"
			"       bne     %[result], %[tmp], 1b\n"
			: [result] "=&a" (result), [tmp] "=&a" (tmp),
			  [mem] "+m" (*m)
			: [val] "a" (val)
			: "memory"
			);

	return result;
#else
	unsigned long tmp;

	__asm__ __volatile__(
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"
			"       l32i    %[tmp], %[mem]\n"
			"       s32i    %[val], %[mem]\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: [tmp] "=&a" (tmp), [mem] "+m" (*m)
			: [val] "a" (val)
			: "a15", "memory");
	return tmp;
#endif
}

#define xchg(ptr, x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
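/*
 * Usage sketch (hypothetical flag handoff): xchg() stores the new value
 * and returns the previous contents in one atomic step:
 *
 *	static int pending;
 *
 *	static int consume_pending(void)
 *	{
 *		return xchg(&pending, 0);
 *	}
 */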

static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
{
	int off = (unsigned long)ptr % sizeof(u32);
	volatile u32 *p = ptr - off;
#ifdef __BIG_ENDIAN
	int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
#else
	int bitoff = off * BITS_PER_BYTE;
#endif
	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
	u32 oldv, newv;
	u32 ret;

	do {
		oldv = READ_ONCE(*p);
		ret = (oldv & bitmask) >> bitoff;
		newv = (oldv & ~bitmask) | (x << bitoff);
	} while (__cmpxchg_u32(p, oldv, newv) != oldv);

	return ret;
}
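/*
 * Worked example for xchg_small() above (little-endian, 2-byte exchange
 * at byte offset 2 within its aligned word): off = 2, bitoff = 16 and
 * bitmask = 0xffff0000, so each retry computes
 *
 *	newv = (oldv & 0x0000ffff) | (x << 16);
 *
 * and the value returned to the caller is (oldv >> 16) & 0xffff. The
 * loop repeats until the containing word is swapped without any
 * intervening modification.
 */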

/*
 * This only works if the compiler isn't horribly bad at optimizing.
 * gcc-2.5.8 reportedly can't handle this, but I define that one to
 * be dead anyway.
 */

extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long
__xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 1:
		return xchg_small(ptr, x, 1);
	case 2:
		return xchg_small(ptr, x, 2);
	case 4:
		return xchg_u32(ptr, x);
	default:
		__xchg_called_with_bad_pointer();
		return x;
	}
}

#endif /* __ASSEMBLY__ */

#endif /* _XTENSA_CMPXCHG_H */