/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/bitops.h>

#include <linux/atomic.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define arch_spin_lock_init(x)			((x)->lock = 0)
/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the
 * head becomes equal to the initial value of the tail.
 * The pad bits in the middle are used to prevent the next_ticket number
 * overflowing into the now_serving number.
 *
 *  31             17  16    15  14                    0
 *  +----------------------------------------------------+
 *  |  now_serving     | padding |   next_ticket         |
 *  +----------------------------------------------------+
 */

#define TICKET_SHIFT	17
#define TICKET_BITS	15
#define TICKET_MASK	((1 << TICKET_BITS) - 1)

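/*
 * Worked example (illustrative, not part of the original interface):
 * with the masks above, a lock word of 0x00020001 decodes as
 * now_serving = (0x00020001 >> TICKET_SHIFT) & TICKET_MASK = 1 and
 * next_ticket = 0x00020001 & TICKET_MASK = 1; head equals tail, so the
 * lock is free.  One ia64_fetchadd(1, ...) turns it into 0x00020002,
 * i.e. next_ticket = 2 while now_serving = 1: locked, with no waiters.
 */
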
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	int *p = (int *)&lock->lock, ticket, serve;

	/* Atomically take a ticket: bump next_ticket, note the old word. */
	ticket = ia64_fetchadd(1, p, acq);

	/* Uncontended: now_serving already equals our ticket. */
	if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
		return;

	ia64_invala();

	for (;;) {
		/*
		 * Spin with a check-load: ia64_invala() above dropped the
		 * ALAT entry, and ld4.c.nc only refetches the word when the
		 * entry is invalidated again, e.g. by the unlocker's store,
		 * which keeps the wait loop off the memory system.
		 */
		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");

		if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
			return;
		cpu_relax();
	}
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	int tmp = READ_ONCE(lock->lock);

	/* Only take a ticket if the lock currently looks free. */
	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
	return 0;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;

	/* This could be optimised with ARCH_HAS_MMIOWB */
	mmiowb();
	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
	/* Advance now_serving (bit 1 of this halfword); keep the pad bit 0. */
	WRITE_ONCE(*p, (tmp + 2) & ~1);
}

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	long tmp = READ_ONCE(lock->lock);

	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	long tmp = READ_ONCE(lock->lock);

	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return !(((lock.lock >> TICKET_SHIFT) ^ lock.lock) & TICKET_MASK);
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

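/*
 * Minimal usage sketch (illustrative only; kernel code takes spinlocks
 * through the generic wrappers in <linux/spinlock.h>, not these arch
 * hooks directly, and __ARCH_SPIN_LOCK_UNLOCKED comes from the arch's
 * spinlock_types.h):
 *
 *	arch_spinlock_t l = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&l);
 *	... critical section ...
 *	arch_spin_unlock(&l);
 */
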
static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}
#define arch_spin_lock_flags	arch_spin_lock_flags

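/*
 * Note on the rwlock encoding assumed by the code below (it matches
 * arch_rwlock_t in the ia64 spinlock_types.h): bits 0..30 hold the
 * reader count and bit 31 is the write lock, so a negative 32-bit
 * value means a writer owns or is taking the lock.
 */
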
#ifdef ASM_SUPPORTED

static __always_inline void
arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
	/*
	 * Optimistically take a reader slot with fetchadd4.acq.  If the
	 * word went negative a writer holds the lock: drop our slot again
	 * and spin, with interrupts re-enabled when @flags says they were
	 * on, until the word is non-negative, then retry.
	 */
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1,%2\n"
		"br.few 3f\n"
		"1:\n"
		"fetchadd4.rel r2 = [%0], -1;;\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"fetchadd4.acq r2 = [%0], 1;;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 1b\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "p6", "p7", "r2", "memory");
}

#define arch_read_lock_flags arch_read_lock_flags
#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)

#else /* !ASM_SUPPORTED */

#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)

#define arch_read_lock(rw)							\
do {										\
	arch_rwlock_t *__read_lock_ptr = (rw);					\
										\
	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {	\
		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);		\
		while (*(volatile int *)__read_lock_ptr < 0)			\
			cpu_relax();						\
	}									\
} while (0)

#endif /* !ASM_SUPPORTED */

#define arch_read_unlock(rw)					\
do {								\
	arch_rwlock_t *__read_lock_ptr = (rw);			\
	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
} while (0)

#ifdef ASM_SUPPORTED

static __always_inline void
arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
	/*
	 * Acquire the writer bit (bit 31, built in r29) with cmpxchg4.acq
	 * against an expected value of zero; while contended, spin with
	 * interrupts re-enabled when @flags says they were on.
	 */
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1, %2\n"
		"mov ar.ccv = r0\n"
		"dep r29 = -1, r0, 31, 1\n"
		"br.few 3f;;\n"
		"1:\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 1b;;\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}

#define arch_write_lock_flags arch_write_lock_flags
#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)

#define arch_write_trylock(rw)							\
({										\
	register long result;							\
										\
	/* Succeeds only if the whole word was 0: no readers, no writer. */	\
	__asm__ __volatile__ (							\
		"mov ar.ccv = r0\n"						\
		"dep r29 = -1, r0, 31, 1;;\n"					\
		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"				\
		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");		\
	(result == 0);								\
})

static inline void arch_write_unlock(arch_rwlock_t *x)
{
	u8 *y = (u8 *)x;
	barrier();
	/* Clear the write_lock byte (byte 3) with release semantics. */
	asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
}

#else /* !ASM_SUPPORTED */

#define arch_write_lock(l)								\
({											\
	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	__u32 *ia64_write_lock_ptr = (__u32 *) (l);					\
	do {										\
		while (*ia64_write_lock_ptr)						\
			ia64_barrier();							\
		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);	\
	} while (ia64_val);								\
})

#define arch_write_trylock(rw)						\
({									\
	__u64 ia64_val;							\
	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);	\
	(ia64_val == 0);						\
})

static inline void arch_write_unlock(arch_rwlock_t *x)
{
	barrier();
	x->write_lock = 0;
}

#endif /* !ASM_SUPPORTED */

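/*
 * Reader trylock via a single cmpxchg: snapshot the lock word, force
 * write_lock clear in both the expected and the new image, bump
 * read_counter, and succeed only if the word still matched the
 * writer-free snapshot.
 */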
static inline int arch_read_trylock(arch_rwlock_t *x)
{
	union {
		arch_rwlock_t lock;
		__u32 word;
	} old, new;
	old.lock = new.lock = *x;
	old.lock.write_lock = new.lock.write_lock = 0;
	++new.lock.read_counter;
	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}

#endif /* _ASM_IA64_SPINLOCK_H */