/* SPDX-License-Identifier: GPL-2.0
 *
 * include/asm-sh/spinlock-cas.h
 *
 * Copyright (C) 2015 SEI
 */
#ifndef __ASM_SH_SPINLOCK_CAS_H
#define __ASM_SH_SPINLOCK_CAS_H

#include <asm/barrier.h>
#include <asm/processor.h>

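/*
 * Atomic compare-and-swap, built on the cas.l instruction (the CAS
 * primitive of the J2 cores this header is selected for).  Roughly
 * equivalent to the following, performed as one atomic step:
 *
 *	unsigned cur = *p;
 *	if (cur == old)
 *		*p = new;
 *	return cur;
 *
 * so a return value equal to 'old' means the swap took place.  cas.l
 * takes its address operand in r0, hence the "z" constraint on p, and
 * it sets the T bit, hence the "t" clobber.
 */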
static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new)
{
	__asm__ __volatile__("cas.l %1,%0,@r0"
		: "+r"(new)
		: "r"(old), "z"(p)
		: "t", "memory" );
	return new;
}

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

#define arch_spin_is_locked(x)		((x)->lock <= 0)

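/*
 * Lock word convention: 1 is unlocked, 0 is locked.  arch_spin_lock()
 * spins until it swaps 1 for 0, arch_spin_unlock() stores 1 back, and
 * arch_spin_trylock() returns the old value, i.e. nonzero on success.
 */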
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (!__sl_cas(&lock->lock, 1, 0));
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__sl_cas(&lock->lock, 0, 1);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __sl_cas(&lock->lock, 1, 0);
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

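/*
 * rw->lock holds RW_LOCK_BIAS (see <asm/spinlock_types.h>) when the
 * lock is free.  Each reader decrements the count by one; a writer
 * atomically swaps the full bias for 0, so 0 means write-locked and
 * makes both new readers and new writers spin.
 */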
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned old;
	/* old == 0 means write-locked; retry until we can take a reader slot. */
	do old = rw->lock;
	while (!old || __sl_cas(&rw->lock, old, old-1) != old);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned old;
	do old = rw->lock;
	while (__sl_cas(&rw->lock, old, old+1) != old);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (__sl_cas(&rw->lock, RW_LOCK_BIAS, 0) != RW_LOCK_BIAS);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__sl_cas(&rw->lock, 0, RW_LOCK_BIAS);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned old;
	do old = rw->lock;
	while (old && __sl_cas(&rw->lock, old, old-1) != old);
	return !!old;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS;
}

#endif /* __ASM_SH_SPINLOCK_CAS_H */