xref: /OK3568_Linux_fs/kernel/arch/s390/include/asm/spinlock.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/alternative.h>

/* Per-CPU lock value cached in the lowcore; see arch_spin_lockval(). */
#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

/* Retry count used by the out-of-line lock slow paths (arch/s390/lib/spinlock.c). */
extern int spin_retry;

/* True if the given virtual CPU is currently preempted by the hypervisor. */
bool arch_vcpu_is_preempted(int cpu);

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
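
/*
 * Illustrative sketch only (not used by this header): the arch_spin_*
 * primitives below are normally reached through the generic
 * <linux/spinlock.h> API.  A minimal example of the two variants,
 * using a hypothetical lock and counter:
 *
 *	static DEFINE_SPINLOCK(demo_lock);
 *	static int demo_count;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&demo_lock, flags);		(disables IRQs locally)
 *	demo_count++;
 *	spin_unlock_irqrestore(&demo_lock, flags);
 *
 *	spin_lock(&demo_lock);				(leaves IRQs untouched)
 *	demo_count--;
 *	spin_unlock(&demo_lock);
 */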

void arch_spin_relax(arch_spinlock_t *lock);
#define arch_spin_relax	arch_spin_relax

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_setup(int cpu);

/* The lock value of a CPU is its number + 1, so that 0 always means "unlocked". */
static inline u32 arch_spin_lockval(int cpu)
{
	return cpu + 1;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	/* Uncontended fast path: grab the lock by replacing 0 with this CPU's lockval. */
	barrier();
	return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

/* Same as arch_spin_lock(); the flags argument is not used on s390. */
static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}
#define arch_spin_lock_flags	arch_spin_lock_flags

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(int, lp->lock);
	/*
	 * Release the lock by storing zero into the lower halfword of
	 * ->lock only; the upper halfword carries the waiter queueing
	 * state maintained by the slow path in arch/s390/lib/spinlock.c
	 * and must be preserved.  On machines with facility 49 an NIAI
	 * (next-instruction-access-intent) hint is emitted first.
	 */
	asm_inline volatile(
		ALTERNATIVE("", ".long 0xb2fa0070", 49)	/* NIAI 7 */
		"	sth	%1,%0\n"
		: "=Q" (((unsigned short *) &lp->lock)[1])
		: "d" (0) : "cc", "memory");
}

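/*
 * Worked example of the lock word as seen by the inline fast paths above,
 * assuming the lowcore lockval was initialized to arch_spin_lockval(cpu)
 * (the queueing bits in the upper halfword, owned by the slow path, are
 * not shown):
 *
 *	unlocked:                        lp->lock == 0x00000000
 *	arch_spin_lock() on CPU 5:       lp->lock == 0x00000006
 *	arch_spin_is_locked():           returns nonzero
 *	arch_spin_unlock():              stores 0 into the lower halfword,
 *	                                 back to 0x00000000 if nobody queued
 */
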
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
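
/*
 * Illustrative sketch of the mixing described above, at the generic
 * <linux/rwlock.h> layer (hypothetical lock, not part of this header):
 * the writer disables interrupts, readers in process and in interrupt
 * context do not have to.
 *
 *	static DEFINE_RWLOCK(demo_rwlock);
 *
 *	writer (process context):
 *		write_lock_irqsave(&demo_rwlock, flags);
 *		... update the protected data ...
 *		write_unlock_irqrestore(&demo_rwlock, flags);
 *
 *	reader (process or interrupt context):
 *		read_lock(&demo_rwlock);
 *		... read the protected data ...
 *		read_unlock(&demo_rwlock);
 */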

#define arch_read_relax(rw) barrier()
#define arch_write_relax(rw) barrier()

void arch_read_lock_wait(arch_rwlock_t *lp);
void arch_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int old;

	old = __atomic_add(1, &rw->cnts);
	if (old & 0xffff0000)
		arch_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__atomic_add_const_barrier(-1, &rw->cnts);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
		arch_write_lock_wait(rw);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__atomic_add_barrier(-0x30000, &rw->cnts);
}


static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int old;

	old = READ_ONCE(rw->cnts);
	return (!(old & 0xffff0000) &&
		__atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int old;

	old = READ_ONCE(rw->cnts);
	return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
}
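
/*
 * Worked example of the ->cnts encoding used by the fast paths above:
 * the lower 16 bits count readers, any bit set in the upper halfword
 * means a writer owns or is waiting for the lock (the individual writer
 * bits are managed by the slow paths in arch/s390/lib/spinlock.c).
 *
 *	unlocked:                   rw->cnts == 0x00000000
 *	two readers:                rw->cnts == 0x00000002
 *	write-locked (fast path):   rw->cnts == 0x00030000
 *
 * arch_read_trylock() fails whenever the upper halfword is non-zero;
 * arch_write_trylock() succeeds only from the fully unlocked state.
 */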

#endif /* __ASM_SPINLOCK_H */