/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queue read/write lock
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QRWLOCK_H
#define __ASM_GENERIC_QRWLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#include <asm-generic/qrwlock_types.h>

/*
 * Writer states & reader shift and bias.
 */
#define	_QW_WAITING	0x100		/* A writer is waiting	   */
#define	_QW_LOCKED	0x0ff		/* A writer holds the lock */
#define	_QW_WMASK	0x1ff		/* Writer mask		   */
#define	_QR_SHIFT	9		/* Reader count shift	   */
#define _QR_BIAS	(1U << _QR_SHIFT)

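/*
 * The 32-bit lock word is therefore laid out as follows (a sketch
 * derived from the constants above):
 *
 *	 31            9  8  7             0
 *	+----------------+--+---------------+
 *	|  reader count  |W |    wlocked    |
 *	+----------------+--+---------------+
 *	                  ^        ^
 *	                  |        +-- _QW_LOCKED (0x0ff)
 *	                  +----------- _QW_WAITING (0x100)
 *
 * Readers add/subtract the count in units of _QR_BIAS; any bit set in
 * _QW_WMASK means a writer currently holds or is waiting for the lock.
 */
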
/*
 * External function declarations (slow paths, implemented in
 * kernel/locking/qrwlock.c)
 */
extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);

/**
 * queued_read_trylock - try to acquire read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_read_trylock(struct qrwlock *lock)
{
	u32 cnts;

	cnts = atomic_read(&lock->cnts);
	if (likely(!(cnts & _QW_WMASK))) {
		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
		if (likely(!(cnts & _QW_WMASK)))
			return 1;
		atomic_sub(_QR_BIAS, &lock->cnts);
	}
	return 0;
}
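
/*
 * Note: the read trylock above is speculative. It optimistically adds
 * a reader bias and only then re-checks for a writer; on failure the
 * bias is backed out with a plain (unordered) atomic_sub(), which is
 * sufficient since no critical section was ever entered.
 */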

/**
 * queued_write_trylock - try to acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_write_trylock(struct qrwlock *lock)
{
	u32 cnts;

	cnts = atomic_read(&lock->cnts);
	if (unlikely(cnts))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
				_QW_LOCKED));
}

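/*
 * Note: the write trylock can only succeed from a fully idle lock
 * word: no readers and no waiting or active writer. On failure,
 * atomic_try_cmpxchg_acquire() updates the local cnts with the
 * current lock value, which is simply discarded here.
 */
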
/**
 * queued_read_lock - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
static inline void queued_read_lock(struct qrwlock *lock)
{
	u32 cnts;

	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock);
}

/**
 * queued_write_lock - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_write_lock(struct qrwlock *lock)
{
	u32 cnts = 0;
	/* Optimize for the unfair lock case where the fair flag is 0. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
		return;

	queued_write_lock_slowpath(lock);
}

/**
 * queued_read_unlock - release read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
	/*
	 * Atomically decrement the reader count
	 */
	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}

/**
 * queued_write_unlock - release write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
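	/*
	 * ->wlocked aliases the writer-locked byte of ->cnts in the
	 * qrwlock union (see asm-generic/qrwlock_types.h), so a single
	 * byte store with release semantics clears _QW_LOCKED without
	 * disturbing the reader count or the _QW_WAITING bit.
	 */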
	smp_store_release(&lock->wlocked, 0);
}

/*
 * Remapping rwlock architecture specific functions to the corresponding
 * queue rwlock functions.
 */
#define arch_read_lock(l)	queued_read_lock(l)
#define arch_write_lock(l)	queued_write_lock(l)
#define arch_read_trylock(l)	queued_read_trylock(l)
#define arch_write_trylock(l)	queued_write_trylock(l)
#define arch_read_unlock(l)	queued_read_unlock(l)
#define arch_write_unlock(l)	queued_write_unlock(l)
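
/*
 * Illustrative usage (a sketch, not part of this header's API): on an
 * architecture that selects the queued rwlock, the generic rwlock
 * primitives expand to the functions above. "example_lock" below is a
 * hypothetical lock used only for illustration:
 *
 *	DEFINE_RWLOCK(example_lock);
 *
 *	read_lock(&example_lock);	(-> queued_read_lock)
 *	... read-side critical section ...
 *	read_unlock(&example_lock);	(-> queued_read_unlock)
 *
 *	write_lock(&example_lock);	(-> queued_write_lock)
 *	... write-side critical section ...
 *	write_unlock(&example_lock);	(-> queued_write_unlock)
 */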

#endif /* __ASM_GENERIC_QRWLOCK_H */