// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "misc.h"
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

/*
 * Extent buffer locking
 * =====================
 *
 * The locks use a custom scheme that allows more operations than are
 * available from the current locking primitives. The building blocks are
 * still rwlock and wait queues.
 *
 * Required semantics:
 *
 * - reader/writer exclusion
 * - writer/writer exclusion
 * - reader/reader sharing
 * - spinning lock semantics
 * - blocking lock semantics
 * - try-lock semantics for readers and writers
 * - one level nesting, allowing read lock to be taken by the same thread that
 *   already has write lock
 *
 * The extent buffer locks (also called tree locks) manage access to eb data
 * related to the storage in the b-tree (keys, items, but not the individual
 * members of eb).
 * We want concurrency of many readers and safe updates. The underlying locking
 * is done by a read-write spinlock and the blocking part is implemented using
 * counters and wait queues.
 *
 * spinning semantics - the low-level rwlock is held so all other threads that
 *                      want to take it are spinning on it.
 *
 * blocking semantics - the low-level rwlock is not held but the counter
 *                      denotes how many times the blocking lock was held;
 *                      sleeping is possible
 *
 * Write lock always allows only one thread to access the data.
 *
 *
 * Debugging
 * ---------
 *
 * There are additional state counters that are asserted in various contexts,
 * removed from non-debug builds to reduce extent_buffer size and for
 * performance reasons.
 *
 *
 * Lock recursion
 * --------------
 *
 * A write operation on a tree might indirectly start a lookup on the same
 * tree.  This can happen when btrfs_cow_block locks the tree and needs to
 * look up free extents. A sketch of the nesting calls follows this comment.
 *
 * btrfs_cow_block
 *   ..
 *   alloc_tree_block_no_bg_flush
 *     btrfs_alloc_tree_block
 *       btrfs_reserve_extent
 *         ..
 *         load_free_space_cache
 *           ..
 *           btrfs_lookup_file_extent
 *             btrfs_search_slot
 *
 *
 * Locking pattern - spinning
 * --------------------------
 *
 * The simple locking scenario, the +--+ denotes the spinning section.
 *
 * +- btrfs_tree_lock
 * | - extent_buffer::rwlock is held
 * | - no heavy operations should happen, eg. IO, memory allocations, large
 * |   structure traversals
 * +- btrfs_tree_unlock
 *
 *
 * Locking pattern - blocking
 * --------------------------
 *
 * The blocking write uses the following scheme.  The +--+ denotes the spinning
 * section.
 *
 * +- btrfs_tree_lock
 * |
 * +- btrfs_set_lock_blocking_write
 *
 *   - allowed: IO, memory allocations, etc.
 *
 * -- btrfs_tree_unlock - note, no explicit unblocking necessary
 *
 *
 * Blocking read is similar.
 *
 * +- btrfs_tree_read_lock
 * |
 * +- btrfs_set_lock_blocking_read
 *
 *  - heavy operations allowed
 *
 * +- btrfs_tree_read_unlock_blocking
 * |
 * +- btrfs_tree_read_unlock
 *
 */
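
/*
 * A minimal, illustrative sketch of the nesting described above (not a
 * verbatim caller; 'eb' is assumed to be a valid, referenced extent buffer).
 * The write lock must be set blocking before the same thread may recurse
 * into a read lock:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_write(eb);
 *	// the same thread may now take a recursive read lock
 *	__btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL, true);
 *	// ... read-only access to eb data ...
 *	btrfs_tree_read_unlock(eb);	// only clears lock_recursed here
 *	btrfs_tree_unlock(eb);
 */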

#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers);
	eb->spinning_writers++;
}

static inline void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers != 1);
	eb->spinning_writers--;
}

static inline void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers);
}

static inline void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->spinning_readers);
}

static inline void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
}

static inline void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->read_locks);
}

static inline void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
{
	atomic_dec(&eb->read_locks);
}

static inline void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}

static inline void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
{
	eb->write_locks++;
}

static inline void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
{
	eb->write_locks--;
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
#endif

/*
 * Mark already held read lock as blocking. Can be nested in write lock by the
 * same thread.
 *
 * Use when there are potentially long operations ahead so other threads
 * waiting on the lock will not actively spin but sleep instead.
 *
 * The rwlock is released and the blocking reader counter is increased.
 */
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_read(eb);
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_recursed && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	btrfs_assert_spinning_readers_put(eb);
	read_unlock(&eb->lock);
}
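
/*
 * Illustrative pairing (a sketch, not a verbatim caller), assuming a valid
 * extent buffer 'eb': take the read lock, go blocking around sleeping work,
 * then drop it with the blocking variant of the unlock.
 *
 *	btrfs_tree_read_lock(eb);
 *	btrfs_set_lock_blocking_read(eb);
 *	// ... IO, memory allocations, other sleeping work ...
 *	btrfs_tree_read_unlock_blocking(eb);
 */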

/*
 * Mark already held write lock as blocking.
 *
 * Use when there are potentially long operations ahead so other threads
 * waiting on the lock will not actively spin but sleep instead.
 *
 * The rwlock is released and blocking writers is set.
 */
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_write(eb);
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_recursed && current->pid == eb->lock_owner)
		return;
	if (eb->blocking_writers == 0) {
		btrfs_assert_spinning_writers_put(eb);
		btrfs_assert_tree_locked(eb);
		WRITE_ONCE(eb->blocking_writers, 1);
		write_unlock(&eb->lock);
	}
}

/*
 * Lock the extent buffer for read. Wait for any writers (spinning or blocking).
 * Can be nested in write lock by the same thread.
 *
 * Use when the locked section performs only lightweight actions and busy
 * waiting would be cheaper than making other threads do the wait/wake loop.
 *
 * The rwlock is held upon exit.
 */
void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest,
			    bool recurse)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_read_lock_enabled())
		start_ns = ktime_get_ns();
again:
	read_lock(&eb->lock);
	BUG_ON(eb->blocking_writers == 0 &&
	       current->pid == eb->lock_owner);
	if (eb->blocking_writers) {
		if (current->pid == eb->lock_owner) {
			/*
			 * This extent is already write-locked by our thread.
			 * We allow an additional read lock to be added because
			 * it's for the same thread. btrfs_find_all_roots()
			 * depends on this as it may be called on a partly
			 * (write-)locked tree.
			 */
			WARN_ON(!recurse);
			BUG_ON(eb->lock_recursed);
			eb->lock_recursed = true;
			read_unlock(&eb->lock);
			trace_btrfs_tree_read_lock(eb, start_ns);
			return;
		}
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   READ_ONCE(eb->blocking_writers) == 0);
		goto again;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock(eb, start_ns);
}

void btrfs_tree_read_lock(struct extent_buffer *eb)
{
	__btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL, false);
}

/*
 * Lock extent buffer for read, optimistically expecting that there are no
 * contending blocking writers. If there are, don't wait.
 *
 * Return 1 if the rwlock has been taken, 0 otherwise
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (READ_ONCE(eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	/* Refetch value after lock */
	if (READ_ONCE(eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock_atomic(eb);
	return 1;
}
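
/*
 * Illustrative caller pattern (a sketch, not a verbatim caller), assuming a
 * valid extent buffer 'eb': try the optimistic variant first and fall back
 * to the full read lock, which may wait for blocking writers.
 *
 *	if (!btrfs_tree_read_lock_atomic(eb))
 *		btrfs_tree_read_lock(eb);
 *	// ... lightweight read-only work under the rwlock ...
 *	btrfs_tree_read_unlock(eb);
 */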

/*
 * Try-lock for read. Don't block or wait for contending writers.
 *
 * Return 1 if the rwlock has been taken, 0 otherwise
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (READ_ONCE(eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	/* Refetch value after lock */
	if (READ_ONCE(eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_try_tree_read_lock(eb);
	return 1;
}

/*
 * Try-lock for write. May block until the lock is uncontended, but does not
 * wait until it is free.
 *
 * Return 1 if the rwlock has been taken, 0 otherwise
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	/* Refetch value after lock */
	if (READ_ONCE(eb->blocking_writers) || atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_write_locks_get(eb);
	btrfs_assert_spinning_writers_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_try_tree_write_lock(eb);
	return 1;
}
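
/*
 * Illustrative try-lock pattern (a sketch; the -EAGAIN convention is a
 * hypothetical caller choice, not mandated by this API), assuming a valid
 * extent buffer 'eb':
 *
 *	if (!btrfs_try_tree_write_lock(eb))
 *		return -EAGAIN;	// contended, caller retries later
 *	// ... modify eb data under the write lock ...
 *	btrfs_tree_unlock(eb);
 */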

/*
 * Release read lock. Must be used only if the lock is in spinning mode.  If
 * the read lock is nested, must pair with read lock before the write unlock.
 *
 * The rwlock is not held upon exit.
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock(eb);
	/*
	 * If we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_recursed
	 * field only matters to the lock owner.
	 */
	if (eb->lock_recursed && current->pid == eb->lock_owner) {
		eb->lock_recursed = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	btrfs_assert_spinning_readers_put(eb);
	btrfs_assert_tree_read_locks_put(eb);
	read_unlock(&eb->lock);
}

/*
 * Release read lock, previously set to blocking by a pairing call to
 * btrfs_set_lock_blocking_read(). Can be nested in write lock by the same
 * thread.
 *
 * State of rwlock is unchanged, last reader wakes waiting threads.
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock_blocking(eb);
	/*
	 * If we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_recursed
	 * field only matters to the lock owner.
	 */
	if (eb->lock_recursed && current->pid == eb->lock_owner) {
		eb->lock_recursed = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	btrfs_assert_tree_read_locks_put(eb);
}

/*
 * Lock for write. Wait for all blocking and spinning readers and writers.
 * This starts a context where a read lock can be nested by the same thread.
 *
 * The rwlock is held for write upon exit.
 */
void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
	__acquires(&eb->lock)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_lock_enabled())
		start_ns = ktime_get_ns();

	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, READ_ONCE(eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	/* Refetch value after lock */
	if (atomic_read(&eb->blocking_readers) ||
	    READ_ONCE(eb->blocking_writers)) {
		write_unlock(&eb->lock);
		goto again;
	}
	btrfs_assert_spinning_writers_get(eb);
	btrfs_assert_tree_write_locks_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_tree_lock(eb, start_ns);
}

void btrfs_tree_lock(struct extent_buffer *eb)
{
	__btrfs_tree_lock(eb, BTRFS_NESTING_NORMAL);
}

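/*
 * Illustrative spinning-write pattern (a sketch, not a verbatim caller),
 * assuming a valid extent buffer 'eb'; only lightweight work belongs between
 * the calls because the rwlock stays held throughout:
 *
 *	btrfs_tree_lock(eb);
 *	// ... short, non-sleeping updates to eb data ...
 *	btrfs_tree_unlock(eb);
 */
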
/*
 * Release the write lock, either blocking or spinning (ie. there's no need
 * for an explicit blocking unlock, like btrfs_tree_read_unlock_blocking).
 * This also ends the context for nesting, the read lock must have been
 * released already.
 *
 * Tasks blocked and waiting are woken, rwlock is not held upon exit.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	/*
	 * This is read both locked and unlocked but always by the same thread
	 * that already owns the lock so we don't need to use READ_ONCE
	 */
	int blockers = eb->blocking_writers;

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	trace_btrfs_tree_unlock(eb);
	eb->lock_owner = 0;
	btrfs_assert_tree_write_locks_put(eb);

	if (blockers) {
		btrfs_assert_no_spinning_writers(eb);
		/* Unlocked write */
		WRITE_ONCE(eb->blocking_writers, 0);
		/*
		 * We need to order modifying blocking_writers above with
		 * actually waking up the sleepers to ensure they see the
		 * updated value of blocking_writers
		 */
		cond_wake_up(&eb->write_lock_wq);
	} else {
		btrfs_assert_spinning_writers_put(eb);
		write_unlock(&eb->lock);
	}
}

/*
 * Set all locked nodes in the path to blocking locks.  This should be done
 * before scheduling.
 */
void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		/*
		 * If we currently have a spinning reader or writer lock this
		 * will bump the count of blocking holders and drop the
		 * spinlock.
		 */
		if (p->locks[i] == BTRFS_READ_LOCK) {
			btrfs_set_lock_blocking_read(p->nodes[i]);
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		} else if (p->locks[i] == BTRFS_WRITE_LOCK) {
			btrfs_set_lock_blocking_write(p->nodes[i]);
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
		}
	}
}
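
/*
 * Illustrative sketch (not a verbatim caller), assuming 'path' is a
 * btrfs_path holding spinning locks from a search: convert them to blocking
 * locks before work that may sleep; the locks are dropped later, eg. by
 * btrfs_unlock_up_safe() below or by releasing the path.
 *
 *	btrfs_set_path_blocking(path);
 *	// ... memory allocation, IO, other sleeping work ...
 */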

/*
 * This releases any locks held in the path starting at level and going all the
 * way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few corner
 * cases, such as COW of the block at slot zero in the node.  This ignores
 * those rules, and it should only be called when there are no more updates to
 * be done higher up in the tree.
 */
void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}

/*
 * Loop around taking references on and locking the root node of the tree until
 * we end up with a lock on the root node.
 *
 * Return: root extent buffer with write lock held
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/*
 * Loop around taking references on and locking the root node of the tree until
 * we end up with a lock on the root node.
 *
 * Return: root extent buffer with read lock held
 */
struct extent_buffer *__btrfs_read_lock_root_node(struct btrfs_root *root,
						  bool recurse)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		__btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL, recurse);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/*
 * DREW locks
 * ==========
 *
 * DREW stands for double-reader-writer-exclusion lock. It's used in
 * situations where you want to provide A-B exclusion but not AA or BB.
 *
 * The current implementation gives more priority to readers. If a reader
 * and a writer both race to acquire their respective sides of the lock, the
 * writer yields its lock as soon as it detects a concurrent reader.
 * Additionally, if there are pending readers, no new writers are allowed to
 * come in and acquire the lock.
 */

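/*
 * Illustrative A-B exclusion sketch (not a verbatim caller), assuming a
 * btrfs_drew_lock 'lock' set up with btrfs_drew_lock_init(). Several writers
 * (or several readers) may hold their side concurrently, but the two sides
 * exclude each other.
 *
 *	// A side, shared among writers
 *	btrfs_drew_write_lock(&lock);
 *	// ... A-type work ...
 *	btrfs_drew_write_unlock(&lock);
 *
 *	// B side, shared among readers, excluded from A
 *	btrfs_drew_read_lock(&lock);
 *	// ... B-type work ...
 *	btrfs_drew_read_unlock(&lock);
 */
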
int btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
{
	int ret;

	ret = percpu_counter_init(&lock->writers, 0, GFP_KERNEL);
	if (ret)
		return ret;

	atomic_set(&lock->readers, 0);
	init_waitqueue_head(&lock->pending_readers);
	init_waitqueue_head(&lock->pending_writers);

	return 0;
}

void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock)
{
	percpu_counter_destroy(&lock->writers);
}

/* Return true if acquisition is successful, false otherwise */
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
{
	if (atomic_read(&lock->readers))
		return false;

	percpu_counter_inc(&lock->writers);

	/* Ensure writers count is updated before we check for pending readers */
	smp_mb();
	if (atomic_read(&lock->readers)) {
		btrfs_drew_write_unlock(lock);
		return false;
	}

	return true;
}

void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
{
	while (true) {
		if (btrfs_drew_try_write_lock(lock))
			return;
		wait_event(lock->pending_writers, !atomic_read(&lock->readers));
	}
}

void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
{
	percpu_counter_dec(&lock->writers);
	cond_wake_up(&lock->pending_readers);
}

void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
{
	atomic_inc(&lock->readers);

	/*
	 * Ensure the pending reader count is perceived BEFORE this reader
	 * goes to sleep in case of active writers. This guarantees new writers
	 * won't be allowed and that the current reader will be woken up when
	 * the last active writer finishes its job.
	 */
	smp_mb__after_atomic();

	wait_event(lock->pending_readers,
		   percpu_counter_sum(&lock->writers) == 0);
}

void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock)
{
	/*
	 * atomic_dec_and_test implies a full barrier, so woken up writers
	 * are guaranteed to see the decrement
	 */
	if (atomic_dec_and_test(&lock->readers))
		wake_up(&lock->pending_writers);
}