1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (C) 2008 Oracle. All rights reserved.
4*4882a593Smuzhiyun */
5*4882a593Smuzhiyun
6*4882a593Smuzhiyun #ifndef BTRFS_LOCKING_H
7*4882a593Smuzhiyun #define BTRFS_LOCKING_H
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <linux/atomic.h>
10*4882a593Smuzhiyun #include <linux/wait.h>
11*4882a593Smuzhiyun #include <linux/percpu_counter.h>
12*4882a593Smuzhiyun #include "extent_io.h"
13*4882a593Smuzhiyun
/*
 * Lock-state tokens handed around by the locking helpers; each one names the
 * call needed to release it — see btrfs_tree_unlock_rw() below.
 */
#define BTRFS_WRITE_LOCK 1
#define BTRFS_READ_LOCK 2
#define BTRFS_WRITE_LOCK_BLOCKING 3
#define BTRFS_READ_LOCK_BLOCKING 4
18*4882a593Smuzhiyun
/*
 * We are limited in number of subclasses by MAX_LOCKDEP_SUBCLASSES, which at
 * the time of this patch is 8, which is how many we use.  Keep this in mind if
 * you decide you want to add another subclass.
 */
enum btrfs_lock_nesting {
	BTRFS_NESTING_NORMAL,

	/*
	 * When we COW a block we are holding the lock on the original block,
	 * and since our lockdep maps are rootid+level, this confuses lockdep
	 * when we lock the newly allocated COW'd block.  Handle this by having
	 * a subclass for COW'ed blocks so that lockdep doesn't complain.
	 */
	BTRFS_NESTING_COW,

	/*
	 * Oftentimes we need to lock adjacent nodes on the same level while
	 * still holding the lock on the original node we searched to, such as
	 * for searching forward or for split/balance.
	 *
	 * Because of this we need to indicate to lockdep that this is
	 * acceptable by having a different subclass for each of these
	 * operations.
	 */
	BTRFS_NESTING_LEFT,
	BTRFS_NESTING_RIGHT,

	/*
	 * When splitting we will be holding a lock on the left/right node when
	 * we need to cow that node, thus we need a new set of subclasses for
	 * these two operations.
	 */
	BTRFS_NESTING_LEFT_COW,
	BTRFS_NESTING_RIGHT_COW,

	/*
	 * When splitting we may push nodes to the left or right, but still use
	 * the subsequent nodes in our path, keeping our locks on those adjacent
	 * blocks.  Thus when we go to allocate a new split block we've already
	 * used up all of our available subclasses, so this subclass exists to
	 * handle this case where we need to allocate a new split block.
	 */
	BTRFS_NESTING_SPLIT,

	/*
	 * When promoting a new block to a root we need to have a special
	 * subclass so we don't confuse lockdep, as it will appear that we are
	 * locking a higher level node before a lower level one.  Copying also
	 * has this problem as it appears we're locking the same block again
	 * when we make a snapshot of an existing root.
	 */
	BTRFS_NESTING_NEW_ROOT,

	/*
	 * We are limited to MAX_LOCKDEP_SUBCLASSES number of subclasses, so
	 * add this in here and add a static_assert to keep us from going over
	 * the limit.  As of this writing we're limited to 8, and we're
	 * definitely using 8, hence this check to keep us from messing up in
	 * the future.
	 */
	BTRFS_NESTING_MAX,
};

static_assert(BTRFS_NESTING_MAX <= MAX_LOCKDEP_SUBCLASSES,
	      "too many lock subclasses defined");
85*4882a593Smuzhiyun
struct btrfs_path;

/* Write (exclusive) locking of an extent buffer; __ variant takes a lockdep
 * nesting subclass from enum btrfs_lock_nesting. */
void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
void btrfs_tree_lock(struct extent_buffer *eb);
void btrfs_tree_unlock(struct extent_buffer *eb);

/* Read (shared) locking; __ variant additionally allows lockdep-visible
 * recursion when @recurse is true. */
void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest,
			    bool recurse);
void btrfs_tree_read_lock(struct extent_buffer *eb);
void btrfs_tree_read_unlock(struct extent_buffer *eb);
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
/* Transition an already-held spinning lock into blocking mode. */
void btrfs_set_lock_blocking_read(struct extent_buffer *eb);
void btrfs_set_lock_blocking_write(struct extent_buffer *eb);
/* Non-blocking attempts; NOTE(review): presumably return nonzero on success —
 * confirm against the definitions in locking.c. */
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
int btrfs_try_tree_write_lock(struct extent_buffer *eb);
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
/* Lock and return the current root node of @root. */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
struct extent_buffer *__btrfs_read_lock_root_node(struct btrfs_root *root,
						  bool recurse);
105*4882a593Smuzhiyun
btrfs_read_lock_root_node(struct btrfs_root * root)106*4882a593Smuzhiyun static inline struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
107*4882a593Smuzhiyun {
108*4882a593Smuzhiyun return __btrfs_read_lock_root_node(root, false);
109*4882a593Smuzhiyun }
110*4882a593Smuzhiyun
#ifdef CONFIG_BTRFS_DEBUG
/* Debug build: crash if nobody holds the write lock on @eb. */
static inline void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(eb->write_locks == 0);
}
#else
/* Non-debug build: the assertion compiles away entirely. */
static inline void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
}
#endif
118*4882a593Smuzhiyun
/* Convert every lock held along path @p to blocking mode. */
void btrfs_set_path_blocking(struct btrfs_path *p);
/* Drop the locks held on @path for all levels above @level. */
void btrfs_unlock_up_safe(struct btrfs_path *path, int level);
121*4882a593Smuzhiyun
btrfs_tree_unlock_rw(struct extent_buffer * eb,int rw)122*4882a593Smuzhiyun static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
123*4882a593Smuzhiyun {
124*4882a593Smuzhiyun if (rw == BTRFS_WRITE_LOCK || rw == BTRFS_WRITE_LOCK_BLOCKING)
125*4882a593Smuzhiyun btrfs_tree_unlock(eb);
126*4882a593Smuzhiyun else if (rw == BTRFS_READ_LOCK_BLOCKING)
127*4882a593Smuzhiyun btrfs_tree_read_unlock_blocking(eb);
128*4882a593Smuzhiyun else if (rw == BTRFS_READ_LOCK)
129*4882a593Smuzhiyun btrfs_tree_read_unlock(eb);
130*4882a593Smuzhiyun else
131*4882a593Smuzhiyun BUG();
132*4882a593Smuzhiyun }
133*4882a593Smuzhiyun
/*
 * Double reader/writer exclusion lock ("drew" lock).
 * NOTE(review): semantics inferred from field names — many concurrent readers
 * or many concurrent writers, but never both at once; confirm against the
 * btrfs_drew_* implementations in locking.c.
 */
struct btrfs_drew_lock {
	/* Number of active readers. */
	atomic_t readers;
	/* Per-cpu count of active/pending writers. */
	struct percpu_counter writers;
	/* Writers sleeping here until the readers drain. */
	wait_queue_head_t pending_writers;
	/* Readers sleeping here until the writers drain. */
	wait_queue_head_t pending_readers;
};
140*4882a593Smuzhiyun
/* init can fail (returns nonzero) because the percpu counter allocates. */
int btrfs_drew_lock_init(struct btrfs_drew_lock *lock);
void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock);
void btrfs_drew_write_lock(struct btrfs_drew_lock *lock);
/* Non-blocking write-lock attempt; returns whether the lock was taken. */
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock);
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun #endif
150