// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
#include "extent-io-tree.h"
#include "extent_map.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "check-integrity.h"
#include "locking.h"
#include "rcu-string.h"
#include "backref.h"
#include "disk-io.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
static struct bio_set btrfs_bioset;

static inline bool extent_state_in_tree(const struct extent_state *state)
{
	return !RB_EMPTY_NODE(&state->rb_node);
}

#ifdef CONFIG_BTRFS_DEBUG
static LIST_HEAD(states);
static DEFINE_SPINLOCK(leak_lock);

static inline void btrfs_leak_debug_add(spinlock_t *lock,
					struct list_head *new,
					struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add(new, head);
	spin_unlock_irqrestore(lock, flags);
}

static inline void btrfs_leak_debug_del(spinlock_t *lock,
					struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del(entry);
	spin_unlock_irqrestore(lock, flags);
}

void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
{
	struct extent_buffer *eb;
	unsigned long flags;

	/*
	 * If we didn't get into open_ctree our allocated_ebs will not be
	 * initialized, so just skip this.
	 */
	if (!fs_info->allocated_ebs.next)
		return;

	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
	while (!list_empty(&fs_info->allocated_ebs)) {
		eb = list_first_entry(&fs_info->allocated_ebs,
				      struct extent_buffer, leak_list);
		pr_err(
	"BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
		       btrfs_header_owner(eb));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}

static inline void btrfs_extent_state_leak_debug_check(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
		       state->start, state->end, state->state,
		       extent_state_in_tree(state),
		       refcount_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}
}

#define btrfs_debug_check_extent_io_range(tree, start, end)	\
	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
static inline void __btrfs_debug_check_extent_io_range(const char *caller,
		struct extent_io_tree *tree, u64 start, u64 end)
{
	struct inode *inode = tree->private_data;
	u64 isize;

	if (!inode || !is_data_inode(inode))
		return;

	isize = i_size_read(inode);
	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
		btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
		    "%s: ino %llu isize %llu odd range [%llu,%llu]",
			caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
	}
}
#else
#define btrfs_leak_debug_add(lock, new, head)	do {} while (0)
#define btrfs_leak_debug_del(lock, entry)	do {} while (0)
#define btrfs_extent_state_leak_debug_check()	do {} while (0)
#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
#endif

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use REQ_SYNC */
	unsigned int sync_io:1;
};

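/*
 * Record the range covered by @state in @changeset when the requested bits
 * actually change.  Returns 0 if there is nothing to record, otherwise the
 * result of ulist_add() (which can fail with -ENOMEM since it is called with
 * GFP_ATOMIC).
 */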
static int add_extent_changeset(struct extent_state *state, unsigned bits,
				struct extent_changeset *changeset,
				int set)
{
	int ret;

	if (!changeset)
		return 0;
	if (set && (state->state & bits) == bits)
		return 0;
	if (!set && (state->state & bits) == 0)
		return 0;
	changeset->bytes_changed += state->end - state->start + 1;
	ret = ulist_add(&changeset->range_changed, state->start, state->end,
			GFP_ATOMIC);
	return ret;
}

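/*
 * Submit a built bio to the data or metadata submission path depending on
 * the inode that owns the io tree stored in bio->bi_private.  The tree
 * pointer is consumed (bi_private is cleared) before submission.
 */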
int __must_check submit_one_bio(struct bio *bio, int mirror_num,
				unsigned long bio_flags)
{
	blk_status_t ret = 0;
	struct extent_io_tree *tree = bio->bi_private;

	bio->bi_private = NULL;

	if (is_data_inode(tree->private_data))
		ret = btrfs_submit_data_bio(tree->private_data, bio, mirror_num,
					    bio_flags);
	else
		ret = btrfs_submit_metadata_bio(tree->private_data, bio,
						mirror_num, bio_flags);

	return blk_status_to_errno(ret);
}

/* Cleanup unsubmitted bios */
static void end_write_bio(struct extent_page_data *epd, int ret)
{
	if (epd->bio) {
		epd->bio->bi_status = errno_to_blk_status(ret);
		bio_endio(epd->bio);
		epd->bio = NULL;
	}
}

/*
 * Submit bio from extent page data via submit_one_bio
 *
 * Return 0 if everything is OK.
 * Return <0 for error.
 */
static int __must_check flush_write_bio(struct extent_page_data *epd)
{
	int ret = 0;

	if (epd->bio) {
		ret = submit_one_bio(epd->bio, 0, 0);
		/*
		 * Clean up of epd->bio is handled by its endio function.
		 * And endio is either triggered by successful bio execution
		 * or the error handler of submit bio hook.
		 * So at this point, no matter what happened, we don't need
		 * to clean up epd->bio.
		 */
		epd->bio = NULL;
	}
	return ret;
}

int __init extent_state_cache_init(void)
{
	extent_state_cache = kmem_cache_create("btrfs_extent_state",
			sizeof(struct extent_state), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;
	return 0;
}

int __init extent_io_init(void)
{
	extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
			sizeof(struct extent_buffer), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		return -ENOMEM;

	if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE,
			offsetof(struct btrfs_io_bio, bio),
			BIOSET_NEED_BVECS))
		goto free_buffer_cache;

	if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE))
		goto free_bioset;

	return 0;

free_bioset:
	bioset_exit(&btrfs_bioset);

free_buffer_cache:
	kmem_cache_destroy(extent_buffer_cache);
	extent_buffer_cache = NULL;
	return -ENOMEM;
}

void __cold extent_state_cache_exit(void)
{
	btrfs_extent_state_leak_debug_check();
	kmem_cache_destroy(extent_state_cache);
}

void __cold extent_io_exit(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(extent_buffer_cache);
	bioset_exit(&btrfs_bioset);
}

/*
 * For the file_extent_tree, we want to hold the inode lock when we lookup and
 * update the disk_i_size, but lockdep will complain because for the io_tree
 * we hold the tree lock and then take the inode lock when setting delalloc.
 * These two things are unrelated, so make a separate lockdep class for the
 * file_extent_tree so we don't get the two locking patterns mixed up.
 */
static struct lock_class_key file_extent_tree_class;

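/* Initialize an io tree: empty rbtree, its lock, owner and private data. */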
void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data)
{
	tree->fs_info = fs_info;
	tree->state = RB_ROOT;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	tree->private_data = private_data;
	tree->owner = owner;
	if (owner == IO_TREE_INODE_FILE_EXTENT)
		lockdep_set_class(&tree->lock, &file_extent_tree_class);
}

void extent_io_tree_release(struct extent_io_tree *tree)
{
	spin_lock(&tree->lock);
	/*
	 * Do a single barrier for the waitqueue_active check here, the state
	 * of the waitqueue should not change once extent_io_tree_release is
	 * called.
	 */
	smp_mb();
	while (!RB_EMPTY_ROOT(&tree->state)) {
		struct rb_node *node;
		struct extent_state *state;

		node = rb_first(&tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		rb_erase(&state->rb_node, &tree->state);
		RB_CLEAR_NODE(&state->rb_node);
		/*
		 * btree io trees aren't supposed to have tasks waiting for
		 * changes in the flags of extent states ever.
		 */
		ASSERT(!waitqueue_active(&state->wq));
		free_extent_state(state);

		cond_resched_lock(&tree->lock);
	}
	spin_unlock(&tree->lock);
}

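/*
 * Allocate and initialize an extent_state with a single reference.  GFP bits
 * not supported by the slab allocator are masked off from @mask.
 */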
static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;

	/*
	 * The given mask might be not appropriate for the slab allocator,
	 * drop the unsupported bits
	 */
	mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->failrec = NULL;
	RB_CLEAR_NODE(&state->rb_node);
	btrfs_leak_debug_add(&leak_lock, &state->leak_list, &states);
	refcount_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	trace_alloc_extent_state(state, mask, _RET_IP_);
	return state;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (refcount_dec_and_test(&state->refs)) {
		WARN_ON(extent_state_in_tree(state));
		btrfs_leak_debug_del(&leak_lock, &state->leak_list);
		trace_free_extent_state(state, _RET_IP_);
		kmem_cache_free(extent_state_cache, state);
	}
}

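/*
 * Link @node into @root, starting the descent at @search_start (or at the
 * root when it is NULL), or directly at the precomputed slot @p_in/@parent_in.
 * Returns NULL on success, or the existing node whose range already covers
 * @offset, in which case nothing is inserted.
 */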
static struct rb_node *tree_insert(struct rb_root *root,
				   struct rb_node *search_start,
				   u64 offset,
				   struct rb_node *node,
				   struct rb_node ***p_in,
				   struct rb_node **parent_in)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	if (p_in && parent_in) {
		p = *p_in;
		parent = *parent_in;
		goto do_insert;
	}

	p = search_start ? &search_start : &root->rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

do_insert:
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/**
 * __etree_search - search @tree for an entry that contains @offset. Such an
 * entry would have entry->start <= offset && entry->end >= offset.
 *
 * @tree - the tree to search
 * @offset - offset that should fall within an entry in @tree
 * @next_ret - pointer to the first entry whose range ends after @offset
 * @prev_ret - pointer to the first entry whose range begins before @offset
 * @p_ret - pointer where new node should be anchored (used when inserting an
 *	    entry in the tree)
 * @parent_ret - points to entry which would have been the parent of the entry,
 *		 containing @offset
 *
 * This function returns a pointer to the entry that contains @offset byte
 * address. If no such entry exists, then NULL is returned and the other
 * pointer arguments to the function are filled, otherwise the found entry is
 * returned and other pointers are left untouched.
 */
static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **next_ret,
				      struct rb_node **prev_ret,
				      struct rb_node ***p_ret,
				      struct rb_node **parent_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node **n = &root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (*n) {
		prev = *n;
		entry = rb_entry(prev, struct tree_entry, rb_node);
		prev_entry = entry;

		if (offset < entry->start)
			n = &(*n)->rb_left;
		else if (offset > entry->end)
			n = &(*n)->rb_right;
		else
			return *n;
	}

	if (p_ret)
		*p_ret = n;
	if (parent_ret)
		*parent_ret = prev;

	if (next_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
		prev = orig_prev;
	}

	if (prev_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
	}
	return NULL;
}

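/*
 * Like __etree_search(), but when no entry contains @offset the next entry
 * (the first one that ends after @offset) is returned instead of NULL.
 */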
static inline struct rb_node *
tree_search_for_insert(struct extent_io_tree *tree,
		       u64 offset,
		       struct rb_node ***p_ret,
		       struct rb_node **parent_ret)
{
	struct rb_node *next = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret);
	if (!ret)
		return next;
	return ret;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	return tree_search_for_insert(tree, offset, NULL, NULL);
}

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_LOCKED or EXTENT_BOUNDARY in
 * their state field are not merged because the end_io handlers need to be
 * able to do operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
			struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
		return;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			if (tree->private_data &&
			    is_data_inode(tree->private_data))
				btrfs_merge_delalloc_extent(tree->private_data,
							    state, other);
			state->start = other->start;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			if (tree->private_data &&
			    is_data_inode(tree->private_data))
				btrfs_merge_delalloc_extent(tree->private_data,
							    state, other);
			state->end = other->end;
			rb_erase(&other->rb_node, &tree->state);
			RB_CLEAR_NODE(&other->rb_node);
			free_extent_state(other);
		}
	}
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state, unsigned *bits,
			   struct extent_changeset *changeset);

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			struct rb_node ***p,
			struct rb_node **parent,
			unsigned *bits, struct extent_changeset *changeset)
{
	struct rb_node *node;

	if (end < start) {
		btrfs_err(tree->fs_info,
			"insert state: end < start %llu %llu", end, start);
		WARN_ON(1);
	}
	state->start = start;
	state->end = end;

	set_state_bits(tree, state, bits, changeset);

	node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		btrfs_err(tree->fs_info,
		       "found node %llu %llu on insert of %llu %llu",
		       found->start, found->end, start, end);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].  After
 * calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	if (tree->private_data && is_data_inode(tree->private_data))
		btrfs_split_delalloc_extent(tree->private_data, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
			   &prealloc->rb_node, NULL, NULL);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

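/* Return the in-tree successor of @state, or NULL if it is the last entry. */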
static struct extent_state *next_state(struct extent_state *state)
{
	struct rb_node *next = rb_next(&state->rb_node);
	if (next)
		return rb_entry(next, struct extent_state, rb_node);
	else
		return NULL;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
					    struct extent_state *state,
					    unsigned *bits, int wake,
					    struct extent_changeset *changeset)
{
	struct extent_state *next;
	unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;
	int ret;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}

	if (tree->private_data && is_data_inode(tree->private_data))
		btrfs_clear_delalloc_extent(tree->private_data, state, bits);

	ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
	BUG_ON(ret < 0);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		next = next_state(state);
		if (extent_state_in_tree(state)) {
			rb_erase(&state->rb_node, &tree->state);
			RB_CLEAR_NODE(&state->rb_node);
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
		next = next_state(state);
	}
	return next;
}

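/*
 * Return @prealloc if the caller already has one, otherwise try a GFP_ATOMIC
 * allocation.  May return NULL under memory pressure.
 */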
static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
	btrfs_panic(tree->fs_info, err,
	"locking error: extent tree was modified by another thread while locked");
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns 0 on success and < 0 on error.
 */
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       unsigned bits, int wake, int delete,
		       struct extent_state **cached_state,
		       gfp_t mask, struct extent_changeset *changeset)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	u64 last_end;
	int err;
	int clear = 0;

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);

	if (bits & EXTENT_DELALLOC)
		bits |= EXTENT_NORESERVE;

	if (delete)
		bits |= ~EXTENT_CTLBITS;

	if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
		/*
		 * Don't care for allocation failure here because we might end
		 * up not needing the pre-allocated extent state at all, which
		 * is the case if we only have in the tree extent states that
		 * cover our input range and don't cover any other range.
		 * If we end up needing a new extent state we allocate it later.
		 */
		prealloc = alloc_extent_state(mask);
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && extent_state_in_tree(cached) &&
		    cached->start <= start && cached->end > start) {
			if (clear)
				refcount_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/* the state doesn't have the wanted bits, go ahead */
	if (!(state->state & bits)) {
		state = next_state(state);
		goto next;
	}

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state = clear_state_bit(tree, state, &bits, wake,
						changeset);
			goto next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		if (wake)
			wake_up(&state->wq);

		clear_state_bit(tree, prealloc, &bits, wake, changeset);

		prealloc = NULL;
		goto out;
	}

	state = clear_state_bit(tree, state, &bits, wake, changeset);
next:
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && state && !need_resched())
		goto hit_next;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return 0;

}

static void wait_on_state(struct extent_io_tree *tree,
			  struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			    unsigned long bits)
{
	struct extent_state *state;
	struct rb_node *node;

	btrfs_debug_check_extent_io_range(tree, start, end);

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
process_node:
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			refcount_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (!cond_resched_lock(&tree->lock)) {
			node = rb_next(node);
			goto process_node;
		}
	}
out:
	spin_unlock(&tree->lock);
}

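/*
 * Set bits on a single extent_state: update delalloc accounting for data
 * inodes, track dirty_bytes and record the change in @changeset.
 */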
static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   unsigned *bits, struct extent_changeset *changeset)
{
	unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
	int ret;

	if (tree->private_data && is_data_inode(tree->private_data))
		btrfs_set_delalloc_extent(tree->private_data, state, bits);

	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	ret = add_extent_changeset(state, bits_to_set, changeset, 1);
	BUG_ON(ret < 0);
	state->state |= bits_to_set;
}

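/*
 * Stash @state in *@cached_ptr (taking an extra reference) if nothing is
 * cached yet and the state carries one of @flags (or @flags is 0).
 * cache_state() only caches EXTENT_LOCKED/EXTENT_BOUNDARY states.
 */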
static void cache_state_if_flags(struct extent_state *state,
				 struct extent_state **cached_ptr,
				 unsigned flags)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (!flags || (state->state & flags)) {
			*cached_ptr = state;
			refcount_inc(&state->refs);
		}
	}
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	return cache_state_if_flags(state, cached_ptr,
				    EXTENT_LOCKED | EXTENT_BOUNDARY);
}

/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */

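/*
 * Illustrative call (not taken from this file): the range-locking helpers
 * pass EXTENT_LOCKED as both the bits to set and the exclusive bits, so an
 * already locked sub-range fails with -EEXIST and reports where it starts:
 *
 *	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
 *			       &failed_start, cached_state, GFP_NOFS, NULL);
 */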
static int __must_check
__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		 unsigned bits, unsigned exclusive_bits,
		 u64 *failed_start, struct extent_state **cached_state,
		 gfp_t mask, struct extent_changeset *changeset)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);

again:
	if (!prealloc && gfpflags_allow_blocking(mask)) {
		/*
		 * Don't care for allocation failure here because we might end
		 * up not needing the pre-allocated extent state at all, which
		 * is the case if we only have in the tree extent states that
		 * cover our input range and don't cover any other range.
		 * If we end up needing a new extent state we allocate it later.
		 */
		prealloc = alloc_extent_state(mask);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state)) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search_for_insert(tree, start, &p, &parent);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end,
				   &p, &parent, &bits, changeset);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, &bits, changeset);
		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		state = next_state(state);
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		/*
		 * If this extent already has all the bits we want set, then
		 * skip it, not necessary to split it or do anything with it.
		 */
		if ((state->state & bits) == bits) {
			start = state->end + 1;
			cache_state(state, cached_state);
			goto search_again;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits, changeset);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			state = next_state(state);
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid to free 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   NULL, NULL, &bits, changeset);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, &bits, changeset);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

}

1180*4882a593Smuzhiyun int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1181*4882a593Smuzhiyun unsigned bits, u64 * failed_start,
1182*4882a593Smuzhiyun struct extent_state **cached_state, gfp_t mask)
1183*4882a593Smuzhiyun {
1184*4882a593Smuzhiyun return __set_extent_bit(tree, start, end, bits, 0, failed_start,
1185*4882a593Smuzhiyun cached_state, mask, NULL);
1186*4882a593Smuzhiyun }
1187*4882a593Smuzhiyun
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun /**
1190*4882a593Smuzhiyun * convert_extent_bit - convert all bits in a given range from one bit to
1191*4882a593Smuzhiyun * another
1192*4882a593Smuzhiyun * @tree: the io tree to search
1193*4882a593Smuzhiyun * @start: the start offset in bytes
1194*4882a593Smuzhiyun * @end: the end offset in bytes (inclusive)
1195*4882a593Smuzhiyun * @bits: the bits to set in this range
1196*4882a593Smuzhiyun * @clear_bits: the bits to clear in this range
1197*4882a593Smuzhiyun * @cached_state: state that we're going to cache
1198*4882a593Smuzhiyun *
1199*4882a593Smuzhiyun * This will go through and set bits for the given range. If any states exist
1200*4882a593Smuzhiyun * already in this range they are set with the given bit and cleared of the
1201*4882a593Smuzhiyun * clear_bits. This is only meant to be used by things that are mergeable, i.e.
1202*4882a593Smuzhiyun * converting from say DELALLOC to DIRTY. This is not meant to be used with
1203*4882a593Smuzhiyun * boundary bits like LOCK.
1204*4882a593Smuzhiyun *
1205*4882a593Smuzhiyun * All allocations are done with GFP_NOFS.
1206*4882a593Smuzhiyun */
1207*4882a593Smuzhiyun int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1208*4882a593Smuzhiyun unsigned bits, unsigned clear_bits,
1209*4882a593Smuzhiyun struct extent_state **cached_state)
1210*4882a593Smuzhiyun {
1211*4882a593Smuzhiyun struct extent_state *state;
1212*4882a593Smuzhiyun struct extent_state *prealloc = NULL;
1213*4882a593Smuzhiyun struct rb_node *node;
1214*4882a593Smuzhiyun struct rb_node **p;
1215*4882a593Smuzhiyun struct rb_node *parent;
1216*4882a593Smuzhiyun int err = 0;
1217*4882a593Smuzhiyun u64 last_start;
1218*4882a593Smuzhiyun u64 last_end;
1219*4882a593Smuzhiyun bool first_iteration = true;
1220*4882a593Smuzhiyun
1221*4882a593Smuzhiyun btrfs_debug_check_extent_io_range(tree, start, end);
1222*4882a593Smuzhiyun trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
1223*4882a593Smuzhiyun clear_bits);
1224*4882a593Smuzhiyun
1225*4882a593Smuzhiyun again:
1226*4882a593Smuzhiyun if (!prealloc) {
1227*4882a593Smuzhiyun /*
1228*4882a593Smuzhiyun * Best effort, don't worry if extent state allocation fails
1229*4882a593Smuzhiyun * here for the first iteration. We might have a cached state
1230*4882a593Smuzhiyun * that matches exactly the target range, in which case no
1231*4882a593Smuzhiyun * extent state allocations are needed. We'll only know this
1232*4882a593Smuzhiyun * after locking the tree.
1233*4882a593Smuzhiyun */
1234*4882a593Smuzhiyun prealloc = alloc_extent_state(GFP_NOFS);
1235*4882a593Smuzhiyun if (!prealloc && !first_iteration)
1236*4882a593Smuzhiyun return -ENOMEM;
1237*4882a593Smuzhiyun }
1238*4882a593Smuzhiyun
1239*4882a593Smuzhiyun spin_lock(&tree->lock);
1240*4882a593Smuzhiyun if (cached_state && *cached_state) {
1241*4882a593Smuzhiyun state = *cached_state;
1242*4882a593Smuzhiyun if (state->start <= start && state->end > start &&
1243*4882a593Smuzhiyun extent_state_in_tree(state)) {
1244*4882a593Smuzhiyun node = &state->rb_node;
1245*4882a593Smuzhiyun goto hit_next;
1246*4882a593Smuzhiyun }
1247*4882a593Smuzhiyun }
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun /*
1250*4882a593Smuzhiyun * this search will find all the extents that end after
1251*4882a593Smuzhiyun * our range starts.
1252*4882a593Smuzhiyun */
1253*4882a593Smuzhiyun node = tree_search_for_insert(tree, start, &p, &parent);
1254*4882a593Smuzhiyun if (!node) {
1255*4882a593Smuzhiyun prealloc = alloc_extent_state_atomic(prealloc);
1256*4882a593Smuzhiyun if (!prealloc) {
1257*4882a593Smuzhiyun err = -ENOMEM;
1258*4882a593Smuzhiyun goto out;
1259*4882a593Smuzhiyun }
1260*4882a593Smuzhiyun err = insert_state(tree, prealloc, start, end,
1261*4882a593Smuzhiyun &p, &parent, &bits, NULL);
1262*4882a593Smuzhiyun if (err)
1263*4882a593Smuzhiyun extent_io_tree_panic(tree, err);
1264*4882a593Smuzhiyun cache_state(prealloc, cached_state);
1265*4882a593Smuzhiyun prealloc = NULL;
1266*4882a593Smuzhiyun goto out;
1267*4882a593Smuzhiyun }
1268*4882a593Smuzhiyun state = rb_entry(node, struct extent_state, rb_node);
1269*4882a593Smuzhiyun hit_next:
1270*4882a593Smuzhiyun last_start = state->start;
1271*4882a593Smuzhiyun last_end = state->end;
1272*4882a593Smuzhiyun
1273*4882a593Smuzhiyun /*
1274*4882a593Smuzhiyun * | ---- desired range ---- |
1275*4882a593Smuzhiyun * | state |
1276*4882a593Smuzhiyun *
1277*4882a593Smuzhiyun * Just lock what we found and keep going
1278*4882a593Smuzhiyun */
1279*4882a593Smuzhiyun if (state->start == start && state->end <= end) {
1280*4882a593Smuzhiyun set_state_bits(tree, state, &bits, NULL);
1281*4882a593Smuzhiyun cache_state(state, cached_state);
1282*4882a593Smuzhiyun state = clear_state_bit(tree, state, &clear_bits, 0, NULL);
1283*4882a593Smuzhiyun if (last_end == (u64)-1)
1284*4882a593Smuzhiyun goto out;
1285*4882a593Smuzhiyun start = last_end + 1;
1286*4882a593Smuzhiyun if (start < end && state && state->start == start &&
1287*4882a593Smuzhiyun !need_resched())
1288*4882a593Smuzhiyun goto hit_next;
1289*4882a593Smuzhiyun goto search_again;
1290*4882a593Smuzhiyun }
1291*4882a593Smuzhiyun
1292*4882a593Smuzhiyun /*
1293*4882a593Smuzhiyun * | ---- desired range ---- |
1294*4882a593Smuzhiyun * | state |
1295*4882a593Smuzhiyun * or
1296*4882a593Smuzhiyun * | ------------- state -------------- |
1297*4882a593Smuzhiyun *
1298*4882a593Smuzhiyun * We need to split the extent we found, and may flip bits on
1299*4882a593Smuzhiyun * second half.
1300*4882a593Smuzhiyun *
1301*4882a593Smuzhiyun * If the extent we found extends past our
1302*4882a593Smuzhiyun * range, we just split and search again. It'll get split
1303*4882a593Smuzhiyun * again the next time though.
1304*4882a593Smuzhiyun *
1305*4882a593Smuzhiyun * If the extent we found is inside our range, we set the
1306*4882a593Smuzhiyun * desired bit on it.
1307*4882a593Smuzhiyun */
1308*4882a593Smuzhiyun if (state->start < start) {
1309*4882a593Smuzhiyun prealloc = alloc_extent_state_atomic(prealloc);
1310*4882a593Smuzhiyun if (!prealloc) {
1311*4882a593Smuzhiyun err = -ENOMEM;
1312*4882a593Smuzhiyun goto out;
1313*4882a593Smuzhiyun }
1314*4882a593Smuzhiyun err = split_state(tree, state, prealloc, start);
1315*4882a593Smuzhiyun if (err)
1316*4882a593Smuzhiyun extent_io_tree_panic(tree, err);
1317*4882a593Smuzhiyun prealloc = NULL;
1318*4882a593Smuzhiyun if (err)
1319*4882a593Smuzhiyun goto out;
1320*4882a593Smuzhiyun if (state->end <= end) {
1321*4882a593Smuzhiyun set_state_bits(tree, state, &bits, NULL);
1322*4882a593Smuzhiyun cache_state(state, cached_state);
1323*4882a593Smuzhiyun state = clear_state_bit(tree, state, &clear_bits, 0,
1324*4882a593Smuzhiyun NULL);
1325*4882a593Smuzhiyun if (last_end == (u64)-1)
1326*4882a593Smuzhiyun goto out;
1327*4882a593Smuzhiyun start = last_end + 1;
1328*4882a593Smuzhiyun if (start < end && state && state->start == start &&
1329*4882a593Smuzhiyun !need_resched())
1330*4882a593Smuzhiyun goto hit_next;
1331*4882a593Smuzhiyun }
1332*4882a593Smuzhiyun goto search_again;
1333*4882a593Smuzhiyun }
1334*4882a593Smuzhiyun /*
1335*4882a593Smuzhiyun * | ---- desired range ---- |
1336*4882a593Smuzhiyun * | state | or | state |
1337*4882a593Smuzhiyun *
1338*4882a593Smuzhiyun * There's a hole, we need to insert something in it and
1339*4882a593Smuzhiyun * ignore the extent we found.
1340*4882a593Smuzhiyun */
1341*4882a593Smuzhiyun if (state->start > start) {
1342*4882a593Smuzhiyun u64 this_end;
1343*4882a593Smuzhiyun if (end < last_start)
1344*4882a593Smuzhiyun this_end = end;
1345*4882a593Smuzhiyun else
1346*4882a593Smuzhiyun this_end = last_start - 1;
1347*4882a593Smuzhiyun
1348*4882a593Smuzhiyun prealloc = alloc_extent_state_atomic(prealloc);
1349*4882a593Smuzhiyun if (!prealloc) {
1350*4882a593Smuzhiyun err = -ENOMEM;
1351*4882a593Smuzhiyun goto out;
1352*4882a593Smuzhiyun }
1353*4882a593Smuzhiyun
1354*4882a593Smuzhiyun /*
1355*4882a593Smuzhiyun * Avoid freeing 'prealloc' if it can be merged with
1356*4882a593Smuzhiyun * the next extent.
1357*4882a593Smuzhiyun */
1358*4882a593Smuzhiyun err = insert_state(tree, prealloc, start, this_end,
1359*4882a593Smuzhiyun NULL, NULL, &bits, NULL);
1360*4882a593Smuzhiyun if (err)
1361*4882a593Smuzhiyun extent_io_tree_panic(tree, err);
1362*4882a593Smuzhiyun cache_state(prealloc, cached_state);
1363*4882a593Smuzhiyun prealloc = NULL;
1364*4882a593Smuzhiyun start = this_end + 1;
1365*4882a593Smuzhiyun goto search_again;
1366*4882a593Smuzhiyun }
1367*4882a593Smuzhiyun /*
1368*4882a593Smuzhiyun * | ---- desired range ---- |
1369*4882a593Smuzhiyun * | state |
1370*4882a593Smuzhiyun * We need to split the extent, and set the bit
1371*4882a593Smuzhiyun * on the first half
1372*4882a593Smuzhiyun */
1373*4882a593Smuzhiyun if (state->start <= end && state->end > end) {
1374*4882a593Smuzhiyun prealloc = alloc_extent_state_atomic(prealloc);
1375*4882a593Smuzhiyun if (!prealloc) {
1376*4882a593Smuzhiyun err = -ENOMEM;
1377*4882a593Smuzhiyun goto out;
1378*4882a593Smuzhiyun }
1379*4882a593Smuzhiyun
1380*4882a593Smuzhiyun err = split_state(tree, state, prealloc, end + 1);
1381*4882a593Smuzhiyun if (err)
1382*4882a593Smuzhiyun extent_io_tree_panic(tree, err);
1383*4882a593Smuzhiyun
1384*4882a593Smuzhiyun set_state_bits(tree, prealloc, &bits, NULL);
1385*4882a593Smuzhiyun cache_state(prealloc, cached_state);
1386*4882a593Smuzhiyun clear_state_bit(tree, prealloc, &clear_bits, 0, NULL);
1387*4882a593Smuzhiyun prealloc = NULL;
1388*4882a593Smuzhiyun goto out;
1389*4882a593Smuzhiyun }
1390*4882a593Smuzhiyun
1391*4882a593Smuzhiyun search_again:
1392*4882a593Smuzhiyun if (start > end)
1393*4882a593Smuzhiyun goto out;
1394*4882a593Smuzhiyun spin_unlock(&tree->lock);
1395*4882a593Smuzhiyun cond_resched();
1396*4882a593Smuzhiyun first_iteration = false;
1397*4882a593Smuzhiyun goto again;
1398*4882a593Smuzhiyun
1399*4882a593Smuzhiyun out:
1400*4882a593Smuzhiyun spin_unlock(&tree->lock);
1401*4882a593Smuzhiyun if (prealloc)
1402*4882a593Smuzhiyun free_extent_state(prealloc);
1403*4882a593Smuzhiyun
1404*4882a593Smuzhiyun return err;
1405*4882a593Smuzhiyun }
1406*4882a593Smuzhiyun
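/*
 * Illustrative sketch of a convert_extent_bit() caller (not in-tree code;
 * 'tree', 'start' and 'end' are assumed to describe a range the caller
 * already tracks): swap EXTENT_DIRTY for EXTENT_NEED_WAIT on the range so
 * that a later waiter can find it.  The only expected error is -ENOMEM.
 *
 *	struct extent_state *cached = NULL;
 *	int ret;
 *
 *	ret = convert_extent_bit(tree, start, end, EXTENT_NEED_WAIT,
 *				 EXTENT_DIRTY, &cached);
 *	if (ret)
 *		goto out;
 *	free_extent_state(cached);
 */
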
1407*4882a593Smuzhiyun /* wrappers around set/clear extent bit */
1408*4882a593Smuzhiyun int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1409*4882a593Smuzhiyun unsigned bits, struct extent_changeset *changeset)
1410*4882a593Smuzhiyun {
1411*4882a593Smuzhiyun /*
1412*4882a593Smuzhiyun * We don't support EXTENT_LOCKED yet, as current changeset will
1413*4882a593Smuzhiyun * record any bits changed, so for EXTENT_LOCKED case, it will
1414*4882a593Smuzhiyun * either fail with -EEXIST or changeset will record the whole
1415*4882a593Smuzhiyun * range.
1416*4882a593Smuzhiyun */
1417*4882a593Smuzhiyun BUG_ON(bits & EXTENT_LOCKED);
1418*4882a593Smuzhiyun
1419*4882a593Smuzhiyun return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
1420*4882a593Smuzhiyun changeset);
1421*4882a593Smuzhiyun }
1422*4882a593Smuzhiyun
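/*
 * Illustrative sketch of a set_record_extent_bits() caller (hypothetical,
 * changeset allocation omitted): record exactly which bytes newly got
 * EXTENT_QGROUP_RESERVED so the reservation can later be accounted or
 * backed out from the changeset.
 *
 *	ret = set_record_extent_bits(io_tree, start, end,
 *				     EXTENT_QGROUP_RESERVED, changeset);
 */
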
1423*4882a593Smuzhiyun int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
1424*4882a593Smuzhiyun unsigned bits)
1425*4882a593Smuzhiyun {
1426*4882a593Smuzhiyun return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL,
1427*4882a593Smuzhiyun GFP_NOWAIT, NULL);
1428*4882a593Smuzhiyun }
1429*4882a593Smuzhiyun
1430*4882a593Smuzhiyun int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1431*4882a593Smuzhiyun unsigned bits, int wake, int delete,
1432*4882a593Smuzhiyun struct extent_state **cached)
1433*4882a593Smuzhiyun {
1434*4882a593Smuzhiyun return __clear_extent_bit(tree, start, end, bits, wake, delete,
1435*4882a593Smuzhiyun cached, GFP_NOFS, NULL);
1436*4882a593Smuzhiyun }
1437*4882a593Smuzhiyun
1438*4882a593Smuzhiyun int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1439*4882a593Smuzhiyun unsigned bits, struct extent_changeset *changeset)
1440*4882a593Smuzhiyun {
1441*4882a593Smuzhiyun /*
1442*4882a593Smuzhiyun * Don't support EXTENT_LOCKED case, same reason as
1443*4882a593Smuzhiyun * set_record_extent_bits().
1444*4882a593Smuzhiyun */
1445*4882a593Smuzhiyun BUG_ON(bits & EXTENT_LOCKED);
1446*4882a593Smuzhiyun
1447*4882a593Smuzhiyun return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
1448*4882a593Smuzhiyun changeset);
1449*4882a593Smuzhiyun }
1450*4882a593Smuzhiyun
1451*4882a593Smuzhiyun /*
1452*4882a593Smuzhiyun * Either insert or lock the state struct between start and end. If the range
1453*4882a593Smuzhiyun * is already locked, wait for it to become unlocked and then try again.
1454*4882a593Smuzhiyun */
1455*4882a593Smuzhiyun int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1456*4882a593Smuzhiyun struct extent_state **cached_state)
1457*4882a593Smuzhiyun {
1458*4882a593Smuzhiyun int err;
1459*4882a593Smuzhiyun u64 failed_start;
1460*4882a593Smuzhiyun
1461*4882a593Smuzhiyun while (1) {
1462*4882a593Smuzhiyun err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
1463*4882a593Smuzhiyun EXTENT_LOCKED, &failed_start,
1464*4882a593Smuzhiyun cached_state, GFP_NOFS, NULL);
1465*4882a593Smuzhiyun if (err == -EEXIST) {
1466*4882a593Smuzhiyun wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1467*4882a593Smuzhiyun start = failed_start;
1468*4882a593Smuzhiyun } else
1469*4882a593Smuzhiyun break;
1470*4882a593Smuzhiyun WARN_ON(start > end);
1471*4882a593Smuzhiyun }
1472*4882a593Smuzhiyun return err;
1473*4882a593Smuzhiyun }
1474*4882a593Smuzhiyun
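/*
 * Illustrative locking sketch (not in-tree code; 'inode' and the offsets
 * are assumptions): take the extent lock around an operation on a 4K
 * range and drop it again with unlock_extent_cached().
 *
 *	struct extent_state *cached = NULL;
 *	u64 start = 0, end = SZ_4K - 1;
 *
 *	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, end, &cached);
 *	... operate on [start, end] while it cannot change under us ...
 *	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, end, &cached);
 */
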
1475*4882a593Smuzhiyun int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1476*4882a593Smuzhiyun {
1477*4882a593Smuzhiyun int err;
1478*4882a593Smuzhiyun u64 failed_start;
1479*4882a593Smuzhiyun
1480*4882a593Smuzhiyun err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1481*4882a593Smuzhiyun &failed_start, NULL, GFP_NOFS, NULL);
1482*4882a593Smuzhiyun if (err == -EEXIST) {
1483*4882a593Smuzhiyun if (failed_start > start)
1484*4882a593Smuzhiyun clear_extent_bit(tree, start, failed_start - 1,
1485*4882a593Smuzhiyun EXTENT_LOCKED, 1, 0, NULL);
1486*4882a593Smuzhiyun return 0;
1487*4882a593Smuzhiyun }
1488*4882a593Smuzhiyun return 1;
1489*4882a593Smuzhiyun }
1490*4882a593Smuzhiyun
1491*4882a593Smuzhiyun void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
1492*4882a593Smuzhiyun {
1493*4882a593Smuzhiyun unsigned long index = start >> PAGE_SHIFT;
1494*4882a593Smuzhiyun unsigned long end_index = end >> PAGE_SHIFT;
1495*4882a593Smuzhiyun struct page *page;
1496*4882a593Smuzhiyun
1497*4882a593Smuzhiyun while (index <= end_index) {
1498*4882a593Smuzhiyun page = find_get_page(inode->i_mapping, index);
1499*4882a593Smuzhiyun BUG_ON(!page); /* Pages should be in the extent_io_tree */
1500*4882a593Smuzhiyun clear_page_dirty_for_io(page);
1501*4882a593Smuzhiyun put_page(page);
1502*4882a593Smuzhiyun index++;
1503*4882a593Smuzhiyun }
1504*4882a593Smuzhiyun }
1505*4882a593Smuzhiyun
1506*4882a593Smuzhiyun void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
1507*4882a593Smuzhiyun {
1508*4882a593Smuzhiyun unsigned long index = start >> PAGE_SHIFT;
1509*4882a593Smuzhiyun unsigned long end_index = end >> PAGE_SHIFT;
1510*4882a593Smuzhiyun struct page *page;
1511*4882a593Smuzhiyun
1512*4882a593Smuzhiyun while (index <= end_index) {
1513*4882a593Smuzhiyun page = find_get_page(inode->i_mapping, index);
1514*4882a593Smuzhiyun BUG_ON(!page); /* Pages should be in the extent_io_tree */
1515*4882a593Smuzhiyun __set_page_dirty_nobuffers(page);
1516*4882a593Smuzhiyun account_page_redirty(page);
1517*4882a593Smuzhiyun put_page(page);
1518*4882a593Smuzhiyun index++;
1519*4882a593Smuzhiyun }
1520*4882a593Smuzhiyun }
1521*4882a593Smuzhiyun
1522*4882a593Smuzhiyun /* find the first state struct with 'bits' set after 'start', and
1523*4882a593Smuzhiyun * return it. tree->lock must be held. NULL will be returned if
1524*4882a593Smuzhiyun * nothing was found after 'start'.
1525*4882a593Smuzhiyun */
1526*4882a593Smuzhiyun static struct extent_state *
1527*4882a593Smuzhiyun find_first_extent_bit_state(struct extent_io_tree *tree,
1528*4882a593Smuzhiyun u64 start, unsigned bits)
1529*4882a593Smuzhiyun {
1530*4882a593Smuzhiyun struct rb_node *node;
1531*4882a593Smuzhiyun struct extent_state *state;
1532*4882a593Smuzhiyun
1533*4882a593Smuzhiyun /*
1534*4882a593Smuzhiyun * this search will find all the extents that end after
1535*4882a593Smuzhiyun * our range starts.
1536*4882a593Smuzhiyun */
1537*4882a593Smuzhiyun node = tree_search(tree, start);
1538*4882a593Smuzhiyun if (!node)
1539*4882a593Smuzhiyun goto out;
1540*4882a593Smuzhiyun
1541*4882a593Smuzhiyun while (1) {
1542*4882a593Smuzhiyun state = rb_entry(node, struct extent_state, rb_node);
1543*4882a593Smuzhiyun if (state->end >= start && (state->state & bits))
1544*4882a593Smuzhiyun return state;
1545*4882a593Smuzhiyun
1546*4882a593Smuzhiyun node = rb_next(node);
1547*4882a593Smuzhiyun if (!node)
1548*4882a593Smuzhiyun break;
1549*4882a593Smuzhiyun }
1550*4882a593Smuzhiyun out:
1551*4882a593Smuzhiyun return NULL;
1552*4882a593Smuzhiyun }
1553*4882a593Smuzhiyun
1554*4882a593Smuzhiyun /*
1555*4882a593Smuzhiyun * find the first offset in the io tree with 'bits' set. zero is
1556*4882a593Smuzhiyun * returned if we find something, and *start_ret and *end_ret are
1557*4882a593Smuzhiyun * set to reflect the state struct that was found.
1558*4882a593Smuzhiyun *
1559*4882a593Smuzhiyun * If nothing was found, 1 is returned; if something was found, 0 is returned.
1560*4882a593Smuzhiyun */
1561*4882a593Smuzhiyun int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1562*4882a593Smuzhiyun u64 *start_ret, u64 *end_ret, unsigned bits,
1563*4882a593Smuzhiyun struct extent_state **cached_state)
1564*4882a593Smuzhiyun {
1565*4882a593Smuzhiyun struct extent_state *state;
1566*4882a593Smuzhiyun int ret = 1;
1567*4882a593Smuzhiyun
1568*4882a593Smuzhiyun spin_lock(&tree->lock);
1569*4882a593Smuzhiyun if (cached_state && *cached_state) {
1570*4882a593Smuzhiyun state = *cached_state;
1571*4882a593Smuzhiyun if (state->end == start - 1 && extent_state_in_tree(state)) {
1572*4882a593Smuzhiyun while ((state = next_state(state)) != NULL) {
1573*4882a593Smuzhiyun if (state->state & bits)
1574*4882a593Smuzhiyun goto got_it;
1575*4882a593Smuzhiyun }
1576*4882a593Smuzhiyun free_extent_state(*cached_state);
1577*4882a593Smuzhiyun *cached_state = NULL;
1578*4882a593Smuzhiyun goto out;
1579*4882a593Smuzhiyun }
1580*4882a593Smuzhiyun free_extent_state(*cached_state);
1581*4882a593Smuzhiyun *cached_state = NULL;
1582*4882a593Smuzhiyun }
1583*4882a593Smuzhiyun
1584*4882a593Smuzhiyun state = find_first_extent_bit_state(tree, start, bits);
1585*4882a593Smuzhiyun got_it:
1586*4882a593Smuzhiyun if (state) {
1587*4882a593Smuzhiyun cache_state_if_flags(state, cached_state, 0);
1588*4882a593Smuzhiyun *start_ret = state->start;
1589*4882a593Smuzhiyun *end_ret = state->end;
1590*4882a593Smuzhiyun ret = 0;
1591*4882a593Smuzhiyun }
1592*4882a593Smuzhiyun out:
1593*4882a593Smuzhiyun spin_unlock(&tree->lock);
1594*4882a593Smuzhiyun return ret;
1595*4882a593Smuzhiyun }
1596*4882a593Smuzhiyun
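/*
 * Illustrative sketch (hypothetical caller): walk every range that has
 * EXTENT_DIRTY set, one state at a time, using the 0/1 return convention
 * described above.
 *
 *	u64 cur = 0, found_start, found_end;
 *
 *	while (!find_first_extent_bit(tree, cur, &found_start, &found_end,
 *				      EXTENT_DIRTY, NULL)) {
 *		... process [found_start, found_end] ...
 *		cur = found_end + 1;
 *	}
 */
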
1597*4882a593Smuzhiyun /**
1598*4882a593Smuzhiyun * find_contiguous_extent_bit: find a contiguous area of bits
1599*4882a593Smuzhiyun * @tree - io tree to check
1600*4882a593Smuzhiyun * @start - offset to start the search from
1601*4882a593Smuzhiyun * @start_ret - the first offset we found with the bits set
1602*4882a593Smuzhiyun * @end_ret - the end of the contiguous range of the bits that were set
1603*4882a593Smuzhiyun * @bits - bits to look for
1604*4882a593Smuzhiyun *
1605*4882a593Smuzhiyun * set_extent_bit and clear_extent_bit can temporarily split contiguous ranges
1606*4882a593Smuzhiyun * to set bits appropriately, and then merge them again. During this time the
1607*4882a593Smuzhiyun * tree->lock is dropped, so use this helper if you want to find the actual
1608*4882a593Smuzhiyun * contiguous area for the given bits. We will search for the first bit we find,
1609*4882a593Smuzhiyun * and then walk down the tree until we find a non-contiguous area. The area
1610*4882a593Smuzhiyun * returned will be the full contiguous area with the bits set.
1611*4882a593Smuzhiyun */
1612*4882a593Smuzhiyun int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
1613*4882a593Smuzhiyun u64 *start_ret, u64 *end_ret, unsigned bits)
1614*4882a593Smuzhiyun {
1615*4882a593Smuzhiyun struct extent_state *state;
1616*4882a593Smuzhiyun int ret = 1;
1617*4882a593Smuzhiyun
1618*4882a593Smuzhiyun spin_lock(&tree->lock);
1619*4882a593Smuzhiyun state = find_first_extent_bit_state(tree, start, bits);
1620*4882a593Smuzhiyun if (state) {
1621*4882a593Smuzhiyun *start_ret = state->start;
1622*4882a593Smuzhiyun *end_ret = state->end;
1623*4882a593Smuzhiyun while ((state = next_state(state)) != NULL) {
1624*4882a593Smuzhiyun if (state->start > (*end_ret + 1))
1625*4882a593Smuzhiyun break;
1626*4882a593Smuzhiyun *end_ret = state->end;
1627*4882a593Smuzhiyun }
1628*4882a593Smuzhiyun ret = 0;
1629*4882a593Smuzhiyun }
1630*4882a593Smuzhiyun spin_unlock(&tree->lock);
1631*4882a593Smuzhiyun return ret;
1632*4882a593Smuzhiyun }
1633*4882a593Smuzhiyun
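/*
 * Illustrative sketch: unlike find_first_extent_bit() above, a single call
 * is enough to learn the extent of the whole contiguous run ('cursor' is a
 * hypothetical search offset).
 *
 *	u64 run_start, run_end;
 *
 *	if (!find_contiguous_extent_bit(tree, cursor, &run_start, &run_end,
 *					EXTENT_DIRTY)) {
 *		... [run_start, run_end] is the full dirty run ...
 *	}
 */
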
1634*4882a593Smuzhiyun /**
1635*4882a593Smuzhiyun * find_first_clear_extent_bit - find the first range that has @bits not set.
1636*4882a593Smuzhiyun * This range could start before @start.
1637*4882a593Smuzhiyun *
1638*4882a593Smuzhiyun * @tree - the tree to search
1639*4882a593Smuzhiyun * @start - the offset at/after which the found extent should start
1640*4882a593Smuzhiyun * @start_ret - records the beginning of the range
1641*4882a593Smuzhiyun * @end_ret - records the end of the range (inclusive)
1642*4882a593Smuzhiyun * @bits - the set of bits which must be unset
1643*4882a593Smuzhiyun *
1644*4882a593Smuzhiyun * Since an unallocated range is also considered one which doesn't have the
1645*4882a593Smuzhiyun * bits set, it's possible that @end_ret contains -1; this happens in case the
1646*4882a593Smuzhiyun * range spans (last_range_end, end of device]. In this case it's up to the
1647*4882a593Smuzhiyun * caller to trim @end_ret to the appropriate size.
1648*4882a593Smuzhiyun */
1649*4882a593Smuzhiyun void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
1650*4882a593Smuzhiyun u64 *start_ret, u64 *end_ret, unsigned bits)
1651*4882a593Smuzhiyun {
1652*4882a593Smuzhiyun struct extent_state *state;
1653*4882a593Smuzhiyun struct rb_node *node, *prev = NULL, *next;
1654*4882a593Smuzhiyun
1655*4882a593Smuzhiyun spin_lock(&tree->lock);
1656*4882a593Smuzhiyun
1657*4882a593Smuzhiyun /* Find first extent with bits cleared */
1658*4882a593Smuzhiyun while (1) {
1659*4882a593Smuzhiyun node = __etree_search(tree, start, &next, &prev, NULL, NULL);
1660*4882a593Smuzhiyun if (!node && !next && !prev) {
1661*4882a593Smuzhiyun /*
1662*4882a593Smuzhiyun * Tree is completely empty, send full range and let
1663*4882a593Smuzhiyun * caller deal with it
1664*4882a593Smuzhiyun */
1665*4882a593Smuzhiyun *start_ret = 0;
1666*4882a593Smuzhiyun *end_ret = -1;
1667*4882a593Smuzhiyun goto out;
1668*4882a593Smuzhiyun } else if (!node && !next) {
1669*4882a593Smuzhiyun /*
1670*4882a593Smuzhiyun * We are past the last allocated chunk, set start at
1671*4882a593Smuzhiyun * the end of the last extent.
1672*4882a593Smuzhiyun */
1673*4882a593Smuzhiyun state = rb_entry(prev, struct extent_state, rb_node);
1674*4882a593Smuzhiyun *start_ret = state->end + 1;
1675*4882a593Smuzhiyun *end_ret = -1;
1676*4882a593Smuzhiyun goto out;
1677*4882a593Smuzhiyun } else if (!node) {
1678*4882a593Smuzhiyun node = next;
1679*4882a593Smuzhiyun }
1680*4882a593Smuzhiyun /*
1681*4882a593Smuzhiyun * At this point 'node' either contains 'start' or start is
1682*4882a593Smuzhiyun * before 'node'
1683*4882a593Smuzhiyun */
1684*4882a593Smuzhiyun state = rb_entry(node, struct extent_state, rb_node);
1685*4882a593Smuzhiyun
1686*4882a593Smuzhiyun if (in_range(start, state->start, state->end - state->start + 1)) {
1687*4882a593Smuzhiyun if (state->state & bits) {
1688*4882a593Smuzhiyun /*
1689*4882a593Smuzhiyun * |--range with bits sets--|
1690*4882a593Smuzhiyun * |
1691*4882a593Smuzhiyun * start
1692*4882a593Smuzhiyun */
1693*4882a593Smuzhiyun start = state->end + 1;
1694*4882a593Smuzhiyun } else {
1695*4882a593Smuzhiyun /*
1696*4882a593Smuzhiyun * 'start' falls within a range that doesn't
1697*4882a593Smuzhiyun * have the bits set, so take its start as
1698*4882a593Smuzhiyun * the beginning of the desired range
1699*4882a593Smuzhiyun *
1700*4882a593Smuzhiyun * |--range with bits cleared----|
1701*4882a593Smuzhiyun * |
1702*4882a593Smuzhiyun * start
1703*4882a593Smuzhiyun */
1704*4882a593Smuzhiyun *start_ret = state->start;
1705*4882a593Smuzhiyun break;
1706*4882a593Smuzhiyun }
1707*4882a593Smuzhiyun } else {
1708*4882a593Smuzhiyun /*
1709*4882a593Smuzhiyun * |---prev range---|---hole/unset---|---node range---|
1710*4882a593Smuzhiyun * |
1711*4882a593Smuzhiyun * start
1712*4882a593Smuzhiyun *
1713*4882a593Smuzhiyun * or
1714*4882a593Smuzhiyun *
1715*4882a593Smuzhiyun * |---hole/unset--||--first node--|
1716*4882a593Smuzhiyun * 0 |
1717*4882a593Smuzhiyun * start
1718*4882a593Smuzhiyun */
1719*4882a593Smuzhiyun if (prev) {
1720*4882a593Smuzhiyun state = rb_entry(prev, struct extent_state,
1721*4882a593Smuzhiyun rb_node);
1722*4882a593Smuzhiyun *start_ret = state->end + 1;
1723*4882a593Smuzhiyun } else {
1724*4882a593Smuzhiyun *start_ret = 0;
1725*4882a593Smuzhiyun }
1726*4882a593Smuzhiyun break;
1727*4882a593Smuzhiyun }
1728*4882a593Smuzhiyun }
1729*4882a593Smuzhiyun
1730*4882a593Smuzhiyun /*
1731*4882a593Smuzhiyun * Find the longest stretch from start until an entry which has the
1732*4882a593Smuzhiyun * bits set
1733*4882a593Smuzhiyun */
1734*4882a593Smuzhiyun while (1) {
1735*4882a593Smuzhiyun state = rb_entry(node, struct extent_state, rb_node);
1736*4882a593Smuzhiyun if (state->end >= start && !(state->state & bits)) {
1737*4882a593Smuzhiyun *end_ret = state->end;
1738*4882a593Smuzhiyun } else {
1739*4882a593Smuzhiyun *end_ret = state->start - 1;
1740*4882a593Smuzhiyun break;
1741*4882a593Smuzhiyun }
1742*4882a593Smuzhiyun
1743*4882a593Smuzhiyun node = rb_next(node);
1744*4882a593Smuzhiyun if (!node)
1745*4882a593Smuzhiyun break;
1746*4882a593Smuzhiyun }
1747*4882a593Smuzhiyun out:
1748*4882a593Smuzhiyun spin_unlock(&tree->lock);
1749*4882a593Smuzhiyun }
1750*4882a593Smuzhiyun
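/*
 * Illustrative sketch, loosely modelled on how a trimming loop could use
 * this helper ('device_size' is an assumption): find the next range with
 * neither CHUNK_TRIMMED nor CHUNK_ALLOCATED set and clamp the open-ended
 * tail, since @end_ret may come back as -1.
 *
 *	u64 free_start, free_end;
 *
 *	find_first_clear_extent_bit(tree, search_start, &free_start, &free_end,
 *				    CHUNK_TRIMMED | CHUNK_ALLOCATED);
 *	free_end = min(free_end, device_size - 1);
 */
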
1751*4882a593Smuzhiyun /*
1752*4882a593Smuzhiyun * find a contiguous range of bytes in the file marked as delalloc, not
1753*4882a593Smuzhiyun * more than 'max_bytes'. 'start' and 'end' are used to return the range.
1754*4882a593Smuzhiyun *
1755*4882a593Smuzhiyun * true is returned if we find something, false if nothing was in the tree.
1756*4882a593Smuzhiyun */
1757*4882a593Smuzhiyun bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
1758*4882a593Smuzhiyun u64 *end, u64 max_bytes,
1759*4882a593Smuzhiyun struct extent_state **cached_state)
1760*4882a593Smuzhiyun {
1761*4882a593Smuzhiyun struct rb_node *node;
1762*4882a593Smuzhiyun struct extent_state *state;
1763*4882a593Smuzhiyun u64 cur_start = *start;
1764*4882a593Smuzhiyun bool found = false;
1765*4882a593Smuzhiyun u64 total_bytes = 0;
1766*4882a593Smuzhiyun
1767*4882a593Smuzhiyun spin_lock(&tree->lock);
1768*4882a593Smuzhiyun
1769*4882a593Smuzhiyun /*
1770*4882a593Smuzhiyun * this search will find all the extents that end after
1771*4882a593Smuzhiyun * our range starts.
1772*4882a593Smuzhiyun */
1773*4882a593Smuzhiyun node = tree_search(tree, cur_start);
1774*4882a593Smuzhiyun if (!node) {
1775*4882a593Smuzhiyun *end = (u64)-1;
1776*4882a593Smuzhiyun goto out;
1777*4882a593Smuzhiyun }
1778*4882a593Smuzhiyun
1779*4882a593Smuzhiyun while (1) {
1780*4882a593Smuzhiyun state = rb_entry(node, struct extent_state, rb_node);
1781*4882a593Smuzhiyun if (found && (state->start != cur_start ||
1782*4882a593Smuzhiyun (state->state & EXTENT_BOUNDARY))) {
1783*4882a593Smuzhiyun goto out;
1784*4882a593Smuzhiyun }
1785*4882a593Smuzhiyun if (!(state->state & EXTENT_DELALLOC)) {
1786*4882a593Smuzhiyun if (!found)
1787*4882a593Smuzhiyun *end = state->end;
1788*4882a593Smuzhiyun goto out;
1789*4882a593Smuzhiyun }
1790*4882a593Smuzhiyun if (!found) {
1791*4882a593Smuzhiyun *start = state->start;
1792*4882a593Smuzhiyun *cached_state = state;
1793*4882a593Smuzhiyun refcount_inc(&state->refs);
1794*4882a593Smuzhiyun }
1795*4882a593Smuzhiyun found = true;
1796*4882a593Smuzhiyun *end = state->end;
1797*4882a593Smuzhiyun cur_start = state->end + 1;
1798*4882a593Smuzhiyun node = rb_next(node);
1799*4882a593Smuzhiyun total_bytes += state->end - state->start + 1;
1800*4882a593Smuzhiyun if (total_bytes >= max_bytes)
1801*4882a593Smuzhiyun break;
1802*4882a593Smuzhiyun if (!node)
1803*4882a593Smuzhiyun break;
1804*4882a593Smuzhiyun }
1805*4882a593Smuzhiyun out:
1806*4882a593Smuzhiyun spin_unlock(&tree->lock);
1807*4882a593Smuzhiyun return found;
1808*4882a593Smuzhiyun }
1809*4882a593Smuzhiyun
1810*4882a593Smuzhiyun static int __process_pages_contig(struct address_space *mapping,
1811*4882a593Smuzhiyun struct page *locked_page,
1812*4882a593Smuzhiyun pgoff_t start_index, pgoff_t end_index,
1813*4882a593Smuzhiyun unsigned long page_ops, pgoff_t *index_ret);
1814*4882a593Smuzhiyun
1815*4882a593Smuzhiyun static noinline void __unlock_for_delalloc(struct inode *inode,
1816*4882a593Smuzhiyun struct page *locked_page,
1817*4882a593Smuzhiyun u64 start, u64 end)
1818*4882a593Smuzhiyun {
1819*4882a593Smuzhiyun unsigned long index = start >> PAGE_SHIFT;
1820*4882a593Smuzhiyun unsigned long end_index = end >> PAGE_SHIFT;
1821*4882a593Smuzhiyun
1822*4882a593Smuzhiyun ASSERT(locked_page);
1823*4882a593Smuzhiyun if (index == locked_page->index && end_index == index)
1824*4882a593Smuzhiyun return;
1825*4882a593Smuzhiyun
1826*4882a593Smuzhiyun __process_pages_contig(inode->i_mapping, locked_page, index, end_index,
1827*4882a593Smuzhiyun PAGE_UNLOCK, NULL);
1828*4882a593Smuzhiyun }
1829*4882a593Smuzhiyun
1830*4882a593Smuzhiyun static noinline int lock_delalloc_pages(struct inode *inode,
1831*4882a593Smuzhiyun struct page *locked_page,
1832*4882a593Smuzhiyun u64 delalloc_start,
1833*4882a593Smuzhiyun u64 delalloc_end)
1834*4882a593Smuzhiyun {
1835*4882a593Smuzhiyun unsigned long index = delalloc_start >> PAGE_SHIFT;
1836*4882a593Smuzhiyun unsigned long index_ret = index;
1837*4882a593Smuzhiyun unsigned long end_index = delalloc_end >> PAGE_SHIFT;
1838*4882a593Smuzhiyun int ret;
1839*4882a593Smuzhiyun
1840*4882a593Smuzhiyun ASSERT(locked_page);
1841*4882a593Smuzhiyun if (index == locked_page->index && index == end_index)
1842*4882a593Smuzhiyun return 0;
1843*4882a593Smuzhiyun
1844*4882a593Smuzhiyun ret = __process_pages_contig(inode->i_mapping, locked_page, index,
1845*4882a593Smuzhiyun end_index, PAGE_LOCK, &index_ret);
1846*4882a593Smuzhiyun if (ret == -EAGAIN)
1847*4882a593Smuzhiyun __unlock_for_delalloc(inode, locked_page, delalloc_start,
1848*4882a593Smuzhiyun (u64)index_ret << PAGE_SHIFT);
1849*4882a593Smuzhiyun return ret;
1850*4882a593Smuzhiyun }
1851*4882a593Smuzhiyun
1852*4882a593Smuzhiyun /*
1853*4882a593Smuzhiyun * Find and lock a contiguous range of bytes in the file marked as delalloc, no
1854*4882a593Smuzhiyun * more than @max_bytes. @start and @end are used to return the range.
1855*4882a593Smuzhiyun *
1856*4882a593Smuzhiyun * Return: true if we find something
1857*4882a593Smuzhiyun * false if nothing was in the tree
1858*4882a593Smuzhiyun */
1859*4882a593Smuzhiyun EXPORT_FOR_TESTS
1860*4882a593Smuzhiyun noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
1861*4882a593Smuzhiyun struct page *locked_page, u64 *start,
1862*4882a593Smuzhiyun u64 *end)
1863*4882a593Smuzhiyun {
1864*4882a593Smuzhiyun struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1865*4882a593Smuzhiyun u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
1866*4882a593Smuzhiyun u64 delalloc_start;
1867*4882a593Smuzhiyun u64 delalloc_end;
1868*4882a593Smuzhiyun bool found;
1869*4882a593Smuzhiyun struct extent_state *cached_state = NULL;
1870*4882a593Smuzhiyun int ret;
1871*4882a593Smuzhiyun int loops = 0;
1872*4882a593Smuzhiyun
1873*4882a593Smuzhiyun again:
1874*4882a593Smuzhiyun /* step one, find a bunch of delalloc bytes starting at start */
1875*4882a593Smuzhiyun delalloc_start = *start;
1876*4882a593Smuzhiyun delalloc_end = 0;
1877*4882a593Smuzhiyun found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1878*4882a593Smuzhiyun max_bytes, &cached_state);
1879*4882a593Smuzhiyun if (!found || delalloc_end <= *start) {
1880*4882a593Smuzhiyun *start = delalloc_start;
1881*4882a593Smuzhiyun *end = delalloc_end;
1882*4882a593Smuzhiyun free_extent_state(cached_state);
1883*4882a593Smuzhiyun return false;
1884*4882a593Smuzhiyun }
1885*4882a593Smuzhiyun
1886*4882a593Smuzhiyun /*
1887*4882a593Smuzhiyun * start comes from the offset of locked_page. We have to lock
1888*4882a593Smuzhiyun * pages in order, so we can't process delalloc bytes before
1889*4882a593Smuzhiyun * locked_page
1890*4882a593Smuzhiyun */
1891*4882a593Smuzhiyun if (delalloc_start < *start)
1892*4882a593Smuzhiyun delalloc_start = *start;
1893*4882a593Smuzhiyun
1894*4882a593Smuzhiyun /*
1895*4882a593Smuzhiyun * make sure to limit the number of pages we try to lock down
1896*4882a593Smuzhiyun */
1897*4882a593Smuzhiyun if (delalloc_end + 1 - delalloc_start > max_bytes)
1898*4882a593Smuzhiyun delalloc_end = delalloc_start + max_bytes - 1;
1899*4882a593Smuzhiyun
1900*4882a593Smuzhiyun /* step two, lock all the pages after the page that has start */
1901*4882a593Smuzhiyun ret = lock_delalloc_pages(inode, locked_page,
1902*4882a593Smuzhiyun delalloc_start, delalloc_end);
1903*4882a593Smuzhiyun ASSERT(!ret || ret == -EAGAIN);
1904*4882a593Smuzhiyun if (ret == -EAGAIN) {
1905*4882a593Smuzhiyun /* some of the pages are gone, let's avoid looping by
1906*4882a593Smuzhiyun * shortening the size of the delalloc range we're searching
1907*4882a593Smuzhiyun */
1908*4882a593Smuzhiyun free_extent_state(cached_state);
1909*4882a593Smuzhiyun cached_state = NULL;
1910*4882a593Smuzhiyun if (!loops) {
1911*4882a593Smuzhiyun max_bytes = PAGE_SIZE;
1912*4882a593Smuzhiyun loops = 1;
1913*4882a593Smuzhiyun goto again;
1914*4882a593Smuzhiyun } else {
1915*4882a593Smuzhiyun found = false;
1916*4882a593Smuzhiyun goto out_failed;
1917*4882a593Smuzhiyun }
1918*4882a593Smuzhiyun }
1919*4882a593Smuzhiyun
1920*4882a593Smuzhiyun /* step three, lock the state bits for the whole range */
1921*4882a593Smuzhiyun lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
1922*4882a593Smuzhiyun
1923*4882a593Smuzhiyun /* then test to make sure it is all still delalloc */
1924*4882a593Smuzhiyun ret = test_range_bit(tree, delalloc_start, delalloc_end,
1925*4882a593Smuzhiyun EXTENT_DELALLOC, 1, cached_state);
1926*4882a593Smuzhiyun if (!ret) {
1927*4882a593Smuzhiyun unlock_extent_cached(tree, delalloc_start, delalloc_end,
1928*4882a593Smuzhiyun &cached_state);
1929*4882a593Smuzhiyun __unlock_for_delalloc(inode, locked_page,
1930*4882a593Smuzhiyun delalloc_start, delalloc_end);
1931*4882a593Smuzhiyun cond_resched();
1932*4882a593Smuzhiyun goto again;
1933*4882a593Smuzhiyun }
1934*4882a593Smuzhiyun free_extent_state(cached_state);
1935*4882a593Smuzhiyun *start = delalloc_start;
1936*4882a593Smuzhiyun *end = delalloc_end;
1937*4882a593Smuzhiyun out_failed:
1938*4882a593Smuzhiyun return found;
1939*4882a593Smuzhiyun }
1940*4882a593Smuzhiyun
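/*
 * Illustrative sketch of the caller pattern (simplified, the real
 * writepage path does more bookkeeping): keep finding and locking
 * delalloc ranges that cover 'page' until the page is exhausted.
 *
 *	u64 found_start = page_offset(page);
 *	u64 page_end = found_start + PAGE_SIZE - 1;
 *	u64 found_end = 0;
 *
 *	while (find_lock_delalloc_range(inode, page, &found_start, &found_end)) {
 *		... run delalloc for [found_start, found_end] ...
 *		found_start = found_end + 1;
 *		if (found_start > page_end)
 *			break;
 *	}
 */
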
1941*4882a593Smuzhiyun static int __process_pages_contig(struct address_space *mapping,
1942*4882a593Smuzhiyun struct page *locked_page,
1943*4882a593Smuzhiyun pgoff_t start_index, pgoff_t end_index,
1944*4882a593Smuzhiyun unsigned long page_ops, pgoff_t *index_ret)
1945*4882a593Smuzhiyun {
1946*4882a593Smuzhiyun unsigned long nr_pages = end_index - start_index + 1;
1947*4882a593Smuzhiyun unsigned long pages_locked = 0;
1948*4882a593Smuzhiyun pgoff_t index = start_index;
1949*4882a593Smuzhiyun struct page *pages[16];
1950*4882a593Smuzhiyun unsigned ret;
1951*4882a593Smuzhiyun int err = 0;
1952*4882a593Smuzhiyun int i;
1953*4882a593Smuzhiyun
1954*4882a593Smuzhiyun if (page_ops & PAGE_LOCK) {
1955*4882a593Smuzhiyun ASSERT(page_ops == PAGE_LOCK);
1956*4882a593Smuzhiyun ASSERT(index_ret && *index_ret == start_index);
1957*4882a593Smuzhiyun }
1958*4882a593Smuzhiyun
1959*4882a593Smuzhiyun if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
1960*4882a593Smuzhiyun mapping_set_error(mapping, -EIO);
1961*4882a593Smuzhiyun
1962*4882a593Smuzhiyun while (nr_pages > 0) {
1963*4882a593Smuzhiyun ret = find_get_pages_contig(mapping, index,
1964*4882a593Smuzhiyun min_t(unsigned long,
1965*4882a593Smuzhiyun nr_pages, ARRAY_SIZE(pages)), pages);
1966*4882a593Smuzhiyun if (ret == 0) {
1967*4882a593Smuzhiyun /*
1968*4882a593Smuzhiyun * Only if we're going to lock these pages,
1969*4882a593Smuzhiyun * can we find nothing at @index.
1970*4882a593Smuzhiyun */
1971*4882a593Smuzhiyun ASSERT(page_ops & PAGE_LOCK);
1972*4882a593Smuzhiyun err = -EAGAIN;
1973*4882a593Smuzhiyun goto out;
1974*4882a593Smuzhiyun }
1975*4882a593Smuzhiyun
1976*4882a593Smuzhiyun for (i = 0; i < ret; i++) {
1977*4882a593Smuzhiyun if (page_ops & PAGE_SET_PRIVATE2)
1978*4882a593Smuzhiyun SetPagePrivate2(pages[i]);
1979*4882a593Smuzhiyun
1980*4882a593Smuzhiyun if (locked_page && pages[i] == locked_page) {
1981*4882a593Smuzhiyun put_page(pages[i]);
1982*4882a593Smuzhiyun pages_locked++;
1983*4882a593Smuzhiyun continue;
1984*4882a593Smuzhiyun }
1985*4882a593Smuzhiyun if (page_ops & PAGE_CLEAR_DIRTY)
1986*4882a593Smuzhiyun clear_page_dirty_for_io(pages[i]);
1987*4882a593Smuzhiyun if (page_ops & PAGE_SET_WRITEBACK)
1988*4882a593Smuzhiyun set_page_writeback(pages[i]);
1989*4882a593Smuzhiyun if (page_ops & PAGE_SET_ERROR)
1990*4882a593Smuzhiyun SetPageError(pages[i]);
1991*4882a593Smuzhiyun if (page_ops & PAGE_END_WRITEBACK)
1992*4882a593Smuzhiyun end_page_writeback(pages[i]);
1993*4882a593Smuzhiyun if (page_ops & PAGE_UNLOCK)
1994*4882a593Smuzhiyun unlock_page(pages[i]);
1995*4882a593Smuzhiyun if (page_ops & PAGE_LOCK) {
1996*4882a593Smuzhiyun lock_page(pages[i]);
1997*4882a593Smuzhiyun if (!PageDirty(pages[i]) ||
1998*4882a593Smuzhiyun pages[i]->mapping != mapping) {
1999*4882a593Smuzhiyun unlock_page(pages[i]);
2000*4882a593Smuzhiyun for (; i < ret; i++)
2001*4882a593Smuzhiyun put_page(pages[i]);
2002*4882a593Smuzhiyun err = -EAGAIN;
2003*4882a593Smuzhiyun goto out;
2004*4882a593Smuzhiyun }
2005*4882a593Smuzhiyun }
2006*4882a593Smuzhiyun put_page(pages[i]);
2007*4882a593Smuzhiyun pages_locked++;
2008*4882a593Smuzhiyun }
2009*4882a593Smuzhiyun nr_pages -= ret;
2010*4882a593Smuzhiyun index += ret;
2011*4882a593Smuzhiyun cond_resched();
2012*4882a593Smuzhiyun }
2013*4882a593Smuzhiyun out:
2014*4882a593Smuzhiyun if (err && index_ret)
2015*4882a593Smuzhiyun *index_ret = start_index + pages_locked - 1;
2016*4882a593Smuzhiyun return err;
2017*4882a593Smuzhiyun }
2018*4882a593Smuzhiyun
2019*4882a593Smuzhiyun void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
2020*4882a593Smuzhiyun struct page *locked_page,
2021*4882a593Smuzhiyun unsigned clear_bits,
2022*4882a593Smuzhiyun unsigned long page_ops)
2023*4882a593Smuzhiyun {
2024*4882a593Smuzhiyun clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL);
2025*4882a593Smuzhiyun
2026*4882a593Smuzhiyun __process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
2027*4882a593Smuzhiyun start >> PAGE_SHIFT, end >> PAGE_SHIFT,
2028*4882a593Smuzhiyun page_ops, NULL);
2029*4882a593Smuzhiyun }
2030*4882a593Smuzhiyun
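/*
 * Illustrative sketch (the exact bit and page-op combination is the
 * caller's business, this one is modelled loosely on delalloc error
 * unwinding): clear the range's state and release its pages in one call.
 *
 *	extent_clear_unlock_delalloc(inode, start, end, locked_page,
 *				     EXTENT_LOCKED | EXTENT_DELALLOC,
 *				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
 *				     PAGE_END_WRITEBACK);
 */
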
2031*4882a593Smuzhiyun /*
2032*4882a593Smuzhiyun * count the number of bytes in the tree that have the given bit(s)
2033*4882a593Smuzhiyun * set. This can be fairly slow, except for EXTENT_DIRTY which is
2034*4882a593Smuzhiyun * cached. The total number found is returned.
2035*4882a593Smuzhiyun */
2036*4882a593Smuzhiyun u64 count_range_bits(struct extent_io_tree *tree,
2037*4882a593Smuzhiyun u64 *start, u64 search_end, u64 max_bytes,
2038*4882a593Smuzhiyun unsigned bits, int contig)
2039*4882a593Smuzhiyun {
2040*4882a593Smuzhiyun struct rb_node *node;
2041*4882a593Smuzhiyun struct extent_state *state;
2042*4882a593Smuzhiyun u64 cur_start = *start;
2043*4882a593Smuzhiyun u64 total_bytes = 0;
2044*4882a593Smuzhiyun u64 last = 0;
2045*4882a593Smuzhiyun int found = 0;
2046*4882a593Smuzhiyun
2047*4882a593Smuzhiyun if (WARN_ON(search_end <= cur_start))
2048*4882a593Smuzhiyun return 0;
2049*4882a593Smuzhiyun
2050*4882a593Smuzhiyun spin_lock(&tree->lock);
2051*4882a593Smuzhiyun if (cur_start == 0 && bits == EXTENT_DIRTY) {
2052*4882a593Smuzhiyun total_bytes = tree->dirty_bytes;
2053*4882a593Smuzhiyun goto out;
2054*4882a593Smuzhiyun }
2055*4882a593Smuzhiyun /*
2056*4882a593Smuzhiyun * this search will find all the extents that end after
2057*4882a593Smuzhiyun * our range starts.
2058*4882a593Smuzhiyun */
2059*4882a593Smuzhiyun node = tree_search(tree, cur_start);
2060*4882a593Smuzhiyun if (!node)
2061*4882a593Smuzhiyun goto out;
2062*4882a593Smuzhiyun
2063*4882a593Smuzhiyun while (1) {
2064*4882a593Smuzhiyun state = rb_entry(node, struct extent_state, rb_node);
2065*4882a593Smuzhiyun if (state->start > search_end)
2066*4882a593Smuzhiyun break;
2067*4882a593Smuzhiyun if (contig && found && state->start > last + 1)
2068*4882a593Smuzhiyun break;
2069*4882a593Smuzhiyun if (state->end >= cur_start && (state->state & bits) == bits) {
2070*4882a593Smuzhiyun total_bytes += min(search_end, state->end) + 1 -
2071*4882a593Smuzhiyun max(cur_start, state->start);
2072*4882a593Smuzhiyun if (total_bytes >= max_bytes)
2073*4882a593Smuzhiyun break;
2074*4882a593Smuzhiyun if (!found) {
2075*4882a593Smuzhiyun *start = max(cur_start, state->start);
2076*4882a593Smuzhiyun found = 1;
2077*4882a593Smuzhiyun }
2078*4882a593Smuzhiyun last = state->end;
2079*4882a593Smuzhiyun } else if (contig && found) {
2080*4882a593Smuzhiyun break;
2081*4882a593Smuzhiyun }
2082*4882a593Smuzhiyun node = rb_next(node);
2083*4882a593Smuzhiyun if (!node)
2084*4882a593Smuzhiyun break;
2085*4882a593Smuzhiyun }
2086*4882a593Smuzhiyun out:
2087*4882a593Smuzhiyun spin_unlock(&tree->lock);
2088*4882a593Smuzhiyun return total_bytes;
2089*4882a593Smuzhiyun }
2090*4882a593Smuzhiyun
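/*
 * Illustrative sketch (mirrors the probe clean_io_failure() does below,
 * 'offset' is a hypothetical byte offset): ask whether any byte at or
 * after 'offset' is still tracked as dirty in a failure tree.
 *
 *	u64 probe = offset;
 *
 *	if (!count_range_bits(failure_tree, &probe, (u64)-1, 1,
 *			      EXTENT_DIRTY, 0))
 *		return 0;
 */
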
2091*4882a593Smuzhiyun /*
2092*4882a593Smuzhiyun * set the failure record for a given byte offset in the tree. If there isn't
2093*4882a593Smuzhiyun * an extent_state there already, -ENOENT is returned and nothing is changed.
2094*4882a593Smuzhiyun */
2095*4882a593Smuzhiyun int set_state_failrec(struct extent_io_tree *tree, u64 start,
2096*4882a593Smuzhiyun struct io_failure_record *failrec)
2097*4882a593Smuzhiyun {
2098*4882a593Smuzhiyun struct rb_node *node;
2099*4882a593Smuzhiyun struct extent_state *state;
2100*4882a593Smuzhiyun int ret = 0;
2101*4882a593Smuzhiyun
2102*4882a593Smuzhiyun spin_lock(&tree->lock);
2103*4882a593Smuzhiyun /*
2104*4882a593Smuzhiyun * this search will find all the extents that end after
2105*4882a593Smuzhiyun * our range starts.
2106*4882a593Smuzhiyun */
2107*4882a593Smuzhiyun node = tree_search(tree, start);
2108*4882a593Smuzhiyun if (!node) {
2109*4882a593Smuzhiyun ret = -ENOENT;
2110*4882a593Smuzhiyun goto out;
2111*4882a593Smuzhiyun }
2112*4882a593Smuzhiyun state = rb_entry(node, struct extent_state, rb_node);
2113*4882a593Smuzhiyun if (state->start != start) {
2114*4882a593Smuzhiyun ret = -ENOENT;
2115*4882a593Smuzhiyun goto out;
2116*4882a593Smuzhiyun }
2117*4882a593Smuzhiyun state->failrec = failrec;
2118*4882a593Smuzhiyun out:
2119*4882a593Smuzhiyun spin_unlock(&tree->lock);
2120*4882a593Smuzhiyun return ret;
2121*4882a593Smuzhiyun }
2122*4882a593Smuzhiyun
2123*4882a593Smuzhiyun struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start)
2124*4882a593Smuzhiyun {
2125*4882a593Smuzhiyun struct rb_node *node;
2126*4882a593Smuzhiyun struct extent_state *state;
2127*4882a593Smuzhiyun struct io_failure_record *failrec;
2128*4882a593Smuzhiyun
2129*4882a593Smuzhiyun spin_lock(&tree->lock);
2130*4882a593Smuzhiyun /*
2131*4882a593Smuzhiyun * this search will find all the extents that end after
2132*4882a593Smuzhiyun * our range starts.
2133*4882a593Smuzhiyun */
2134*4882a593Smuzhiyun node = tree_search(tree, start);
2135*4882a593Smuzhiyun if (!node) {
2136*4882a593Smuzhiyun failrec = ERR_PTR(-ENOENT);
2137*4882a593Smuzhiyun goto out;
2138*4882a593Smuzhiyun }
2139*4882a593Smuzhiyun state = rb_entry(node, struct extent_state, rb_node);
2140*4882a593Smuzhiyun if (state->start != start) {
2141*4882a593Smuzhiyun failrec = ERR_PTR(-ENOENT);
2142*4882a593Smuzhiyun goto out;
2143*4882a593Smuzhiyun }
2144*4882a593Smuzhiyun
2145*4882a593Smuzhiyun failrec = state->failrec;
2146*4882a593Smuzhiyun out:
2147*4882a593Smuzhiyun spin_unlock(&tree->lock);
2148*4882a593Smuzhiyun return failrec;
2149*4882a593Smuzhiyun }
2150*4882a593Smuzhiyun
2151*4882a593Smuzhiyun /*
2152*4882a593Smuzhiyun * searches a range in the state tree for a given mask.
2153*4882a593Smuzhiyun * If 'filled' == 1, this returns 1 only if the whole range is covered by
2154*4882a593Smuzhiyun * extents that all have the bits set. Otherwise, 1 is returned if any bit
2155*4882a593Smuzhiyun * in the range is found set.
2156*4882a593Smuzhiyun */
2157*4882a593Smuzhiyun int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
2158*4882a593Smuzhiyun unsigned bits, int filled, struct extent_state *cached)
2159*4882a593Smuzhiyun {
2160*4882a593Smuzhiyun struct extent_state *state = NULL;
2161*4882a593Smuzhiyun struct rb_node *node;
2162*4882a593Smuzhiyun int bitset = 0;
2163*4882a593Smuzhiyun
2164*4882a593Smuzhiyun spin_lock(&tree->lock);
2165*4882a593Smuzhiyun if (cached && extent_state_in_tree(cached) && cached->start <= start &&
2166*4882a593Smuzhiyun cached->end > start)
2167*4882a593Smuzhiyun node = &cached->rb_node;
2168*4882a593Smuzhiyun else
2169*4882a593Smuzhiyun node = tree_search(tree, start);
2170*4882a593Smuzhiyun while (node && start <= end) {
2171*4882a593Smuzhiyun state = rb_entry(node, struct extent_state, rb_node);
2172*4882a593Smuzhiyun
2173*4882a593Smuzhiyun if (filled && state->start > start) {
2174*4882a593Smuzhiyun bitset = 0;
2175*4882a593Smuzhiyun break;
2176*4882a593Smuzhiyun }
2177*4882a593Smuzhiyun
2178*4882a593Smuzhiyun if (state->start > end)
2179*4882a593Smuzhiyun break;
2180*4882a593Smuzhiyun
2181*4882a593Smuzhiyun if (state->state & bits) {
2182*4882a593Smuzhiyun bitset = 1;
2183*4882a593Smuzhiyun if (!filled)
2184*4882a593Smuzhiyun break;
2185*4882a593Smuzhiyun } else if (filled) {
2186*4882a593Smuzhiyun bitset = 0;
2187*4882a593Smuzhiyun break;
2188*4882a593Smuzhiyun }
2189*4882a593Smuzhiyun
2190*4882a593Smuzhiyun if (state->end == (u64)-1)
2191*4882a593Smuzhiyun break;
2192*4882a593Smuzhiyun
2193*4882a593Smuzhiyun start = state->end + 1;
2194*4882a593Smuzhiyun if (start > end)
2195*4882a593Smuzhiyun break;
2196*4882a593Smuzhiyun node = rb_next(node);
2197*4882a593Smuzhiyun if (!node) {
2198*4882a593Smuzhiyun if (filled)
2199*4882a593Smuzhiyun bitset = 0;
2200*4882a593Smuzhiyun break;
2201*4882a593Smuzhiyun }
2202*4882a593Smuzhiyun }
2203*4882a593Smuzhiyun spin_unlock(&tree->lock);
2204*4882a593Smuzhiyun return bitset;
2205*4882a593Smuzhiyun }
2206*4882a593Smuzhiyun
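/*
 * Illustrative sketch: with 'filled' == 1 the whole range must still carry
 * the bit, which is how find_lock_delalloc_range() above revalidates a
 * delalloc range after locking its pages.
 *
 *	if (!test_range_bit(tree, start, end, EXTENT_DELALLOC, 1, cached))
 *		goto back_off;
 */
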
2207*4882a593Smuzhiyun /*
2208*4882a593Smuzhiyun * helper function to set a given page up to date if all the
2209*4882a593Smuzhiyun * extents in the tree for that page are up to date
2210*4882a593Smuzhiyun */
2211*4882a593Smuzhiyun static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
2212*4882a593Smuzhiyun {
2213*4882a593Smuzhiyun u64 start = page_offset(page);
2214*4882a593Smuzhiyun u64 end = start + PAGE_SIZE - 1;
2215*4882a593Smuzhiyun if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
2216*4882a593Smuzhiyun SetPageUptodate(page);
2217*4882a593Smuzhiyun }
2218*4882a593Smuzhiyun
2219*4882a593Smuzhiyun int free_io_failure(struct extent_io_tree *failure_tree,
2220*4882a593Smuzhiyun struct extent_io_tree *io_tree,
2221*4882a593Smuzhiyun struct io_failure_record *rec)
2222*4882a593Smuzhiyun {
2223*4882a593Smuzhiyun int ret;
2224*4882a593Smuzhiyun int err = 0;
2225*4882a593Smuzhiyun
2226*4882a593Smuzhiyun set_state_failrec(failure_tree, rec->start, NULL);
2227*4882a593Smuzhiyun ret = clear_extent_bits(failure_tree, rec->start,
2228*4882a593Smuzhiyun rec->start + rec->len - 1,
2229*4882a593Smuzhiyun EXTENT_LOCKED | EXTENT_DIRTY);
2230*4882a593Smuzhiyun if (ret)
2231*4882a593Smuzhiyun err = ret;
2232*4882a593Smuzhiyun
2233*4882a593Smuzhiyun ret = clear_extent_bits(io_tree, rec->start,
2234*4882a593Smuzhiyun rec->start + rec->len - 1,
2235*4882a593Smuzhiyun EXTENT_DAMAGED);
2236*4882a593Smuzhiyun if (ret && !err)
2237*4882a593Smuzhiyun err = ret;
2238*4882a593Smuzhiyun
2239*4882a593Smuzhiyun kfree(rec);
2240*4882a593Smuzhiyun return err;
2241*4882a593Smuzhiyun }
2242*4882a593Smuzhiyun
2243*4882a593Smuzhiyun /*
2244*4882a593Smuzhiyun * this bypasses the standard btrfs submit functions deliberately, as
2245*4882a593Smuzhiyun * the standard behavior is to write all copies in a raid setup. here we only
2246*4882a593Smuzhiyun * want to write the one bad copy. so we do the mapping for ourselves and issue
2247*4882a593Smuzhiyun * submit_bio directly.
2248*4882a593Smuzhiyun * to avoid any synchronization issues, wait for the data after writing, which
2249*4882a593Smuzhiyun * actually prevents the read that triggered the error from finishing.
2250*4882a593Smuzhiyun * currently, there can be no more than two copies of every data bit. thus,
2251*4882a593Smuzhiyun * exactly one rewrite is required.
2252*4882a593Smuzhiyun */
2253*4882a593Smuzhiyun int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
2254*4882a593Smuzhiyun u64 length, u64 logical, struct page *page,
2255*4882a593Smuzhiyun unsigned int pg_offset, int mirror_num)
2256*4882a593Smuzhiyun {
2257*4882a593Smuzhiyun struct bio *bio;
2258*4882a593Smuzhiyun struct btrfs_device *dev;
2259*4882a593Smuzhiyun u64 map_length = 0;
2260*4882a593Smuzhiyun u64 sector;
2261*4882a593Smuzhiyun struct btrfs_bio *bbio = NULL;
2262*4882a593Smuzhiyun int ret;
2263*4882a593Smuzhiyun
2264*4882a593Smuzhiyun ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
2265*4882a593Smuzhiyun BUG_ON(!mirror_num);
2266*4882a593Smuzhiyun
2267*4882a593Smuzhiyun bio = btrfs_io_bio_alloc(1);
2268*4882a593Smuzhiyun bio->bi_iter.bi_size = 0;
2269*4882a593Smuzhiyun map_length = length;
2270*4882a593Smuzhiyun
2271*4882a593Smuzhiyun /*
2272*4882a593Smuzhiyun * Avoid races with device replace and make sure our bbio has devices
2273*4882a593Smuzhiyun * associated to its stripes that don't go away while we are doing the
2274*4882a593Smuzhiyun * read repair operation.
2275*4882a593Smuzhiyun */
2276*4882a593Smuzhiyun btrfs_bio_counter_inc_blocked(fs_info);
2277*4882a593Smuzhiyun if (btrfs_is_parity_mirror(fs_info, logical, length)) {
2278*4882a593Smuzhiyun /*
2279*4882a593Smuzhiyun * Note that we don't use BTRFS_MAP_WRITE because it's supposed
2280*4882a593Smuzhiyun * to update all raid stripes, but here we just want to correct
2281*4882a593Smuzhiyun * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad
2282*4882a593Smuzhiyun * stripe's dev and sector.
2283*4882a593Smuzhiyun */
2284*4882a593Smuzhiyun ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
2285*4882a593Smuzhiyun &map_length, &bbio, 0);
2286*4882a593Smuzhiyun if (ret) {
2287*4882a593Smuzhiyun btrfs_bio_counter_dec(fs_info);
2288*4882a593Smuzhiyun bio_put(bio);
2289*4882a593Smuzhiyun return -EIO;
2290*4882a593Smuzhiyun }
2291*4882a593Smuzhiyun ASSERT(bbio->mirror_num == 1);
2292*4882a593Smuzhiyun } else {
2293*4882a593Smuzhiyun ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
2294*4882a593Smuzhiyun &map_length, &bbio, mirror_num);
2295*4882a593Smuzhiyun if (ret) {
2296*4882a593Smuzhiyun btrfs_bio_counter_dec(fs_info);
2297*4882a593Smuzhiyun bio_put(bio);
2298*4882a593Smuzhiyun return -EIO;
2299*4882a593Smuzhiyun }
2300*4882a593Smuzhiyun BUG_ON(mirror_num != bbio->mirror_num);
2301*4882a593Smuzhiyun }
2302*4882a593Smuzhiyun
2303*4882a593Smuzhiyun sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9;
2304*4882a593Smuzhiyun bio->bi_iter.bi_sector = sector;
2305*4882a593Smuzhiyun dev = bbio->stripes[bbio->mirror_num - 1].dev;
2306*4882a593Smuzhiyun btrfs_put_bbio(bbio);
2307*4882a593Smuzhiyun if (!dev || !dev->bdev ||
2308*4882a593Smuzhiyun !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2309*4882a593Smuzhiyun btrfs_bio_counter_dec(fs_info);
2310*4882a593Smuzhiyun bio_put(bio);
2311*4882a593Smuzhiyun return -EIO;
2312*4882a593Smuzhiyun }
2313*4882a593Smuzhiyun bio_set_dev(bio, dev->bdev);
2314*4882a593Smuzhiyun bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
2315*4882a593Smuzhiyun bio_add_page(bio, page, length, pg_offset);
2316*4882a593Smuzhiyun
2317*4882a593Smuzhiyun if (btrfsic_submit_bio_wait(bio)) {
2318*4882a593Smuzhiyun /* try to remap that extent elsewhere? */
2319*4882a593Smuzhiyun btrfs_bio_counter_dec(fs_info);
2320*4882a593Smuzhiyun bio_put(bio);
2321*4882a593Smuzhiyun btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
2322*4882a593Smuzhiyun return -EIO;
2323*4882a593Smuzhiyun }
2324*4882a593Smuzhiyun
2325*4882a593Smuzhiyun btrfs_info_rl_in_rcu(fs_info,
2326*4882a593Smuzhiyun "read error corrected: ino %llu off %llu (dev %s sector %llu)",
2327*4882a593Smuzhiyun ino, start,
2328*4882a593Smuzhiyun rcu_str_deref(dev->name), sector);
2329*4882a593Smuzhiyun btrfs_bio_counter_dec(fs_info);
2330*4882a593Smuzhiyun bio_put(bio);
2331*4882a593Smuzhiyun return 0;
2332*4882a593Smuzhiyun }
2333*4882a593Smuzhiyun
2334*4882a593Smuzhiyun int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num)
2335*4882a593Smuzhiyun {
2336*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = eb->fs_info;
2337*4882a593Smuzhiyun u64 start = eb->start;
2338*4882a593Smuzhiyun int i, num_pages = num_extent_pages(eb);
2339*4882a593Smuzhiyun int ret = 0;
2340*4882a593Smuzhiyun
2341*4882a593Smuzhiyun if (sb_rdonly(fs_info->sb))
2342*4882a593Smuzhiyun return -EROFS;
2343*4882a593Smuzhiyun
2344*4882a593Smuzhiyun for (i = 0; i < num_pages; i++) {
2345*4882a593Smuzhiyun struct page *p = eb->pages[i];
2346*4882a593Smuzhiyun
2347*4882a593Smuzhiyun ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p,
2348*4882a593Smuzhiyun start - page_offset(p), mirror_num);
2349*4882a593Smuzhiyun if (ret)
2350*4882a593Smuzhiyun break;
2351*4882a593Smuzhiyun start += PAGE_SIZE;
2352*4882a593Smuzhiyun }
2353*4882a593Smuzhiyun
2354*4882a593Smuzhiyun return ret;
2355*4882a593Smuzhiyun }
2356*4882a593Smuzhiyun
2357*4882a593Smuzhiyun /*
2358*4882a593Smuzhiyun * each time an IO finishes, we do a fast check in the IO failure tree
2359*4882a593Smuzhiyun * to see if we need to process or clean up an io_failure_record
2360*4882a593Smuzhiyun */
2361*4882a593Smuzhiyun int clean_io_failure(struct btrfs_fs_info *fs_info,
2362*4882a593Smuzhiyun struct extent_io_tree *failure_tree,
2363*4882a593Smuzhiyun struct extent_io_tree *io_tree, u64 start,
2364*4882a593Smuzhiyun struct page *page, u64 ino, unsigned int pg_offset)
2365*4882a593Smuzhiyun {
2366*4882a593Smuzhiyun u64 private;
2367*4882a593Smuzhiyun struct io_failure_record *failrec;
2368*4882a593Smuzhiyun struct extent_state *state;
2369*4882a593Smuzhiyun int num_copies;
2370*4882a593Smuzhiyun int ret;
2371*4882a593Smuzhiyun
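	/*
	 * Fast path: if there are no EXTENT_DIRTY bits at all in the failure
	 * tree, there is no io_failure_record to process or clean up.
	 */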
2372*4882a593Smuzhiyun private = 0;
2373*4882a593Smuzhiyun ret = count_range_bits(failure_tree, &private, (u64)-1, 1,
2374*4882a593Smuzhiyun EXTENT_DIRTY, 0);
2375*4882a593Smuzhiyun if (!ret)
2376*4882a593Smuzhiyun return 0;
2377*4882a593Smuzhiyun
2378*4882a593Smuzhiyun failrec = get_state_failrec(failure_tree, start);
2379*4882a593Smuzhiyun if (IS_ERR(failrec))
2380*4882a593Smuzhiyun return 0;
2381*4882a593Smuzhiyun
2382*4882a593Smuzhiyun BUG_ON(!failrec->this_mirror);
2383*4882a593Smuzhiyun
2384*4882a593Smuzhiyun if (failrec->in_validation) {
2385*4882a593Smuzhiyun /* there was no real error, just free the record */
2386*4882a593Smuzhiyun btrfs_debug(fs_info,
2387*4882a593Smuzhiyun "clean_io_failure: freeing dummy error at %llu",
2388*4882a593Smuzhiyun failrec->start);
2389*4882a593Smuzhiyun goto out;
2390*4882a593Smuzhiyun }
2391*4882a593Smuzhiyun if (sb_rdonly(fs_info->sb))
2392*4882a593Smuzhiyun goto out;
2393*4882a593Smuzhiyun
2394*4882a593Smuzhiyun spin_lock(&io_tree->lock);
2395*4882a593Smuzhiyun state = find_first_extent_bit_state(io_tree,
2396*4882a593Smuzhiyun failrec->start,
2397*4882a593Smuzhiyun EXTENT_LOCKED);
2398*4882a593Smuzhiyun spin_unlock(&io_tree->lock);
2399*4882a593Smuzhiyun
2400*4882a593Smuzhiyun if (state && state->start <= failrec->start &&
2401*4882a593Smuzhiyun state->end >= failrec->start + failrec->len - 1) {
2402*4882a593Smuzhiyun num_copies = btrfs_num_copies(fs_info, failrec->logical,
2403*4882a593Smuzhiyun failrec->len);
2404*4882a593Smuzhiyun if (num_copies > 1) {
2405*4882a593Smuzhiyun repair_io_failure(fs_info, ino, start, failrec->len,
2406*4882a593Smuzhiyun failrec->logical, page, pg_offset,
2407*4882a593Smuzhiyun failrec->failed_mirror);
2408*4882a593Smuzhiyun }
2409*4882a593Smuzhiyun }
2410*4882a593Smuzhiyun
2411*4882a593Smuzhiyun out:
2412*4882a593Smuzhiyun free_io_failure(failure_tree, io_tree, failrec);
2413*4882a593Smuzhiyun
2414*4882a593Smuzhiyun return 0;
2415*4882a593Smuzhiyun }
2416*4882a593Smuzhiyun
2417*4882a593Smuzhiyun /*
2418*4882a593Smuzhiyun * Can be called when:
2419*4882a593Smuzhiyun * - holding the extent lock
2420*4882a593Smuzhiyun * - under an ordered extent
2421*4882a593Smuzhiyun * - the inode is being freed
2422*4882a593Smuzhiyun */
2423*4882a593Smuzhiyun void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end)
2424*4882a593Smuzhiyun {
2425*4882a593Smuzhiyun struct extent_io_tree *failure_tree = &inode->io_failure_tree;
2426*4882a593Smuzhiyun struct io_failure_record *failrec;
2427*4882a593Smuzhiyun struct extent_state *state, *next;
2428*4882a593Smuzhiyun
2429*4882a593Smuzhiyun if (RB_EMPTY_ROOT(&failure_tree->state))
2430*4882a593Smuzhiyun return;
2431*4882a593Smuzhiyun
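	/*
	 * Walk every failure record state in [start, end] and free both the
	 * extent state and the io_failure_record attached to it.
	 */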
2432*4882a593Smuzhiyun spin_lock(&failure_tree->lock);
2433*4882a593Smuzhiyun state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY);
2434*4882a593Smuzhiyun while (state) {
2435*4882a593Smuzhiyun if (state->start > end)
2436*4882a593Smuzhiyun break;
2437*4882a593Smuzhiyun
2438*4882a593Smuzhiyun ASSERT(state->end <= end);
2439*4882a593Smuzhiyun
2440*4882a593Smuzhiyun next = next_state(state);
2441*4882a593Smuzhiyun
2442*4882a593Smuzhiyun failrec = state->failrec;
2443*4882a593Smuzhiyun free_extent_state(state);
2444*4882a593Smuzhiyun kfree(failrec);
2445*4882a593Smuzhiyun
2446*4882a593Smuzhiyun state = next;
2447*4882a593Smuzhiyun }
2448*4882a593Smuzhiyun spin_unlock(&failure_tree->lock);
2449*4882a593Smuzhiyun }
2450*4882a593Smuzhiyun
2451*4882a593Smuzhiyun static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode,
2452*4882a593Smuzhiyun u64 start, u64 end)
2453*4882a593Smuzhiyun {
2454*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2455*4882a593Smuzhiyun struct io_failure_record *failrec;
2456*4882a593Smuzhiyun struct extent_map *em;
2457*4882a593Smuzhiyun struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2458*4882a593Smuzhiyun struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2459*4882a593Smuzhiyun struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2460*4882a593Smuzhiyun int ret;
2461*4882a593Smuzhiyun u64 logical;
2462*4882a593Smuzhiyun
2463*4882a593Smuzhiyun failrec = get_state_failrec(failure_tree, start);
2464*4882a593Smuzhiyun if (!IS_ERR(failrec)) {
2465*4882a593Smuzhiyun btrfs_debug(fs_info,
2466*4882a593Smuzhiyun "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu, validation=%d",
2467*4882a593Smuzhiyun failrec->logical, failrec->start, failrec->len,
2468*4882a593Smuzhiyun failrec->in_validation);
2469*4882a593Smuzhiyun /*
2470*4882a593Smuzhiyun * when data can be on disk more than twice, add to failrec here
2471*4882a593Smuzhiyun * (e.g. with a list for failed_mirror) to make
2472*4882a593Smuzhiyun * clean_io_failure() clean all those errors at once.
2473*4882a593Smuzhiyun */
2474*4882a593Smuzhiyun
2475*4882a593Smuzhiyun return failrec;
2476*4882a593Smuzhiyun }
2477*4882a593Smuzhiyun
2478*4882a593Smuzhiyun failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
2479*4882a593Smuzhiyun if (!failrec)
2480*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
2481*4882a593Smuzhiyun
2482*4882a593Smuzhiyun failrec->start = start;
2483*4882a593Smuzhiyun failrec->len = end - start + 1;
2484*4882a593Smuzhiyun failrec->this_mirror = 0;
2485*4882a593Smuzhiyun failrec->bio_flags = 0;
2486*4882a593Smuzhiyun failrec->in_validation = 0;
2487*4882a593Smuzhiyun
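	/*
	 * Translate the file offset into the logical address on disk via the
	 * extent map, so a later repair knows which mirrors to look at.  For a
	 * compressed extent the whole extent is read, so the logical address
	 * is the start of the compressed extent.
	 */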
2488*4882a593Smuzhiyun read_lock(&em_tree->lock);
2489*4882a593Smuzhiyun em = lookup_extent_mapping(em_tree, start, failrec->len);
2490*4882a593Smuzhiyun if (!em) {
2491*4882a593Smuzhiyun read_unlock(&em_tree->lock);
2492*4882a593Smuzhiyun kfree(failrec);
2493*4882a593Smuzhiyun return ERR_PTR(-EIO);
2494*4882a593Smuzhiyun }
2495*4882a593Smuzhiyun
2496*4882a593Smuzhiyun if (em->start > start || em->start + em->len <= start) {
2497*4882a593Smuzhiyun free_extent_map(em);
2498*4882a593Smuzhiyun em = NULL;
2499*4882a593Smuzhiyun }
2500*4882a593Smuzhiyun read_unlock(&em_tree->lock);
2501*4882a593Smuzhiyun if (!em) {
2502*4882a593Smuzhiyun kfree(failrec);
2503*4882a593Smuzhiyun return ERR_PTR(-EIO);
2504*4882a593Smuzhiyun }
2505*4882a593Smuzhiyun
2506*4882a593Smuzhiyun logical = start - em->start;
2507*4882a593Smuzhiyun logical = em->block_start + logical;
2508*4882a593Smuzhiyun if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
2509*4882a593Smuzhiyun logical = em->block_start;
2510*4882a593Smuzhiyun failrec->bio_flags = EXTENT_BIO_COMPRESSED;
2511*4882a593Smuzhiyun extent_set_compress_type(&failrec->bio_flags, em->compress_type);
2512*4882a593Smuzhiyun }
2513*4882a593Smuzhiyun
2514*4882a593Smuzhiyun btrfs_debug(fs_info,
2515*4882a593Smuzhiyun "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu",
2516*4882a593Smuzhiyun logical, start, failrec->len);
2517*4882a593Smuzhiyun
2518*4882a593Smuzhiyun failrec->logical = logical;
2519*4882a593Smuzhiyun free_extent_map(em);
2520*4882a593Smuzhiyun
2521*4882a593Smuzhiyun /* Set the bits in the private failure tree */
2522*4882a593Smuzhiyun ret = set_extent_bits(failure_tree, start, end,
2523*4882a593Smuzhiyun EXTENT_LOCKED | EXTENT_DIRTY);
2524*4882a593Smuzhiyun if (ret >= 0) {
2525*4882a593Smuzhiyun ret = set_state_failrec(failure_tree, start, failrec);
2526*4882a593Smuzhiyun /* Set the bits in the inode's tree */
2527*4882a593Smuzhiyun ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
2528*4882a593Smuzhiyun } else if (ret < 0) {
2529*4882a593Smuzhiyun kfree(failrec);
2530*4882a593Smuzhiyun return ERR_PTR(ret);
2531*4882a593Smuzhiyun }
2532*4882a593Smuzhiyun
2533*4882a593Smuzhiyun return failrec;
2534*4882a593Smuzhiyun }
2535*4882a593Smuzhiyun
2536*4882a593Smuzhiyun static bool btrfs_check_repairable(struct inode *inode, bool needs_validation,
2537*4882a593Smuzhiyun struct io_failure_record *failrec,
2538*4882a593Smuzhiyun int failed_mirror)
2539*4882a593Smuzhiyun {
2540*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2541*4882a593Smuzhiyun int num_copies;
2542*4882a593Smuzhiyun
2543*4882a593Smuzhiyun num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
2544*4882a593Smuzhiyun if (num_copies == 1) {
2545*4882a593Smuzhiyun /*
2546*4882a593Smuzhiyun * we only have a single copy of the data, so don't bother with
2547*4882a593Smuzhiyun * all the retry and error correction code that follows. no
2548*4882a593Smuzhiyun * matter what the error is, it is very likely to persist.
2549*4882a593Smuzhiyun */
2550*4882a593Smuzhiyun btrfs_debug(fs_info,
2551*4882a593Smuzhiyun "Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
2552*4882a593Smuzhiyun num_copies, failrec->this_mirror, failed_mirror);
2553*4882a593Smuzhiyun return false;
2554*4882a593Smuzhiyun }
2555*4882a593Smuzhiyun
2556*4882a593Smuzhiyun /*
2557*4882a593Smuzhiyun * there are two premises:
2558*4882a593Smuzhiyun * a) deliver good data to the caller
2559*4882a593Smuzhiyun * b) correct the bad sectors on disk
2560*4882a593Smuzhiyun */
2561*4882a593Smuzhiyun if (needs_validation) {
2562*4882a593Smuzhiyun /*
2563*4882a593Smuzhiyun * to fulfill b), we need to know the exact failing sectors, as
2564*4882a593Smuzhiyun * we don't want to rewrite any more than the failed ones. thus,
2565*4882a593Smuzhiyun * we need separate read requests for the failed bio
2566*4882a593Smuzhiyun *
2567*4882a593Smuzhiyun * if the following BUG_ON triggers, our validation request got
2568*4882a593Smuzhiyun * merged. we need separate requests for our algorithm to work.
2569*4882a593Smuzhiyun */
2570*4882a593Smuzhiyun BUG_ON(failrec->in_validation);
2571*4882a593Smuzhiyun failrec->in_validation = 1;
2572*4882a593Smuzhiyun failrec->this_mirror = failed_mirror;
2573*4882a593Smuzhiyun } else {
2574*4882a593Smuzhiyun /*
2575*4882a593Smuzhiyun * we're ready to fulfill a) and b) alongside. get a good copy
2576*4882a593Smuzhiyun * of the failed sector and if we succeed, we have setup
2577*4882a593Smuzhiyun * everything for repair_io_failure to do the rest for us.
2578*4882a593Smuzhiyun */
2579*4882a593Smuzhiyun if (failrec->in_validation) {
2580*4882a593Smuzhiyun BUG_ON(failrec->this_mirror != failed_mirror);
2581*4882a593Smuzhiyun failrec->in_validation = 0;
2582*4882a593Smuzhiyun failrec->this_mirror = 0;
2583*4882a593Smuzhiyun }
2584*4882a593Smuzhiyun failrec->failed_mirror = failed_mirror;
2585*4882a593Smuzhiyun failrec->this_mirror++;
2586*4882a593Smuzhiyun if (failrec->this_mirror == failed_mirror)
2587*4882a593Smuzhiyun failrec->this_mirror++;
2588*4882a593Smuzhiyun }
2589*4882a593Smuzhiyun
2590*4882a593Smuzhiyun if (failrec->this_mirror > num_copies) {
2591*4882a593Smuzhiyun btrfs_debug(fs_info,
2592*4882a593Smuzhiyun "Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
2593*4882a593Smuzhiyun num_copies, failrec->this_mirror, failed_mirror);
2594*4882a593Smuzhiyun return false;
2595*4882a593Smuzhiyun }
2596*4882a593Smuzhiyun
2597*4882a593Smuzhiyun return true;
2598*4882a593Smuzhiyun }
2599*4882a593Smuzhiyun
2600*4882a593Smuzhiyun static bool btrfs_io_needs_validation(struct inode *inode, struct bio *bio)
2601*4882a593Smuzhiyun {
2602*4882a593Smuzhiyun u64 len = 0;
2603*4882a593Smuzhiyun const u32 blocksize = inode->i_sb->s_blocksize;
2604*4882a593Smuzhiyun
2605*4882a593Smuzhiyun /*
2606*4882a593Smuzhiyun * If bi_status is BLK_STS_OK, then this was a checksum error, not an
2607*4882a593Smuzhiyun * I/O error. In this case, we already know exactly which sector was
2608*4882a593Smuzhiyun * bad, so we don't need to validate.
2609*4882a593Smuzhiyun */
2610*4882a593Smuzhiyun if (bio->bi_status == BLK_STS_OK)
2611*4882a593Smuzhiyun return false;
2612*4882a593Smuzhiyun
2613*4882a593Smuzhiyun /*
2614*4882a593Smuzhiyun * We need to validate each sector individually if the failed I/O was
2615*4882a593Smuzhiyun * for multiple sectors.
2616*4882a593Smuzhiyun *
2617*4882a593Smuzhiyun * There are a few possible bios that can end up here:
2618*4882a593Smuzhiyun * 1. A buffered read bio, which is not cloned.
2619*4882a593Smuzhiyun * 2. A direct I/O read bio, which is cloned.
2620*4882a593Smuzhiyun * 3. A (buffered or direct) repair bio, which is not cloned.
2621*4882a593Smuzhiyun *
2622*4882a593Smuzhiyun * For cloned bios (case 2), we can get the size from
2623*4882a593Smuzhiyun * btrfs_io_bio->iter; for non-cloned bios (cases 1 and 3), we can get
2624*4882a593Smuzhiyun * it from the bvecs.
2625*4882a593Smuzhiyun */
2626*4882a593Smuzhiyun if (bio_flagged(bio, BIO_CLONED)) {
2627*4882a593Smuzhiyun if (btrfs_io_bio(bio)->iter.bi_size > blocksize)
2628*4882a593Smuzhiyun return true;
2629*4882a593Smuzhiyun } else {
2630*4882a593Smuzhiyun struct bio_vec *bvec;
2631*4882a593Smuzhiyun int i;
2632*4882a593Smuzhiyun
2633*4882a593Smuzhiyun bio_for_each_bvec_all(bvec, bio, i) {
2634*4882a593Smuzhiyun len += bvec->bv_len;
2635*4882a593Smuzhiyun if (len > blocksize)
2636*4882a593Smuzhiyun return true;
2637*4882a593Smuzhiyun }
2638*4882a593Smuzhiyun }
2639*4882a593Smuzhiyun return false;
2640*4882a593Smuzhiyun }
2641*4882a593Smuzhiyun
2642*4882a593Smuzhiyun blk_status_t btrfs_submit_read_repair(struct inode *inode,
2643*4882a593Smuzhiyun struct bio *failed_bio, u64 phy_offset,
2644*4882a593Smuzhiyun struct page *page, unsigned int pgoff,
2645*4882a593Smuzhiyun u64 start, u64 end, int failed_mirror,
2646*4882a593Smuzhiyun submit_bio_hook_t *submit_bio_hook)
2647*4882a593Smuzhiyun {
2648*4882a593Smuzhiyun struct io_failure_record *failrec;
2649*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2650*4882a593Smuzhiyun struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2651*4882a593Smuzhiyun struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
2652*4882a593Smuzhiyun struct btrfs_io_bio *failed_io_bio = btrfs_io_bio(failed_bio);
2653*4882a593Smuzhiyun const int icsum = phy_offset >> inode->i_sb->s_blocksize_bits;
2654*4882a593Smuzhiyun bool need_validation;
2655*4882a593Smuzhiyun struct bio *repair_bio;
2656*4882a593Smuzhiyun struct btrfs_io_bio *repair_io_bio;
2657*4882a593Smuzhiyun blk_status_t status;
2658*4882a593Smuzhiyun
2659*4882a593Smuzhiyun btrfs_debug(fs_info,
2660*4882a593Smuzhiyun "repair read error: read error at %llu", start);
2661*4882a593Smuzhiyun
2662*4882a593Smuzhiyun BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
2663*4882a593Smuzhiyun
2664*4882a593Smuzhiyun failrec = btrfs_get_io_failure_record(inode, start, end);
2665*4882a593Smuzhiyun if (IS_ERR(failrec))
2666*4882a593Smuzhiyun return errno_to_blk_status(PTR_ERR(failrec));
2667*4882a593Smuzhiyun
2668*4882a593Smuzhiyun need_validation = btrfs_io_needs_validation(inode, failed_bio);
2669*4882a593Smuzhiyun
2670*4882a593Smuzhiyun if (!btrfs_check_repairable(inode, need_validation, failrec,
2671*4882a593Smuzhiyun failed_mirror)) {
2672*4882a593Smuzhiyun free_io_failure(failure_tree, tree, failrec);
2673*4882a593Smuzhiyun return BLK_STS_IOERR;
2674*4882a593Smuzhiyun }
2675*4882a593Smuzhiyun
2676*4882a593Smuzhiyun repair_bio = btrfs_io_bio_alloc(1);
2677*4882a593Smuzhiyun repair_io_bio = btrfs_io_bio(repair_bio);
2678*4882a593Smuzhiyun repair_bio->bi_opf = REQ_OP_READ;
2679*4882a593Smuzhiyun if (need_validation)
2680*4882a593Smuzhiyun repair_bio->bi_opf |= REQ_FAILFAST_DEV;
2681*4882a593Smuzhiyun repair_bio->bi_end_io = failed_bio->bi_end_io;
2682*4882a593Smuzhiyun repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
2683*4882a593Smuzhiyun repair_bio->bi_private = failed_bio->bi_private;
2684*4882a593Smuzhiyun
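	/*
	 * Copy only the checksum of the failed sector from the original bio,
	 * so the repaired read can be verified against it.
	 */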
2685*4882a593Smuzhiyun if (failed_io_bio->csum) {
2686*4882a593Smuzhiyun const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
2687*4882a593Smuzhiyun
2688*4882a593Smuzhiyun repair_io_bio->csum = repair_io_bio->csum_inline;
2689*4882a593Smuzhiyun memcpy(repair_io_bio->csum,
2690*4882a593Smuzhiyun failed_io_bio->csum + csum_size * icsum, csum_size);
2691*4882a593Smuzhiyun }
2692*4882a593Smuzhiyun
2693*4882a593Smuzhiyun bio_add_page(repair_bio, page, failrec->len, pgoff);
2694*4882a593Smuzhiyun repair_io_bio->logical = failrec->start;
2695*4882a593Smuzhiyun repair_io_bio->iter = repair_bio->bi_iter;
2696*4882a593Smuzhiyun
2697*4882a593Smuzhiyun btrfs_debug(btrfs_sb(inode->i_sb),
2698*4882a593Smuzhiyun "repair read error: submitting new read to mirror %d, in_validation=%d",
2699*4882a593Smuzhiyun failrec->this_mirror, failrec->in_validation);
2700*4882a593Smuzhiyun
2701*4882a593Smuzhiyun status = submit_bio_hook(inode, repair_bio, failrec->this_mirror,
2702*4882a593Smuzhiyun failrec->bio_flags);
2703*4882a593Smuzhiyun if (status) {
2704*4882a593Smuzhiyun free_io_failure(failure_tree, tree, failrec);
2705*4882a593Smuzhiyun bio_put(repair_bio);
2706*4882a593Smuzhiyun }
2707*4882a593Smuzhiyun return status;
2708*4882a593Smuzhiyun }
2709*4882a593Smuzhiyun
2710*4882a593Smuzhiyun /* lots and lots of room for performance fixes in the end_bio funcs */
2711*4882a593Smuzhiyun
2712*4882a593Smuzhiyun void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2713*4882a593Smuzhiyun {
2714*4882a593Smuzhiyun int uptodate = (err == 0);
2715*4882a593Smuzhiyun int ret = 0;
2716*4882a593Smuzhiyun
2717*4882a593Smuzhiyun btrfs_writepage_endio_finish_ordered(page, start, end, uptodate);
2718*4882a593Smuzhiyun
2719*4882a593Smuzhiyun if (!uptodate) {
2720*4882a593Smuzhiyun ClearPageUptodate(page);
2721*4882a593Smuzhiyun SetPageError(page);
2722*4882a593Smuzhiyun ret = err < 0 ? err : -EIO;
2723*4882a593Smuzhiyun mapping_set_error(page->mapping, ret);
2724*4882a593Smuzhiyun }
2725*4882a593Smuzhiyun }
2726*4882a593Smuzhiyun
2727*4882a593Smuzhiyun /*
2728*4882a593Smuzhiyun * after a writepage IO is done, we need to:
2729*4882a593Smuzhiyun * clear the uptodate bits on error
2730*4882a593Smuzhiyun * clear the writeback bits in the extent tree for this IO
2731*4882a593Smuzhiyun * end_page_writeback if the page has no more pending IO
2732*4882a593Smuzhiyun *
2733*4882a593Smuzhiyun * Scheduling is not allowed, so the extent state tree is expected
2734*4882a593Smuzhiyun * to have one and only one object corresponding to this IO.
2735*4882a593Smuzhiyun */
2736*4882a593Smuzhiyun static void end_bio_extent_writepage(struct bio *bio)
2737*4882a593Smuzhiyun {
2738*4882a593Smuzhiyun int error = blk_status_to_errno(bio->bi_status);
2739*4882a593Smuzhiyun struct bio_vec *bvec;
2740*4882a593Smuzhiyun u64 start;
2741*4882a593Smuzhiyun u64 end;
2742*4882a593Smuzhiyun struct bvec_iter_all iter_all;
2743*4882a593Smuzhiyun
2744*4882a593Smuzhiyun ASSERT(!bio_flagged(bio, BIO_CLONED));
2745*4882a593Smuzhiyun bio_for_each_segment_all(bvec, bio, iter_all) {
2746*4882a593Smuzhiyun struct page *page = bvec->bv_page;
2747*4882a593Smuzhiyun struct inode *inode = page->mapping->host;
2748*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2749*4882a593Smuzhiyun
2750*4882a593Smuzhiyun /* We always issue full-page writes, but if some block
2751*4882a593Smuzhiyun * in a page fails to write, blk_update_request() will
2752*4882a593Smuzhiyun * advance bv_offset and adjust bv_len to compensate.
2753*4882a593Smuzhiyun * Print a warning for nonzero offsets, and an error
2754*4882a593Smuzhiyun * if they don't add up to a full page. */
2755*4882a593Smuzhiyun if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2756*4882a593Smuzhiyun if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2757*4882a593Smuzhiyun btrfs_err(fs_info,
2758*4882a593Smuzhiyun "partial page write in btrfs with offset %u and length %u",
2759*4882a593Smuzhiyun bvec->bv_offset, bvec->bv_len);
2760*4882a593Smuzhiyun else
2761*4882a593Smuzhiyun btrfs_info(fs_info,
2762*4882a593Smuzhiyun "incomplete page write in btrfs with offset %u and length %u",
2763*4882a593Smuzhiyun bvec->bv_offset, bvec->bv_len);
2764*4882a593Smuzhiyun }
2765*4882a593Smuzhiyun
2766*4882a593Smuzhiyun start = page_offset(page);
2767*4882a593Smuzhiyun end = start + bvec->bv_offset + bvec->bv_len - 1;
2768*4882a593Smuzhiyun
2769*4882a593Smuzhiyun end_extent_writepage(page, error, start, end);
2770*4882a593Smuzhiyun end_page_writeback(page);
2771*4882a593Smuzhiyun }
2772*4882a593Smuzhiyun
2773*4882a593Smuzhiyun bio_put(bio);
2774*4882a593Smuzhiyun }
2775*4882a593Smuzhiyun
2776*4882a593Smuzhiyun static void
2777*4882a593Smuzhiyun endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2778*4882a593Smuzhiyun int uptodate)
2779*4882a593Smuzhiyun {
2780*4882a593Smuzhiyun struct extent_state *cached = NULL;
2781*4882a593Smuzhiyun u64 end = start + len - 1;
2782*4882a593Smuzhiyun
2783*4882a593Smuzhiyun if (uptodate && tree->track_uptodate)
2784*4882a593Smuzhiyun set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
2785*4882a593Smuzhiyun unlock_extent_cached_atomic(tree, start, end, &cached);
2786*4882a593Smuzhiyun }
2787*4882a593Smuzhiyun
2788*4882a593Smuzhiyun /*
2789*4882a593Smuzhiyun * after a readpage IO is done, we need to:
2790*4882a593Smuzhiyun * clear the uptodate bits on error
2791*4882a593Smuzhiyun * set the uptodate bits if things worked
2792*4882a593Smuzhiyun * set the page up to date if all extents in the tree are uptodate
2793*4882a593Smuzhiyun * clear the lock bit in the extent tree
2794*4882a593Smuzhiyun * unlock the page if there are no other extents locked for it
2795*4882a593Smuzhiyun *
2796*4882a593Smuzhiyun * Scheduling is not allowed, so the extent state tree is expected
2797*4882a593Smuzhiyun * to have one and only one object corresponding to this IO.
2798*4882a593Smuzhiyun */
2799*4882a593Smuzhiyun static void end_bio_extent_readpage(struct bio *bio)
2800*4882a593Smuzhiyun {
2801*4882a593Smuzhiyun struct bio_vec *bvec;
2802*4882a593Smuzhiyun int uptodate = !bio->bi_status;
2803*4882a593Smuzhiyun struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2804*4882a593Smuzhiyun struct extent_io_tree *tree, *failure_tree;
2805*4882a593Smuzhiyun u64 offset = 0;
2806*4882a593Smuzhiyun u64 start;
2807*4882a593Smuzhiyun u64 end;
2808*4882a593Smuzhiyun u64 len;
2809*4882a593Smuzhiyun u64 extent_start = 0;
2810*4882a593Smuzhiyun u64 extent_len = 0;
2811*4882a593Smuzhiyun int mirror;
2812*4882a593Smuzhiyun int ret;
2813*4882a593Smuzhiyun struct bvec_iter_all iter_all;
2814*4882a593Smuzhiyun
2815*4882a593Smuzhiyun ASSERT(!bio_flagged(bio, BIO_CLONED));
2816*4882a593Smuzhiyun bio_for_each_segment_all(bvec, bio, iter_all) {
2817*4882a593Smuzhiyun struct page *page = bvec->bv_page;
2818*4882a593Smuzhiyun struct inode *inode = page->mapping->host;
2819*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2820*4882a593Smuzhiyun
2821*4882a593Smuzhiyun btrfs_debug(fs_info,
2822*4882a593Smuzhiyun "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
2823*4882a593Smuzhiyun (u64)bio->bi_iter.bi_sector, bio->bi_status,
2824*4882a593Smuzhiyun io_bio->mirror_num);
2825*4882a593Smuzhiyun tree = &BTRFS_I(inode)->io_tree;
2826*4882a593Smuzhiyun failure_tree = &BTRFS_I(inode)->io_failure_tree;
2827*4882a593Smuzhiyun
2828*4882a593Smuzhiyun /* We always issue full-page reads, but if some block
2829*4882a593Smuzhiyun * in a page fails to read, blk_update_request() will
2830*4882a593Smuzhiyun * advance bv_offset and adjust bv_len to compensate.
2831*4882a593Smuzhiyun * Print a warning for nonzero offsets, and an error
2832*4882a593Smuzhiyun * if they don't add up to a full page. */
2833*4882a593Smuzhiyun if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
2834*4882a593Smuzhiyun if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
2835*4882a593Smuzhiyun btrfs_err(fs_info,
2836*4882a593Smuzhiyun "partial page read in btrfs with offset %u and length %u",
2837*4882a593Smuzhiyun bvec->bv_offset, bvec->bv_len);
2838*4882a593Smuzhiyun else
2839*4882a593Smuzhiyun btrfs_info(fs_info,
2840*4882a593Smuzhiyun "incomplete page read in btrfs with offset %u and length %u",
2841*4882a593Smuzhiyun bvec->bv_offset, bvec->bv_len);
2842*4882a593Smuzhiyun }
2843*4882a593Smuzhiyun
2844*4882a593Smuzhiyun start = page_offset(page);
2845*4882a593Smuzhiyun end = start + bvec->bv_offset + bvec->bv_len - 1;
2846*4882a593Smuzhiyun len = bvec->bv_len;
2847*4882a593Smuzhiyun
2848*4882a593Smuzhiyun mirror = io_bio->mirror_num;
2849*4882a593Smuzhiyun if (likely(uptodate)) {
2850*4882a593Smuzhiyun if (is_data_inode(inode))
2851*4882a593Smuzhiyun ret = btrfs_verify_data_csum(io_bio, offset, page,
2852*4882a593Smuzhiyun start, end, mirror);
2853*4882a593Smuzhiyun else
2854*4882a593Smuzhiyun ret = btrfs_validate_metadata_buffer(io_bio,
2855*4882a593Smuzhiyun offset, page, start, end, mirror);
2856*4882a593Smuzhiyun if (ret)
2857*4882a593Smuzhiyun uptodate = 0;
2858*4882a593Smuzhiyun else
2859*4882a593Smuzhiyun clean_io_failure(BTRFS_I(inode)->root->fs_info,
2860*4882a593Smuzhiyun failure_tree, tree, start,
2861*4882a593Smuzhiyun page,
2862*4882a593Smuzhiyun btrfs_ino(BTRFS_I(inode)), 0);
2863*4882a593Smuzhiyun }
2864*4882a593Smuzhiyun
2865*4882a593Smuzhiyun if (likely(uptodate))
2866*4882a593Smuzhiyun goto readpage_ok;
2867*4882a593Smuzhiyun
2868*4882a593Smuzhiyun if (is_data_inode(inode)) {
2869*4882a593Smuzhiyun
2870*4882a593Smuzhiyun /*
2871*4882a593Smuzhiyun * btrfs_submit_read_repair() handles errors the
2872*4882a593Smuzhiyun * following way: if possible, new read requests are
2873*4882a593Smuzhiyun * created and submitted and will end up in
2874*4882a593Smuzhiyun * end_bio_extent_readpage as well (if we're lucky,
2875*4882a593Smuzhiyun * not in the !uptodate case). In that case it returns
2876*4882a593Smuzhiyun * 0 and we just go on with the next page in our bio.
2877*4882a593Smuzhiyun * If it can't handle the error it returns an error status and
2878*4882a593Smuzhiyun * we remain responsible for that page.
2879*4882a593Smuzhiyun */
2880*4882a593Smuzhiyun if (!btrfs_submit_read_repair(inode, bio, offset, page,
2881*4882a593Smuzhiyun start - page_offset(page),
2882*4882a593Smuzhiyun start, end, mirror,
2883*4882a593Smuzhiyun btrfs_submit_data_bio)) {
2884*4882a593Smuzhiyun uptodate = !bio->bi_status;
2885*4882a593Smuzhiyun offset += len;
2886*4882a593Smuzhiyun continue;
2887*4882a593Smuzhiyun }
2888*4882a593Smuzhiyun } else {
2889*4882a593Smuzhiyun struct extent_buffer *eb;
2890*4882a593Smuzhiyun
2891*4882a593Smuzhiyun eb = (struct extent_buffer *)page->private;
2892*4882a593Smuzhiyun set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
2893*4882a593Smuzhiyun eb->read_mirror = mirror;
2894*4882a593Smuzhiyun atomic_dec(&eb->io_pages);
2895*4882a593Smuzhiyun if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD,
2896*4882a593Smuzhiyun &eb->bflags))
2897*4882a593Smuzhiyun btree_readahead_hook(eb, -EIO);
2898*4882a593Smuzhiyun }
2899*4882a593Smuzhiyun readpage_ok:
2900*4882a593Smuzhiyun if (likely(uptodate)) {
2901*4882a593Smuzhiyun loff_t i_size = i_size_read(inode);
2902*4882a593Smuzhiyun pgoff_t end_index = i_size >> PAGE_SHIFT;
2903*4882a593Smuzhiyun unsigned off;
2904*4882a593Smuzhiyun
2905*4882a593Smuzhiyun /* Zero out the end if this page straddles i_size */
2906*4882a593Smuzhiyun off = offset_in_page(i_size);
2907*4882a593Smuzhiyun if (page->index == end_index && off)
2908*4882a593Smuzhiyun zero_user_segment(page, off, PAGE_SIZE);
2909*4882a593Smuzhiyun SetPageUptodate(page);
2910*4882a593Smuzhiyun } else {
2911*4882a593Smuzhiyun ClearPageUptodate(page);
2912*4882a593Smuzhiyun SetPageError(page);
2913*4882a593Smuzhiyun }
2914*4882a593Smuzhiyun unlock_page(page);
2915*4882a593Smuzhiyun offset += len;
2916*4882a593Smuzhiyun
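		/*
		 * Coalesce contiguous ranges that completed with the same
		 * status so the extent tree is unlocked (and possibly marked
		 * uptodate) in as few operations as possible.  An error
		 * flushes any pending range and releases the failed range
		 * without the uptodate bit.
		 */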
2917*4882a593Smuzhiyun if (unlikely(!uptodate)) {
2918*4882a593Smuzhiyun if (extent_len) {
2919*4882a593Smuzhiyun endio_readpage_release_extent(tree,
2920*4882a593Smuzhiyun extent_start,
2921*4882a593Smuzhiyun extent_len, 1);
2922*4882a593Smuzhiyun extent_start = 0;
2923*4882a593Smuzhiyun extent_len = 0;
2924*4882a593Smuzhiyun }
2925*4882a593Smuzhiyun endio_readpage_release_extent(tree, start,
2926*4882a593Smuzhiyun end - start + 1, 0);
2927*4882a593Smuzhiyun } else if (!extent_len) {
2928*4882a593Smuzhiyun extent_start = start;
2929*4882a593Smuzhiyun extent_len = end + 1 - start;
2930*4882a593Smuzhiyun } else if (extent_start + extent_len == start) {
2931*4882a593Smuzhiyun extent_len += end + 1 - start;
2932*4882a593Smuzhiyun } else {
2933*4882a593Smuzhiyun endio_readpage_release_extent(tree, extent_start,
2934*4882a593Smuzhiyun extent_len, uptodate);
2935*4882a593Smuzhiyun extent_start = start;
2936*4882a593Smuzhiyun extent_len = end + 1 - start;
2937*4882a593Smuzhiyun }
2938*4882a593Smuzhiyun }
2939*4882a593Smuzhiyun
2940*4882a593Smuzhiyun if (extent_len)
2941*4882a593Smuzhiyun endio_readpage_release_extent(tree, extent_start, extent_len,
2942*4882a593Smuzhiyun uptodate);
2943*4882a593Smuzhiyun btrfs_io_bio_free_csum(io_bio);
2944*4882a593Smuzhiyun bio_put(bio);
2945*4882a593Smuzhiyun }
2946*4882a593Smuzhiyun
2947*4882a593Smuzhiyun /*
2948*4882a593Smuzhiyun * Initialize the members up to but not including 'bio'. Use this after
2949*4882a593Smuzhiyun * allocating a new bio with bio_alloc_bioset, which does not zero the bytes
2950*4882a593Smuzhiyun * outside of 'bio' because __GFP_ZERO is not supported.
2951*4882a593Smuzhiyun */
2952*4882a593Smuzhiyun static inline void btrfs_io_bio_init(struct btrfs_io_bio *btrfs_bio)
2953*4882a593Smuzhiyun {
2954*4882a593Smuzhiyun memset(btrfs_bio, 0, offsetof(struct btrfs_io_bio, bio));
2955*4882a593Smuzhiyun }
2956*4882a593Smuzhiyun
2957*4882a593Smuzhiyun /*
2958*4882a593Smuzhiyun * The following helpers allocate a bio. As it's backed by a bioset, it'll
2959*4882a593Smuzhiyun * never fail. They return a plain bio; call btrfs_io_bio on it for the
2960*4882a593Smuzhiyun * appropriate container_of magic.
2961*4882a593Smuzhiyun */
2962*4882a593Smuzhiyun struct bio *btrfs_bio_alloc(u64 first_byte)
2963*4882a593Smuzhiyun {
2964*4882a593Smuzhiyun struct bio *bio;
2965*4882a593Smuzhiyun
2966*4882a593Smuzhiyun bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &btrfs_bioset);
2967*4882a593Smuzhiyun bio->bi_iter.bi_sector = first_byte >> 9;
2968*4882a593Smuzhiyun btrfs_io_bio_init(btrfs_io_bio(bio));
2969*4882a593Smuzhiyun return bio;
2970*4882a593Smuzhiyun }
2971*4882a593Smuzhiyun
2972*4882a593Smuzhiyun struct bio *btrfs_bio_clone(struct bio *bio)
2973*4882a593Smuzhiyun {
2974*4882a593Smuzhiyun struct btrfs_io_bio *btrfs_bio;
2975*4882a593Smuzhiyun struct bio *new;
2976*4882a593Smuzhiyun
2977*4882a593Smuzhiyun /* Bio allocation backed by a bioset does not fail */
2978*4882a593Smuzhiyun new = bio_clone_fast(bio, GFP_NOFS, &btrfs_bioset);
2979*4882a593Smuzhiyun btrfs_bio = btrfs_io_bio(new);
2980*4882a593Smuzhiyun btrfs_io_bio_init(btrfs_bio);
2981*4882a593Smuzhiyun btrfs_bio->iter = bio->bi_iter;
2982*4882a593Smuzhiyun return new;
2983*4882a593Smuzhiyun }
2984*4882a593Smuzhiyun
2985*4882a593Smuzhiyun struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
2986*4882a593Smuzhiyun {
2987*4882a593Smuzhiyun struct bio *bio;
2988*4882a593Smuzhiyun
2989*4882a593Smuzhiyun /* Bio allocation backed by a bioset does not fail */
2990*4882a593Smuzhiyun bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
2991*4882a593Smuzhiyun btrfs_io_bio_init(btrfs_io_bio(bio));
2992*4882a593Smuzhiyun return bio;
2993*4882a593Smuzhiyun }
2994*4882a593Smuzhiyun
2995*4882a593Smuzhiyun struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
2996*4882a593Smuzhiyun {
2997*4882a593Smuzhiyun struct bio *bio;
2998*4882a593Smuzhiyun struct btrfs_io_bio *btrfs_bio;
2999*4882a593Smuzhiyun
3000*4882a593Smuzhiyun /* this will never fail when it's backed by a bioset */
3001*4882a593Smuzhiyun bio = bio_clone_fast(orig, GFP_NOFS, &btrfs_bioset);
3002*4882a593Smuzhiyun ASSERT(bio);
3003*4882a593Smuzhiyun
3004*4882a593Smuzhiyun btrfs_bio = btrfs_io_bio(bio);
3005*4882a593Smuzhiyun btrfs_io_bio_init(btrfs_bio);
3006*4882a593Smuzhiyun
3007*4882a593Smuzhiyun bio_trim(bio, offset >> 9, size >> 9);
3008*4882a593Smuzhiyun btrfs_bio->iter = bio->bi_iter;
3009*4882a593Smuzhiyun return bio;
3010*4882a593Smuzhiyun }
3011*4882a593Smuzhiyun
3012*4882a593Smuzhiyun /*
3013*4882a593Smuzhiyun * @opf: bio REQ_OP_* and REQ_* flags as one value
3014*4882a593Smuzhiyun * @wbc: optional writeback control for io accounting
3015*4882a593Smuzhiyun * @page: page to add to the bio
3016*4882a593Smuzhiyun * @pg_offset: offset within @page where the data starts, also used to
3017*4882a593Smuzhiyun * check whether we are adding a page contiguous to the previous one
3018*4882a593Smuzhiyun * @size: portion of the page that we want to write
3019*4882a593Smuzhiyun * @offset: logical offset of the data on disk, used to set bi_sector of a new bio
3020*4882a593Smuzhiyun * @bio_ret: must be valid pointer, newly allocated bio will be stored there
3021*4882a593Smuzhiyun * @end_io_func: end_io callback for new bio
3022*4882a593Smuzhiyun * @mirror_num: desired mirror to read/write
3023*4882a593Smuzhiyun * @prev_bio_flags: flags of previous bio to see if we can merge the current one
3024*4882a593Smuzhiyun * @bio_flags: flags of the current bio to see if we can merge them
3025*4882a593Smuzhiyun */
3026*4882a593Smuzhiyun static int submit_extent_page(unsigned int opf,
3027*4882a593Smuzhiyun struct writeback_control *wbc,
3028*4882a593Smuzhiyun struct page *page, u64 offset,
3029*4882a593Smuzhiyun size_t size, unsigned long pg_offset,
3030*4882a593Smuzhiyun struct bio **bio_ret,
3031*4882a593Smuzhiyun bio_end_io_t end_io_func,
3032*4882a593Smuzhiyun int mirror_num,
3033*4882a593Smuzhiyun unsigned long prev_bio_flags,
3034*4882a593Smuzhiyun unsigned long bio_flags,
3035*4882a593Smuzhiyun bool force_bio_submit)
3036*4882a593Smuzhiyun {
3037*4882a593Smuzhiyun int ret = 0;
3038*4882a593Smuzhiyun struct bio *bio;
3039*4882a593Smuzhiyun size_t page_size = min_t(size_t, size, PAGE_SIZE);
3040*4882a593Smuzhiyun sector_t sector = offset >> 9;
3041*4882a593Smuzhiyun struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;
3042*4882a593Smuzhiyun
3043*4882a593Smuzhiyun ASSERT(bio_ret);
3044*4882a593Smuzhiyun
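	/*
	 * Try to add the page to the bio passed in by the caller.  The bio is
	 * submitted and a new one is started when the page is not physically
	 * contiguous, the bio flags differ, a chunk/stripe boundary would be
	 * crossed, the caller forces a submit, or the bio is simply full.
	 */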
3045*4882a593Smuzhiyun if (*bio_ret) {
3046*4882a593Smuzhiyun bool contig;
3047*4882a593Smuzhiyun bool can_merge = true;
3048*4882a593Smuzhiyun
3049*4882a593Smuzhiyun bio = *bio_ret;
3050*4882a593Smuzhiyun if (prev_bio_flags & EXTENT_BIO_COMPRESSED)
3051*4882a593Smuzhiyun contig = bio->bi_iter.bi_sector == sector;
3052*4882a593Smuzhiyun else
3053*4882a593Smuzhiyun contig = bio_end_sector(bio) == sector;
3054*4882a593Smuzhiyun
3055*4882a593Smuzhiyun if (btrfs_bio_fits_in_stripe(page, page_size, bio, bio_flags))
3056*4882a593Smuzhiyun can_merge = false;
3057*4882a593Smuzhiyun
3058*4882a593Smuzhiyun if (prev_bio_flags != bio_flags || !contig || !can_merge ||
3059*4882a593Smuzhiyun force_bio_submit ||
3060*4882a593Smuzhiyun bio_add_page(bio, page, page_size, pg_offset) < page_size) {
3061*4882a593Smuzhiyun ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
3062*4882a593Smuzhiyun if (ret < 0) {
3063*4882a593Smuzhiyun *bio_ret = NULL;
3064*4882a593Smuzhiyun return ret;
3065*4882a593Smuzhiyun }
3066*4882a593Smuzhiyun bio = NULL;
3067*4882a593Smuzhiyun } else {
3068*4882a593Smuzhiyun if (wbc)
3069*4882a593Smuzhiyun wbc_account_cgroup_owner(wbc, page, page_size);
3070*4882a593Smuzhiyun return 0;
3071*4882a593Smuzhiyun }
3072*4882a593Smuzhiyun }
3073*4882a593Smuzhiyun
3074*4882a593Smuzhiyun bio = btrfs_bio_alloc(offset);
3075*4882a593Smuzhiyun bio_add_page(bio, page, page_size, pg_offset);
3076*4882a593Smuzhiyun bio->bi_end_io = end_io_func;
3077*4882a593Smuzhiyun bio->bi_private = tree;
3078*4882a593Smuzhiyun bio->bi_write_hint = page->mapping->host->i_write_hint;
3079*4882a593Smuzhiyun bio->bi_opf = opf;
3080*4882a593Smuzhiyun if (wbc) {
3081*4882a593Smuzhiyun struct block_device *bdev;
3082*4882a593Smuzhiyun
3083*4882a593Smuzhiyun bdev = BTRFS_I(page->mapping->host)->root->fs_info->fs_devices->latest_bdev;
3084*4882a593Smuzhiyun bio_set_dev(bio, bdev);
3085*4882a593Smuzhiyun wbc_init_bio(wbc, bio);
3086*4882a593Smuzhiyun wbc_account_cgroup_owner(wbc, page, page_size);
3087*4882a593Smuzhiyun }
3088*4882a593Smuzhiyun
3089*4882a593Smuzhiyun *bio_ret = bio;
3090*4882a593Smuzhiyun
3091*4882a593Smuzhiyun return ret;
3092*4882a593Smuzhiyun }
3093*4882a593Smuzhiyun
3094*4882a593Smuzhiyun static void attach_extent_buffer_page(struct extent_buffer *eb,
3095*4882a593Smuzhiyun struct page *page)
3096*4882a593Smuzhiyun {
3097*4882a593Smuzhiyun if (!PagePrivate(page))
3098*4882a593Smuzhiyun attach_page_private(page, eb);
3099*4882a593Smuzhiyun else
3100*4882a593Smuzhiyun WARN_ON(page->private != (unsigned long)eb);
3101*4882a593Smuzhiyun }
3102*4882a593Smuzhiyun
3103*4882a593Smuzhiyun void set_page_extent_mapped(struct page *page)
3104*4882a593Smuzhiyun {
3105*4882a593Smuzhiyun if (!PagePrivate(page))
3106*4882a593Smuzhiyun attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
3107*4882a593Smuzhiyun }
3108*4882a593Smuzhiyun
3109*4882a593Smuzhiyun static struct extent_map *
3110*4882a593Smuzhiyun __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
3111*4882a593Smuzhiyun u64 start, u64 len, struct extent_map **em_cached)
3112*4882a593Smuzhiyun {
3113*4882a593Smuzhiyun struct extent_map *em;
3114*4882a593Smuzhiyun
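	/*
	 * Reuse the cached extent map if it is still in the tree and covers
	 * 'start'; otherwise drop the stale cache and look up a fresh map.
	 */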
3115*4882a593Smuzhiyun if (em_cached && *em_cached) {
3116*4882a593Smuzhiyun em = *em_cached;
3117*4882a593Smuzhiyun if (extent_map_in_tree(em) && start >= em->start &&
3118*4882a593Smuzhiyun start < extent_map_end(em)) {
3119*4882a593Smuzhiyun refcount_inc(&em->refs);
3120*4882a593Smuzhiyun return em;
3121*4882a593Smuzhiyun }
3122*4882a593Smuzhiyun
3123*4882a593Smuzhiyun free_extent_map(em);
3124*4882a593Smuzhiyun *em_cached = NULL;
3125*4882a593Smuzhiyun }
3126*4882a593Smuzhiyun
3127*4882a593Smuzhiyun em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
3128*4882a593Smuzhiyun if (em_cached && !IS_ERR_OR_NULL(em)) {
3129*4882a593Smuzhiyun BUG_ON(*em_cached);
3130*4882a593Smuzhiyun refcount_inc(&em->refs);
3131*4882a593Smuzhiyun *em_cached = em;
3132*4882a593Smuzhiyun }
3133*4882a593Smuzhiyun return em;
3134*4882a593Smuzhiyun }
3135*4882a593Smuzhiyun /*
3136*4882a593Smuzhiyun * basic readpage implementation. Locked extent state structs are inserted
3137*4882a593Smuzhiyun * into the tree that are removed when the IO is done (by the end_io
3138*4882a593Smuzhiyun * handlers)
3139*4882a593Smuzhiyun * XXX JDM: This needs looking at to ensure proper page locking
3140*4882a593Smuzhiyun * return 0 on success, otherwise return error
3141*4882a593Smuzhiyun */
3142*4882a593Smuzhiyun int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
3143*4882a593Smuzhiyun struct bio **bio, unsigned long *bio_flags,
3144*4882a593Smuzhiyun unsigned int read_flags, u64 *prev_em_start)
3145*4882a593Smuzhiyun {
3146*4882a593Smuzhiyun struct inode *inode = page->mapping->host;
3147*4882a593Smuzhiyun u64 start = page_offset(page);
3148*4882a593Smuzhiyun const u64 end = start + PAGE_SIZE - 1;
3149*4882a593Smuzhiyun u64 cur = start;
3150*4882a593Smuzhiyun u64 extent_offset;
3151*4882a593Smuzhiyun u64 last_byte = i_size_read(inode);
3152*4882a593Smuzhiyun u64 block_start;
3153*4882a593Smuzhiyun u64 cur_end;
3154*4882a593Smuzhiyun struct extent_map *em;
3155*4882a593Smuzhiyun int ret = 0;
3156*4882a593Smuzhiyun int nr = 0;
3157*4882a593Smuzhiyun size_t pg_offset = 0;
3158*4882a593Smuzhiyun size_t iosize;
3159*4882a593Smuzhiyun size_t disk_io_size;
3160*4882a593Smuzhiyun size_t blocksize = inode->i_sb->s_blocksize;
3161*4882a593Smuzhiyun unsigned long this_bio_flag = 0;
3162*4882a593Smuzhiyun struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
3163*4882a593Smuzhiyun
3164*4882a593Smuzhiyun set_page_extent_mapped(page);
3165*4882a593Smuzhiyun
3166*4882a593Smuzhiyun if (!PageUptodate(page)) {
3167*4882a593Smuzhiyun if (cleancache_get_page(page) == 0) {
3168*4882a593Smuzhiyun BUG_ON(blocksize != PAGE_SIZE);
3169*4882a593Smuzhiyun unlock_extent(tree, start, end);
3170*4882a593Smuzhiyun goto out;
3171*4882a593Smuzhiyun }
3172*4882a593Smuzhiyun }
3173*4882a593Smuzhiyun
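	/*
	 * If this is the last page of the file, zero the part of the page
	 * beyond i_size so we never expose stale data past EOF.
	 */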
3174*4882a593Smuzhiyun if (page->index == last_byte >> PAGE_SHIFT) {
3175*4882a593Smuzhiyun char *userpage;
3176*4882a593Smuzhiyun size_t zero_offset = offset_in_page(last_byte);
3177*4882a593Smuzhiyun
3178*4882a593Smuzhiyun if (zero_offset) {
3179*4882a593Smuzhiyun iosize = PAGE_SIZE - zero_offset;
3180*4882a593Smuzhiyun userpage = kmap_atomic(page);
3181*4882a593Smuzhiyun memset(userpage + zero_offset, 0, iosize);
3182*4882a593Smuzhiyun flush_dcache_page(page);
3183*4882a593Smuzhiyun kunmap_atomic(userpage);
3184*4882a593Smuzhiyun }
3185*4882a593Smuzhiyun }
3186*4882a593Smuzhiyun while (cur <= end) {
3187*4882a593Smuzhiyun bool force_bio_submit = false;
3188*4882a593Smuzhiyun u64 offset;
3189*4882a593Smuzhiyun
3190*4882a593Smuzhiyun if (cur >= last_byte) {
3191*4882a593Smuzhiyun char *userpage;
3192*4882a593Smuzhiyun struct extent_state *cached = NULL;
3193*4882a593Smuzhiyun
3194*4882a593Smuzhiyun iosize = PAGE_SIZE - pg_offset;
3195*4882a593Smuzhiyun userpage = kmap_atomic(page);
3196*4882a593Smuzhiyun memset(userpage + pg_offset, 0, iosize);
3197*4882a593Smuzhiyun flush_dcache_page(page);
3198*4882a593Smuzhiyun kunmap_atomic(userpage);
3199*4882a593Smuzhiyun set_extent_uptodate(tree, cur, cur + iosize - 1,
3200*4882a593Smuzhiyun &cached, GFP_NOFS);
3201*4882a593Smuzhiyun unlock_extent_cached(tree, cur,
3202*4882a593Smuzhiyun cur + iosize - 1, &cached);
3203*4882a593Smuzhiyun break;
3204*4882a593Smuzhiyun }
3205*4882a593Smuzhiyun em = __get_extent_map(inode, page, pg_offset, cur,
3206*4882a593Smuzhiyun end - cur + 1, em_cached);
3207*4882a593Smuzhiyun if (IS_ERR_OR_NULL(em)) {
3208*4882a593Smuzhiyun SetPageError(page);
3209*4882a593Smuzhiyun unlock_extent(tree, cur, end);
3210*4882a593Smuzhiyun break;
3211*4882a593Smuzhiyun }
3212*4882a593Smuzhiyun extent_offset = cur - em->start;
3213*4882a593Smuzhiyun BUG_ON(extent_map_end(em) <= cur);
3214*4882a593Smuzhiyun BUG_ON(end < cur);
3215*4882a593Smuzhiyun
3216*4882a593Smuzhiyun if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3217*4882a593Smuzhiyun this_bio_flag |= EXTENT_BIO_COMPRESSED;
3218*4882a593Smuzhiyun extent_set_compress_type(&this_bio_flag,
3219*4882a593Smuzhiyun em->compress_type);
3220*4882a593Smuzhiyun }
3221*4882a593Smuzhiyun
3222*4882a593Smuzhiyun iosize = min(extent_map_end(em) - cur, end - cur + 1);
3223*4882a593Smuzhiyun cur_end = min(extent_map_end(em) - 1, end);
3224*4882a593Smuzhiyun iosize = ALIGN(iosize, blocksize);
3225*4882a593Smuzhiyun if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
3226*4882a593Smuzhiyun disk_io_size = em->block_len;
3227*4882a593Smuzhiyun offset = em->block_start;
3228*4882a593Smuzhiyun } else {
3229*4882a593Smuzhiyun offset = em->block_start + extent_offset;
3230*4882a593Smuzhiyun disk_io_size = iosize;
3231*4882a593Smuzhiyun }
3232*4882a593Smuzhiyun block_start = em->block_start;
3233*4882a593Smuzhiyun if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3234*4882a593Smuzhiyun block_start = EXTENT_MAP_HOLE;
3235*4882a593Smuzhiyun
3236*4882a593Smuzhiyun /*
3237*4882a593Smuzhiyun * If we have a file range that points to a compressed extent
3238*4882a593Smuzhiyun * and it's followed by a consecutive file range that points
3239*4882a593Smuzhiyun * to the same compressed extent (possibly with a different
3240*4882a593Smuzhiyun * offset and/or length, so it either points to the whole extent
3241*4882a593Smuzhiyun * or only part of it), we must make sure we do not submit a
3242*4882a593Smuzhiyun * single bio to populate the pages for the 2 ranges because
3243*4882a593Smuzhiyun * this makes the compressed extent read zero out the pages
3244*4882a593Smuzhiyun * belonging to the 2nd range. Imagine the following scenario:
3245*4882a593Smuzhiyun *
3246*4882a593Smuzhiyun * File layout
3247*4882a593Smuzhiyun * [0 - 8K] [8K - 24K]
3248*4882a593Smuzhiyun * | |
3249*4882a593Smuzhiyun * | |
3250*4882a593Smuzhiyun * points to extent X, points to extent X,
3251*4882a593Smuzhiyun * offset 4K, length of 8K offset 0, length 16K
3252*4882a593Smuzhiyun *
3253*4882a593Smuzhiyun * [extent X, compressed length = 4K uncompressed length = 16K]
3254*4882a593Smuzhiyun *
3255*4882a593Smuzhiyun * If the bio to read the compressed extent covers both ranges,
3256*4882a593Smuzhiyun * it will decompress extent X into the pages belonging to the
3257*4882a593Smuzhiyun * first range and then it will stop, zeroing out the remaining
3258*4882a593Smuzhiyun * pages that belong to the other range that points to extent X.
3259*4882a593Smuzhiyun * So here we make sure we submit 2 bios, one for the first
3260*4882a593Smuzhiyun * range and another one for the third range. Both will target
3261*4882a593Smuzhiyun * the same physical extent from disk, but we can't currently
3262*4882a593Smuzhiyun * make the compressed bio endio callback populate the pages
3263*4882a593Smuzhiyun * for both ranges because each compressed bio is tightly
3264*4882a593Smuzhiyun * coupled with a single extent map, and each range can have
3265*4882a593Smuzhiyun * an extent map with a different offset value relative to the
3266*4882a593Smuzhiyun * uncompressed data of our extent and different lengths. This
3267*4882a593Smuzhiyun * is a corner case so we prioritize correctness over
3268*4882a593Smuzhiyun * non-optimal behavior (submitting 2 bios for the same extent).
3269*4882a593Smuzhiyun */
3270*4882a593Smuzhiyun if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
3271*4882a593Smuzhiyun prev_em_start && *prev_em_start != (u64)-1 &&
3272*4882a593Smuzhiyun *prev_em_start != em->start)
3273*4882a593Smuzhiyun force_bio_submit = true;
3274*4882a593Smuzhiyun
3275*4882a593Smuzhiyun if (prev_em_start)
3276*4882a593Smuzhiyun *prev_em_start = em->start;
3277*4882a593Smuzhiyun
3278*4882a593Smuzhiyun free_extent_map(em);
3279*4882a593Smuzhiyun em = NULL;
3280*4882a593Smuzhiyun
3281*4882a593Smuzhiyun /* we've found a hole, just zero and go on */
3282*4882a593Smuzhiyun if (block_start == EXTENT_MAP_HOLE) {
3283*4882a593Smuzhiyun char *userpage;
3284*4882a593Smuzhiyun struct extent_state *cached = NULL;
3285*4882a593Smuzhiyun
3286*4882a593Smuzhiyun userpage = kmap_atomic(page);
3287*4882a593Smuzhiyun memset(userpage + pg_offset, 0, iosize);
3288*4882a593Smuzhiyun flush_dcache_page(page);
3289*4882a593Smuzhiyun kunmap_atomic(userpage);
3290*4882a593Smuzhiyun
3291*4882a593Smuzhiyun set_extent_uptodate(tree, cur, cur + iosize - 1,
3292*4882a593Smuzhiyun &cached, GFP_NOFS);
3293*4882a593Smuzhiyun unlock_extent_cached(tree, cur,
3294*4882a593Smuzhiyun cur + iosize - 1, &cached);
3295*4882a593Smuzhiyun cur = cur + iosize;
3296*4882a593Smuzhiyun pg_offset += iosize;
3297*4882a593Smuzhiyun continue;
3298*4882a593Smuzhiyun }
3299*4882a593Smuzhiyun /* the get_extent function already copied into the page */
3300*4882a593Smuzhiyun if (test_range_bit(tree, cur, cur_end,
3301*4882a593Smuzhiyun EXTENT_UPTODATE, 1, NULL)) {
3302*4882a593Smuzhiyun check_page_uptodate(tree, page);
3303*4882a593Smuzhiyun unlock_extent(tree, cur, cur + iosize - 1);
3304*4882a593Smuzhiyun cur = cur + iosize;
3305*4882a593Smuzhiyun pg_offset += iosize;
3306*4882a593Smuzhiyun continue;
3307*4882a593Smuzhiyun }
3308*4882a593Smuzhiyun /* we have an inline extent but it didn't get marked up
3309*4882a593Smuzhiyun * to date. Error out
3310*4882a593Smuzhiyun */
3311*4882a593Smuzhiyun if (block_start == EXTENT_MAP_INLINE) {
3312*4882a593Smuzhiyun SetPageError(page);
3313*4882a593Smuzhiyun unlock_extent(tree, cur, cur + iosize - 1);
3314*4882a593Smuzhiyun cur = cur + iosize;
3315*4882a593Smuzhiyun pg_offset += iosize;
3316*4882a593Smuzhiyun continue;
3317*4882a593Smuzhiyun }
3318*4882a593Smuzhiyun
3319*4882a593Smuzhiyun ret = submit_extent_page(REQ_OP_READ | read_flags, NULL,
3320*4882a593Smuzhiyun page, offset, disk_io_size,
3321*4882a593Smuzhiyun pg_offset, bio,
3322*4882a593Smuzhiyun end_bio_extent_readpage, 0,
3323*4882a593Smuzhiyun *bio_flags,
3324*4882a593Smuzhiyun this_bio_flag,
3325*4882a593Smuzhiyun force_bio_submit);
3326*4882a593Smuzhiyun if (!ret) {
3327*4882a593Smuzhiyun nr++;
3328*4882a593Smuzhiyun *bio_flags = this_bio_flag;
3329*4882a593Smuzhiyun } else {
3330*4882a593Smuzhiyun SetPageError(page);
3331*4882a593Smuzhiyun unlock_extent(tree, cur, cur + iosize - 1);
3332*4882a593Smuzhiyun goto out;
3333*4882a593Smuzhiyun }
3334*4882a593Smuzhiyun cur = cur + iosize;
3335*4882a593Smuzhiyun pg_offset += iosize;
3336*4882a593Smuzhiyun }
3337*4882a593Smuzhiyun out:
3338*4882a593Smuzhiyun if (!nr) {
3339*4882a593Smuzhiyun if (!PageError(page))
3340*4882a593Smuzhiyun SetPageUptodate(page);
3341*4882a593Smuzhiyun unlock_page(page);
3342*4882a593Smuzhiyun }
3343*4882a593Smuzhiyun return ret;
3344*4882a593Smuzhiyun }
3345*4882a593Smuzhiyun
3346*4882a593Smuzhiyun static inline void contiguous_readpages(struct page *pages[], int nr_pages,
3347*4882a593Smuzhiyun u64 start, u64 end,
3348*4882a593Smuzhiyun struct extent_map **em_cached,
3349*4882a593Smuzhiyun struct bio **bio,
3350*4882a593Smuzhiyun unsigned long *bio_flags,
3351*4882a593Smuzhiyun u64 *prev_em_start)
3352*4882a593Smuzhiyun {
3353*4882a593Smuzhiyun struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
3354*4882a593Smuzhiyun int index;
3355*4882a593Smuzhiyun
3356*4882a593Smuzhiyun btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
3357*4882a593Smuzhiyun
3358*4882a593Smuzhiyun for (index = 0; index < nr_pages; index++) {
3359*4882a593Smuzhiyun btrfs_do_readpage(pages[index], em_cached, bio, bio_flags,
3360*4882a593Smuzhiyun REQ_RAHEAD, prev_em_start);
3361*4882a593Smuzhiyun put_page(pages[index]);
3362*4882a593Smuzhiyun }
3363*4882a593Smuzhiyun }
3364*4882a593Smuzhiyun
3365*4882a593Smuzhiyun static void update_nr_written(struct writeback_control *wbc,
3366*4882a593Smuzhiyun unsigned long nr_written)
3367*4882a593Smuzhiyun {
3368*4882a593Smuzhiyun wbc->nr_to_write -= nr_written;
3369*4882a593Smuzhiyun }
3370*4882a593Smuzhiyun
3371*4882a593Smuzhiyun /*
3372*4882a593Smuzhiyun * helper for __extent_writepage, doing all of the delayed allocation setup.
3373*4882a593Smuzhiyun *
3374*4882a593Smuzhiyun * This returns 1 if btrfs_run_delalloc_range function did all the work required
3375*4882a593Smuzhiyun * to write the page (copy into inline extent). In this case the IO has
3376*4882a593Smuzhiyun * been started and the page is already unlocked.
3377*4882a593Smuzhiyun *
3378*4882a593Smuzhiyun * This returns 0 if all went well (page still locked)
3379*4882a593Smuzhiyun * This returns < 0 if there were errors (page still locked)
3380*4882a593Smuzhiyun */
3381*4882a593Smuzhiyun static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
3382*4882a593Smuzhiyun struct page *page, struct writeback_control *wbc,
3383*4882a593Smuzhiyun u64 delalloc_start, unsigned long *nr_written)
3384*4882a593Smuzhiyun {
3385*4882a593Smuzhiyun u64 page_end = delalloc_start + PAGE_SIZE - 1;
3386*4882a593Smuzhiyun bool found;
3387*4882a593Smuzhiyun u64 delalloc_to_write = 0;
3388*4882a593Smuzhiyun u64 delalloc_end = 0;
3389*4882a593Smuzhiyun int ret;
3390*4882a593Smuzhiyun int page_started = 0;
3391*4882a593Smuzhiyun
3392*4882a593Smuzhiyun
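	/*
	 * Find every delalloc subrange of the page and hand each one to
	 * btrfs_run_delalloc_range().  delalloc_to_write counts the pages
	 * covered so wbc->nr_to_write can be bumped below if needed.
	 */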
3393*4882a593Smuzhiyun while (delalloc_end < page_end) {
3394*4882a593Smuzhiyun found = find_lock_delalloc_range(&inode->vfs_inode, page,
3395*4882a593Smuzhiyun &delalloc_start,
3396*4882a593Smuzhiyun &delalloc_end);
3397*4882a593Smuzhiyun if (!found) {
3398*4882a593Smuzhiyun delalloc_start = delalloc_end + 1;
3399*4882a593Smuzhiyun continue;
3400*4882a593Smuzhiyun }
3401*4882a593Smuzhiyun ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
3402*4882a593Smuzhiyun delalloc_end, &page_started, nr_written, wbc);
3403*4882a593Smuzhiyun if (ret) {
3404*4882a593Smuzhiyun SetPageError(page);
3405*4882a593Smuzhiyun /*
3406*4882a593Smuzhiyun * btrfs_run_delalloc_range should return < 0 for error
3407*4882a593Smuzhiyun * but just in case, we use > 0 here meaning the IO is
3408*4882a593Smuzhiyun * started, so we don't want to return > 0 unless
3409*4882a593Smuzhiyun * things are going well.
3410*4882a593Smuzhiyun */
3411*4882a593Smuzhiyun return ret < 0 ? ret : -EIO;
3412*4882a593Smuzhiyun }
3413*4882a593Smuzhiyun /*
3414*4882a593Smuzhiyun * delalloc_end is already one less than the total length, so
3415*4882a593Smuzhiyun * we don't subtract one from PAGE_SIZE
3416*4882a593Smuzhiyun */
3417*4882a593Smuzhiyun delalloc_to_write += (delalloc_end - delalloc_start +
3418*4882a593Smuzhiyun PAGE_SIZE) >> PAGE_SHIFT;
3419*4882a593Smuzhiyun delalloc_start = delalloc_end + 1;
3420*4882a593Smuzhiyun }
3421*4882a593Smuzhiyun if (wbc->nr_to_write < delalloc_to_write) {
3422*4882a593Smuzhiyun int thresh = 8192;
3423*4882a593Smuzhiyun
3424*4882a593Smuzhiyun if (delalloc_to_write < thresh * 2)
3425*4882a593Smuzhiyun thresh = delalloc_to_write;
3426*4882a593Smuzhiyun wbc->nr_to_write = min_t(u64, delalloc_to_write,
3427*4882a593Smuzhiyun thresh);
3428*4882a593Smuzhiyun }
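/*
 * Worked example (illustrative only): if delalloc_to_write is 10 pages and
 * wbc->nr_to_write started at 2, thresh becomes 10 and nr_to_write is set
 * to min(10, 10) == 10, so the whole delalloc range gets written. If
 * delalloc_to_write were 100000 pages, thresh would stay at 8192 and
 * nr_to_write would be capped at 8192.
 */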
3429*4882a593Smuzhiyun
3430*4882a593Smuzhiyun /* Did btrfs_run_delalloc_range() already unlock the page and start
3431*4882a593Smuzhiyun * the IO?
3432*4882a593Smuzhiyun */
3433*4882a593Smuzhiyun if (page_started) {
3434*4882a593Smuzhiyun /*
3435*4882a593Smuzhiyun * we've unlocked the page, so we can't update
3436*4882a593Smuzhiyun * the mapping's writeback index, just update
3437*4882a593Smuzhiyun * nr_to_write.
3438*4882a593Smuzhiyun */
3439*4882a593Smuzhiyun wbc->nr_to_write -= *nr_written;
3440*4882a593Smuzhiyun return 1;
3441*4882a593Smuzhiyun }
3442*4882a593Smuzhiyun
3443*4882a593Smuzhiyun return 0;
3444*4882a593Smuzhiyun }
3445*4882a593Smuzhiyun
3446*4882a593Smuzhiyun /*
3447*4882a593Smuzhiyun * helper for __extent_writepage. This calls the writepage start hooks,
3448*4882a593Smuzhiyun * and does the loop to map the page into extents and bios.
3449*4882a593Smuzhiyun *
3450*4882a593Smuzhiyun * We return 1 if the IO is started and the page is unlocked,
3451*4882a593Smuzhiyun * 0 if all went well (page still locked)
3452*4882a593Smuzhiyun * < 0 if there were errors (page still locked)
3453*4882a593Smuzhiyun */
3454*4882a593Smuzhiyun static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
3455*4882a593Smuzhiyun struct page *page,
3456*4882a593Smuzhiyun struct writeback_control *wbc,
3457*4882a593Smuzhiyun struct extent_page_data *epd,
3458*4882a593Smuzhiyun loff_t i_size,
3459*4882a593Smuzhiyun unsigned long nr_written,
3460*4882a593Smuzhiyun int *nr_ret)
3461*4882a593Smuzhiyun {
3462*4882a593Smuzhiyun struct extent_io_tree *tree = &inode->io_tree;
3463*4882a593Smuzhiyun u64 start = page_offset(page);
3464*4882a593Smuzhiyun u64 page_end = start + PAGE_SIZE - 1;
3465*4882a593Smuzhiyun u64 end;
3466*4882a593Smuzhiyun u64 cur = start;
3467*4882a593Smuzhiyun u64 extent_offset;
3468*4882a593Smuzhiyun u64 block_start;
3469*4882a593Smuzhiyun u64 iosize;
3470*4882a593Smuzhiyun struct extent_map *em;
3471*4882a593Smuzhiyun size_t pg_offset = 0;
3472*4882a593Smuzhiyun size_t blocksize;
3473*4882a593Smuzhiyun int ret = 0;
3474*4882a593Smuzhiyun int nr = 0;
3475*4882a593Smuzhiyun const unsigned int write_flags = wbc_to_write_flags(wbc);
3476*4882a593Smuzhiyun bool compressed;
3477*4882a593Smuzhiyun
3478*4882a593Smuzhiyun ret = btrfs_writepage_cow_fixup(page, start, page_end);
3479*4882a593Smuzhiyun if (ret) {
3480*4882a593Smuzhiyun /* Fixup worker will requeue */
3481*4882a593Smuzhiyun redirty_page_for_writepage(wbc, page);
3482*4882a593Smuzhiyun update_nr_written(wbc, nr_written);
3483*4882a593Smuzhiyun unlock_page(page);
3484*4882a593Smuzhiyun return 1;
3485*4882a593Smuzhiyun }
3486*4882a593Smuzhiyun
3487*4882a593Smuzhiyun /*
3488*4882a593Smuzhiyun * we don't want to touch the inode after unlocking the page,
3489*4882a593Smuzhiyun * so we update the mapping writeback index now
3490*4882a593Smuzhiyun */
3491*4882a593Smuzhiyun update_nr_written(wbc, nr_written + 1);
3492*4882a593Smuzhiyun
3493*4882a593Smuzhiyun end = page_end;
3494*4882a593Smuzhiyun blocksize = inode->vfs_inode.i_sb->s_blocksize;
3495*4882a593Smuzhiyun
3496*4882a593Smuzhiyun while (cur <= end) {
3497*4882a593Smuzhiyun u64 em_end;
3498*4882a593Smuzhiyun u64 offset;
3499*4882a593Smuzhiyun
3500*4882a593Smuzhiyun if (cur >= i_size) {
3501*4882a593Smuzhiyun btrfs_writepage_endio_finish_ordered(page, cur,
3502*4882a593Smuzhiyun page_end, 1);
3503*4882a593Smuzhiyun break;
3504*4882a593Smuzhiyun }
3505*4882a593Smuzhiyun em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
3506*4882a593Smuzhiyun if (IS_ERR_OR_NULL(em)) {
3507*4882a593Smuzhiyun SetPageError(page);
3508*4882a593Smuzhiyun ret = PTR_ERR_OR_ZERO(em);
3509*4882a593Smuzhiyun break;
3510*4882a593Smuzhiyun }
3511*4882a593Smuzhiyun
3512*4882a593Smuzhiyun extent_offset = cur - em->start;
3513*4882a593Smuzhiyun em_end = extent_map_end(em);
3514*4882a593Smuzhiyun BUG_ON(em_end <= cur);
3515*4882a593Smuzhiyun BUG_ON(end < cur);
3516*4882a593Smuzhiyun iosize = min(em_end - cur, end - cur + 1);
3517*4882a593Smuzhiyun iosize = ALIGN(iosize, blocksize);
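/*
 * Illustrative numbers (not from the original code): with a 4K blocksize,
 * if the extent map covers another 5000 bytes from 'cur' and the page
 * range still has 100000 bytes left, iosize becomes min(5000, 100000) ==
 * 5000 and is then rounded up by ALIGN() to 8192.
 */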
3518*4882a593Smuzhiyun offset = em->block_start + extent_offset;
3519*4882a593Smuzhiyun block_start = em->block_start;
3520*4882a593Smuzhiyun compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3521*4882a593Smuzhiyun free_extent_map(em);
3522*4882a593Smuzhiyun em = NULL;
3523*4882a593Smuzhiyun
3524*4882a593Smuzhiyun /*
3525*4882a593Smuzhiyun * compressed and inline extents are written through other
3526*4882a593Smuzhiyun * paths in the FS
3527*4882a593Smuzhiyun */
3528*4882a593Smuzhiyun if (compressed || block_start == EXTENT_MAP_HOLE ||
3529*4882a593Smuzhiyun block_start == EXTENT_MAP_INLINE) {
3530*4882a593Smuzhiyun if (compressed)
3531*4882a593Smuzhiyun nr++;
3532*4882a593Smuzhiyun else
3533*4882a593Smuzhiyun btrfs_writepage_endio_finish_ordered(page, cur,
3534*4882a593Smuzhiyun cur + iosize - 1, 1);
3535*4882a593Smuzhiyun cur += iosize;
3536*4882a593Smuzhiyun pg_offset += iosize;
3537*4882a593Smuzhiyun continue;
3538*4882a593Smuzhiyun }
3539*4882a593Smuzhiyun
3540*4882a593Smuzhiyun btrfs_set_range_writeback(tree, cur, cur + iosize - 1);
3541*4882a593Smuzhiyun if (!PageWriteback(page)) {
3542*4882a593Smuzhiyun btrfs_err(inode->root->fs_info,
3543*4882a593Smuzhiyun "page %lu not writeback, cur %llu end %llu",
3544*4882a593Smuzhiyun page->index, cur, end);
3545*4882a593Smuzhiyun }
3546*4882a593Smuzhiyun
3547*4882a593Smuzhiyun ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
3548*4882a593Smuzhiyun page, offset, iosize, pg_offset,
3549*4882a593Smuzhiyun &epd->bio,
3550*4882a593Smuzhiyun end_bio_extent_writepage,
3551*4882a593Smuzhiyun 0, 0, 0, false);
3552*4882a593Smuzhiyun if (ret) {
3553*4882a593Smuzhiyun SetPageError(page);
3554*4882a593Smuzhiyun if (PageWriteback(page))
3555*4882a593Smuzhiyun end_page_writeback(page);
3556*4882a593Smuzhiyun }
3557*4882a593Smuzhiyun
3558*4882a593Smuzhiyun cur = cur + iosize;
3559*4882a593Smuzhiyun pg_offset += iosize;
3560*4882a593Smuzhiyun nr++;
3561*4882a593Smuzhiyun }
3562*4882a593Smuzhiyun *nr_ret = nr;
3563*4882a593Smuzhiyun return ret;
3564*4882a593Smuzhiyun }
3565*4882a593Smuzhiyun
3566*4882a593Smuzhiyun /*
3567*4882a593Smuzhiyun * the writepage semantics are similar to regular writepage. extent
3568*4882a593Smuzhiyun * records are inserted to lock ranges in the tree, and as dirty areas
3569*4882a593Smuzhiyun * are found, they are marked writeback. Then the lock bits are removed
3570*4882a593Smuzhiyun * and the end_io handler clears the writeback ranges
3571*4882a593Smuzhiyun *
3572*4882a593Smuzhiyun * Return 0 if everything goes well.
3573*4882a593Smuzhiyun * Return <0 for error.
3574*4882a593Smuzhiyun */
3575*4882a593Smuzhiyun static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3576*4882a593Smuzhiyun struct extent_page_data *epd)
3577*4882a593Smuzhiyun {
3578*4882a593Smuzhiyun struct inode *inode = page->mapping->host;
3579*4882a593Smuzhiyun u64 start = page_offset(page);
3580*4882a593Smuzhiyun u64 page_end = start + PAGE_SIZE - 1;
3581*4882a593Smuzhiyun int ret;
3582*4882a593Smuzhiyun int nr = 0;
3583*4882a593Smuzhiyun size_t pg_offset;
3584*4882a593Smuzhiyun loff_t i_size = i_size_read(inode);
3585*4882a593Smuzhiyun unsigned long end_index = i_size >> PAGE_SHIFT;
3586*4882a593Smuzhiyun unsigned long nr_written = 0;
3587*4882a593Smuzhiyun
3588*4882a593Smuzhiyun trace___extent_writepage(page, inode, wbc);
3589*4882a593Smuzhiyun
3590*4882a593Smuzhiyun WARN_ON(!PageLocked(page));
3591*4882a593Smuzhiyun
3592*4882a593Smuzhiyun ClearPageError(page);
3593*4882a593Smuzhiyun
3594*4882a593Smuzhiyun pg_offset = offset_in_page(i_size);
3595*4882a593Smuzhiyun if (page->index > end_index ||
3596*4882a593Smuzhiyun (page->index == end_index && !pg_offset)) {
3597*4882a593Smuzhiyun page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
3598*4882a593Smuzhiyun unlock_page(page);
3599*4882a593Smuzhiyun return 0;
3600*4882a593Smuzhiyun }
3601*4882a593Smuzhiyun
3602*4882a593Smuzhiyun if (page->index == end_index) {
3603*4882a593Smuzhiyun char *userpage;
3604*4882a593Smuzhiyun
3605*4882a593Smuzhiyun userpage = kmap_atomic(page);
3606*4882a593Smuzhiyun memset(userpage + pg_offset, 0,
3607*4882a593Smuzhiyun PAGE_SIZE - pg_offset);
3608*4882a593Smuzhiyun kunmap_atomic(userpage);
3609*4882a593Smuzhiyun flush_dcache_page(page);
3610*4882a593Smuzhiyun }
3611*4882a593Smuzhiyun
3612*4882a593Smuzhiyun set_page_extent_mapped(page);
3613*4882a593Smuzhiyun
3614*4882a593Smuzhiyun if (!epd->extent_locked) {
3615*4882a593Smuzhiyun ret = writepage_delalloc(BTRFS_I(inode), page, wbc, start,
3616*4882a593Smuzhiyun &nr_written);
3617*4882a593Smuzhiyun if (ret == 1)
3618*4882a593Smuzhiyun return 0;
3619*4882a593Smuzhiyun if (ret)
3620*4882a593Smuzhiyun goto done;
3621*4882a593Smuzhiyun }
3622*4882a593Smuzhiyun
3623*4882a593Smuzhiyun ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, epd, i_size,
3624*4882a593Smuzhiyun nr_written, &nr);
3625*4882a593Smuzhiyun if (ret == 1)
3626*4882a593Smuzhiyun return 0;
3627*4882a593Smuzhiyun
3628*4882a593Smuzhiyun done:
3629*4882a593Smuzhiyun if (nr == 0) {
3630*4882a593Smuzhiyun /* make sure the mapping tag for page dirty gets cleared */
3631*4882a593Smuzhiyun set_page_writeback(page);
3632*4882a593Smuzhiyun end_page_writeback(page);
3633*4882a593Smuzhiyun }
3634*4882a593Smuzhiyun if (PageError(page)) {
3635*4882a593Smuzhiyun ret = ret < 0 ? ret : -EIO;
3636*4882a593Smuzhiyun end_extent_writepage(page, ret, start, page_end);
3637*4882a593Smuzhiyun }
3638*4882a593Smuzhiyun unlock_page(page);
3639*4882a593Smuzhiyun ASSERT(ret <= 0);
3640*4882a593Smuzhiyun return ret;
3641*4882a593Smuzhiyun }
3642*4882a593Smuzhiyun
3643*4882a593Smuzhiyun void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3644*4882a593Smuzhiyun {
3645*4882a593Smuzhiyun wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
3646*4882a593Smuzhiyun TASK_UNINTERRUPTIBLE);
3647*4882a593Smuzhiyun }
3648*4882a593Smuzhiyun
3649*4882a593Smuzhiyun static void end_extent_buffer_writeback(struct extent_buffer *eb)
3650*4882a593Smuzhiyun {
3651*4882a593Smuzhiyun clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3652*4882a593Smuzhiyun smp_mb__after_atomic();
3653*4882a593Smuzhiyun wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3654*4882a593Smuzhiyun }
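/*
 * Rough lifecycle sketch (added for clarity, not part of the original
 * comments): lock_extent_buffer_for_io() sets EXTENT_BUFFER_WRITEBACK under
 * eb->refs_lock, the bio end_io handler calls end_extent_buffer_writeback()
 * once all pages complete, which clears the bit and wakes anyone sleeping in
 * wait_on_extent_buffer_writeback() via wake_up_bit().
 */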
3655*4882a593Smuzhiyun
3656*4882a593Smuzhiyun /*
3657*4882a593Smuzhiyun * Lock eb pages and flush the bio if we can't get the locks
3658*4882a593Smuzhiyun *
3659*4882a593Smuzhiyun * Return 0 if nothing went wrong
3660*4882a593Smuzhiyun * Return >0 is the same as 0, except the bio is not submitted
3661*4882a593Smuzhiyun * Return <0 if something went wrong, no page is locked
3662*4882a593Smuzhiyun */
3663*4882a593Smuzhiyun static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
3664*4882a593Smuzhiyun struct extent_page_data *epd)
3665*4882a593Smuzhiyun {
3666*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = eb->fs_info;
3667*4882a593Smuzhiyun int i, num_pages, failed_page_nr;
3668*4882a593Smuzhiyun int flush = 0;
3669*4882a593Smuzhiyun int ret = 0;
3670*4882a593Smuzhiyun
3671*4882a593Smuzhiyun if (!btrfs_try_tree_write_lock(eb)) {
3672*4882a593Smuzhiyun ret = flush_write_bio(epd);
3673*4882a593Smuzhiyun if (ret < 0)
3674*4882a593Smuzhiyun return ret;
3675*4882a593Smuzhiyun flush = 1;
3676*4882a593Smuzhiyun btrfs_tree_lock(eb);
3677*4882a593Smuzhiyun }
3678*4882a593Smuzhiyun
3679*4882a593Smuzhiyun if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3680*4882a593Smuzhiyun btrfs_tree_unlock(eb);
3681*4882a593Smuzhiyun if (!epd->sync_io)
3682*4882a593Smuzhiyun return 0;
3683*4882a593Smuzhiyun if (!flush) {
3684*4882a593Smuzhiyun ret = flush_write_bio(epd);
3685*4882a593Smuzhiyun if (ret < 0)
3686*4882a593Smuzhiyun return ret;
3687*4882a593Smuzhiyun flush = 1;
3688*4882a593Smuzhiyun }
3689*4882a593Smuzhiyun while (1) {
3690*4882a593Smuzhiyun wait_on_extent_buffer_writeback(eb);
3691*4882a593Smuzhiyun btrfs_tree_lock(eb);
3692*4882a593Smuzhiyun if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3693*4882a593Smuzhiyun break;
3694*4882a593Smuzhiyun btrfs_tree_unlock(eb);
3695*4882a593Smuzhiyun }
3696*4882a593Smuzhiyun }
3697*4882a593Smuzhiyun
3698*4882a593Smuzhiyun /*
3699*4882a593Smuzhiyun * We need to do this to prevent races with anyone who checks whether the eb is
3700*4882a593Smuzhiyun * under IO, since we can end up having no IO bits set for a short period
3701*4882a593Smuzhiyun * of time.
3702*4882a593Smuzhiyun */
3703*4882a593Smuzhiyun spin_lock(&eb->refs_lock);
3704*4882a593Smuzhiyun if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3705*4882a593Smuzhiyun set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3706*4882a593Smuzhiyun spin_unlock(&eb->refs_lock);
3707*4882a593Smuzhiyun btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3708*4882a593Smuzhiyun percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
3709*4882a593Smuzhiyun -eb->len,
3710*4882a593Smuzhiyun fs_info->dirty_metadata_batch);
3711*4882a593Smuzhiyun ret = 1;
3712*4882a593Smuzhiyun } else {
3713*4882a593Smuzhiyun spin_unlock(&eb->refs_lock);
3714*4882a593Smuzhiyun }
3715*4882a593Smuzhiyun
3716*4882a593Smuzhiyun btrfs_tree_unlock(eb);
3717*4882a593Smuzhiyun
3718*4882a593Smuzhiyun if (!ret)
3719*4882a593Smuzhiyun return ret;
3720*4882a593Smuzhiyun
3721*4882a593Smuzhiyun num_pages = num_extent_pages(eb);
3722*4882a593Smuzhiyun for (i = 0; i < num_pages; i++) {
3723*4882a593Smuzhiyun struct page *p = eb->pages[i];
3724*4882a593Smuzhiyun
3725*4882a593Smuzhiyun if (!trylock_page(p)) {
3726*4882a593Smuzhiyun if (!flush) {
3727*4882a593Smuzhiyun int err;
3728*4882a593Smuzhiyun
3729*4882a593Smuzhiyun err = flush_write_bio(epd);
3730*4882a593Smuzhiyun if (err < 0) {
3731*4882a593Smuzhiyun ret = err;
3732*4882a593Smuzhiyun failed_page_nr = i;
3733*4882a593Smuzhiyun goto err_unlock;
3734*4882a593Smuzhiyun }
3735*4882a593Smuzhiyun flush = 1;
3736*4882a593Smuzhiyun }
3737*4882a593Smuzhiyun lock_page(p);
3738*4882a593Smuzhiyun }
3739*4882a593Smuzhiyun }
3740*4882a593Smuzhiyun
3741*4882a593Smuzhiyun return ret;
3742*4882a593Smuzhiyun err_unlock:
3743*4882a593Smuzhiyun /* Unlock already locked pages */
3744*4882a593Smuzhiyun for (i = 0; i < failed_page_nr; i++)
3745*4882a593Smuzhiyun unlock_page(eb->pages[i]);
3746*4882a593Smuzhiyun /*
3747*4882a593Smuzhiyun * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
3748*4882a593Smuzhiyun * Also set EXTENT_BUFFER_DIRTY back so future write attempts on this eb
3749*4882a593Smuzhiyun * can be made, and undo everything done before.
3750*4882a593Smuzhiyun */
3751*4882a593Smuzhiyun btrfs_tree_lock(eb);
3752*4882a593Smuzhiyun spin_lock(&eb->refs_lock);
3753*4882a593Smuzhiyun set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3754*4882a593Smuzhiyun end_extent_buffer_writeback(eb);
3755*4882a593Smuzhiyun spin_unlock(&eb->refs_lock);
3756*4882a593Smuzhiyun percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
3757*4882a593Smuzhiyun fs_info->dirty_metadata_batch);
3758*4882a593Smuzhiyun btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3759*4882a593Smuzhiyun btrfs_tree_unlock(eb);
3760*4882a593Smuzhiyun return ret;
3761*4882a593Smuzhiyun }
3762*4882a593Smuzhiyun
3763*4882a593Smuzhiyun static void set_btree_ioerr(struct page *page)
3764*4882a593Smuzhiyun {
3765*4882a593Smuzhiyun struct extent_buffer *eb = (struct extent_buffer *)page->private;
3766*4882a593Smuzhiyun struct btrfs_fs_info *fs_info;
3767*4882a593Smuzhiyun
3768*4882a593Smuzhiyun SetPageError(page);
3769*4882a593Smuzhiyun if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3770*4882a593Smuzhiyun return;
3771*4882a593Smuzhiyun
3772*4882a593Smuzhiyun /*
3773*4882a593Smuzhiyun * A read may stumble upon this buffer later; make sure it notices the
3774*4882a593Smuzhiyun * error and does not treat the buffer as up to date.
3775*4882a593Smuzhiyun */
3776*4882a593Smuzhiyun clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3777*4882a593Smuzhiyun
3778*4882a593Smuzhiyun /*
3779*4882a593Smuzhiyun * If we error out, we should add back the dirty_metadata_bytes
3780*4882a593Smuzhiyun * to make it consistent.
3781*4882a593Smuzhiyun */
3782*4882a593Smuzhiyun fs_info = eb->fs_info;
3783*4882a593Smuzhiyun percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
3784*4882a593Smuzhiyun eb->len, fs_info->dirty_metadata_batch);
3785*4882a593Smuzhiyun
3786*4882a593Smuzhiyun /*
3787*4882a593Smuzhiyun * If writeback for a btree extent that doesn't belong to a log tree
3788*4882a593Smuzhiyun * failed, increment the counter transaction->eb_write_errors.
3789*4882a593Smuzhiyun * We do this because while the transaction is running and before it's
3790*4882a593Smuzhiyun * committing (when we call filemap_fdata[write|wait]_range against
3791*4882a593Smuzhiyun * the btree inode), we might have
3792*4882a593Smuzhiyun * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
3793*4882a593Smuzhiyun * returns an error or an error happens during writeback, when we're
3794*4882a593Smuzhiyun * committing the transaction we wouldn't know about it, since the pages
3795*4882a593Smuzhiyun * can be no longer dirty nor marked anymore for writeback (if a
3796*4882a593Smuzhiyun * subsequent modification to the extent buffer didn't happen before the
3797*4882a593Smuzhiyun * transaction commit), which makes filemap_fdata[write|wait]_range not
3798*4882a593Smuzhiyun * able to find the pages tagged with SetPageError at transaction
3799*4882a593Smuzhiyun * commit time. So if this happens we must abort the transaction,
3800*4882a593Smuzhiyun * otherwise we commit a super block with btree roots that point to
3801*4882a593Smuzhiyun * btree nodes/leafs whose content on disk is invalid - either garbage
3802*4882a593Smuzhiyun * or the content of some node/leaf from a past generation that got
3803*4882a593Smuzhiyun * cowed or deleted and is no longer valid.
3804*4882a593Smuzhiyun *
3805*4882a593Smuzhiyun * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
3806*4882a593Smuzhiyun * not be enough - we need to distinguish between log tree extents vs
3807*4882a593Smuzhiyun * non-log tree extents, and the next filemap_fdatawait_range() call
3808*4882a593Smuzhiyun * will catch and clear such errors in the mapping - and that call might
3809*4882a593Smuzhiyun * be from a log sync and not from a transaction commit. Also, checking
3810*4882a593Smuzhiyun * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
3811*4882a593Smuzhiyun * not done and would not be reliable - the eb might have been released
3812*4882a593Smuzhiyun * from memory and reading it back again means that flag would not be
3813*4882a593Smuzhiyun * set (since it's a runtime flag, not persisted on disk).
3814*4882a593Smuzhiyun *
3815*4882a593Smuzhiyun * Using the flags below in the btree inode also lets us achieve what
3816*4882a593Smuzhiyun * AS_EIO/AS_ENOSPC would when writepages() returned success, started
3817*4882a593Smuzhiyun * writeback for all dirty pages, but before filemap_fdatawait_range()
3818*4882a593Smuzhiyun * was called the writeback for all dirty pages had already finished
3819*4882a593Smuzhiyun * with errors - because we were not using AS_EIO/AS_ENOSPC,
3820*4882a593Smuzhiyun * filemap_fdatawait_range() would return success, as it could not know
3821*4882a593Smuzhiyun * that writeback errors happened (the pages were no longer tagged for
3822*4882a593Smuzhiyun * writeback).
3823*4882a593Smuzhiyun */
3824*4882a593Smuzhiyun switch (eb->log_index) {
3825*4882a593Smuzhiyun case -1:
3826*4882a593Smuzhiyun set_bit(BTRFS_FS_BTREE_ERR, &eb->fs_info->flags);
3827*4882a593Smuzhiyun break;
3828*4882a593Smuzhiyun case 0:
3829*4882a593Smuzhiyun set_bit(BTRFS_FS_LOG1_ERR, &eb->fs_info->flags);
3830*4882a593Smuzhiyun break;
3831*4882a593Smuzhiyun case 1:
3832*4882a593Smuzhiyun set_bit(BTRFS_FS_LOG2_ERR, &eb->fs_info->flags);
3833*4882a593Smuzhiyun break;
3834*4882a593Smuzhiyun default:
3835*4882a593Smuzhiyun BUG(); /* unexpected, logic error */
3836*4882a593Smuzhiyun }
3837*4882a593Smuzhiyun }
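/*
 * Illustrative sketch (not the exact consumer code elsewhere in the tree):
 * the transaction commit and log sync paths are expected to test these bits
 * roughly like
 *
 *	if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
 *		ret = -EIO;
 *
 * with BTRFS_FS_LOG1_ERR / BTRFS_FS_LOG2_ERR checked by the log sync code
 * for the corresponding log transid.
 */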
3838*4882a593Smuzhiyun
3839*4882a593Smuzhiyun static void end_bio_extent_buffer_writepage(struct bio *bio)
3840*4882a593Smuzhiyun {
3841*4882a593Smuzhiyun struct bio_vec *bvec;
3842*4882a593Smuzhiyun struct extent_buffer *eb;
3843*4882a593Smuzhiyun int done;
3844*4882a593Smuzhiyun struct bvec_iter_all iter_all;
3845*4882a593Smuzhiyun
3846*4882a593Smuzhiyun ASSERT(!bio_flagged(bio, BIO_CLONED));
3847*4882a593Smuzhiyun bio_for_each_segment_all(bvec, bio, iter_all) {
3848*4882a593Smuzhiyun struct page *page = bvec->bv_page;
3849*4882a593Smuzhiyun
3850*4882a593Smuzhiyun eb = (struct extent_buffer *)page->private;
3851*4882a593Smuzhiyun BUG_ON(!eb);
3852*4882a593Smuzhiyun done = atomic_dec_and_test(&eb->io_pages);
3853*4882a593Smuzhiyun
3854*4882a593Smuzhiyun if (bio->bi_status ||
3855*4882a593Smuzhiyun test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
3856*4882a593Smuzhiyun ClearPageUptodate(page);
3857*4882a593Smuzhiyun set_btree_ioerr(page);
3858*4882a593Smuzhiyun }
3859*4882a593Smuzhiyun
3860*4882a593Smuzhiyun end_page_writeback(page);
3861*4882a593Smuzhiyun
3862*4882a593Smuzhiyun if (!done)
3863*4882a593Smuzhiyun continue;
3864*4882a593Smuzhiyun
3865*4882a593Smuzhiyun end_extent_buffer_writeback(eb);
3866*4882a593Smuzhiyun }
3867*4882a593Smuzhiyun
3868*4882a593Smuzhiyun bio_put(bio);
3869*4882a593Smuzhiyun }
3870*4882a593Smuzhiyun
3871*4882a593Smuzhiyun static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
3872*4882a593Smuzhiyun struct writeback_control *wbc,
3873*4882a593Smuzhiyun struct extent_page_data *epd)
3874*4882a593Smuzhiyun {
3875*4882a593Smuzhiyun u64 offset = eb->start;
3876*4882a593Smuzhiyun u32 nritems;
3877*4882a593Smuzhiyun int i, num_pages;
3878*4882a593Smuzhiyun unsigned long start, end;
3879*4882a593Smuzhiyun unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
3880*4882a593Smuzhiyun int ret = 0;
3881*4882a593Smuzhiyun
3882*4882a593Smuzhiyun clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
3883*4882a593Smuzhiyun num_pages = num_extent_pages(eb);
3884*4882a593Smuzhiyun atomic_set(&eb->io_pages, num_pages);
3885*4882a593Smuzhiyun
3886*4882a593Smuzhiyun /* Set btree blocks beyond nritems to 0 to avoid stale content. */
3887*4882a593Smuzhiyun nritems = btrfs_header_nritems(eb);
3888*4882a593Smuzhiyun if (btrfs_header_level(eb) > 0) {
3889*4882a593Smuzhiyun end = btrfs_node_key_ptr_offset(nritems);
3890*4882a593Smuzhiyun
3891*4882a593Smuzhiyun memzero_extent_buffer(eb, end, eb->len - end);
3892*4882a593Smuzhiyun } else {
3893*4882a593Smuzhiyun /*
3894*4882a593Smuzhiyun * leaf:
3895*4882a593Smuzhiyun * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
3896*4882a593Smuzhiyun */
3897*4882a593Smuzhiyun start = btrfs_item_nr_offset(nritems);
3898*4882a593Smuzhiyun end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
3899*4882a593Smuzhiyun memzero_extent_buffer(eb, start, end - start);
3900*4882a593Smuzhiyun }
3901*4882a593Smuzhiyun
3902*4882a593Smuzhiyun for (i = 0; i < num_pages; i++) {
3903*4882a593Smuzhiyun struct page *p = eb->pages[i];
3904*4882a593Smuzhiyun
3905*4882a593Smuzhiyun clear_page_dirty_for_io(p);
3906*4882a593Smuzhiyun set_page_writeback(p);
3907*4882a593Smuzhiyun ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
3908*4882a593Smuzhiyun p, offset, PAGE_SIZE, 0,
3909*4882a593Smuzhiyun &epd->bio,
3910*4882a593Smuzhiyun end_bio_extent_buffer_writepage,
3911*4882a593Smuzhiyun 0, 0, 0, false);
3912*4882a593Smuzhiyun if (ret) {
3913*4882a593Smuzhiyun set_btree_ioerr(p);
3914*4882a593Smuzhiyun if (PageWriteback(p))
3915*4882a593Smuzhiyun end_page_writeback(p);
3916*4882a593Smuzhiyun if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3917*4882a593Smuzhiyun end_extent_buffer_writeback(eb);
3918*4882a593Smuzhiyun ret = -EIO;
3919*4882a593Smuzhiyun break;
3920*4882a593Smuzhiyun }
3921*4882a593Smuzhiyun offset += PAGE_SIZE;
3922*4882a593Smuzhiyun update_nr_written(wbc, 1);
3923*4882a593Smuzhiyun unlock_page(p);
3924*4882a593Smuzhiyun }
3925*4882a593Smuzhiyun
3926*4882a593Smuzhiyun if (unlikely(ret)) {
3927*4882a593Smuzhiyun for (; i < num_pages; i++) {
3928*4882a593Smuzhiyun struct page *p = eb->pages[i];
3929*4882a593Smuzhiyun clear_page_dirty_for_io(p);
3930*4882a593Smuzhiyun unlock_page(p);
3931*4882a593Smuzhiyun }
3932*4882a593Smuzhiyun }
3933*4882a593Smuzhiyun
3934*4882a593Smuzhiyun return ret;
3935*4882a593Smuzhiyun }
3936*4882a593Smuzhiyun
3937*4882a593Smuzhiyun int btree_write_cache_pages(struct address_space *mapping,
3938*4882a593Smuzhiyun struct writeback_control *wbc)
3939*4882a593Smuzhiyun {
3940*4882a593Smuzhiyun struct extent_buffer *eb, *prev_eb = NULL;
3941*4882a593Smuzhiyun struct extent_page_data epd = {
3942*4882a593Smuzhiyun .bio = NULL,
3943*4882a593Smuzhiyun .extent_locked = 0,
3944*4882a593Smuzhiyun .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3945*4882a593Smuzhiyun };
3946*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3947*4882a593Smuzhiyun int ret = 0;
3948*4882a593Smuzhiyun int done = 0;
3949*4882a593Smuzhiyun int nr_to_write_done = 0;
3950*4882a593Smuzhiyun struct pagevec pvec;
3951*4882a593Smuzhiyun int nr_pages;
3952*4882a593Smuzhiyun pgoff_t index;
3953*4882a593Smuzhiyun pgoff_t end; /* Inclusive */
3954*4882a593Smuzhiyun int scanned = 0;
3955*4882a593Smuzhiyun xa_mark_t tag;
3956*4882a593Smuzhiyun
3957*4882a593Smuzhiyun pagevec_init(&pvec);
3958*4882a593Smuzhiyun if (wbc->range_cyclic) {
3959*4882a593Smuzhiyun index = mapping->writeback_index; /* Start from prev offset */
3960*4882a593Smuzhiyun end = -1;
3961*4882a593Smuzhiyun /*
3962*4882a593Smuzhiyun * Start from the beginning does not need to cycle over the
3963*4882a593Smuzhiyun * range, mark it as scanned.
3964*4882a593Smuzhiyun */
3965*4882a593Smuzhiyun scanned = (index == 0);
3966*4882a593Smuzhiyun } else {
3967*4882a593Smuzhiyun index = wbc->range_start >> PAGE_SHIFT;
3968*4882a593Smuzhiyun end = wbc->range_end >> PAGE_SHIFT;
3969*4882a593Smuzhiyun scanned = 1;
3970*4882a593Smuzhiyun }
3971*4882a593Smuzhiyun if (wbc->sync_mode == WB_SYNC_ALL)
3972*4882a593Smuzhiyun tag = PAGECACHE_TAG_TOWRITE;
3973*4882a593Smuzhiyun else
3974*4882a593Smuzhiyun tag = PAGECACHE_TAG_DIRTY;
3975*4882a593Smuzhiyun retry:
3976*4882a593Smuzhiyun if (wbc->sync_mode == WB_SYNC_ALL)
3977*4882a593Smuzhiyun tag_pages_for_writeback(mapping, index, end);
3978*4882a593Smuzhiyun while (!done && !nr_to_write_done && (index <= end) &&
3979*4882a593Smuzhiyun (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
3980*4882a593Smuzhiyun tag))) {
3981*4882a593Smuzhiyun unsigned i;
3982*4882a593Smuzhiyun
3983*4882a593Smuzhiyun for (i = 0; i < nr_pages; i++) {
3984*4882a593Smuzhiyun struct page *page = pvec.pages[i];
3985*4882a593Smuzhiyun
3986*4882a593Smuzhiyun if (!PagePrivate(page))
3987*4882a593Smuzhiyun continue;
3988*4882a593Smuzhiyun
3989*4882a593Smuzhiyun spin_lock(&mapping->private_lock);
3990*4882a593Smuzhiyun if (!PagePrivate(page)) {
3991*4882a593Smuzhiyun spin_unlock(&mapping->private_lock);
3992*4882a593Smuzhiyun continue;
3993*4882a593Smuzhiyun }
3994*4882a593Smuzhiyun
3995*4882a593Smuzhiyun eb = (struct extent_buffer *)page->private;
3996*4882a593Smuzhiyun
3997*4882a593Smuzhiyun /*
3998*4882a593Smuzhiyun * Shouldn't happen and normally this would be a BUG_ON
3999*4882a593Smuzhiyun * but no sense in crashing the user's box for something
4000*4882a593Smuzhiyun * we can survive anyway.
4001*4882a593Smuzhiyun */
4002*4882a593Smuzhiyun if (WARN_ON(!eb)) {
4003*4882a593Smuzhiyun spin_unlock(&mapping->private_lock);
4004*4882a593Smuzhiyun continue;
4005*4882a593Smuzhiyun }
4006*4882a593Smuzhiyun
4007*4882a593Smuzhiyun if (eb == prev_eb) {
4008*4882a593Smuzhiyun spin_unlock(&mapping->private_lock);
4009*4882a593Smuzhiyun continue;
4010*4882a593Smuzhiyun }
4011*4882a593Smuzhiyun
4012*4882a593Smuzhiyun ret = atomic_inc_not_zero(&eb->refs);
4013*4882a593Smuzhiyun spin_unlock(&mapping->private_lock);
4014*4882a593Smuzhiyun if (!ret)
4015*4882a593Smuzhiyun continue;
4016*4882a593Smuzhiyun
4017*4882a593Smuzhiyun prev_eb = eb;
4018*4882a593Smuzhiyun ret = lock_extent_buffer_for_io(eb, &epd);
4019*4882a593Smuzhiyun if (!ret) {
4020*4882a593Smuzhiyun free_extent_buffer(eb);
4021*4882a593Smuzhiyun continue;
4022*4882a593Smuzhiyun } else if (ret < 0) {
4023*4882a593Smuzhiyun done = 1;
4024*4882a593Smuzhiyun free_extent_buffer(eb);
4025*4882a593Smuzhiyun break;
4026*4882a593Smuzhiyun }
4027*4882a593Smuzhiyun
4028*4882a593Smuzhiyun ret = write_one_eb(eb, wbc, &epd);
4029*4882a593Smuzhiyun if (ret) {
4030*4882a593Smuzhiyun done = 1;
4031*4882a593Smuzhiyun free_extent_buffer(eb);
4032*4882a593Smuzhiyun break;
4033*4882a593Smuzhiyun }
4034*4882a593Smuzhiyun free_extent_buffer(eb);
4035*4882a593Smuzhiyun
4036*4882a593Smuzhiyun /*
4037*4882a593Smuzhiyun * the filesystem may choose to bump up nr_to_write.
4038*4882a593Smuzhiyun * We have to make sure to honor the new nr_to_write
4039*4882a593Smuzhiyun * at any time
4040*4882a593Smuzhiyun */
4041*4882a593Smuzhiyun nr_to_write_done = wbc->nr_to_write <= 0;
4042*4882a593Smuzhiyun }
4043*4882a593Smuzhiyun pagevec_release(&pvec);
4044*4882a593Smuzhiyun cond_resched();
4045*4882a593Smuzhiyun }
4046*4882a593Smuzhiyun if (!scanned && !done) {
4047*4882a593Smuzhiyun /*
4048*4882a593Smuzhiyun * We hit the last page and there is more work to be done: wrap
4049*4882a593Smuzhiyun * back to the start of the file
4050*4882a593Smuzhiyun */
4051*4882a593Smuzhiyun scanned = 1;
4052*4882a593Smuzhiyun index = 0;
4053*4882a593Smuzhiyun goto retry;
4054*4882a593Smuzhiyun }
4055*4882a593Smuzhiyun ASSERT(ret <= 0);
4056*4882a593Smuzhiyun if (ret < 0) {
4057*4882a593Smuzhiyun end_write_bio(&epd, ret);
4058*4882a593Smuzhiyun return ret;
4059*4882a593Smuzhiyun }
4060*4882a593Smuzhiyun /*
4061*4882a593Smuzhiyun * If something went wrong, don't allow any metadata write bio to be
4062*4882a593Smuzhiyun * submitted.
4063*4882a593Smuzhiyun *
4064*4882a593Smuzhiyun * This would prevent use-after-free if we had dirty pages not
4065*4882a593Smuzhiyun * cleaned up, which can still happen with fuzzed images.
4066*4882a593Smuzhiyun *
4067*4882a593Smuzhiyun * - Bad extent tree
4068*4882a593Smuzhiyun * Allowing existing tree block to be allocated for other trees.
4069*4882a593Smuzhiyun *
4070*4882a593Smuzhiyun * - Log tree operations
4071*4882a593Smuzhiyun * Existing tree blocks get allocated to the log tree, which bumps
4072*4882a593Smuzhiyun * their generation, then they get cleaned in tree re-balance.
4073*4882a593Smuzhiyun * Such tree block will not be written back, since it's clean,
4074*4882a593Smuzhiyun * thus no WRITTEN flag set.
4075*4882a593Smuzhiyun * And after log writes back, this tree block is not traced by
4076*4882a593Smuzhiyun * any dirty extent_io_tree.
4077*4882a593Smuzhiyun *
4078*4882a593Smuzhiyun * - Offending tree block gets re-dirtied from its original owner
4079*4882a593Smuzhiyun * Since it has bumped generation, no WRITTEN flag, it can be
4080*4882a593Smuzhiyun * reused without COWing. This tree block will not be traced
4081*4882a593Smuzhiyun * by btrfs_transaction::dirty_pages.
4082*4882a593Smuzhiyun *
4083*4882a593Smuzhiyun * Now such a dirty tree block will not be cleaned by any dirty
4084*4882a593Smuzhiyun * extent io tree. Thus we don't want to submit such a wild eb
4085*4882a593Smuzhiyun * if the fs already has errors.
4086*4882a593Smuzhiyun */
4087*4882a593Smuzhiyun if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
4088*4882a593Smuzhiyun ret = flush_write_bio(&epd);
4089*4882a593Smuzhiyun } else {
4090*4882a593Smuzhiyun ret = -EROFS;
4091*4882a593Smuzhiyun end_write_bio(&epd, ret);
4092*4882a593Smuzhiyun }
4093*4882a593Smuzhiyun return ret;
4094*4882a593Smuzhiyun }
4095*4882a593Smuzhiyun
4096*4882a593Smuzhiyun /**
4097*4882a593Smuzhiyun * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
4098*4882a593Smuzhiyun * @mapping: address space structure to write
4099*4882a593Smuzhiyun * @wbc: subtract the number of written pages from *@wbc->nr_to_write
4100*4882a593Smuzhiyun * @epd: extent_page_data holding the bio and flush state, passed to __extent_writepage
4101*4882a593Smuzhiyun *
4102*4882a593Smuzhiyun * If a page is already under I/O, write_cache_pages() skips it, even
4103*4882a593Smuzhiyun * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
4104*4882a593Smuzhiyun * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
4105*4882a593Smuzhiyun * and msync() need to guarantee that all the data which was dirty at the time
4106*4882a593Smuzhiyun * the call was made get new I/O started against them. If wbc->sync_mode is
4107*4882a593Smuzhiyun * WB_SYNC_ALL then we were called for data integrity and we must wait for
4108*4882a593Smuzhiyun * existing IO to complete.
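 *
 * Illustrative example (added here, not part of the original kernel-doc): a
 * data integrity flush would typically reach this function with something
 * like
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *		.nr_to_write	= LONG_MAX,
 *	};
 *
 * so the TOWRITE tagging path below is taken and pages already under
 * writeback are waited on instead of being skipped.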
4109*4882a593Smuzhiyun */
4110*4882a593Smuzhiyun static int extent_write_cache_pages(struct address_space *mapping,
4111*4882a593Smuzhiyun struct writeback_control *wbc,
4112*4882a593Smuzhiyun struct extent_page_data *epd)
4113*4882a593Smuzhiyun {
4114*4882a593Smuzhiyun struct inode *inode = mapping->host;
4115*4882a593Smuzhiyun int ret = 0;
4116*4882a593Smuzhiyun int done = 0;
4117*4882a593Smuzhiyun int nr_to_write_done = 0;
4118*4882a593Smuzhiyun struct pagevec pvec;
4119*4882a593Smuzhiyun int nr_pages;
4120*4882a593Smuzhiyun pgoff_t index;
4121*4882a593Smuzhiyun pgoff_t end; /* Inclusive */
4122*4882a593Smuzhiyun pgoff_t done_index;
4123*4882a593Smuzhiyun int range_whole = 0;
4124*4882a593Smuzhiyun int scanned = 0;
4125*4882a593Smuzhiyun xa_mark_t tag;
4126*4882a593Smuzhiyun
4127*4882a593Smuzhiyun /*
4128*4882a593Smuzhiyun * We have to hold onto the inode so that ordered extents can do their
4129*4882a593Smuzhiyun * work when the IO finishes. The alternative to this is failing to add
4130*4882a593Smuzhiyun * an ordered extent if the igrab() fails there and that is a huge pain
4131*4882a593Smuzhiyun * to deal with, so instead just hold onto the inode throughout the
4132*4882a593Smuzhiyun * writepages operation. If it fails here we are freeing up the inode
4133*4882a593Smuzhiyun * anyway and we'd rather not waste our time writing out stuff that is
4134*4882a593Smuzhiyun * going to be truncated anyway.
4135*4882a593Smuzhiyun */
4136*4882a593Smuzhiyun if (!igrab(inode))
4137*4882a593Smuzhiyun return 0;
4138*4882a593Smuzhiyun
4139*4882a593Smuzhiyun pagevec_init(&pvec);
4140*4882a593Smuzhiyun if (wbc->range_cyclic) {
4141*4882a593Smuzhiyun index = mapping->writeback_index; /* Start from prev offset */
4142*4882a593Smuzhiyun end = -1;
4143*4882a593Smuzhiyun /*
4144*4882a593Smuzhiyun * Starting from the beginning does not require cycling over the
4145*4882a593Smuzhiyun * whole range, so mark it as scanned.
4146*4882a593Smuzhiyun */
4147*4882a593Smuzhiyun scanned = (index == 0);
4148*4882a593Smuzhiyun } else {
4149*4882a593Smuzhiyun index = wbc->range_start >> PAGE_SHIFT;
4150*4882a593Smuzhiyun end = wbc->range_end >> PAGE_SHIFT;
4151*4882a593Smuzhiyun if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
4152*4882a593Smuzhiyun range_whole = 1;
4153*4882a593Smuzhiyun scanned = 1;
4154*4882a593Smuzhiyun }
4155*4882a593Smuzhiyun
4156*4882a593Smuzhiyun /*
4157*4882a593Smuzhiyun * We do the tagged writepage as long as the snapshot flush bit is set
4158*4882a593Smuzhiyun * and we are the first one to do the filemap_flush() on this inode.
4159*4882a593Smuzhiyun *
4160*4882a593Smuzhiyun * The nr_to_write == LONG_MAX is needed to make sure other flushers do
4161*4882a593Smuzhiyun * not race in and drop the bit.
4162*4882a593Smuzhiyun */
4163*4882a593Smuzhiyun if (range_whole && wbc->nr_to_write == LONG_MAX &&
4164*4882a593Smuzhiyun test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
4165*4882a593Smuzhiyun &BTRFS_I(inode)->runtime_flags))
4166*4882a593Smuzhiyun wbc->tagged_writepages = 1;
4167*4882a593Smuzhiyun
4168*4882a593Smuzhiyun if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
4169*4882a593Smuzhiyun tag = PAGECACHE_TAG_TOWRITE;
4170*4882a593Smuzhiyun else
4171*4882a593Smuzhiyun tag = PAGECACHE_TAG_DIRTY;
4172*4882a593Smuzhiyun retry:
4173*4882a593Smuzhiyun if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
4174*4882a593Smuzhiyun tag_pages_for_writeback(mapping, index, end);
4175*4882a593Smuzhiyun done_index = index;
4176*4882a593Smuzhiyun while (!done && !nr_to_write_done && (index <= end) &&
4177*4882a593Smuzhiyun (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
4178*4882a593Smuzhiyun &index, end, tag))) {
4179*4882a593Smuzhiyun unsigned i;
4180*4882a593Smuzhiyun
4181*4882a593Smuzhiyun for (i = 0; i < nr_pages; i++) {
4182*4882a593Smuzhiyun struct page *page = pvec.pages[i];
4183*4882a593Smuzhiyun
4184*4882a593Smuzhiyun done_index = page->index + 1;
4185*4882a593Smuzhiyun /*
4186*4882a593Smuzhiyun * At this point we hold neither the i_pages lock nor
4187*4882a593Smuzhiyun * the page lock: the page may be truncated or
4188*4882a593Smuzhiyun * invalidated (changing page->mapping to NULL),
4189*4882a593Smuzhiyun * or even swizzled back from swapper_space to
4190*4882a593Smuzhiyun * tmpfs file mapping
4191*4882a593Smuzhiyun */
4192*4882a593Smuzhiyun if (!trylock_page(page)) {
4193*4882a593Smuzhiyun ret = flush_write_bio(epd);
4194*4882a593Smuzhiyun BUG_ON(ret < 0);
4195*4882a593Smuzhiyun lock_page(page);
4196*4882a593Smuzhiyun }
4197*4882a593Smuzhiyun
4198*4882a593Smuzhiyun if (unlikely(page->mapping != mapping)) {
4199*4882a593Smuzhiyun unlock_page(page);
4200*4882a593Smuzhiyun continue;
4201*4882a593Smuzhiyun }
4202*4882a593Smuzhiyun
4203*4882a593Smuzhiyun if (wbc->sync_mode != WB_SYNC_NONE) {
4204*4882a593Smuzhiyun if (PageWriteback(page)) {
4205*4882a593Smuzhiyun ret = flush_write_bio(epd);
4206*4882a593Smuzhiyun BUG_ON(ret < 0);
4207*4882a593Smuzhiyun }
4208*4882a593Smuzhiyun wait_on_page_writeback(page);
4209*4882a593Smuzhiyun }
4210*4882a593Smuzhiyun
4211*4882a593Smuzhiyun if (PageWriteback(page) ||
4212*4882a593Smuzhiyun !clear_page_dirty_for_io(page)) {
4213*4882a593Smuzhiyun unlock_page(page);
4214*4882a593Smuzhiyun continue;
4215*4882a593Smuzhiyun }
4216*4882a593Smuzhiyun
4217*4882a593Smuzhiyun ret = __extent_writepage(page, wbc, epd);
4218*4882a593Smuzhiyun if (ret < 0) {
4219*4882a593Smuzhiyun done = 1;
4220*4882a593Smuzhiyun break;
4221*4882a593Smuzhiyun }
4222*4882a593Smuzhiyun
4223*4882a593Smuzhiyun /*
4224*4882a593Smuzhiyun * the filesystem may choose to bump up nr_to_write.
4225*4882a593Smuzhiyun * We have to make sure to honor the new nr_to_write
4226*4882a593Smuzhiyun * at any time
4227*4882a593Smuzhiyun */
4228*4882a593Smuzhiyun nr_to_write_done = wbc->nr_to_write <= 0;
4229*4882a593Smuzhiyun }
4230*4882a593Smuzhiyun pagevec_release(&pvec);
4231*4882a593Smuzhiyun cond_resched();
4232*4882a593Smuzhiyun }
4233*4882a593Smuzhiyun if (!scanned && !done) {
4234*4882a593Smuzhiyun /*
4235*4882a593Smuzhiyun * We hit the last page and there is more work to be done: wrap
4236*4882a593Smuzhiyun * back to the start of the file
4237*4882a593Smuzhiyun */
4238*4882a593Smuzhiyun scanned = 1;
4239*4882a593Smuzhiyun index = 0;
4240*4882a593Smuzhiyun
4241*4882a593Smuzhiyun /*
4242*4882a593Smuzhiyun * If we're looping we could run into a page that is locked by a
4243*4882a593Smuzhiyun * writer and that writer could be waiting on writeback for a
4244*4882a593Smuzhiyun * page in our current bio, and thus deadlock, so flush the
4245*4882a593Smuzhiyun * write bio here.
4246*4882a593Smuzhiyun */
4247*4882a593Smuzhiyun ret = flush_write_bio(epd);
4248*4882a593Smuzhiyun if (!ret)
4249*4882a593Smuzhiyun goto retry;
4250*4882a593Smuzhiyun }
4251*4882a593Smuzhiyun
4252*4882a593Smuzhiyun if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
4253*4882a593Smuzhiyun mapping->writeback_index = done_index;
4254*4882a593Smuzhiyun
4255*4882a593Smuzhiyun btrfs_add_delayed_iput(inode);
4256*4882a593Smuzhiyun return ret;
4257*4882a593Smuzhiyun }
4258*4882a593Smuzhiyun
4259*4882a593Smuzhiyun int extent_write_full_page(struct page *page, struct writeback_control *wbc)
4260*4882a593Smuzhiyun {
4261*4882a593Smuzhiyun int ret;
4262*4882a593Smuzhiyun struct extent_page_data epd = {
4263*4882a593Smuzhiyun .bio = NULL,
4264*4882a593Smuzhiyun .extent_locked = 0,
4265*4882a593Smuzhiyun .sync_io = wbc->sync_mode == WB_SYNC_ALL,
4266*4882a593Smuzhiyun };
4267*4882a593Smuzhiyun
4268*4882a593Smuzhiyun ret = __extent_writepage(page, wbc, &epd);
4269*4882a593Smuzhiyun ASSERT(ret <= 0);
4270*4882a593Smuzhiyun if (ret < 0) {
4271*4882a593Smuzhiyun end_write_bio(&epd, ret);
4272*4882a593Smuzhiyun return ret;
4273*4882a593Smuzhiyun }
4274*4882a593Smuzhiyun
4275*4882a593Smuzhiyun ret = flush_write_bio(&epd);
4276*4882a593Smuzhiyun ASSERT(ret <= 0);
4277*4882a593Smuzhiyun return ret;
4278*4882a593Smuzhiyun }
4279*4882a593Smuzhiyun
4280*4882a593Smuzhiyun int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
4281*4882a593Smuzhiyun int mode)
4282*4882a593Smuzhiyun {
4283*4882a593Smuzhiyun int ret = 0;
4284*4882a593Smuzhiyun struct address_space *mapping = inode->i_mapping;
4285*4882a593Smuzhiyun struct page *page;
4286*4882a593Smuzhiyun unsigned long nr_pages = (end - start + PAGE_SIZE) >>
4287*4882a593Smuzhiyun PAGE_SHIFT;
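/*
 * Worked example (illustrative only): with PAGE_SIZE == 4096, a locked
 * range of start == 0 and end == 8191 (inclusive) gives
 * nr_pages = (8191 - 0 + 4096) >> PAGE_SHIFT == 2.
 */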
4288*4882a593Smuzhiyun
4289*4882a593Smuzhiyun struct extent_page_data epd = {
4290*4882a593Smuzhiyun .bio = NULL,
4291*4882a593Smuzhiyun .extent_locked = 1,
4292*4882a593Smuzhiyun .sync_io = mode == WB_SYNC_ALL,
4293*4882a593Smuzhiyun };
4294*4882a593Smuzhiyun struct writeback_control wbc_writepages = {
4295*4882a593Smuzhiyun .sync_mode = mode,
4296*4882a593Smuzhiyun .nr_to_write = nr_pages * 2,
4297*4882a593Smuzhiyun .range_start = start,
4298*4882a593Smuzhiyun .range_end = end + 1,
4299*4882a593Smuzhiyun /* We're called from an async helper function */
4300*4882a593Smuzhiyun .punt_to_cgroup = 1,
4301*4882a593Smuzhiyun .no_cgroup_owner = 1,
4302*4882a593Smuzhiyun };
4303*4882a593Smuzhiyun
4304*4882a593Smuzhiyun wbc_attach_fdatawrite_inode(&wbc_writepages, inode);
4305*4882a593Smuzhiyun while (start <= end) {
4306*4882a593Smuzhiyun page = find_get_page(mapping, start >> PAGE_SHIFT);
4307*4882a593Smuzhiyun if (clear_page_dirty_for_io(page))
4308*4882a593Smuzhiyun ret = __extent_writepage(page, &wbc_writepages, &epd);
4309*4882a593Smuzhiyun else {
4310*4882a593Smuzhiyun btrfs_writepage_endio_finish_ordered(page, start,
4311*4882a593Smuzhiyun start + PAGE_SIZE - 1, 1);
4312*4882a593Smuzhiyun unlock_page(page);
4313*4882a593Smuzhiyun }
4314*4882a593Smuzhiyun put_page(page);
4315*4882a593Smuzhiyun start += PAGE_SIZE;
4316*4882a593Smuzhiyun }
4317*4882a593Smuzhiyun
4318*4882a593Smuzhiyun ASSERT(ret <= 0);
4319*4882a593Smuzhiyun if (ret == 0)
4320*4882a593Smuzhiyun ret = flush_write_bio(&epd);
4321*4882a593Smuzhiyun else
4322*4882a593Smuzhiyun end_write_bio(&epd, ret);
4323*4882a593Smuzhiyun
4324*4882a593Smuzhiyun wbc_detach_inode(&wbc_writepages);
4325*4882a593Smuzhiyun return ret;
4326*4882a593Smuzhiyun }
4327*4882a593Smuzhiyun
4328*4882a593Smuzhiyun int extent_writepages(struct address_space *mapping,
4329*4882a593Smuzhiyun struct writeback_control *wbc)
4330*4882a593Smuzhiyun {
4331*4882a593Smuzhiyun int ret = 0;
4332*4882a593Smuzhiyun struct extent_page_data epd = {
4333*4882a593Smuzhiyun .bio = NULL,
4334*4882a593Smuzhiyun .extent_locked = 0,
4335*4882a593Smuzhiyun .sync_io = wbc->sync_mode == WB_SYNC_ALL,
4336*4882a593Smuzhiyun };
4337*4882a593Smuzhiyun
4338*4882a593Smuzhiyun ret = extent_write_cache_pages(mapping, wbc, &epd);
4339*4882a593Smuzhiyun ASSERT(ret <= 0);
4340*4882a593Smuzhiyun if (ret < 0) {
4341*4882a593Smuzhiyun end_write_bio(&epd, ret);
4342*4882a593Smuzhiyun return ret;
4343*4882a593Smuzhiyun }
4344*4882a593Smuzhiyun ret = flush_write_bio(&epd);
4345*4882a593Smuzhiyun return ret;
4346*4882a593Smuzhiyun }
4347*4882a593Smuzhiyun
4348*4882a593Smuzhiyun void extent_readahead(struct readahead_control *rac)
4349*4882a593Smuzhiyun {
4350*4882a593Smuzhiyun struct bio *bio = NULL;
4351*4882a593Smuzhiyun unsigned long bio_flags = 0;
4352*4882a593Smuzhiyun struct page *pagepool[16];
4353*4882a593Smuzhiyun struct extent_map *em_cached = NULL;
4354*4882a593Smuzhiyun u64 prev_em_start = (u64)-1;
4355*4882a593Smuzhiyun int nr;
4356*4882a593Smuzhiyun
4357*4882a593Smuzhiyun while ((nr = readahead_page_batch(rac, pagepool))) {
4358*4882a593Smuzhiyun u64 contig_start = page_offset(pagepool[0]);
4359*4882a593Smuzhiyun u64 contig_end = page_offset(pagepool[nr - 1]) + PAGE_SIZE - 1;
4360*4882a593Smuzhiyun
4361*4882a593Smuzhiyun ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
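/*
 * Example of the invariant checked above (illustrative only): a batch of
 * nr == 4 pages starting at file offset 16384 has contig_start == 16384 and
 * contig_end == 16384 + 4 * 4096 - 1 == 32767, matching page_offset() of
 * the last page plus PAGE_SIZE - 1.
 */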
4362*4882a593Smuzhiyun
4363*4882a593Smuzhiyun contiguous_readpages(pagepool, nr, contig_start, contig_end,
4364*4882a593Smuzhiyun &em_cached, &bio, &bio_flags, &prev_em_start);
4365*4882a593Smuzhiyun }
4366*4882a593Smuzhiyun
4367*4882a593Smuzhiyun if (em_cached)
4368*4882a593Smuzhiyun free_extent_map(em_cached);
4369*4882a593Smuzhiyun
4370*4882a593Smuzhiyun if (bio) {
4371*4882a593Smuzhiyun if (submit_one_bio(bio, 0, bio_flags))
4372*4882a593Smuzhiyun return;
4373*4882a593Smuzhiyun }
4374*4882a593Smuzhiyun }
4375*4882a593Smuzhiyun
4376*4882a593Smuzhiyun /*
4377*4882a593Smuzhiyun * basic invalidatepage code, this waits on any locked or writeback
4378*4882a593Smuzhiyun * ranges corresponding to the page, and then deletes any extent state
4379*4882a593Smuzhiyun * records from the tree
4380*4882a593Smuzhiyun */
4381*4882a593Smuzhiyun int extent_invalidatepage(struct extent_io_tree *tree,
4382*4882a593Smuzhiyun struct page *page, unsigned long offset)
4383*4882a593Smuzhiyun {
4384*4882a593Smuzhiyun struct extent_state *cached_state = NULL;
4385*4882a593Smuzhiyun u64 start = page_offset(page);
4386*4882a593Smuzhiyun u64 end = start + PAGE_SIZE - 1;
4387*4882a593Smuzhiyun size_t blocksize = page->mapping->host->i_sb->s_blocksize;
4388*4882a593Smuzhiyun
4389*4882a593Smuzhiyun start += ALIGN(offset, blocksize);
4390*4882a593Smuzhiyun if (start > end)
4391*4882a593Smuzhiyun return 0;
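/*
 * Illustrative example (not in the original source): for a page covering
 * [4096, 8191] with a 4K blocksize, invalidating from offset 100 rounds up
 * to 4096, so start becomes 8192 > end and nothing is cleared; invalidating
 * from offset 0 keeps start at 4096 and the whole range below is cleared.
 */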
4392*4882a593Smuzhiyun
4393*4882a593Smuzhiyun lock_extent_bits(tree, start, end, &cached_state);
4394*4882a593Smuzhiyun wait_on_page_writeback(page);
4395*4882a593Smuzhiyun clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DELALLOC |
4396*4882a593Smuzhiyun EXTENT_DO_ACCOUNTING, 1, 1, &cached_state);
4397*4882a593Smuzhiyun return 0;
4398*4882a593Smuzhiyun }
4399*4882a593Smuzhiyun
4400*4882a593Smuzhiyun /*
4401*4882a593Smuzhiyun * a helper for releasepage, this tests for areas of the page that
4402*4882a593Smuzhiyun * are locked or under IO and drops the related state bits if it is safe
4403*4882a593Smuzhiyun * to drop the page.
4404*4882a593Smuzhiyun */
4405*4882a593Smuzhiyun static int try_release_extent_state(struct extent_io_tree *tree,
4406*4882a593Smuzhiyun struct page *page, gfp_t mask)
4407*4882a593Smuzhiyun {
4408*4882a593Smuzhiyun u64 start = page_offset(page);
4409*4882a593Smuzhiyun u64 end = start + PAGE_SIZE - 1;
4410*4882a593Smuzhiyun int ret = 1;
4411*4882a593Smuzhiyun
4412*4882a593Smuzhiyun if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
4413*4882a593Smuzhiyun ret = 0;
4414*4882a593Smuzhiyun } else {
4415*4882a593Smuzhiyun /*
4416*4882a593Smuzhiyun * at this point we can safely clear everything except the
4417*4882a593Smuzhiyun * locked bit and the nodatasum bit
4418*4882a593Smuzhiyun */
4419*4882a593Smuzhiyun ret = __clear_extent_bit(tree, start, end,
4420*4882a593Smuzhiyun ~(EXTENT_LOCKED | EXTENT_NODATASUM),
4421*4882a593Smuzhiyun 0, 0, NULL, mask, NULL);
4422*4882a593Smuzhiyun
4423*4882a593Smuzhiyun /* if clear_extent_bit() failed due to ENOMEM,
4424*4882a593Smuzhiyun * we can't allow the release to continue.
4425*4882a593Smuzhiyun */
4426*4882a593Smuzhiyun if (ret < 0)
4427*4882a593Smuzhiyun ret = 0;
4428*4882a593Smuzhiyun else
4429*4882a593Smuzhiyun ret = 1;
4430*4882a593Smuzhiyun }
4431*4882a593Smuzhiyun return ret;
4432*4882a593Smuzhiyun }
4433*4882a593Smuzhiyun
4434*4882a593Smuzhiyun /*
4435*4882a593Smuzhiyun * a helper for releasepage. As long as there are no locked extents
4436*4882a593Smuzhiyun * in the range corresponding to the page, both state records and extent
4437*4882a593Smuzhiyun * map records are removed
4438*4882a593Smuzhiyun */
4439*4882a593Smuzhiyun int try_release_extent_mapping(struct page *page, gfp_t mask)
4440*4882a593Smuzhiyun {
4441*4882a593Smuzhiyun struct extent_map *em;
4442*4882a593Smuzhiyun u64 start = page_offset(page);
4443*4882a593Smuzhiyun u64 end = start + PAGE_SIZE - 1;
4444*4882a593Smuzhiyun struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
4445*4882a593Smuzhiyun struct extent_io_tree *tree = &btrfs_inode->io_tree;
4446*4882a593Smuzhiyun struct extent_map_tree *map = &btrfs_inode->extent_tree;
4447*4882a593Smuzhiyun
4448*4882a593Smuzhiyun if (gfpflags_allow_blocking(mask) &&
4449*4882a593Smuzhiyun page->mapping->host->i_size > SZ_16M) {
4450*4882a593Smuzhiyun u64 len;
4451*4882a593Smuzhiyun while (start <= end) {
4452*4882a593Smuzhiyun struct btrfs_fs_info *fs_info;
4453*4882a593Smuzhiyun u64 cur_gen;
4454*4882a593Smuzhiyun
4455*4882a593Smuzhiyun len = end - start + 1;
4456*4882a593Smuzhiyun write_lock(&map->lock);
4457*4882a593Smuzhiyun em = lookup_extent_mapping(map, start, len);
4458*4882a593Smuzhiyun if (!em) {
4459*4882a593Smuzhiyun write_unlock(&map->lock);
4460*4882a593Smuzhiyun break;
4461*4882a593Smuzhiyun }
4462*4882a593Smuzhiyun if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
4463*4882a593Smuzhiyun em->start != start) {
4464*4882a593Smuzhiyun write_unlock(&map->lock);
4465*4882a593Smuzhiyun free_extent_map(em);
4466*4882a593Smuzhiyun break;
4467*4882a593Smuzhiyun }
4468*4882a593Smuzhiyun if (test_range_bit(tree, em->start,
4469*4882a593Smuzhiyun extent_map_end(em) - 1,
4470*4882a593Smuzhiyun EXTENT_LOCKED, 0, NULL))
4471*4882a593Smuzhiyun goto next;
4472*4882a593Smuzhiyun /*
4473*4882a593Smuzhiyun * If it's not in the list of modified extents, used
4474*4882a593Smuzhiyun * by a fast fsync, we can remove it. If it's being
4475*4882a593Smuzhiyun * logged we can safely remove it since fsync took an
4476*4882a593Smuzhiyun * extra reference on the em.
4477*4882a593Smuzhiyun */
4478*4882a593Smuzhiyun if (list_empty(&em->list) ||
4479*4882a593Smuzhiyun test_bit(EXTENT_FLAG_LOGGING, &em->flags))
4480*4882a593Smuzhiyun goto remove_em;
4481*4882a593Smuzhiyun /*
4482*4882a593Smuzhiyun * If it's in the list of modified extents, remove it
4483*4882a593Smuzhiyun * only if its generation is older than the current one,
4484*4882a593Smuzhiyun * in which case we don't need it for a fast fsync.
4485*4882a593Smuzhiyun * Otherwise don't remove it, we could be racing with an
4486*4882a593Smuzhiyun * ongoing fast fsync that could miss the new extent.
4487*4882a593Smuzhiyun */
4488*4882a593Smuzhiyun fs_info = btrfs_inode->root->fs_info;
4489*4882a593Smuzhiyun spin_lock(&fs_info->trans_lock);
4490*4882a593Smuzhiyun cur_gen = fs_info->generation;
4491*4882a593Smuzhiyun spin_unlock(&fs_info->trans_lock);
4492*4882a593Smuzhiyun if (em->generation >= cur_gen)
4493*4882a593Smuzhiyun goto next;
4494*4882a593Smuzhiyun remove_em:
4495*4882a593Smuzhiyun /*
4496*4882a593Smuzhiyun * We only remove extent maps that are not in the list of
4497*4882a593Smuzhiyun * modified extents or that are in the list but with a
4498*4882a593Smuzhiyun * generation lower than the current generation, so there
4499*4882a593Smuzhiyun * is no need to set the full fsync flag on the inode (it
4500*4882a593Smuzhiyun * hurts the fsync performance for workloads with a data
4501*4882a593Smuzhiyun * size that exceeds or is close to the system's memory).
4502*4882a593Smuzhiyun */
4503*4882a593Smuzhiyun remove_extent_mapping(map, em);
4504*4882a593Smuzhiyun /* once for the rb tree */
4505*4882a593Smuzhiyun free_extent_map(em);
4506*4882a593Smuzhiyun next:
4507*4882a593Smuzhiyun start = extent_map_end(em);
4508*4882a593Smuzhiyun write_unlock(&map->lock);
4509*4882a593Smuzhiyun
4510*4882a593Smuzhiyun /* once for us */
4511*4882a593Smuzhiyun free_extent_map(em);
4512*4882a593Smuzhiyun
4513*4882a593Smuzhiyun cond_resched(); /* Allow large-extent preemption. */
4514*4882a593Smuzhiyun }
4515*4882a593Smuzhiyun }
4516*4882a593Smuzhiyun return try_release_extent_state(tree, page, mask);
4517*4882a593Smuzhiyun }
4518*4882a593Smuzhiyun
4519*4882a593Smuzhiyun /*
4520*4882a593Smuzhiyun * helper function for fiemap, which doesn't want to see any holes.
4521*4882a593Smuzhiyun * This maps until we find something past 'last'
4522*4882a593Smuzhiyun */
4523*4882a593Smuzhiyun static struct extent_map *get_extent_skip_holes(struct btrfs_inode *inode,
4524*4882a593Smuzhiyun u64 offset, u64 last)
4525*4882a593Smuzhiyun {
4526*4882a593Smuzhiyun u64 sectorsize = btrfs_inode_sectorsize(inode);
4527*4882a593Smuzhiyun struct extent_map *em;
4528*4882a593Smuzhiyun u64 len;
4529*4882a593Smuzhiyun
4530*4882a593Smuzhiyun if (offset >= last)
4531*4882a593Smuzhiyun return NULL;
4532*4882a593Smuzhiyun
4533*4882a593Smuzhiyun while (1) {
4534*4882a593Smuzhiyun len = last - offset;
4535*4882a593Smuzhiyun if (len == 0)
4536*4882a593Smuzhiyun break;
4537*4882a593Smuzhiyun len = ALIGN(len, sectorsize);
4538*4882a593Smuzhiyun em = btrfs_get_extent_fiemap(inode, offset, len);
4539*4882a593Smuzhiyun if (IS_ERR_OR_NULL(em))
4540*4882a593Smuzhiyun return em;
4541*4882a593Smuzhiyun
4542*4882a593Smuzhiyun /* if this isn't a hole return it */
4543*4882a593Smuzhiyun if (em->block_start != EXTENT_MAP_HOLE)
4544*4882a593Smuzhiyun return em;
4545*4882a593Smuzhiyun
4546*4882a593Smuzhiyun /* this is a hole, advance to the next extent */
4547*4882a593Smuzhiyun offset = extent_map_end(em);
4548*4882a593Smuzhiyun free_extent_map(em);
4549*4882a593Smuzhiyun if (offset >= last)
4550*4882a593Smuzhiyun break;
4551*4882a593Smuzhiyun }
4552*4882a593Smuzhiyun return NULL;
4553*4882a593Smuzhiyun }
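/*
 * Example of the skipping behaviour (illustrative only): asking for
 * offset == 0 and last == 1MiB on a file whose first 64KiB is a hole
 * followed by a real extent makes the first btrfs_get_extent_fiemap() call
 * return the hole mapping; the loop frees it, advances offset to 64KiB, and
 * the second call returns the real extent, which is what the caller gets
 * back.
 */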
4554*4882a593Smuzhiyun
4555*4882a593Smuzhiyun /*
4556*4882a593Smuzhiyun * To cache previous fiemap extent
4557*4882a593Smuzhiyun *
4558*4882a593Smuzhiyun * Will be used for merging fiemap extent
4559*4882a593Smuzhiyun */
4560*4882a593Smuzhiyun struct fiemap_cache {
4561*4882a593Smuzhiyun u64 offset;
4562*4882a593Smuzhiyun u64 phys;
4563*4882a593Smuzhiyun u64 len;
4564*4882a593Smuzhiyun u32 flags;
4565*4882a593Smuzhiyun bool cached;
4566*4882a593Smuzhiyun };
4567*4882a593Smuzhiyun
4568*4882a593Smuzhiyun /*
4569*4882a593Smuzhiyun * Helper to submit fiemap extent.
4570*4882a593Smuzhiyun *
4571*4882a593Smuzhiyun * Will try to merge current fiemap extent specified by @offset, @phys,
4572*4882a593Smuzhiyun * @len and @flags with cached one.
4573*4882a593Smuzhiyun * Only when we fail to merge will the cached one be submitted as a
4574*4882a593Smuzhiyun * fiemap extent.
4575*4882a593Smuzhiyun *
4576*4882a593Smuzhiyun * Return value is the same as fiemap_fill_next_extent().
4577*4882a593Smuzhiyun */
4578*4882a593Smuzhiyun static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
4579*4882a593Smuzhiyun struct fiemap_cache *cache,
4580*4882a593Smuzhiyun u64 offset, u64 phys, u64 len, u32 flags)
4581*4882a593Smuzhiyun {
4582*4882a593Smuzhiyun int ret = 0;
4583*4882a593Smuzhiyun
4584*4882a593Smuzhiyun if (!cache->cached)
4585*4882a593Smuzhiyun goto assign;
4586*4882a593Smuzhiyun
4587*4882a593Smuzhiyun /*
4588*4882a593Smuzhiyun 	 * Sanity check: extent_fiemap() should have ensured that the new
4589*4882a593Smuzhiyun 	 * fiemap extent won't overlap with the cached one.
4590*4882a593Smuzhiyun 	 * Not recoverable.
4591*4882a593Smuzhiyun 	 *
4592*4882a593Smuzhiyun 	 * NOTE: Physical addresses can overlap, due to compression.
4593*4882a593Smuzhiyun */
4594*4882a593Smuzhiyun if (cache->offset + cache->len > offset) {
4595*4882a593Smuzhiyun WARN_ON(1);
4596*4882a593Smuzhiyun return -EINVAL;
4597*4882a593Smuzhiyun }
4598*4882a593Smuzhiyun
4599*4882a593Smuzhiyun /*
4600*4882a593Smuzhiyun 	 * Only merge fiemap extents if
4601*4882a593Smuzhiyun 	 * 1) Their logical addresses are contiguous
4602*4882a593Smuzhiyun 	 *
4603*4882a593Smuzhiyun 	 * 2) Their physical addresses are contiguous
4604*4882a593Smuzhiyun 	 *    So truly compressed (physical size smaller than logical size)
4605*4882a593Smuzhiyun 	 *    extents won't get merged with each other
4606*4882a593Smuzhiyun 	 *
4607*4882a593Smuzhiyun 	 * 3) They share the same flags except FIEMAP_EXTENT_LAST
4608*4882a593Smuzhiyun 	 *    So a regular extent won't get merged with a prealloc extent
4609*4882a593Smuzhiyun */
4610*4882a593Smuzhiyun if (cache->offset + cache->len == offset &&
4611*4882a593Smuzhiyun cache->phys + cache->len == phys &&
4612*4882a593Smuzhiyun (cache->flags & ~FIEMAP_EXTENT_LAST) ==
4613*4882a593Smuzhiyun (flags & ~FIEMAP_EXTENT_LAST)) {
4614*4882a593Smuzhiyun cache->len += len;
4615*4882a593Smuzhiyun cache->flags |= flags;
4616*4882a593Smuzhiyun goto try_submit_last;
4617*4882a593Smuzhiyun }
4618*4882a593Smuzhiyun
4619*4882a593Smuzhiyun /* Not mergeable, need to submit cached one */
4620*4882a593Smuzhiyun ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4621*4882a593Smuzhiyun cache->len, cache->flags);
4622*4882a593Smuzhiyun cache->cached = false;
4623*4882a593Smuzhiyun if (ret)
4624*4882a593Smuzhiyun return ret;
4625*4882a593Smuzhiyun assign:
4626*4882a593Smuzhiyun cache->cached = true;
4627*4882a593Smuzhiyun cache->offset = offset;
4628*4882a593Smuzhiyun cache->phys = phys;
4629*4882a593Smuzhiyun cache->len = len;
4630*4882a593Smuzhiyun cache->flags = flags;
4631*4882a593Smuzhiyun try_submit_last:
4632*4882a593Smuzhiyun if (cache->flags & FIEMAP_EXTENT_LAST) {
4633*4882a593Smuzhiyun ret = fiemap_fill_next_extent(fieinfo, cache->offset,
4634*4882a593Smuzhiyun cache->phys, cache->len, cache->flags);
4635*4882a593Smuzhiyun cache->cached = false;
4636*4882a593Smuzhiyun }
4637*4882a593Smuzhiyun return ret;
4638*4882a593Smuzhiyun }
4639*4882a593Smuzhiyun
4640*4882a593Smuzhiyun /*
4641*4882a593Smuzhiyun * Emit last fiemap cache
4642*4882a593Smuzhiyun *
4643*4882a593Smuzhiyun * The last fiemap cache may still be cached in the following case:
4644*4882a593Smuzhiyun * 0 4k 8k
4645*4882a593Smuzhiyun * |<- Fiemap range ->|
4646*4882a593Smuzhiyun * |<------------ First extent ----------->|
4647*4882a593Smuzhiyun *
4648*4882a593Smuzhiyun * In this case, the first extent range will be cached but not emitted.
4649*4882a593Smuzhiyun * So we must emit it before ending extent_fiemap().
4650*4882a593Smuzhiyun */
4651*4882a593Smuzhiyun static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
4652*4882a593Smuzhiyun struct fiemap_cache *cache)
4653*4882a593Smuzhiyun {
4654*4882a593Smuzhiyun int ret;
4655*4882a593Smuzhiyun
4656*4882a593Smuzhiyun if (!cache->cached)
4657*4882a593Smuzhiyun return 0;
4658*4882a593Smuzhiyun
4659*4882a593Smuzhiyun ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4660*4882a593Smuzhiyun cache->len, cache->flags);
4661*4882a593Smuzhiyun cache->cached = false;
4662*4882a593Smuzhiyun if (ret > 0)
4663*4882a593Smuzhiyun ret = 0;
4664*4882a593Smuzhiyun return ret;
4665*4882a593Smuzhiyun }
4666*4882a593Smuzhiyun
4667*4882a593Smuzhiyun int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
4668*4882a593Smuzhiyun u64 start, u64 len)
4669*4882a593Smuzhiyun {
4670*4882a593Smuzhiyun int ret = 0;
4671*4882a593Smuzhiyun u64 off;
4672*4882a593Smuzhiyun u64 max = start + len;
4673*4882a593Smuzhiyun u32 flags = 0;
4674*4882a593Smuzhiyun u32 found_type;
4675*4882a593Smuzhiyun u64 last;
4676*4882a593Smuzhiyun u64 last_for_get_extent = 0;
4677*4882a593Smuzhiyun u64 disko = 0;
4678*4882a593Smuzhiyun u64 isize = i_size_read(&inode->vfs_inode);
4679*4882a593Smuzhiyun struct btrfs_key found_key;
4680*4882a593Smuzhiyun struct extent_map *em = NULL;
4681*4882a593Smuzhiyun struct extent_state *cached_state = NULL;
4682*4882a593Smuzhiyun struct btrfs_path *path;
4683*4882a593Smuzhiyun struct btrfs_root *root = inode->root;
4684*4882a593Smuzhiyun struct fiemap_cache cache = { 0 };
4685*4882a593Smuzhiyun struct ulist *roots;
4686*4882a593Smuzhiyun struct ulist *tmp_ulist;
4687*4882a593Smuzhiyun int end = 0;
4688*4882a593Smuzhiyun u64 em_start = 0;
4689*4882a593Smuzhiyun u64 em_len = 0;
4690*4882a593Smuzhiyun u64 em_end = 0;
4691*4882a593Smuzhiyun
4692*4882a593Smuzhiyun if (len == 0)
4693*4882a593Smuzhiyun return -EINVAL;
4694*4882a593Smuzhiyun
4695*4882a593Smuzhiyun path = btrfs_alloc_path();
4696*4882a593Smuzhiyun if (!path)
4697*4882a593Smuzhiyun return -ENOMEM;
4698*4882a593Smuzhiyun path->leave_spinning = 1;
4699*4882a593Smuzhiyun
4700*4882a593Smuzhiyun roots = ulist_alloc(GFP_KERNEL);
4701*4882a593Smuzhiyun tmp_ulist = ulist_alloc(GFP_KERNEL);
4702*4882a593Smuzhiyun if (!roots || !tmp_ulist) {
4703*4882a593Smuzhiyun ret = -ENOMEM;
4704*4882a593Smuzhiyun goto out_free_ulist;
4705*4882a593Smuzhiyun }
4706*4882a593Smuzhiyun
4707*4882a593Smuzhiyun /*
4708*4882a593Smuzhiyun 	 * We can't initialize 'off' to 'start', as that could miss extents due
4709*4882a593Smuzhiyun 	 * to extent item merging.
4710*4882a593Smuzhiyun */
4711*4882a593Smuzhiyun off = 0;
4712*4882a593Smuzhiyun start = round_down(start, btrfs_inode_sectorsize(inode));
4713*4882a593Smuzhiyun len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
4714*4882a593Smuzhiyun
4715*4882a593Smuzhiyun /*
4716*4882a593Smuzhiyun 	 * Look up the last file extent. We're not using i_size here
4717*4882a593Smuzhiyun 	 * because there might be preallocation past i_size.
4718*4882a593Smuzhiyun */
4719*4882a593Smuzhiyun ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1,
4720*4882a593Smuzhiyun 0);
4721*4882a593Smuzhiyun if (ret < 0) {
4722*4882a593Smuzhiyun goto out_free_ulist;
4723*4882a593Smuzhiyun } else {
4724*4882a593Smuzhiyun WARN_ON(!ret);
4725*4882a593Smuzhiyun if (ret == 1)
4726*4882a593Smuzhiyun ret = 0;
4727*4882a593Smuzhiyun }
4728*4882a593Smuzhiyun
4729*4882a593Smuzhiyun path->slots[0]--;
4730*4882a593Smuzhiyun btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
4731*4882a593Smuzhiyun found_type = found_key.type;
4732*4882a593Smuzhiyun
4733*4882a593Smuzhiyun /* No extents, but there might be delalloc bits */
4734*4882a593Smuzhiyun if (found_key.objectid != btrfs_ino(inode) ||
4735*4882a593Smuzhiyun found_type != BTRFS_EXTENT_DATA_KEY) {
4736*4882a593Smuzhiyun /* have to trust i_size as the end */
4737*4882a593Smuzhiyun last = (u64)-1;
4738*4882a593Smuzhiyun last_for_get_extent = isize;
4739*4882a593Smuzhiyun } else {
4740*4882a593Smuzhiyun /*
4741*4882a593Smuzhiyun 		 * Remember the start of the last extent. There are a
4742*4882a593Smuzhiyun 		 * bunch of different factors that go into the length of the
4743*4882a593Smuzhiyun 		 * extent, so it's much less complex to remember where it started.
4744*4882a593Smuzhiyun */
4745*4882a593Smuzhiyun last = found_key.offset;
4746*4882a593Smuzhiyun last_for_get_extent = last + 1;
4747*4882a593Smuzhiyun }
4748*4882a593Smuzhiyun btrfs_release_path(path);
4749*4882a593Smuzhiyun
4750*4882a593Smuzhiyun /*
4751*4882a593Smuzhiyun 	 * We might have some extents allocated but more delalloc past those
4752*4882a593Smuzhiyun 	 * extents, so we trust isize unless the start of the last extent is
4753*4882a593Smuzhiyun 	 * beyond isize.
4754*4882a593Smuzhiyun */
4755*4882a593Smuzhiyun if (last < isize) {
4756*4882a593Smuzhiyun last = (u64)-1;
4757*4882a593Smuzhiyun last_for_get_extent = isize;
4758*4882a593Smuzhiyun }
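
	/*
	 * Worked example with hypothetical numbers: for a file with
	 * i_size == 1M whose last file extent item starts at 4M
	 * (preallocation past EOF), 'last' stays at 4M and extents are
	 * mapped up to last_for_get_extent == 4M + 1.  If no extent item
	 * starts at or past i_size, we fall back to i_size instead.
	 */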
4759*4882a593Smuzhiyun
4760*4882a593Smuzhiyun lock_extent_bits(&inode->io_tree, start, start + len - 1,
4761*4882a593Smuzhiyun &cached_state);
4762*4882a593Smuzhiyun
4763*4882a593Smuzhiyun em = get_extent_skip_holes(inode, start, last_for_get_extent);
4764*4882a593Smuzhiyun if (!em)
4765*4882a593Smuzhiyun goto out;
4766*4882a593Smuzhiyun if (IS_ERR(em)) {
4767*4882a593Smuzhiyun ret = PTR_ERR(em);
4768*4882a593Smuzhiyun goto out;
4769*4882a593Smuzhiyun }
4770*4882a593Smuzhiyun
4771*4882a593Smuzhiyun while (!end) {
4772*4882a593Smuzhiyun u64 offset_in_extent = 0;
4773*4882a593Smuzhiyun
4774*4882a593Smuzhiyun /* break if the extent we found is outside the range */
4775*4882a593Smuzhiyun if (em->start >= max || extent_map_end(em) < off)
4776*4882a593Smuzhiyun break;
4777*4882a593Smuzhiyun
4778*4882a593Smuzhiyun /*
4779*4882a593Smuzhiyun * get_extent may return an extent that starts before our
4780*4882a593Smuzhiyun * requested range. We have to make sure the ranges
4781*4882a593Smuzhiyun * we return to fiemap always move forward and don't
4782*4882a593Smuzhiyun * overlap, so adjust the offsets here
4783*4882a593Smuzhiyun */
4784*4882a593Smuzhiyun em_start = max(em->start, off);
4785*4882a593Smuzhiyun
4786*4882a593Smuzhiyun /*
4787*4882a593Smuzhiyun 		 * Record the offset from the start of the extent
4788*4882a593Smuzhiyun 		 * for adjusting the disk offset below. Only do this if the
4789*4882a593Smuzhiyun 		 * extent isn't compressed, since our in-memory offset may be past
4790*4882a593Smuzhiyun 		 * what we have actually allocated on disk.
4791*4882a593Smuzhiyun */
4792*4882a593Smuzhiyun if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4793*4882a593Smuzhiyun offset_in_extent = em_start - em->start;
4794*4882a593Smuzhiyun em_end = extent_map_end(em);
4795*4882a593Smuzhiyun em_len = em_end - em_start;
4796*4882a593Smuzhiyun flags = 0;
4797*4882a593Smuzhiyun if (em->block_start < EXTENT_MAP_LAST_BYTE)
4798*4882a593Smuzhiyun disko = em->block_start + offset_in_extent;
4799*4882a593Smuzhiyun else
4800*4882a593Smuzhiyun disko = 0;
4801*4882a593Smuzhiyun
4802*4882a593Smuzhiyun /*
4803*4882a593Smuzhiyun 		 * Bump 'off' for our next call to get_extent.
4804*4882a593Smuzhiyun */
4805*4882a593Smuzhiyun off = extent_map_end(em);
4806*4882a593Smuzhiyun if (off >= max)
4807*4882a593Smuzhiyun end = 1;
4808*4882a593Smuzhiyun
4809*4882a593Smuzhiyun if (em->block_start == EXTENT_MAP_LAST_BYTE) {
4810*4882a593Smuzhiyun end = 1;
4811*4882a593Smuzhiyun flags |= FIEMAP_EXTENT_LAST;
4812*4882a593Smuzhiyun } else if (em->block_start == EXTENT_MAP_INLINE) {
4813*4882a593Smuzhiyun flags |= (FIEMAP_EXTENT_DATA_INLINE |
4814*4882a593Smuzhiyun FIEMAP_EXTENT_NOT_ALIGNED);
4815*4882a593Smuzhiyun } else if (em->block_start == EXTENT_MAP_DELALLOC) {
4816*4882a593Smuzhiyun flags |= (FIEMAP_EXTENT_DELALLOC |
4817*4882a593Smuzhiyun FIEMAP_EXTENT_UNKNOWN);
4818*4882a593Smuzhiyun } else if (fieinfo->fi_extents_max) {
4819*4882a593Smuzhiyun u64 bytenr = em->block_start -
4820*4882a593Smuzhiyun (em->start - em->orig_start);
4821*4882a593Smuzhiyun
4822*4882a593Smuzhiyun /*
4823*4882a593Smuzhiyun * As btrfs supports shared space, this information
4824*4882a593Smuzhiyun * can be exported to userspace tools via
4825*4882a593Smuzhiyun * flag FIEMAP_EXTENT_SHARED. If fi_extents_max == 0
4826*4882a593Smuzhiyun * then we're just getting a count and we can skip the
4827*4882a593Smuzhiyun * lookup stuff.
4828*4882a593Smuzhiyun */
4829*4882a593Smuzhiyun ret = btrfs_check_shared(root, btrfs_ino(inode),
4830*4882a593Smuzhiyun bytenr, roots, tmp_ulist);
4831*4882a593Smuzhiyun if (ret < 0)
4832*4882a593Smuzhiyun goto out_free;
4833*4882a593Smuzhiyun if (ret)
4834*4882a593Smuzhiyun flags |= FIEMAP_EXTENT_SHARED;
4835*4882a593Smuzhiyun ret = 0;
4836*4882a593Smuzhiyun }
4837*4882a593Smuzhiyun if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
4838*4882a593Smuzhiyun flags |= FIEMAP_EXTENT_ENCODED;
4839*4882a593Smuzhiyun if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4840*4882a593Smuzhiyun flags |= FIEMAP_EXTENT_UNWRITTEN;
4841*4882a593Smuzhiyun
4842*4882a593Smuzhiyun free_extent_map(em);
4843*4882a593Smuzhiyun em = NULL;
4844*4882a593Smuzhiyun if ((em_start >= last) || em_len == (u64)-1 ||
4845*4882a593Smuzhiyun (last == (u64)-1 && isize <= em_end)) {
4846*4882a593Smuzhiyun flags |= FIEMAP_EXTENT_LAST;
4847*4882a593Smuzhiyun end = 1;
4848*4882a593Smuzhiyun }
4849*4882a593Smuzhiyun
4850*4882a593Smuzhiyun /* now scan forward to see if this is really the last extent. */
4851*4882a593Smuzhiyun em = get_extent_skip_holes(inode, off, last_for_get_extent);
4852*4882a593Smuzhiyun if (IS_ERR(em)) {
4853*4882a593Smuzhiyun ret = PTR_ERR(em);
4854*4882a593Smuzhiyun goto out;
4855*4882a593Smuzhiyun }
4856*4882a593Smuzhiyun if (!em) {
4857*4882a593Smuzhiyun flags |= FIEMAP_EXTENT_LAST;
4858*4882a593Smuzhiyun end = 1;
4859*4882a593Smuzhiyun }
4860*4882a593Smuzhiyun ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
4861*4882a593Smuzhiyun em_len, flags);
4862*4882a593Smuzhiyun if (ret) {
4863*4882a593Smuzhiyun if (ret == 1)
4864*4882a593Smuzhiyun ret = 0;
4865*4882a593Smuzhiyun goto out_free;
4866*4882a593Smuzhiyun }
4867*4882a593Smuzhiyun }
4868*4882a593Smuzhiyun out_free:
4869*4882a593Smuzhiyun if (!ret)
4870*4882a593Smuzhiyun ret = emit_last_fiemap_cache(fieinfo, &cache);
4871*4882a593Smuzhiyun free_extent_map(em);
4872*4882a593Smuzhiyun out:
4873*4882a593Smuzhiyun unlock_extent_cached(&inode->io_tree, start, start + len - 1,
4874*4882a593Smuzhiyun &cached_state);
4875*4882a593Smuzhiyun
4876*4882a593Smuzhiyun out_free_ulist:
4877*4882a593Smuzhiyun btrfs_free_path(path);
4878*4882a593Smuzhiyun ulist_free(roots);
4879*4882a593Smuzhiyun ulist_free(tmp_ulist);
4880*4882a593Smuzhiyun return ret;
4881*4882a593Smuzhiyun }
4882*4882a593Smuzhiyun
4883*4882a593Smuzhiyun static void __free_extent_buffer(struct extent_buffer *eb)
4884*4882a593Smuzhiyun {
4885*4882a593Smuzhiyun kmem_cache_free(extent_buffer_cache, eb);
4886*4882a593Smuzhiyun }
4887*4882a593Smuzhiyun
4888*4882a593Smuzhiyun int extent_buffer_under_io(const struct extent_buffer *eb)
4889*4882a593Smuzhiyun {
4890*4882a593Smuzhiyun return (atomic_read(&eb->io_pages) ||
4891*4882a593Smuzhiyun test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
4892*4882a593Smuzhiyun test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4893*4882a593Smuzhiyun }
4894*4882a593Smuzhiyun
4895*4882a593Smuzhiyun /*
4896*4882a593Smuzhiyun * Release all pages attached to the extent buffer.
4897*4882a593Smuzhiyun */
4898*4882a593Smuzhiyun static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
4899*4882a593Smuzhiyun {
4900*4882a593Smuzhiyun int i;
4901*4882a593Smuzhiyun int num_pages;
4902*4882a593Smuzhiyun int mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
4903*4882a593Smuzhiyun
4904*4882a593Smuzhiyun BUG_ON(extent_buffer_under_io(eb));
4905*4882a593Smuzhiyun
4906*4882a593Smuzhiyun num_pages = num_extent_pages(eb);
4907*4882a593Smuzhiyun for (i = 0; i < num_pages; i++) {
4908*4882a593Smuzhiyun struct page *page = eb->pages[i];
4909*4882a593Smuzhiyun
4910*4882a593Smuzhiyun if (!page)
4911*4882a593Smuzhiyun continue;
4912*4882a593Smuzhiyun if (mapped)
4913*4882a593Smuzhiyun spin_lock(&page->mapping->private_lock);
4914*4882a593Smuzhiyun /*
4915*4882a593Smuzhiyun * We do this since we'll remove the pages after we've
4916*4882a593Smuzhiyun * removed the eb from the radix tree, so we could race
4917*4882a593Smuzhiyun * and have this page now attached to the new eb. So
4918*4882a593Smuzhiyun * only clear page_private if it's still connected to
4919*4882a593Smuzhiyun * this eb.
4920*4882a593Smuzhiyun */
4921*4882a593Smuzhiyun if (PagePrivate(page) &&
4922*4882a593Smuzhiyun page->private == (unsigned long)eb) {
4923*4882a593Smuzhiyun BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4924*4882a593Smuzhiyun BUG_ON(PageDirty(page));
4925*4882a593Smuzhiyun BUG_ON(PageWriteback(page));
4926*4882a593Smuzhiyun /*
4927*4882a593Smuzhiyun 			 * We need to make sure we haven't been attached
4928*4882a593Smuzhiyun 			 * to a new eb.
4929*4882a593Smuzhiyun */
4930*4882a593Smuzhiyun detach_page_private(page);
4931*4882a593Smuzhiyun }
4932*4882a593Smuzhiyun
4933*4882a593Smuzhiyun if (mapped)
4934*4882a593Smuzhiyun spin_unlock(&page->mapping->private_lock);
4935*4882a593Smuzhiyun
4936*4882a593Smuzhiyun /* One for when we allocated the page */
4937*4882a593Smuzhiyun put_page(page);
4938*4882a593Smuzhiyun }
4939*4882a593Smuzhiyun }
4940*4882a593Smuzhiyun
4941*4882a593Smuzhiyun /*
4942*4882a593Smuzhiyun * Helper for releasing the extent buffer.
4943*4882a593Smuzhiyun */
4944*4882a593Smuzhiyun static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
4945*4882a593Smuzhiyun {
4946*4882a593Smuzhiyun btrfs_release_extent_buffer_pages(eb);
4947*4882a593Smuzhiyun btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
4948*4882a593Smuzhiyun __free_extent_buffer(eb);
4949*4882a593Smuzhiyun }
4950*4882a593Smuzhiyun
4951*4882a593Smuzhiyun static struct extent_buffer *
4952*4882a593Smuzhiyun __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
4953*4882a593Smuzhiyun unsigned long len)
4954*4882a593Smuzhiyun {
4955*4882a593Smuzhiyun struct extent_buffer *eb = NULL;
4956*4882a593Smuzhiyun
4957*4882a593Smuzhiyun eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
4958*4882a593Smuzhiyun eb->start = start;
4959*4882a593Smuzhiyun eb->len = len;
4960*4882a593Smuzhiyun eb->fs_info = fs_info;
4961*4882a593Smuzhiyun eb->bflags = 0;
4962*4882a593Smuzhiyun rwlock_init(&eb->lock);
4963*4882a593Smuzhiyun atomic_set(&eb->blocking_readers, 0);
4964*4882a593Smuzhiyun eb->blocking_writers = 0;
4965*4882a593Smuzhiyun eb->lock_recursed = false;
4966*4882a593Smuzhiyun init_waitqueue_head(&eb->write_lock_wq);
4967*4882a593Smuzhiyun init_waitqueue_head(&eb->read_lock_wq);
4968*4882a593Smuzhiyun
4969*4882a593Smuzhiyun btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list,
4970*4882a593Smuzhiyun &fs_info->allocated_ebs);
4971*4882a593Smuzhiyun
4972*4882a593Smuzhiyun spin_lock_init(&eb->refs_lock);
4973*4882a593Smuzhiyun atomic_set(&eb->refs, 1);
4974*4882a593Smuzhiyun atomic_set(&eb->io_pages, 0);
4975*4882a593Smuzhiyun
4976*4882a593Smuzhiyun /*
4977*4882a593Smuzhiyun * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
4978*4882a593Smuzhiyun */
4979*4882a593Smuzhiyun BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
4980*4882a593Smuzhiyun > MAX_INLINE_EXTENT_BUFFER_SIZE);
4981*4882a593Smuzhiyun BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
4982*4882a593Smuzhiyun
4983*4882a593Smuzhiyun #ifdef CONFIG_BTRFS_DEBUG
4984*4882a593Smuzhiyun eb->spinning_writers = 0;
4985*4882a593Smuzhiyun atomic_set(&eb->spinning_readers, 0);
4986*4882a593Smuzhiyun atomic_set(&eb->read_locks, 0);
4987*4882a593Smuzhiyun eb->write_locks = 0;
4988*4882a593Smuzhiyun #endif
4989*4882a593Smuzhiyun
4990*4882a593Smuzhiyun return eb;
4991*4882a593Smuzhiyun }
4992*4882a593Smuzhiyun
4993*4882a593Smuzhiyun struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
4994*4882a593Smuzhiyun {
4995*4882a593Smuzhiyun int i;
4996*4882a593Smuzhiyun struct page *p;
4997*4882a593Smuzhiyun struct extent_buffer *new;
4998*4882a593Smuzhiyun int num_pages = num_extent_pages(src);
4999*4882a593Smuzhiyun
5000*4882a593Smuzhiyun new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
5001*4882a593Smuzhiyun if (new == NULL)
5002*4882a593Smuzhiyun return NULL;
5003*4882a593Smuzhiyun
5004*4882a593Smuzhiyun for (i = 0; i < num_pages; i++) {
5005*4882a593Smuzhiyun p = alloc_page(GFP_NOFS);
5006*4882a593Smuzhiyun if (!p) {
5007*4882a593Smuzhiyun btrfs_release_extent_buffer(new);
5008*4882a593Smuzhiyun return NULL;
5009*4882a593Smuzhiyun }
5010*4882a593Smuzhiyun attach_extent_buffer_page(new, p);
5011*4882a593Smuzhiyun WARN_ON(PageDirty(p));
5012*4882a593Smuzhiyun SetPageUptodate(p);
5013*4882a593Smuzhiyun new->pages[i] = p;
5014*4882a593Smuzhiyun copy_page(page_address(p), page_address(src->pages[i]));
5015*4882a593Smuzhiyun }
5016*4882a593Smuzhiyun
5017*4882a593Smuzhiyun set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
5018*4882a593Smuzhiyun set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
5019*4882a593Smuzhiyun
5020*4882a593Smuzhiyun return new;
5021*4882a593Smuzhiyun }
5022*4882a593Smuzhiyun
5023*4882a593Smuzhiyun struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
5024*4882a593Smuzhiyun u64 start, unsigned long len)
5025*4882a593Smuzhiyun {
5026*4882a593Smuzhiyun struct extent_buffer *eb;
5027*4882a593Smuzhiyun int num_pages;
5028*4882a593Smuzhiyun int i;
5029*4882a593Smuzhiyun
5030*4882a593Smuzhiyun eb = __alloc_extent_buffer(fs_info, start, len);
5031*4882a593Smuzhiyun if (!eb)
5032*4882a593Smuzhiyun return NULL;
5033*4882a593Smuzhiyun
5034*4882a593Smuzhiyun num_pages = num_extent_pages(eb);
5035*4882a593Smuzhiyun for (i = 0; i < num_pages; i++) {
5036*4882a593Smuzhiyun eb->pages[i] = alloc_page(GFP_NOFS);
5037*4882a593Smuzhiyun if (!eb->pages[i])
5038*4882a593Smuzhiyun goto err;
5039*4882a593Smuzhiyun }
5040*4882a593Smuzhiyun set_extent_buffer_uptodate(eb);
5041*4882a593Smuzhiyun btrfs_set_header_nritems(eb, 0);
5042*4882a593Smuzhiyun set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
5043*4882a593Smuzhiyun
5044*4882a593Smuzhiyun return eb;
5045*4882a593Smuzhiyun err:
5046*4882a593Smuzhiyun for (; i > 0; i--)
5047*4882a593Smuzhiyun __free_page(eb->pages[i - 1]);
5048*4882a593Smuzhiyun __free_extent_buffer(eb);
5049*4882a593Smuzhiyun return NULL;
5050*4882a593Smuzhiyun }
5051*4882a593Smuzhiyun
5052*4882a593Smuzhiyun struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
5053*4882a593Smuzhiyun u64 start)
5054*4882a593Smuzhiyun {
5055*4882a593Smuzhiyun return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
5056*4882a593Smuzhiyun }
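
/*
 * Note on the two helpers above: dummy extent buffers carry
 * EXTENT_BUFFER_UNMAPPED, their pages come from alloc_page() rather than
 * the btree inode's page cache, and nothing here inserts them into
 * fs_info->buffer_radix, so they are only reachable through the pointer
 * returned to the caller.
 */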
5057*4882a593Smuzhiyun
5058*4882a593Smuzhiyun static void check_buffer_tree_ref(struct extent_buffer *eb)
5059*4882a593Smuzhiyun {
5060*4882a593Smuzhiyun int refs;
5061*4882a593Smuzhiyun /*
5062*4882a593Smuzhiyun * The TREE_REF bit is first set when the extent_buffer is added
5063*4882a593Smuzhiyun 	 * to the radix tree. It is also set again, if it was cleared, when a new
5064*4882a593Smuzhiyun 	 * reference is created by find_extent_buffer.
5065*4882a593Smuzhiyun *
5066*4882a593Smuzhiyun * It is only cleared in two cases: freeing the last non-tree
5067*4882a593Smuzhiyun * reference to the extent_buffer when its STALE bit is set or
5068*4882a593Smuzhiyun * calling releasepage when the tree reference is the only reference.
5069*4882a593Smuzhiyun *
5070*4882a593Smuzhiyun * In both cases, care is taken to ensure that the extent_buffer's
5071*4882a593Smuzhiyun * pages are not under io. However, releasepage can be concurrently
5072*4882a593Smuzhiyun * called with creating new references, which is prone to race
5073*4882a593Smuzhiyun * conditions between the calls to check_buffer_tree_ref in those
5074*4882a593Smuzhiyun * codepaths and clearing TREE_REF in try_release_extent_buffer.
5075*4882a593Smuzhiyun *
5076*4882a593Smuzhiyun * The actual lifetime of the extent_buffer in the radix tree is
5077*4882a593Smuzhiyun * adequately protected by the refcount, but the TREE_REF bit and
5078*4882a593Smuzhiyun * its corresponding reference are not. To protect against this
5079*4882a593Smuzhiyun * class of races, we call check_buffer_tree_ref from the codepaths
5080*4882a593Smuzhiyun * which trigger io after they set eb->io_pages. Note that once io is
5081*4882a593Smuzhiyun * initiated, TREE_REF can no longer be cleared, so that is the
5082*4882a593Smuzhiyun * moment at which any such race is best fixed.
5083*4882a593Smuzhiyun */
5084*4882a593Smuzhiyun refs = atomic_read(&eb->refs);
5085*4882a593Smuzhiyun if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5086*4882a593Smuzhiyun return;
5087*4882a593Smuzhiyun
5088*4882a593Smuzhiyun spin_lock(&eb->refs_lock);
5089*4882a593Smuzhiyun if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5090*4882a593Smuzhiyun atomic_inc(&eb->refs);
5091*4882a593Smuzhiyun spin_unlock(&eb->refs_lock);
5092*4882a593Smuzhiyun }
5093*4882a593Smuzhiyun
5094*4882a593Smuzhiyun static void mark_extent_buffer_accessed(struct extent_buffer *eb,
5095*4882a593Smuzhiyun struct page *accessed)
5096*4882a593Smuzhiyun {
5097*4882a593Smuzhiyun int num_pages, i;
5098*4882a593Smuzhiyun
5099*4882a593Smuzhiyun check_buffer_tree_ref(eb);
5100*4882a593Smuzhiyun
5101*4882a593Smuzhiyun num_pages = num_extent_pages(eb);
5102*4882a593Smuzhiyun for (i = 0; i < num_pages; i++) {
5103*4882a593Smuzhiyun struct page *p = eb->pages[i];
5104*4882a593Smuzhiyun
5105*4882a593Smuzhiyun if (p != accessed)
5106*4882a593Smuzhiyun mark_page_accessed(p);
5107*4882a593Smuzhiyun }
5108*4882a593Smuzhiyun }
5109*4882a593Smuzhiyun
5110*4882a593Smuzhiyun struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
5111*4882a593Smuzhiyun u64 start)
5112*4882a593Smuzhiyun {
5113*4882a593Smuzhiyun struct extent_buffer *eb;
5114*4882a593Smuzhiyun
5115*4882a593Smuzhiyun rcu_read_lock();
5116*4882a593Smuzhiyun eb = radix_tree_lookup(&fs_info->buffer_radix,
5117*4882a593Smuzhiyun start >> PAGE_SHIFT);
5118*4882a593Smuzhiyun if (eb && atomic_inc_not_zero(&eb->refs)) {
5119*4882a593Smuzhiyun rcu_read_unlock();
5120*4882a593Smuzhiyun /*
5121*4882a593Smuzhiyun * Lock our eb's refs_lock to avoid races with
5122*4882a593Smuzhiyun * free_extent_buffer. When we get our eb it might be flagged
5123*4882a593Smuzhiyun * with EXTENT_BUFFER_STALE and another task running
5124*4882a593Smuzhiyun * free_extent_buffer might have seen that flag set,
5125*4882a593Smuzhiyun * eb->refs == 2, that the buffer isn't under IO (dirty and
5126*4882a593Smuzhiyun * writeback flags not set) and it's still in the tree (flag
5127*4882a593Smuzhiyun * EXTENT_BUFFER_TREE_REF set), therefore being in the process
5128*4882a593Smuzhiyun * of decrementing the extent buffer's reference count twice.
5129*4882a593Smuzhiyun * So here we could race and increment the eb's reference count,
5130*4882a593Smuzhiyun * clear its stale flag, mark it as dirty and drop our reference
5131*4882a593Smuzhiyun * before the other task finishes executing free_extent_buffer,
5132*4882a593Smuzhiyun * which would later result in an attempt to free an extent
5133*4882a593Smuzhiyun * buffer that is dirty.
5134*4882a593Smuzhiyun */
5135*4882a593Smuzhiyun if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
5136*4882a593Smuzhiyun spin_lock(&eb->refs_lock);
5137*4882a593Smuzhiyun spin_unlock(&eb->refs_lock);
5138*4882a593Smuzhiyun }
5139*4882a593Smuzhiyun mark_extent_buffer_accessed(eb, NULL);
5140*4882a593Smuzhiyun return eb;
5141*4882a593Smuzhiyun }
5142*4882a593Smuzhiyun rcu_read_unlock();
5143*4882a593Smuzhiyun
5144*4882a593Smuzhiyun return NULL;
5145*4882a593Smuzhiyun }
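
/*
 * Usage sketch (an assumed caller pattern, not taken from this file): a
 * successful lookup returns the eb with an extra reference taken via
 * atomic_inc_not_zero(), which the caller drops with free_extent_buffer():
 *
 *	eb = find_extent_buffer(fs_info, bytenr);
 *	if (eb) {
 *		// ... read or modify the buffer ...
 *		free_extent_buffer(eb);
 *	}
 */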
5146*4882a593Smuzhiyun
5147*4882a593Smuzhiyun #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5148*4882a593Smuzhiyun struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
5149*4882a593Smuzhiyun u64 start)
5150*4882a593Smuzhiyun {
5151*4882a593Smuzhiyun struct extent_buffer *eb, *exists = NULL;
5152*4882a593Smuzhiyun int ret;
5153*4882a593Smuzhiyun
5154*4882a593Smuzhiyun eb = find_extent_buffer(fs_info, start);
5155*4882a593Smuzhiyun if (eb)
5156*4882a593Smuzhiyun return eb;
5157*4882a593Smuzhiyun eb = alloc_dummy_extent_buffer(fs_info, start);
5158*4882a593Smuzhiyun if (!eb)
5159*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
5160*4882a593Smuzhiyun eb->fs_info = fs_info;
5161*4882a593Smuzhiyun again:
5162*4882a593Smuzhiyun ret = radix_tree_preload(GFP_NOFS);
5163*4882a593Smuzhiyun if (ret) {
5164*4882a593Smuzhiyun exists = ERR_PTR(ret);
5165*4882a593Smuzhiyun goto free_eb;
5166*4882a593Smuzhiyun }
5167*4882a593Smuzhiyun spin_lock(&fs_info->buffer_lock);
5168*4882a593Smuzhiyun ret = radix_tree_insert(&fs_info->buffer_radix,
5169*4882a593Smuzhiyun start >> PAGE_SHIFT, eb);
5170*4882a593Smuzhiyun spin_unlock(&fs_info->buffer_lock);
5171*4882a593Smuzhiyun radix_tree_preload_end();
5172*4882a593Smuzhiyun if (ret == -EEXIST) {
5173*4882a593Smuzhiyun exists = find_extent_buffer(fs_info, start);
5174*4882a593Smuzhiyun if (exists)
5175*4882a593Smuzhiyun goto free_eb;
5176*4882a593Smuzhiyun else
5177*4882a593Smuzhiyun goto again;
5178*4882a593Smuzhiyun }
5179*4882a593Smuzhiyun check_buffer_tree_ref(eb);
5180*4882a593Smuzhiyun set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
5181*4882a593Smuzhiyun
5182*4882a593Smuzhiyun return eb;
5183*4882a593Smuzhiyun free_eb:
5184*4882a593Smuzhiyun btrfs_release_extent_buffer(eb);
5185*4882a593Smuzhiyun return exists;
5186*4882a593Smuzhiyun }
5187*4882a593Smuzhiyun #endif
5188*4882a593Smuzhiyun
5189*4882a593Smuzhiyun struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
5190*4882a593Smuzhiyun u64 start)
5191*4882a593Smuzhiyun {
5192*4882a593Smuzhiyun unsigned long len = fs_info->nodesize;
5193*4882a593Smuzhiyun int num_pages;
5194*4882a593Smuzhiyun int i;
5195*4882a593Smuzhiyun unsigned long index = start >> PAGE_SHIFT;
5196*4882a593Smuzhiyun struct extent_buffer *eb;
5197*4882a593Smuzhiyun struct extent_buffer *exists = NULL;
5198*4882a593Smuzhiyun struct page *p;
5199*4882a593Smuzhiyun struct address_space *mapping = fs_info->btree_inode->i_mapping;
5200*4882a593Smuzhiyun int uptodate = 1;
5201*4882a593Smuzhiyun int ret;
5202*4882a593Smuzhiyun
5203*4882a593Smuzhiyun if (!IS_ALIGNED(start, fs_info->sectorsize)) {
5204*4882a593Smuzhiyun btrfs_err(fs_info, "bad tree block start %llu", start);
5205*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
5206*4882a593Smuzhiyun }
5207*4882a593Smuzhiyun
5208*4882a593Smuzhiyun eb = find_extent_buffer(fs_info, start);
5209*4882a593Smuzhiyun if (eb)
5210*4882a593Smuzhiyun return eb;
5211*4882a593Smuzhiyun
5212*4882a593Smuzhiyun eb = __alloc_extent_buffer(fs_info, start, len);
5213*4882a593Smuzhiyun if (!eb)
5214*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
5215*4882a593Smuzhiyun
5216*4882a593Smuzhiyun num_pages = num_extent_pages(eb);
5217*4882a593Smuzhiyun for (i = 0; i < num_pages; i++, index++) {
5218*4882a593Smuzhiyun p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
5219*4882a593Smuzhiyun if (!p) {
5220*4882a593Smuzhiyun exists = ERR_PTR(-ENOMEM);
5221*4882a593Smuzhiyun goto free_eb;
5222*4882a593Smuzhiyun }
5223*4882a593Smuzhiyun
5224*4882a593Smuzhiyun spin_lock(&mapping->private_lock);
5225*4882a593Smuzhiyun if (PagePrivate(p)) {
5226*4882a593Smuzhiyun /*
5227*4882a593Smuzhiyun 			 * We could have already allocated an eb for this page
5228*4882a593Smuzhiyun 			 * and attached one, so let's see if we can get a ref on
5229*4882a593Smuzhiyun 			 * the existing eb. If we can, we know it's good and
5230*4882a593Smuzhiyun 			 * we can just return that one; otherwise we know we can
5231*4882a593Smuzhiyun 			 * safely overwrite page->private.
5232*4882a593Smuzhiyun */
5233*4882a593Smuzhiyun exists = (struct extent_buffer *)p->private;
5234*4882a593Smuzhiyun if (atomic_inc_not_zero(&exists->refs)) {
5235*4882a593Smuzhiyun spin_unlock(&mapping->private_lock);
5236*4882a593Smuzhiyun unlock_page(p);
5237*4882a593Smuzhiyun put_page(p);
5238*4882a593Smuzhiyun mark_extent_buffer_accessed(exists, p);
5239*4882a593Smuzhiyun goto free_eb;
5240*4882a593Smuzhiyun }
5241*4882a593Smuzhiyun exists = NULL;
5242*4882a593Smuzhiyun
5243*4882a593Smuzhiyun /*
5244*4882a593Smuzhiyun 			 * Do this so attach doesn't complain, and drop the
5245*4882a593Smuzhiyun 			 * ref that the old owner had.
5246*4882a593Smuzhiyun */
5247*4882a593Smuzhiyun ClearPagePrivate(p);
5248*4882a593Smuzhiyun WARN_ON(PageDirty(p));
5249*4882a593Smuzhiyun put_page(p);
5250*4882a593Smuzhiyun }
5251*4882a593Smuzhiyun attach_extent_buffer_page(eb, p);
5252*4882a593Smuzhiyun spin_unlock(&mapping->private_lock);
5253*4882a593Smuzhiyun WARN_ON(PageDirty(p));
5254*4882a593Smuzhiyun eb->pages[i] = p;
5255*4882a593Smuzhiyun if (!PageUptodate(p))
5256*4882a593Smuzhiyun uptodate = 0;
5257*4882a593Smuzhiyun
5258*4882a593Smuzhiyun /*
5259*4882a593Smuzhiyun 		 * We can't unlock the pages just yet since the extent buffer
5260*4882a593Smuzhiyun 		 * hasn't been properly inserted into the radix tree; that would
5261*4882a593Smuzhiyun 		 * open a race with btree_releasepage, which could free a page
5262*4882a593Smuzhiyun 		 * while we are still filling in all pages for the buffer and
5263*4882a593Smuzhiyun 		 * we could crash.
5264*4882a593Smuzhiyun */
5265*4882a593Smuzhiyun }
5266*4882a593Smuzhiyun if (uptodate)
5267*4882a593Smuzhiyun set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5268*4882a593Smuzhiyun again:
5269*4882a593Smuzhiyun ret = radix_tree_preload(GFP_NOFS);
5270*4882a593Smuzhiyun if (ret) {
5271*4882a593Smuzhiyun exists = ERR_PTR(ret);
5272*4882a593Smuzhiyun goto free_eb;
5273*4882a593Smuzhiyun }
5274*4882a593Smuzhiyun
5275*4882a593Smuzhiyun spin_lock(&fs_info->buffer_lock);
5276*4882a593Smuzhiyun ret = radix_tree_insert(&fs_info->buffer_radix,
5277*4882a593Smuzhiyun start >> PAGE_SHIFT, eb);
5278*4882a593Smuzhiyun spin_unlock(&fs_info->buffer_lock);
5279*4882a593Smuzhiyun radix_tree_preload_end();
5280*4882a593Smuzhiyun if (ret == -EEXIST) {
5281*4882a593Smuzhiyun exists = find_extent_buffer(fs_info, start);
5282*4882a593Smuzhiyun if (exists)
5283*4882a593Smuzhiyun goto free_eb;
5284*4882a593Smuzhiyun else
5285*4882a593Smuzhiyun goto again;
5286*4882a593Smuzhiyun }
5287*4882a593Smuzhiyun /* add one reference for the tree */
5288*4882a593Smuzhiyun check_buffer_tree_ref(eb);
5289*4882a593Smuzhiyun set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
5290*4882a593Smuzhiyun
5291*4882a593Smuzhiyun /*
5292*4882a593Smuzhiyun * Now it's safe to unlock the pages because any calls to
5293*4882a593Smuzhiyun * btree_releasepage will correctly detect that a page belongs to a
5294*4882a593Smuzhiyun * live buffer and won't free them prematurely.
5295*4882a593Smuzhiyun */
5296*4882a593Smuzhiyun for (i = 0; i < num_pages; i++)
5297*4882a593Smuzhiyun unlock_page(eb->pages[i]);
5298*4882a593Smuzhiyun return eb;
5299*4882a593Smuzhiyun
5300*4882a593Smuzhiyun free_eb:
5301*4882a593Smuzhiyun WARN_ON(!atomic_dec_and_test(&eb->refs));
5302*4882a593Smuzhiyun for (i = 0; i < num_pages; i++) {
5303*4882a593Smuzhiyun if (eb->pages[i])
5304*4882a593Smuzhiyun unlock_page(eb->pages[i]);
5305*4882a593Smuzhiyun }
5306*4882a593Smuzhiyun
5307*4882a593Smuzhiyun btrfs_release_extent_buffer(eb);
5308*4882a593Smuzhiyun return exists;
5309*4882a593Smuzhiyun }
5310*4882a593Smuzhiyun
5311*4882a593Smuzhiyun static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
5312*4882a593Smuzhiyun {
5313*4882a593Smuzhiyun struct extent_buffer *eb =
5314*4882a593Smuzhiyun container_of(head, struct extent_buffer, rcu_head);
5315*4882a593Smuzhiyun
5316*4882a593Smuzhiyun __free_extent_buffer(eb);
5317*4882a593Smuzhiyun }
5318*4882a593Smuzhiyun
5319*4882a593Smuzhiyun static int release_extent_buffer(struct extent_buffer *eb)
5320*4882a593Smuzhiyun __releases(&eb->refs_lock)
5321*4882a593Smuzhiyun {
5322*4882a593Smuzhiyun lockdep_assert_held(&eb->refs_lock);
5323*4882a593Smuzhiyun
5324*4882a593Smuzhiyun WARN_ON(atomic_read(&eb->refs) == 0);
5325*4882a593Smuzhiyun if (atomic_dec_and_test(&eb->refs)) {
5326*4882a593Smuzhiyun if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
5327*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = eb->fs_info;
5328*4882a593Smuzhiyun
5329*4882a593Smuzhiyun spin_unlock(&eb->refs_lock);
5330*4882a593Smuzhiyun
5331*4882a593Smuzhiyun spin_lock(&fs_info->buffer_lock);
5332*4882a593Smuzhiyun radix_tree_delete(&fs_info->buffer_radix,
5333*4882a593Smuzhiyun eb->start >> PAGE_SHIFT);
5334*4882a593Smuzhiyun spin_unlock(&fs_info->buffer_lock);
5335*4882a593Smuzhiyun } else {
5336*4882a593Smuzhiyun spin_unlock(&eb->refs_lock);
5337*4882a593Smuzhiyun }
5338*4882a593Smuzhiyun
5339*4882a593Smuzhiyun btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list);
5340*4882a593Smuzhiyun /* Should be safe to release our pages at this point */
5341*4882a593Smuzhiyun btrfs_release_extent_buffer_pages(eb);
5342*4882a593Smuzhiyun #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
5343*4882a593Smuzhiyun if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
5344*4882a593Smuzhiyun __free_extent_buffer(eb);
5345*4882a593Smuzhiyun return 1;
5346*4882a593Smuzhiyun }
5347*4882a593Smuzhiyun #endif
5348*4882a593Smuzhiyun call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
5349*4882a593Smuzhiyun return 1;
5350*4882a593Smuzhiyun }
5351*4882a593Smuzhiyun spin_unlock(&eb->refs_lock);
5352*4882a593Smuzhiyun
5353*4882a593Smuzhiyun return 0;
5354*4882a593Smuzhiyun }
5355*4882a593Smuzhiyun
5356*4882a593Smuzhiyun void free_extent_buffer(struct extent_buffer *eb)
5357*4882a593Smuzhiyun {
5358*4882a593Smuzhiyun int refs;
5359*4882a593Smuzhiyun int old;
5360*4882a593Smuzhiyun if (!eb)
5361*4882a593Smuzhiyun return;
5362*4882a593Smuzhiyun
5363*4882a593Smuzhiyun while (1) {
5364*4882a593Smuzhiyun refs = atomic_read(&eb->refs);
5365*4882a593Smuzhiyun if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
5366*4882a593Smuzhiyun || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
5367*4882a593Smuzhiyun refs == 1))
5368*4882a593Smuzhiyun break;
5369*4882a593Smuzhiyun old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
5370*4882a593Smuzhiyun if (old == refs)
5371*4882a593Smuzhiyun return;
5372*4882a593Smuzhiyun }
5373*4882a593Smuzhiyun
5374*4882a593Smuzhiyun spin_lock(&eb->refs_lock);
5375*4882a593Smuzhiyun if (atomic_read(&eb->refs) == 2 &&
5376*4882a593Smuzhiyun test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
5377*4882a593Smuzhiyun !extent_buffer_under_io(eb) &&
5378*4882a593Smuzhiyun test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5379*4882a593Smuzhiyun atomic_dec(&eb->refs);
5380*4882a593Smuzhiyun
5381*4882a593Smuzhiyun /*
5382*4882a593Smuzhiyun * I know this is terrible, but it's temporary until we stop tracking
5383*4882a593Smuzhiyun * the uptodate bits and such for the extent buffers.
5384*4882a593Smuzhiyun */
5385*4882a593Smuzhiyun release_extent_buffer(eb);
5386*4882a593Smuzhiyun }
5387*4882a593Smuzhiyun
5388*4882a593Smuzhiyun void free_extent_buffer_stale(struct extent_buffer *eb)
5389*4882a593Smuzhiyun {
5390*4882a593Smuzhiyun if (!eb)
5391*4882a593Smuzhiyun return;
5392*4882a593Smuzhiyun
5393*4882a593Smuzhiyun spin_lock(&eb->refs_lock);
5394*4882a593Smuzhiyun set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
5395*4882a593Smuzhiyun
5396*4882a593Smuzhiyun if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
5397*4882a593Smuzhiyun test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
5398*4882a593Smuzhiyun atomic_dec(&eb->refs);
5399*4882a593Smuzhiyun release_extent_buffer(eb);
5400*4882a593Smuzhiyun }
5401*4882a593Smuzhiyun
5402*4882a593Smuzhiyun void clear_extent_buffer_dirty(const struct extent_buffer *eb)
5403*4882a593Smuzhiyun {
5404*4882a593Smuzhiyun int i;
5405*4882a593Smuzhiyun int num_pages;
5406*4882a593Smuzhiyun struct page *page;
5407*4882a593Smuzhiyun
5408*4882a593Smuzhiyun num_pages = num_extent_pages(eb);
5409*4882a593Smuzhiyun
5410*4882a593Smuzhiyun for (i = 0; i < num_pages; i++) {
5411*4882a593Smuzhiyun page = eb->pages[i];
5412*4882a593Smuzhiyun if (!PageDirty(page))
5413*4882a593Smuzhiyun continue;
5414*4882a593Smuzhiyun
5415*4882a593Smuzhiyun lock_page(page);
5416*4882a593Smuzhiyun WARN_ON(!PagePrivate(page));
5417*4882a593Smuzhiyun
5418*4882a593Smuzhiyun clear_page_dirty_for_io(page);
5419*4882a593Smuzhiyun xa_lock_irq(&page->mapping->i_pages);
5420*4882a593Smuzhiyun if (!PageDirty(page))
5421*4882a593Smuzhiyun __xa_clear_mark(&page->mapping->i_pages,
5422*4882a593Smuzhiyun page_index(page), PAGECACHE_TAG_DIRTY);
5423*4882a593Smuzhiyun xa_unlock_irq(&page->mapping->i_pages);
5424*4882a593Smuzhiyun ClearPageError(page);
5425*4882a593Smuzhiyun unlock_page(page);
5426*4882a593Smuzhiyun }
5427*4882a593Smuzhiyun WARN_ON(atomic_read(&eb->refs) == 0);
5428*4882a593Smuzhiyun }
5429*4882a593Smuzhiyun
5430*4882a593Smuzhiyun bool set_extent_buffer_dirty(struct extent_buffer *eb)
5431*4882a593Smuzhiyun {
5432*4882a593Smuzhiyun int i;
5433*4882a593Smuzhiyun int num_pages;
5434*4882a593Smuzhiyun bool was_dirty;
5435*4882a593Smuzhiyun
5436*4882a593Smuzhiyun check_buffer_tree_ref(eb);
5437*4882a593Smuzhiyun
5438*4882a593Smuzhiyun was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
5439*4882a593Smuzhiyun
5440*4882a593Smuzhiyun num_pages = num_extent_pages(eb);
5441*4882a593Smuzhiyun WARN_ON(atomic_read(&eb->refs) == 0);
5442*4882a593Smuzhiyun WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
5443*4882a593Smuzhiyun
5444*4882a593Smuzhiyun if (!was_dirty)
5445*4882a593Smuzhiyun for (i = 0; i < num_pages; i++)
5446*4882a593Smuzhiyun set_page_dirty(eb->pages[i]);
5447*4882a593Smuzhiyun
5448*4882a593Smuzhiyun #ifdef CONFIG_BTRFS_DEBUG
5449*4882a593Smuzhiyun for (i = 0; i < num_pages; i++)
5450*4882a593Smuzhiyun ASSERT(PageDirty(eb->pages[i]));
5451*4882a593Smuzhiyun #endif
5452*4882a593Smuzhiyun
5453*4882a593Smuzhiyun return was_dirty;
5454*4882a593Smuzhiyun }
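
/*
 * The return value of set_extent_buffer_dirty() is the previous dirty
 * state: false means this call transitioned the buffer (and all of its
 * pages) to dirty, true means it was already dirty and the page flags
 * were left untouched.
 */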
5455*4882a593Smuzhiyun
5456*4882a593Smuzhiyun void clear_extent_buffer_uptodate(struct extent_buffer *eb)
5457*4882a593Smuzhiyun {
5458*4882a593Smuzhiyun int i;
5459*4882a593Smuzhiyun struct page *page;
5460*4882a593Smuzhiyun int num_pages;
5461*4882a593Smuzhiyun
5462*4882a593Smuzhiyun clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5463*4882a593Smuzhiyun num_pages = num_extent_pages(eb);
5464*4882a593Smuzhiyun for (i = 0; i < num_pages; i++) {
5465*4882a593Smuzhiyun page = eb->pages[i];
5466*4882a593Smuzhiyun if (page)
5467*4882a593Smuzhiyun ClearPageUptodate(page);
5468*4882a593Smuzhiyun }
5469*4882a593Smuzhiyun }
5470*4882a593Smuzhiyun
5471*4882a593Smuzhiyun void set_extent_buffer_uptodate(struct extent_buffer *eb)
5472*4882a593Smuzhiyun {
5473*4882a593Smuzhiyun int i;
5474*4882a593Smuzhiyun struct page *page;
5475*4882a593Smuzhiyun int num_pages;
5476*4882a593Smuzhiyun
5477*4882a593Smuzhiyun set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5478*4882a593Smuzhiyun num_pages = num_extent_pages(eb);
5479*4882a593Smuzhiyun for (i = 0; i < num_pages; i++) {
5480*4882a593Smuzhiyun page = eb->pages[i];
5481*4882a593Smuzhiyun SetPageUptodate(page);
5482*4882a593Smuzhiyun }
5483*4882a593Smuzhiyun }
5484*4882a593Smuzhiyun
5485*4882a593Smuzhiyun int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
5486*4882a593Smuzhiyun {
5487*4882a593Smuzhiyun int i;
5488*4882a593Smuzhiyun struct page *page;
5489*4882a593Smuzhiyun int err;
5490*4882a593Smuzhiyun int ret = 0;
5491*4882a593Smuzhiyun int locked_pages = 0;
5492*4882a593Smuzhiyun int all_uptodate = 1;
5493*4882a593Smuzhiyun int num_pages;
5494*4882a593Smuzhiyun unsigned long num_reads = 0;
5495*4882a593Smuzhiyun struct bio *bio = NULL;
5496*4882a593Smuzhiyun unsigned long bio_flags = 0;
5497*4882a593Smuzhiyun
5498*4882a593Smuzhiyun if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
5499*4882a593Smuzhiyun return 0;
5500*4882a593Smuzhiyun
5501*4882a593Smuzhiyun num_pages = num_extent_pages(eb);
5502*4882a593Smuzhiyun for (i = 0; i < num_pages; i++) {
5503*4882a593Smuzhiyun page = eb->pages[i];
5504*4882a593Smuzhiyun if (wait == WAIT_NONE) {
5505*4882a593Smuzhiyun if (!trylock_page(page))
5506*4882a593Smuzhiyun goto unlock_exit;
5507*4882a593Smuzhiyun } else {
5508*4882a593Smuzhiyun lock_page(page);
5509*4882a593Smuzhiyun }
5510*4882a593Smuzhiyun locked_pages++;
5511*4882a593Smuzhiyun }
5512*4882a593Smuzhiyun /*
5513*4882a593Smuzhiyun 	 * We need to lock all pages first to make sure that
5514*4882a593Smuzhiyun 	 * the uptodate bit of our pages won't be affected by
5515*4882a593Smuzhiyun 	 * clear_extent_buffer_uptodate().
5516*4882a593Smuzhiyun */
5517*4882a593Smuzhiyun for (i = 0; i < num_pages; i++) {
5518*4882a593Smuzhiyun page = eb->pages[i];
5519*4882a593Smuzhiyun if (!PageUptodate(page)) {
5520*4882a593Smuzhiyun num_reads++;
5521*4882a593Smuzhiyun all_uptodate = 0;
5522*4882a593Smuzhiyun }
5523*4882a593Smuzhiyun }
5524*4882a593Smuzhiyun
5525*4882a593Smuzhiyun if (all_uptodate) {
5526*4882a593Smuzhiyun set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
5527*4882a593Smuzhiyun goto unlock_exit;
5528*4882a593Smuzhiyun }
5529*4882a593Smuzhiyun
5530*4882a593Smuzhiyun clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
5531*4882a593Smuzhiyun eb->read_mirror = 0;
5532*4882a593Smuzhiyun atomic_set(&eb->io_pages, num_reads);
5533*4882a593Smuzhiyun /*
5534*4882a593Smuzhiyun * It is possible for releasepage to clear the TREE_REF bit before we
5535*4882a593Smuzhiyun * set io_pages. See check_buffer_tree_ref for a more detailed comment.
5536*4882a593Smuzhiyun */
5537*4882a593Smuzhiyun check_buffer_tree_ref(eb);
5538*4882a593Smuzhiyun for (i = 0; i < num_pages; i++) {
5539*4882a593Smuzhiyun page = eb->pages[i];
5540*4882a593Smuzhiyun
5541*4882a593Smuzhiyun if (!PageUptodate(page)) {
5542*4882a593Smuzhiyun if (ret) {
5543*4882a593Smuzhiyun atomic_dec(&eb->io_pages);
5544*4882a593Smuzhiyun unlock_page(page);
5545*4882a593Smuzhiyun continue;
5546*4882a593Smuzhiyun }
5547*4882a593Smuzhiyun
5548*4882a593Smuzhiyun ClearPageError(page);
5549*4882a593Smuzhiyun err = submit_extent_page(REQ_OP_READ | REQ_META, NULL,
5550*4882a593Smuzhiyun page, page_offset(page), PAGE_SIZE, 0,
5551*4882a593Smuzhiyun &bio, end_bio_extent_readpage,
5552*4882a593Smuzhiyun mirror_num, 0, 0, false);
5553*4882a593Smuzhiyun if (err) {
5554*4882a593Smuzhiyun /*
5555*4882a593Smuzhiyun 				 * We failed to submit the bio, so it is the
5556*4882a593Smuzhiyun 				 * caller's responsibility to perform cleanup,
5557*4882a593Smuzhiyun 				 * i.e. unlock the page and set the error bit.
5558*4882a593Smuzhiyun */
5559*4882a593Smuzhiyun ret = err;
5560*4882a593Smuzhiyun SetPageError(page);
5561*4882a593Smuzhiyun unlock_page(page);
5562*4882a593Smuzhiyun atomic_dec(&eb->io_pages);
5563*4882a593Smuzhiyun }
5564*4882a593Smuzhiyun } else {
5565*4882a593Smuzhiyun unlock_page(page);
5566*4882a593Smuzhiyun }
5567*4882a593Smuzhiyun }
5568*4882a593Smuzhiyun
5569*4882a593Smuzhiyun if (bio) {
5570*4882a593Smuzhiyun err = submit_one_bio(bio, mirror_num, bio_flags);
5571*4882a593Smuzhiyun if (err)
5572*4882a593Smuzhiyun return err;
5573*4882a593Smuzhiyun }
5574*4882a593Smuzhiyun
5575*4882a593Smuzhiyun if (ret || wait != WAIT_COMPLETE)
5576*4882a593Smuzhiyun return ret;
5577*4882a593Smuzhiyun
5578*4882a593Smuzhiyun for (i = 0; i < num_pages; i++) {
5579*4882a593Smuzhiyun page = eb->pages[i];
5580*4882a593Smuzhiyun wait_on_page_locked(page);
5581*4882a593Smuzhiyun if (!PageUptodate(page))
5582*4882a593Smuzhiyun ret = -EIO;
5583*4882a593Smuzhiyun }
5584*4882a593Smuzhiyun
5585*4882a593Smuzhiyun return ret;
5586*4882a593Smuzhiyun
5587*4882a593Smuzhiyun unlock_exit:
5588*4882a593Smuzhiyun while (locked_pages > 0) {
5589*4882a593Smuzhiyun locked_pages--;
5590*4882a593Smuzhiyun page = eb->pages[locked_pages];
5591*4882a593Smuzhiyun unlock_page(page);
5592*4882a593Smuzhiyun }
5593*4882a593Smuzhiyun return ret;
5594*4882a593Smuzhiyun }
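
/*
 * Summary of the wait modes handled above: WAIT_NONE only trylocks the
 * pages and returns without blocking once the reads are submitted, while
 * WAIT_COMPLETE also waits for every page lock and checks PageUptodate,
 * so only WAIT_COMPLETE can report -EIO for a failed read here.
 */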
5595*4882a593Smuzhiyun
5596*4882a593Smuzhiyun static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
5597*4882a593Smuzhiyun unsigned long len)
5598*4882a593Smuzhiyun {
5599*4882a593Smuzhiyun btrfs_warn(eb->fs_info,
5600*4882a593Smuzhiyun "access to eb bytenr %llu len %lu out of range start %lu len %lu",
5601*4882a593Smuzhiyun eb->start, eb->len, start, len);
5602*4882a593Smuzhiyun WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
5603*4882a593Smuzhiyun
5604*4882a593Smuzhiyun return true;
5605*4882a593Smuzhiyun }
5606*4882a593Smuzhiyun
5607*4882a593Smuzhiyun /*
5608*4882a593Smuzhiyun * Check if the [start, start + len) range is valid before reading/writing
5609*4882a593Smuzhiyun * the eb.
5610*4882a593Smuzhiyun  * NOTE: @start and @len are offsets inside the eb, not logical addresses.
5611*4882a593Smuzhiyun  *
5612*4882a593Smuzhiyun  * The caller should not touch the dst/src memory if this function returns an error.
5613*4882a593Smuzhiyun */
5614*4882a593Smuzhiyun static inline int check_eb_range(const struct extent_buffer *eb,
5615*4882a593Smuzhiyun unsigned long start, unsigned long len)
5616*4882a593Smuzhiyun {
5617*4882a593Smuzhiyun unsigned long offset;
5618*4882a593Smuzhiyun
5619*4882a593Smuzhiyun /* start, start + len should not go beyond eb->len nor overflow */
5620*4882a593Smuzhiyun if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
5621*4882a593Smuzhiyun return report_eb_range(eb, start, len);
5622*4882a593Smuzhiyun
5623*4882a593Smuzhiyun return false;
5624*4882a593Smuzhiyun }
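
/*
 * Example with hypothetical values: for an eb with eb->len == 16384, a
 * call such as read_extent_buffer(eb, buf, 16380, 8) fails the check
 * because 16380 + 8 > eb->len, triggers the warning in report_eb_range()
 * and returns without touching the destination buffer.
 */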
5625*4882a593Smuzhiyun
5626*4882a593Smuzhiyun void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
5627*4882a593Smuzhiyun unsigned long start, unsigned long len)
5628*4882a593Smuzhiyun {
5629*4882a593Smuzhiyun size_t cur;
5630*4882a593Smuzhiyun size_t offset;
5631*4882a593Smuzhiyun struct page *page;
5632*4882a593Smuzhiyun char *kaddr;
5633*4882a593Smuzhiyun char *dst = (char *)dstv;
5634*4882a593Smuzhiyun unsigned long i = start >> PAGE_SHIFT;
5635*4882a593Smuzhiyun
5636*4882a593Smuzhiyun if (check_eb_range(eb, start, len))
5637*4882a593Smuzhiyun return;
5638*4882a593Smuzhiyun
5639*4882a593Smuzhiyun offset = offset_in_page(start);
5640*4882a593Smuzhiyun
5641*4882a593Smuzhiyun while (len > 0) {
5642*4882a593Smuzhiyun page = eb->pages[i];
5643*4882a593Smuzhiyun
5644*4882a593Smuzhiyun cur = min(len, (PAGE_SIZE - offset));
5645*4882a593Smuzhiyun kaddr = page_address(page);
5646*4882a593Smuzhiyun memcpy(dst, kaddr + offset, cur);
5647*4882a593Smuzhiyun
5648*4882a593Smuzhiyun dst += cur;
5649*4882a593Smuzhiyun len -= cur;
5650*4882a593Smuzhiyun offset = 0;
5651*4882a593Smuzhiyun i++;
5652*4882a593Smuzhiyun }
5653*4882a593Smuzhiyun }
5654*4882a593Smuzhiyun
5655*4882a593Smuzhiyun int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
5656*4882a593Smuzhiyun void __user *dstv,
5657*4882a593Smuzhiyun unsigned long start, unsigned long len)
5658*4882a593Smuzhiyun {
5659*4882a593Smuzhiyun size_t cur;
5660*4882a593Smuzhiyun size_t offset;
5661*4882a593Smuzhiyun struct page *page;
5662*4882a593Smuzhiyun char *kaddr;
5663*4882a593Smuzhiyun char __user *dst = (char __user *)dstv;
5664*4882a593Smuzhiyun unsigned long i = start >> PAGE_SHIFT;
5665*4882a593Smuzhiyun int ret = 0;
5666*4882a593Smuzhiyun
5667*4882a593Smuzhiyun WARN_ON(start > eb->len);
5668*4882a593Smuzhiyun WARN_ON(start + len > eb->start + eb->len);
5669*4882a593Smuzhiyun
5670*4882a593Smuzhiyun offset = offset_in_page(start);
5671*4882a593Smuzhiyun
5672*4882a593Smuzhiyun while (len > 0) {
5673*4882a593Smuzhiyun page = eb->pages[i];
5674*4882a593Smuzhiyun
5675*4882a593Smuzhiyun cur = min(len, (PAGE_SIZE - offset));
5676*4882a593Smuzhiyun kaddr = page_address(page);
5677*4882a593Smuzhiyun if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
5678*4882a593Smuzhiyun ret = -EFAULT;
5679*4882a593Smuzhiyun break;
5680*4882a593Smuzhiyun }
5681*4882a593Smuzhiyun
5682*4882a593Smuzhiyun dst += cur;
5683*4882a593Smuzhiyun len -= cur;
5684*4882a593Smuzhiyun offset = 0;
5685*4882a593Smuzhiyun i++;
5686*4882a593Smuzhiyun }
5687*4882a593Smuzhiyun
5688*4882a593Smuzhiyun return ret;
5689*4882a593Smuzhiyun }
5690*4882a593Smuzhiyun
5691*4882a593Smuzhiyun int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
5692*4882a593Smuzhiyun unsigned long start, unsigned long len)
5693*4882a593Smuzhiyun {
5694*4882a593Smuzhiyun size_t cur;
5695*4882a593Smuzhiyun size_t offset;
5696*4882a593Smuzhiyun struct page *page;
5697*4882a593Smuzhiyun char *kaddr;
5698*4882a593Smuzhiyun char *ptr = (char *)ptrv;
5699*4882a593Smuzhiyun unsigned long i = start >> PAGE_SHIFT;
5700*4882a593Smuzhiyun int ret = 0;
5701*4882a593Smuzhiyun
5702*4882a593Smuzhiyun if (check_eb_range(eb, start, len))
5703*4882a593Smuzhiyun return -EINVAL;
5704*4882a593Smuzhiyun
5705*4882a593Smuzhiyun offset = offset_in_page(start);
5706*4882a593Smuzhiyun
5707*4882a593Smuzhiyun while (len > 0) {
5708*4882a593Smuzhiyun page = eb->pages[i];
5709*4882a593Smuzhiyun
5710*4882a593Smuzhiyun cur = min(len, (PAGE_SIZE - offset));
5711*4882a593Smuzhiyun
5712*4882a593Smuzhiyun kaddr = page_address(page);
5713*4882a593Smuzhiyun ret = memcmp(ptr, kaddr + offset, cur);
5714*4882a593Smuzhiyun if (ret)
5715*4882a593Smuzhiyun break;
5716*4882a593Smuzhiyun
5717*4882a593Smuzhiyun ptr += cur;
5718*4882a593Smuzhiyun len -= cur;
5719*4882a593Smuzhiyun offset = 0;
5720*4882a593Smuzhiyun i++;
5721*4882a593Smuzhiyun }
5722*4882a593Smuzhiyun return ret;
5723*4882a593Smuzhiyun }
5724*4882a593Smuzhiyun
5725*4882a593Smuzhiyun void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
5726*4882a593Smuzhiyun const void *srcv)
5727*4882a593Smuzhiyun {
5728*4882a593Smuzhiyun char *kaddr;
5729*4882a593Smuzhiyun
5730*4882a593Smuzhiyun WARN_ON(!PageUptodate(eb->pages[0]));
5731*4882a593Smuzhiyun kaddr = page_address(eb->pages[0]);
5732*4882a593Smuzhiyun memcpy(kaddr + offsetof(struct btrfs_header, chunk_tree_uuid), srcv,
5733*4882a593Smuzhiyun BTRFS_FSID_SIZE);
5734*4882a593Smuzhiyun }
5735*4882a593Smuzhiyun
5736*4882a593Smuzhiyun void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv)
5737*4882a593Smuzhiyun {
5738*4882a593Smuzhiyun char *kaddr;
5739*4882a593Smuzhiyun
5740*4882a593Smuzhiyun WARN_ON(!PageUptodate(eb->pages[0]));
5741*4882a593Smuzhiyun kaddr = page_address(eb->pages[0]);
5742*4882a593Smuzhiyun memcpy(kaddr + offsetof(struct btrfs_header, fsid), srcv,
5743*4882a593Smuzhiyun BTRFS_FSID_SIZE);
5744*4882a593Smuzhiyun }
5745*4882a593Smuzhiyun
5746*4882a593Smuzhiyun void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
5747*4882a593Smuzhiyun unsigned long start, unsigned long len)
5748*4882a593Smuzhiyun {
5749*4882a593Smuzhiyun size_t cur;
5750*4882a593Smuzhiyun size_t offset;
5751*4882a593Smuzhiyun struct page *page;
5752*4882a593Smuzhiyun char *kaddr;
5753*4882a593Smuzhiyun char *src = (char *)srcv;
5754*4882a593Smuzhiyun unsigned long i = start >> PAGE_SHIFT;
5755*4882a593Smuzhiyun
5756*4882a593Smuzhiyun if (check_eb_range(eb, start, len))
5757*4882a593Smuzhiyun return;
5758*4882a593Smuzhiyun
5759*4882a593Smuzhiyun offset = offset_in_page(start);
5760*4882a593Smuzhiyun
5761*4882a593Smuzhiyun while (len > 0) {
5762*4882a593Smuzhiyun page = eb->pages[i];
5763*4882a593Smuzhiyun WARN_ON(!PageUptodate(page));
5764*4882a593Smuzhiyun
5765*4882a593Smuzhiyun cur = min(len, PAGE_SIZE - offset);
5766*4882a593Smuzhiyun kaddr = page_address(page);
5767*4882a593Smuzhiyun memcpy(kaddr + offset, src, cur);
5768*4882a593Smuzhiyun
5769*4882a593Smuzhiyun src += cur;
5770*4882a593Smuzhiyun len -= cur;
5771*4882a593Smuzhiyun offset = 0;
5772*4882a593Smuzhiyun i++;
5773*4882a593Smuzhiyun }
5774*4882a593Smuzhiyun }
5775*4882a593Smuzhiyun
5776*4882a593Smuzhiyun void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
5777*4882a593Smuzhiyun unsigned long len)
5778*4882a593Smuzhiyun {
5779*4882a593Smuzhiyun size_t cur;
5780*4882a593Smuzhiyun size_t offset;
5781*4882a593Smuzhiyun struct page *page;
5782*4882a593Smuzhiyun char *kaddr;
5783*4882a593Smuzhiyun unsigned long i = start >> PAGE_SHIFT;
5784*4882a593Smuzhiyun
5785*4882a593Smuzhiyun if (check_eb_range(eb, start, len))
5786*4882a593Smuzhiyun return;
5787*4882a593Smuzhiyun
5788*4882a593Smuzhiyun offset = offset_in_page(start);
5789*4882a593Smuzhiyun
5790*4882a593Smuzhiyun while (len > 0) {
5791*4882a593Smuzhiyun page = eb->pages[i];
5792*4882a593Smuzhiyun WARN_ON(!PageUptodate(page));
5793*4882a593Smuzhiyun
5794*4882a593Smuzhiyun cur = min(len, PAGE_SIZE - offset);
5795*4882a593Smuzhiyun kaddr = page_address(page);
5796*4882a593Smuzhiyun memset(kaddr + offset, 0, cur);
5797*4882a593Smuzhiyun
5798*4882a593Smuzhiyun len -= cur;
5799*4882a593Smuzhiyun offset = 0;
5800*4882a593Smuzhiyun i++;
5801*4882a593Smuzhiyun }
5802*4882a593Smuzhiyun }
5803*4882a593Smuzhiyun
5804*4882a593Smuzhiyun void copy_extent_buffer_full(const struct extent_buffer *dst,
5805*4882a593Smuzhiyun const struct extent_buffer *src)
5806*4882a593Smuzhiyun {
5807*4882a593Smuzhiyun int i;
5808*4882a593Smuzhiyun int num_pages;
5809*4882a593Smuzhiyun
5810*4882a593Smuzhiyun ASSERT(dst->len == src->len);
5811*4882a593Smuzhiyun
5812*4882a593Smuzhiyun num_pages = num_extent_pages(dst);
5813*4882a593Smuzhiyun for (i = 0; i < num_pages; i++)
5814*4882a593Smuzhiyun copy_page(page_address(dst->pages[i]),
5815*4882a593Smuzhiyun page_address(src->pages[i]));
5816*4882a593Smuzhiyun }
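/*
 * Whole-buffer copy: dst and src are asserted to have the same length, so
 * each backing page can be duplicated in one go with copy_page() rather
 * than walking arbitrary byte ranges like copy_extent_buffer() below.
 */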
5817*4882a593Smuzhiyun
5818*4882a593Smuzhiyun void copy_extent_buffer(const struct extent_buffer *dst,
5819*4882a593Smuzhiyun const struct extent_buffer *src,
5820*4882a593Smuzhiyun unsigned long dst_offset, unsigned long src_offset,
5821*4882a593Smuzhiyun unsigned long len)
5822*4882a593Smuzhiyun {
5823*4882a593Smuzhiyun u64 dst_len = dst->len;
5824*4882a593Smuzhiyun size_t cur;
5825*4882a593Smuzhiyun size_t offset;
5826*4882a593Smuzhiyun struct page *page;
5827*4882a593Smuzhiyun char *kaddr;
5828*4882a593Smuzhiyun unsigned long i = dst_offset >> PAGE_SHIFT;
5829*4882a593Smuzhiyun
5830*4882a593Smuzhiyun if (check_eb_range(dst, dst_offset, len) ||
5831*4882a593Smuzhiyun check_eb_range(src, src_offset, len))
5832*4882a593Smuzhiyun return;
5833*4882a593Smuzhiyun
5834*4882a593Smuzhiyun WARN_ON(src->len != dst_len);
5835*4882a593Smuzhiyun
5836*4882a593Smuzhiyun offset = offset_in_page(dst_offset);
5837*4882a593Smuzhiyun
5838*4882a593Smuzhiyun while (len > 0) {
5839*4882a593Smuzhiyun page = dst->pages[i];
5840*4882a593Smuzhiyun WARN_ON(!PageUptodate(page));
5841*4882a593Smuzhiyun
5842*4882a593Smuzhiyun cur = min(len, (unsigned long)(PAGE_SIZE - offset));
5843*4882a593Smuzhiyun
5844*4882a593Smuzhiyun kaddr = page_address(page);
5845*4882a593Smuzhiyun read_extent_buffer(src, kaddr + offset, src_offset, cur);
5846*4882a593Smuzhiyun
5847*4882a593Smuzhiyun src_offset += cur;
5848*4882a593Smuzhiyun len -= cur;
5849*4882a593Smuzhiyun offset = 0;
5850*4882a593Smuzhiyun i++;
5851*4882a593Smuzhiyun }
5852*4882a593Smuzhiyun }
5853*4882a593Smuzhiyun
5854*4882a593Smuzhiyun /*
5855*4882a593Smuzhiyun * eb_bitmap_offset() - calculate the page and offset of the byte containing the
5856*4882a593Smuzhiyun * given bit number
5857*4882a593Smuzhiyun * @eb: the extent buffer
5858*4882a593Smuzhiyun * @start: offset of the bitmap item in the extent buffer
5859*4882a593Smuzhiyun * @nr: bit number
5860*4882a593Smuzhiyun * @page_index: return index of the page in the extent buffer that contains the
5861*4882a593Smuzhiyun * given bit number
5862*4882a593Smuzhiyun * @page_offset: return offset into the page given by page_index
5863*4882a593Smuzhiyun *
5864*4882a593Smuzhiyun * This helper hides the ugliness of finding the byte in an extent buffer which
5865*4882a593Smuzhiyun * contains a given bit.
5866*4882a593Smuzhiyun */
5867*4882a593Smuzhiyun static inline void eb_bitmap_offset(const struct extent_buffer *eb,
5868*4882a593Smuzhiyun unsigned long start, unsigned long nr,
5869*4882a593Smuzhiyun unsigned long *page_index,
5870*4882a593Smuzhiyun size_t *page_offset)
5871*4882a593Smuzhiyun {
5872*4882a593Smuzhiyun size_t byte_offset = BIT_BYTE(nr);
5873*4882a593Smuzhiyun size_t offset;
5874*4882a593Smuzhiyun
5875*4882a593Smuzhiyun /*
5876*4882a593Smuzhiyun * The byte we want is the offset of the extent buffer + the offset of
5877*4882a593Smuzhiyun * the bitmap item in the extent buffer + the offset of the byte in the
5878*4882a593Smuzhiyun * bitmap item.
5879*4882a593Smuzhiyun */
5880*4882a593Smuzhiyun offset = start + byte_offset;
5881*4882a593Smuzhiyun
5882*4882a593Smuzhiyun *page_index = offset >> PAGE_SHIFT;
5883*4882a593Smuzhiyun *page_offset = offset_in_page(offset);
5884*4882a593Smuzhiyun }
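/*
 * Worked example, assuming 4 KiB pages: for start = 100 and nr = 40000,
 * BIT_BYTE(nr) = 40000 / 8 = 5000 and offset = 5100, which yields
 * *page_index = 5100 >> PAGE_SHIFT = 1 and *page_offset = 5100 - 4096 = 1004.
 */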
5885*4882a593Smuzhiyun
5886*4882a593Smuzhiyun /**
5887*4882a593Smuzhiyun * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
5888*4882a593Smuzhiyun * @eb: the extent buffer
5889*4882a593Smuzhiyun * @start: offset of the bitmap item in the extent buffer
5890*4882a593Smuzhiyun * @nr: bit number to test
5891*4882a593Smuzhiyun */
5892*4882a593Smuzhiyun int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
5893*4882a593Smuzhiyun unsigned long nr)
5894*4882a593Smuzhiyun {
5895*4882a593Smuzhiyun u8 *kaddr;
5896*4882a593Smuzhiyun struct page *page;
5897*4882a593Smuzhiyun unsigned long i;
5898*4882a593Smuzhiyun size_t offset;
5899*4882a593Smuzhiyun
5900*4882a593Smuzhiyun eb_bitmap_offset(eb, start, nr, &i, &offset);
5901*4882a593Smuzhiyun page = eb->pages[i];
5902*4882a593Smuzhiyun WARN_ON(!PageUptodate(page));
5903*4882a593Smuzhiyun kaddr = page_address(page);
5904*4882a593Smuzhiyun return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
5905*4882a593Smuzhiyun }
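/*
 * The return expression isolates one bit: nr & (BITS_PER_BYTE - 1) picks
 * the bit position inside the byte located by eb_bitmap_offset(), e.g.
 * nr = 13 reads byte 1 of the bitmap item and tests bit 5 of that byte.
 */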
5906*4882a593Smuzhiyun
5907*4882a593Smuzhiyun /**
5908*4882a593Smuzhiyun * extent_buffer_bitmap_set - set an area of a bitmap
5909*4882a593Smuzhiyun * @eb: the extent buffer
5910*4882a593Smuzhiyun * @start: offset of the bitmap item in the extent buffer
5911*4882a593Smuzhiyun * @pos: bit number of the first bit
5912*4882a593Smuzhiyun * @len: number of bits to set
5913*4882a593Smuzhiyun */
5914*4882a593Smuzhiyun void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
5915*4882a593Smuzhiyun unsigned long pos, unsigned long len)
5916*4882a593Smuzhiyun {
5917*4882a593Smuzhiyun u8 *kaddr;
5918*4882a593Smuzhiyun struct page *page;
5919*4882a593Smuzhiyun unsigned long i;
5920*4882a593Smuzhiyun size_t offset;
5921*4882a593Smuzhiyun const unsigned int size = pos + len;
5922*4882a593Smuzhiyun int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5923*4882a593Smuzhiyun u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
5924*4882a593Smuzhiyun
5925*4882a593Smuzhiyun eb_bitmap_offset(eb, start, pos, &i, &offset);
5926*4882a593Smuzhiyun page = eb->pages[i];
5927*4882a593Smuzhiyun WARN_ON(!PageUptodate(page));
5928*4882a593Smuzhiyun kaddr = page_address(page);
5929*4882a593Smuzhiyun
5930*4882a593Smuzhiyun while (len >= bits_to_set) {
5931*4882a593Smuzhiyun kaddr[offset] |= mask_to_set;
5932*4882a593Smuzhiyun len -= bits_to_set;
5933*4882a593Smuzhiyun bits_to_set = BITS_PER_BYTE;
5934*4882a593Smuzhiyun mask_to_set = ~0;
5935*4882a593Smuzhiyun if (++offset >= PAGE_SIZE && len > 0) {
5936*4882a593Smuzhiyun offset = 0;
5937*4882a593Smuzhiyun page = eb->pages[++i];
5938*4882a593Smuzhiyun WARN_ON(!PageUptodate(page));
5939*4882a593Smuzhiyun kaddr = page_address(page);
5940*4882a593Smuzhiyun }
5941*4882a593Smuzhiyun }
5942*4882a593Smuzhiyun if (len) {
5943*4882a593Smuzhiyun mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
5944*4882a593Smuzhiyun kaddr[offset] |= mask_to_set;
5945*4882a593Smuzhiyun }
5946*4882a593Smuzhiyun }
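/*
 * Example: pos = 3, len = 8 gives size = 11. The first byte is or-ed with
 * BITMAP_FIRST_BYTE_MASK(3) = 0xf8 (bits 3-7), leaving len = 3, and the
 * trailing byte is or-ed with BITMAP_LAST_BYTE_MASK(11) = 0x07 (bits 0-2).
 */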
5947*4882a593Smuzhiyun
5948*4882a593Smuzhiyun
5949*4882a593Smuzhiyun /**
5950*4882a593Smuzhiyun * extent_buffer_bitmap_clear - clear an area of a bitmap
5951*4882a593Smuzhiyun * @eb: the extent buffer
5952*4882a593Smuzhiyun * @start: offset of the bitmap item in the extent buffer
5953*4882a593Smuzhiyun * @pos: bit number of the first bit
5954*4882a593Smuzhiyun * @len: number of bits to clear
5955*4882a593Smuzhiyun */
5956*4882a593Smuzhiyun void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
5957*4882a593Smuzhiyun unsigned long start, unsigned long pos,
5958*4882a593Smuzhiyun unsigned long len)
5959*4882a593Smuzhiyun {
5960*4882a593Smuzhiyun u8 *kaddr;
5961*4882a593Smuzhiyun struct page *page;
5962*4882a593Smuzhiyun unsigned long i;
5963*4882a593Smuzhiyun size_t offset;
5964*4882a593Smuzhiyun const unsigned int size = pos + len;
5965*4882a593Smuzhiyun int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
5966*4882a593Smuzhiyun u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
5967*4882a593Smuzhiyun
5968*4882a593Smuzhiyun eb_bitmap_offset(eb, start, pos, &i, &offset);
5969*4882a593Smuzhiyun page = eb->pages[i];
5970*4882a593Smuzhiyun WARN_ON(!PageUptodate(page));
5971*4882a593Smuzhiyun kaddr = page_address(page);
5972*4882a593Smuzhiyun
5973*4882a593Smuzhiyun while (len >= bits_to_clear) {
5974*4882a593Smuzhiyun kaddr[offset] &= ~mask_to_clear;
5975*4882a593Smuzhiyun len -= bits_to_clear;
5976*4882a593Smuzhiyun bits_to_clear = BITS_PER_BYTE;
5977*4882a593Smuzhiyun mask_to_clear = ~0;
5978*4882a593Smuzhiyun if (++offset >= PAGE_SIZE && len > 0) {
5979*4882a593Smuzhiyun offset = 0;
5980*4882a593Smuzhiyun page = eb->pages[++i];
5981*4882a593Smuzhiyun WARN_ON(!PageUptodate(page));
5982*4882a593Smuzhiyun kaddr = page_address(page);
5983*4882a593Smuzhiyun }
5984*4882a593Smuzhiyun }
5985*4882a593Smuzhiyun if (len) {
5986*4882a593Smuzhiyun mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
5987*4882a593Smuzhiyun kaddr[offset] &= ~mask_to_clear;
5988*4882a593Smuzhiyun }
5989*4882a593Smuzhiyun }
5990*4882a593Smuzhiyun
5991*4882a593Smuzhiyun static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
5992*4882a593Smuzhiyun {
5993*4882a593Smuzhiyun unsigned long distance = (src > dst) ? src - dst : dst - src;
5994*4882a593Smuzhiyun return distance < len;
5995*4882a593Smuzhiyun }
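/*
 * Example: src = 100, dst = 150, len = 60 gives distance = 50 < 60, so the
 * ranges [100, 160) and [150, 210) overlap and copy_pages() must fall back
 * to memmove() when both land in the same page.
 */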
5996*4882a593Smuzhiyun
5997*4882a593Smuzhiyun static void copy_pages(struct page *dst_page, struct page *src_page,
5998*4882a593Smuzhiyun unsigned long dst_off, unsigned long src_off,
5999*4882a593Smuzhiyun unsigned long len)
6000*4882a593Smuzhiyun {
6001*4882a593Smuzhiyun char *dst_kaddr = page_address(dst_page);
6002*4882a593Smuzhiyun char *src_kaddr;
6003*4882a593Smuzhiyun int must_memmove = 0;
6004*4882a593Smuzhiyun
6005*4882a593Smuzhiyun if (dst_page != src_page) {
6006*4882a593Smuzhiyun src_kaddr = page_address(src_page);
6007*4882a593Smuzhiyun } else {
6008*4882a593Smuzhiyun src_kaddr = dst_kaddr;
6009*4882a593Smuzhiyun if (areas_overlap(src_off, dst_off, len))
6010*4882a593Smuzhiyun must_memmove = 1;
6011*4882a593Smuzhiyun }
6012*4882a593Smuzhiyun
6013*4882a593Smuzhiyun if (must_memmove)
6014*4882a593Smuzhiyun memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
6015*4882a593Smuzhiyun else
6016*4882a593Smuzhiyun memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
6017*4882a593Smuzhiyun }
6018*4882a593Smuzhiyun
6019*4882a593Smuzhiyun void memcpy_extent_buffer(const struct extent_buffer *dst,
6020*4882a593Smuzhiyun unsigned long dst_offset, unsigned long src_offset,
6021*4882a593Smuzhiyun unsigned long len)
6022*4882a593Smuzhiyun {
6023*4882a593Smuzhiyun size_t cur;
6024*4882a593Smuzhiyun size_t dst_off_in_page;
6025*4882a593Smuzhiyun size_t src_off_in_page;
6026*4882a593Smuzhiyun unsigned long dst_i;
6027*4882a593Smuzhiyun unsigned long src_i;
6028*4882a593Smuzhiyun
6029*4882a593Smuzhiyun if (check_eb_range(dst, dst_offset, len) ||
6030*4882a593Smuzhiyun check_eb_range(dst, src_offset, len))
6031*4882a593Smuzhiyun return;
6032*4882a593Smuzhiyun
6033*4882a593Smuzhiyun while (len > 0) {
6034*4882a593Smuzhiyun dst_off_in_page = offset_in_page(dst_offset);
6035*4882a593Smuzhiyun src_off_in_page = offset_in_page(src_offset);
6036*4882a593Smuzhiyun
6037*4882a593Smuzhiyun dst_i = dst_offset >> PAGE_SHIFT;
6038*4882a593Smuzhiyun src_i = src_offset >> PAGE_SHIFT;
6039*4882a593Smuzhiyun
6040*4882a593Smuzhiyun cur = min(len, (unsigned long)(PAGE_SIZE -
6041*4882a593Smuzhiyun src_off_in_page));
6042*4882a593Smuzhiyun cur = min_t(unsigned long, cur,
6043*4882a593Smuzhiyun (unsigned long)(PAGE_SIZE - dst_off_in_page));
6044*4882a593Smuzhiyun
6045*4882a593Smuzhiyun copy_pages(dst->pages[dst_i], dst->pages[src_i],
6046*4882a593Smuzhiyun dst_off_in_page, src_off_in_page, cur);
6047*4882a593Smuzhiyun
6048*4882a593Smuzhiyun src_offset += cur;
6049*4882a593Smuzhiyun dst_offset += cur;
6050*4882a593Smuzhiyun len -= cur;
6051*4882a593Smuzhiyun }
6052*4882a593Smuzhiyun }
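/*
 * Example, assuming 4 KiB pages: dst_offset = 4090, src_offset = 10,
 * len = 100. The first pass is clamped by the destination page boundary to
 * cur = 6; the second pass copies the remaining 94 bytes to offset 0 of
 * dst->pages[1].
 */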
6053*4882a593Smuzhiyun
6054*4882a593Smuzhiyun void memmove_extent_buffer(const struct extent_buffer *dst,
6055*4882a593Smuzhiyun unsigned long dst_offset, unsigned long src_offset,
6056*4882a593Smuzhiyun unsigned long len)
6057*4882a593Smuzhiyun {
6058*4882a593Smuzhiyun size_t cur;
6059*4882a593Smuzhiyun size_t dst_off_in_page;
6060*4882a593Smuzhiyun size_t src_off_in_page;
6061*4882a593Smuzhiyun unsigned long dst_end = dst_offset + len - 1;
6062*4882a593Smuzhiyun unsigned long src_end = src_offset + len - 1;
6063*4882a593Smuzhiyun unsigned long dst_i;
6064*4882a593Smuzhiyun unsigned long src_i;
6065*4882a593Smuzhiyun
6066*4882a593Smuzhiyun if (check_eb_range(dst, dst_offset, len) ||
6067*4882a593Smuzhiyun check_eb_range(dst, src_offset, len))
6068*4882a593Smuzhiyun return;
6069*4882a593Smuzhiyun if (dst_offset < src_offset) {
6070*4882a593Smuzhiyun memcpy_extent_buffer(dst, dst_offset, src_offset, len);
6071*4882a593Smuzhiyun return;
6072*4882a593Smuzhiyun }
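/*
 * dst_offset >= src_offset here, so an overlapping forward copy would
 * overwrite source bytes before they are read; walk the range from its end
 * instead, copying the highest chunk first.
 */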
6073*4882a593Smuzhiyun while (len > 0) {
6074*4882a593Smuzhiyun dst_i = dst_end >> PAGE_SHIFT;
6075*4882a593Smuzhiyun src_i = src_end >> PAGE_SHIFT;
6076*4882a593Smuzhiyun
6077*4882a593Smuzhiyun dst_off_in_page = offset_in_page(dst_end);
6078*4882a593Smuzhiyun src_off_in_page = offset_in_page(src_end);
6079*4882a593Smuzhiyun
6080*4882a593Smuzhiyun cur = min_t(unsigned long, len, src_off_in_page + 1);
6081*4882a593Smuzhiyun cur = min(cur, dst_off_in_page + 1);
6082*4882a593Smuzhiyun copy_pages(dst->pages[dst_i], dst->pages[src_i],
6083*4882a593Smuzhiyun dst_off_in_page - cur + 1,
6084*4882a593Smuzhiyun src_off_in_page - cur + 1, cur);
6085*4882a593Smuzhiyun
6086*4882a593Smuzhiyun dst_end -= cur;
6087*4882a593Smuzhiyun src_end -= cur;
6088*4882a593Smuzhiyun len -= cur;
6089*4882a593Smuzhiyun }
6090*4882a593Smuzhiyun }
6091*4882a593Smuzhiyun
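/*
 * Returns 1 when the page carries no extent buffer or its buffer could be
 * freed so the page may be released, 0 when the buffer is still referenced
 * or under IO.
 */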
6092*4882a593Smuzhiyun int try_release_extent_buffer(struct page *page)
6093*4882a593Smuzhiyun {
6094*4882a593Smuzhiyun struct extent_buffer *eb;
6095*4882a593Smuzhiyun
6096*4882a593Smuzhiyun /*
6097*4882a593Smuzhiyun * We need to make sure nobody is attaching this page to an eb right
6098*4882a593Smuzhiyun * now.
6099*4882a593Smuzhiyun */
6100*4882a593Smuzhiyun spin_lock(&page->mapping->private_lock);
6101*4882a593Smuzhiyun if (!PagePrivate(page)) {
6102*4882a593Smuzhiyun spin_unlock(&page->mapping->private_lock);
6103*4882a593Smuzhiyun return 1;
6104*4882a593Smuzhiyun }
6105*4882a593Smuzhiyun
6106*4882a593Smuzhiyun eb = (struct extent_buffer *)page->private;
6107*4882a593Smuzhiyun BUG_ON(!eb);
6108*4882a593Smuzhiyun
6109*4882a593Smuzhiyun /*
6110*4882a593Smuzhiyun * This is a little awful but should be ok, we need to make sure that
6111*4882a593Smuzhiyun * the eb doesn't disappear out from under us while we're looking at
6112*4882a593Smuzhiyun * this page.
6113*4882a593Smuzhiyun */
6114*4882a593Smuzhiyun spin_lock(&eb->refs_lock);
6115*4882a593Smuzhiyun if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
6116*4882a593Smuzhiyun spin_unlock(&eb->refs_lock);
6117*4882a593Smuzhiyun spin_unlock(&page->mapping->private_lock);
6118*4882a593Smuzhiyun return 0;
6119*4882a593Smuzhiyun }
6120*4882a593Smuzhiyun spin_unlock(&page->mapping->private_lock);
6121*4882a593Smuzhiyun
6122*4882a593Smuzhiyun /*
6123*4882a593Smuzhiyun * If tree ref isn't set then we know the ref on this eb is a real ref,
6124*4882a593Smuzhiyun * so just return, this page will likely be freed soon anyway.
6125*4882a593Smuzhiyun */
6126*4882a593Smuzhiyun if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
6127*4882a593Smuzhiyun spin_unlock(&eb->refs_lock);
6128*4882a593Smuzhiyun return 0;
6129*4882a593Smuzhiyun }
6130*4882a593Smuzhiyun
6131*4882a593Smuzhiyun return release_extent_buffer(eb);
6132*4882a593Smuzhiyun }
6133