// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"

static struct kmem_cache *btrfs_ordered_extent_cache;
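
/*
 * Return the end byte of an ordered extent (file_offset + num_bytes),
 * clamped to (u64)-1 if the addition overflows.
 */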
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/*
 * Returns NULL if the insertion worked, or the existing node that overlaps
 * the given offset.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

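	/*
	 * The search may have left us on an entry that ends at or before
	 * the offset; walk forward past any such entries.
	 */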
	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
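	/*
	 * Walk backward over entries that start after the offset, so we end
	 * up on the closest entry at or before it (if one exists).
	 */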
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

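/*
 * helper to check if a given range overlaps a given entry at all
 */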
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}

/*
 * look up the first ordered struct that covers this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

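	/* Check the cached last-accessed node before searching the tree. */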
	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/*
 * Allocate and add a new ordered_extent into the per-inode tree.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int type, int dio,
				      int compress_type)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	int ret;

	if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_PREALLOC) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
		if (ret < 0)
			return ret;
		ret = 0;
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
		if (ret < 0)
			return ret;
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->disk_bytenr = disk_bytenr;
	entry->num_bytes = num_bytes;
	entry->disk_num_bytes = disk_num_bytes;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = ret;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio) {
		percpu_counter_add_batch(&fs_info->dio_bytes, num_bytes,
					 fs_info->delalloc_batch);
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
	}

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "inconsistency in ordered tree at offset %llu",
			    file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return 0;
}

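/* Add an ordered extent for a plain buffered write (no DIO, no compression). */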
int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
			     u64 disk_bytenr, u64 num_bytes, u64 disk_num_bytes,
			     int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 0,
					  BTRFS_COMPRESS_NONE);
}

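/* Same as btrfs_add_ordered_extent(), but for direct IO writes. */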
int btrfs_add_ordered_extent_dio(struct btrfs_inode *inode, u64 file_offset,
				 u64 disk_bytenr, u64 num_bytes,
				 u64 disk_num_bytes, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 1,
					  BTRFS_COMPRESS_NONE);
}

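/* Same as btrfs_add_ordered_extent(), but for compressed writes. */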
int btrfs_add_ordered_extent_compress(struct btrfs_inode *inode, u64 file_offset,
				      u64 disk_bytenr, u64 num_bytes,
				      u64 disk_num_bytes, int type,
				      int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, disk_bytenr,
					  num_bytes, disk_num_bytes, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(entry->inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
					 struct btrfs_ordered_extent **cached,
					 u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size,
		      entry->file_offset + entry->num_bytes);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
			   dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
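		/* Free any checksum entries still attached to this extent. */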
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
						false);

	if (test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		percpu_counter_add_batch(&fs_info->dio_bytes, -entry->num_bytes,
					 fs_info->delalloc_batch);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

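/* Work item callback: flush one ordered extent and signal its completion. */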
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
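	/*
	 * Work on a private snapshot of the list so that extents queued
	 * while we flush don't keep us looping forever.
	 */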
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

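	/* Wait for the flush work items queued above to finish. */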
	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

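/*
 * Wait for ordered extents in all roots, at most @nr of them, limited to
 * those overlapping the given disk byte range.
 */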
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered, 1);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
		struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node) {
		node = tree_search(tree, file_offset + len);
		if (!node)
			goto out;
	}

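	/* Walk forward until we find an overlapping entry or run past the range. */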
	while (1) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			break;

		if (entry->file_offset >= file_offset + len) {
			entry = NULL;
			break;
		}
		entry = NULL;
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	if (entry)
		refcount_inc(&entry->refs);
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&tree->lock);
	for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
out:
	spin_unlock_irq(&tree->lock);
	return entry;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct btrfs_inode *inode, u64 offset,
			   u64 disk_bytenr, u8 *sum, int len)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = btrfs_inode_sectorsize(inode);
	const u8 blocksize_bits = inode->vfs_inode.i_sb->s_blocksize_bits;
	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int index = 0;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 0;

	spin_lock_irq(&tree->lock);
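	/* Copy out the checksums of any queued sums that cover disk_bytenr. */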
	list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
		if (disk_bytenr >= ordered_sum->bytenr &&
		    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
			i = (disk_bytenr - ordered_sum->bytenr) >> blocksize_bits;
			num_sectors = ordered_sum->len >> blocksize_bits;
			num_sectors = min_t(int, len - index, num_sectors - i);
			memcpy(sum + index, ordered_sum->sums + i * csum_size,
			       num_sectors * csum_size);

			index += (int)num_sectors * csum_size;
			if (index == len)
				goto out;
			disk_bytenr += num_sectors * sectorsize;
		}
	}
out:
	spin_unlock_irq(&tree->lock);
	btrfs_put_ordered_extent(ordered);
	return index;
}

/*
 * btrfs_lock_and_flush_ordered_range - Lock the passed range and ensure all
 * pending ordered extents in it are run to completion.
 *
 * @inode: Inode whose ordered tree is to be searched
 * @start: Beginning of range to flush
 * @end: Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 * locked range. It's the caller's responsibility to free the cached state.
 *
 * This function always returns with the given range locked, ensuring that
 * after it's called no ordered extent in the range can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent_bits(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent_cached(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}

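/* Create the slab cache backing struct btrfs_ordered_extent allocations. */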
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
					sizeof(struct btrfs_ordered_extent), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}