// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
#include "misc.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6

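/*
 * One (inode, file offset) pair that references the extent being resolved.
 * Elements are chained into a singly linked list via @next.
 */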
struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

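/*
 * For a file extent item @fi in leaf @eb that references the extent being
 * resolved, record the (inode, offset) pair by prepending a new
 * extent_inode_elem to *@eie.
 *
 * Returns 0 if an element was added, 1 if @extent_item_pos falls outside the
 * file range covered by an uncompressed, unencrypted item (nothing is added),
 * and -ENOMEM if the allocation fails.
 */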
static int check_extent_in_eb(const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie,
			      bool ignore_offset)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!ignore_offset &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}

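/* Free a whole chain of extent_inode_elem structures. */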
static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

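/*
 * Walk all file extent items in leaf @eb and collect an extent_inode_elem
 * for each one whose disk bytenr matches @wanted_disk_byte.
 *
 * Returns 0 on success or a negative error propagated from
 * check_extent_in_eb().
 */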
static int find_extent_in_eb(const struct extent_buffer *eb,
			     u64 wanted_disk_byte, u64 extent_item_pos,
			     struct extent_inode_elem **eie,
			     bool ignore_offset)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
		if (ret < 0)
			return ret;
	}

	return 0;
}

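/*
 * A preftree holds preliminary references (struct prelim_ref) in a cached
 * rbtree together with the number of entries it contains.
 */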
struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
	u64 root_objectid;
	u64 inum;
	int share_count;
	bool have_delayed_delete_refs;
};

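/*
 * Return BACKREF_FOUND_SHARED if the share check context has seen more than
 * one reference with a positive count, 0 otherwise.
 */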
static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

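/*
 * Keep sc->share_count in sync when a ref's count changes from @oldcount to
 * @newcount: only transitions across zero adjust the counter.
 */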
static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;
}

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}

/*
 * Release the entire tree.  We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode) {
		free_inode_elem_list(ref->inode_list);
		free_pref(ref);
	}

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we've the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we've the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}

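/*
 * Check whether the direct preftree contains a shared data backref whose
 * parent is @bytenr.  Returns 1 if found, 0 otherwise.
 */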
static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}

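/*
 * For a resolved indirect ref, add the logical address of every block that
 * actually references ref->wanted_disk_byte to @parents.  For non-zero
 * @level this is simply the node the path points to; for leaves we walk the
 * file extent items starting at ref->key_for_search and, when
 * @extent_item_pos is given, also collect the matching inode list.
 */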
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   bool ignore_offset)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing to
	 *    the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for normal backref but bytenr of this leaf
	 *    matches shared data backref
	 * 3. The leaf owner is not equal to the root we are searching
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for normal backref but bytenr of this leaf
		 * matches shared data backref, OR
		 * the leaf owner is not equal to the root we are searching for
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (time_seq == SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie, ignore_offset);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 time_seq,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents,
				const u64 *extent_item_pos, bool ignore_offset)
{
	struct btrfs_root *root;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	/*
	 * If we're search_commit_root we could possibly be holding locks on
	 * other tree nodes.  This happens when qgroups does backref walks when
	 * adding new delayed refs.  To deal with this we need to look in cache
	 * for the root, and if we don't find it then we need to search the
	 * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
	 * here.
	 */
	if (path->search_commit_root)
		root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id);
	else
		root = btrfs_get_fs_root(fs_info, ref->root_id, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such case we set the search key's offset to zero to
	 * make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (time_seq == SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

	btrfs_debug(fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, preftrees, ref, level,
			      time_seq, extent_item_pos, ignore_offset);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

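/* The aux field of a ulist node carries the inode list of that parent. */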
static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}

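/*
 * Free a ulist of leaves together with the inode lists attached to the
 * nodes' aux fields.
 */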
static void free_leaf_list(struct ulist *ulist)
{
	struct ulist_node *node;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(ulist, &uiter)))
		free_inode_elem_list(unode_aux_to_inode_list(node));

	ulist_free(ulist);
}

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path, u64 time_seq,
				 struct preftrees *preftrees,
				 const u64 *extent_item_pos,
				 struct share_check *sc, bool ignore_offset)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done.  In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && sc->root_objectid &&
		    ref->root_id != sc->root_objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
					   ref, parents, extent_item_pos,
					   ignore_offset);
		/*
		 * We can only tolerate -ENOENT; otherwise we should catch the
		 * error and return it directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	/*
	 * We may have inode lists attached to refs in the parents ulist, so we
	 * must free them before freeing the ulist and its refs.
	 */
	free_leaf_list(parents);
	return ret;
}

/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
				     ref->level - 1, NULL);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}
		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

/*
 * Add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the preftrees.
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_key key;
	struct rb_node *n;
	int count;
	int ret = 0;

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;
			struct btrfs_key *key_ptr = NULL;

			if (head->extent_op && head->extent_op->update_key) {
				btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
				key_ptr = &key;
			}

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       key_ptr, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * If we have a share check context and a reference for
			 * another inode, we can't exit immediately. This is
			 * because even if this is a BTRFS_ADD_DELAYED_REF
			 * reference we may find next a BTRFS_DROP_DELAYED_REF
			 * which cancels out this ADD reference.
			 *
			 * If this is a DROP reference and there was no previous
			 * ADD reference, then we need to signal that when we
			 * process references from the extent tree (through
			 * add_inline_refs() and add_keyed_refs()), we should
			 * not exit early if we find a reference for another
			 * inode, because one of the delayed DROP references
			 * may cancel that reference in the extent tree.
			 */
			if (sc && count < 0)
				sc->have_delayed_delete_refs = true;

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);

	spin_unlock(&head->lock);
	return ret;
}

/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path, u64 bytenr,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(fs_info, preftrees,
					     *info_level + 1, offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(fs_info, preftrees, 0, offset,
					     bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);

			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_fs_info *fs_info,
			  struct btrfs_path *path, u64 bytenr,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;

	}

	return ret;
}

1176*4882a593Smuzhiyun /*
1177*4882a593Smuzhiyun * this adds all existing backrefs (inline backrefs, keyed backrefs and
1178*4882a593Smuzhiyun * delayed refs) for the given bytenr to the refs list, merges duplicates
1179*4882a593Smuzhiyun * and resolves indirect refs to their parent bytenr.
1180*4882a593Smuzhiyun * When roots are found, they're added to the roots list.
1181*4882a593Smuzhiyun *
1182*4882a593Smuzhiyun * If time_seq is set to SEQ_LAST, delayed refs are not searched and this
1183*4882a593Smuzhiyun * behaves much like the trans == NULL case; the only difference is that
1184*4882a593Smuzhiyun * the commit root is not searched.
1185*4882a593Smuzhiyun * This special case exists for qgroups to search roots in commit_transaction().
1186*4882a593Smuzhiyun *
1187*4882a593Smuzhiyun * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
1188*4882a593Smuzhiyun * shared extent is detected.
1189*4882a593Smuzhiyun *
1190*4882a593Smuzhiyun * Otherwise this returns 0 for success and <0 for an error.
1191*4882a593Smuzhiyun *
1192*4882a593Smuzhiyun * If ignore_offset is set to false, only extent refs whose offsets match
1193*4882a593Smuzhiyun * extent_item_pos are returned. If true, every extent ref is returned
1194*4882a593Smuzhiyun * and extent_item_pos is ignored.
1195*4882a593Smuzhiyun *
1196*4882a593Smuzhiyun * FIXME some caching might speed things up
1197*4882a593Smuzhiyun */
1198*4882a593Smuzhiyun static int find_parent_nodes(struct btrfs_trans_handle *trans,
1199*4882a593Smuzhiyun struct btrfs_fs_info *fs_info, u64 bytenr,
1200*4882a593Smuzhiyun u64 time_seq, struct ulist *refs,
1201*4882a593Smuzhiyun struct ulist *roots, const u64 *extent_item_pos,
1202*4882a593Smuzhiyun struct share_check *sc, bool ignore_offset)
1203*4882a593Smuzhiyun {
1204*4882a593Smuzhiyun struct btrfs_key key;
1205*4882a593Smuzhiyun struct btrfs_path *path;
1206*4882a593Smuzhiyun struct btrfs_delayed_ref_root *delayed_refs = NULL;
1207*4882a593Smuzhiyun struct btrfs_delayed_ref_head *head;
1208*4882a593Smuzhiyun int info_level = 0;
1209*4882a593Smuzhiyun int ret;
1210*4882a593Smuzhiyun struct prelim_ref *ref;
1211*4882a593Smuzhiyun struct rb_node *node;
1212*4882a593Smuzhiyun struct extent_inode_elem *eie = NULL;
1213*4882a593Smuzhiyun struct preftrees preftrees = {
1214*4882a593Smuzhiyun .direct = PREFTREE_INIT,
1215*4882a593Smuzhiyun .indirect = PREFTREE_INIT,
1216*4882a593Smuzhiyun .indirect_missing_keys = PREFTREE_INIT
1217*4882a593Smuzhiyun };
1218*4882a593Smuzhiyun
1219*4882a593Smuzhiyun key.objectid = bytenr;
1220*4882a593Smuzhiyun key.offset = (u64)-1;
1221*4882a593Smuzhiyun if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1222*4882a593Smuzhiyun key.type = BTRFS_METADATA_ITEM_KEY;
1223*4882a593Smuzhiyun else
1224*4882a593Smuzhiyun key.type = BTRFS_EXTENT_ITEM_KEY;
1225*4882a593Smuzhiyun
1226*4882a593Smuzhiyun path = btrfs_alloc_path();
1227*4882a593Smuzhiyun if (!path)
1228*4882a593Smuzhiyun return -ENOMEM;
1229*4882a593Smuzhiyun if (!trans) {
1230*4882a593Smuzhiyun path->search_commit_root = 1;
1231*4882a593Smuzhiyun path->skip_locking = 1;
1232*4882a593Smuzhiyun }
1233*4882a593Smuzhiyun
1234*4882a593Smuzhiyun if (time_seq == SEQ_LAST)
1235*4882a593Smuzhiyun path->skip_locking = 1;
1236*4882a593Smuzhiyun
1237*4882a593Smuzhiyun /*
1238*4882a593Smuzhiyun * grab both a lock on the path and a lock on the delayed ref head.
1239*4882a593Smuzhiyun * We need both to get a consistent picture of how the refs look
1240*4882a593Smuzhiyun * at a specified point in time
1241*4882a593Smuzhiyun */
1242*4882a593Smuzhiyun again:
1243*4882a593Smuzhiyun head = NULL;
1244*4882a593Smuzhiyun
1245*4882a593Smuzhiyun ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
1246*4882a593Smuzhiyun if (ret < 0)
1247*4882a593Smuzhiyun goto out;
1248*4882a593Smuzhiyun if (ret == 0) {
1249*4882a593Smuzhiyun /* This shouldn't happen, indicates a bug or fs corruption. */
1250*4882a593Smuzhiyun ASSERT(ret != 0);
1251*4882a593Smuzhiyun ret = -EUCLEAN;
1252*4882a593Smuzhiyun goto out;
1253*4882a593Smuzhiyun }
1254*4882a593Smuzhiyun
1255*4882a593Smuzhiyun #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1256*4882a593Smuzhiyun if (trans && likely(trans->type != __TRANS_DUMMY) &&
1257*4882a593Smuzhiyun time_seq != SEQ_LAST) {
1258*4882a593Smuzhiyun #else
1259*4882a593Smuzhiyun if (trans && time_seq != SEQ_LAST) {
1260*4882a593Smuzhiyun #endif
1261*4882a593Smuzhiyun /*
1262*4882a593Smuzhiyun * look if there are updates for this ref queued and lock the
1263*4882a593Smuzhiyun * head
1264*4882a593Smuzhiyun */
1265*4882a593Smuzhiyun delayed_refs = &trans->transaction->delayed_refs;
1266*4882a593Smuzhiyun spin_lock(&delayed_refs->lock);
1267*4882a593Smuzhiyun head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
1268*4882a593Smuzhiyun if (head) {
1269*4882a593Smuzhiyun if (!mutex_trylock(&head->mutex)) {
1270*4882a593Smuzhiyun refcount_inc(&head->refs);
1271*4882a593Smuzhiyun spin_unlock(&delayed_refs->lock);
1272*4882a593Smuzhiyun
1273*4882a593Smuzhiyun btrfs_release_path(path);
1274*4882a593Smuzhiyun
1275*4882a593Smuzhiyun /*
1276*4882a593Smuzhiyun * Mutex was contended, block until it's
1277*4882a593Smuzhiyun * released and try again
1278*4882a593Smuzhiyun */
1279*4882a593Smuzhiyun mutex_lock(&head->mutex);
1280*4882a593Smuzhiyun mutex_unlock(&head->mutex);
1281*4882a593Smuzhiyun btrfs_put_delayed_ref_head(head);
1282*4882a593Smuzhiyun goto again;
1283*4882a593Smuzhiyun }
1284*4882a593Smuzhiyun spin_unlock(&delayed_refs->lock);
1285*4882a593Smuzhiyun ret = add_delayed_refs(fs_info, head, time_seq,
1286*4882a593Smuzhiyun &preftrees, sc);
1287*4882a593Smuzhiyun mutex_unlock(&head->mutex);
1288*4882a593Smuzhiyun if (ret)
1289*4882a593Smuzhiyun goto out;
1290*4882a593Smuzhiyun } else {
1291*4882a593Smuzhiyun spin_unlock(&delayed_refs->lock);
1292*4882a593Smuzhiyun }
1293*4882a593Smuzhiyun }
1294*4882a593Smuzhiyun
1295*4882a593Smuzhiyun if (path->slots[0]) {
1296*4882a593Smuzhiyun struct extent_buffer *leaf;
1297*4882a593Smuzhiyun int slot;
1298*4882a593Smuzhiyun
1299*4882a593Smuzhiyun path->slots[0]--;
1300*4882a593Smuzhiyun leaf = path->nodes[0];
1301*4882a593Smuzhiyun slot = path->slots[0];
1302*4882a593Smuzhiyun btrfs_item_key_to_cpu(leaf, &key, slot);
1303*4882a593Smuzhiyun if (key.objectid == bytenr &&
1304*4882a593Smuzhiyun (key.type == BTRFS_EXTENT_ITEM_KEY ||
1305*4882a593Smuzhiyun key.type == BTRFS_METADATA_ITEM_KEY)) {
1306*4882a593Smuzhiyun ret = add_inline_refs(fs_info, path, bytenr,
1307*4882a593Smuzhiyun &info_level, &preftrees, sc);
1308*4882a593Smuzhiyun if (ret)
1309*4882a593Smuzhiyun goto out;
1310*4882a593Smuzhiyun ret = add_keyed_refs(fs_info, path, bytenr, info_level,
1311*4882a593Smuzhiyun &preftrees, sc);
1312*4882a593Smuzhiyun if (ret)
1313*4882a593Smuzhiyun goto out;
1314*4882a593Smuzhiyun }
1315*4882a593Smuzhiyun }
1316*4882a593Smuzhiyun
1317*4882a593Smuzhiyun btrfs_release_path(path);
1318*4882a593Smuzhiyun
1319*4882a593Smuzhiyun ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
1320*4882a593Smuzhiyun if (ret)
1321*4882a593Smuzhiyun goto out;
1322*4882a593Smuzhiyun
1323*4882a593Smuzhiyun WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
1324*4882a593Smuzhiyun
1325*4882a593Smuzhiyun ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
1326*4882a593Smuzhiyun extent_item_pos, sc, ignore_offset);
1327*4882a593Smuzhiyun if (ret)
1328*4882a593Smuzhiyun goto out;
1329*4882a593Smuzhiyun
1330*4882a593Smuzhiyun WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));
1331*4882a593Smuzhiyun
1332*4882a593Smuzhiyun /*
1333*4882a593Smuzhiyun * This walks the tree of merged and resolved refs. Tree blocks are
1334*4882a593Smuzhiyun * read in as needed. Unique entries are added to the ulist, and
1335*4882a593Smuzhiyun * the list of found roots is updated.
1336*4882a593Smuzhiyun *
1337*4882a593Smuzhiyun * We release the entire tree in one go before returning.
1338*4882a593Smuzhiyun */
1339*4882a593Smuzhiyun node = rb_first_cached(&preftrees.direct.root);
1340*4882a593Smuzhiyun while (node) {
1341*4882a593Smuzhiyun ref = rb_entry(node, struct prelim_ref, rbnode);
1342*4882a593Smuzhiyun node = rb_next(&ref->rbnode);
1343*4882a593Smuzhiyun /*
1344*4882a593Smuzhiyun * ref->count < 0 can happen here if there are delayed
1345*4882a593Smuzhiyun * refs with a node->action of BTRFS_DROP_DELAYED_REF.
1346*4882a593Smuzhiyun * prelim_ref_insert() relies on this when merging
1347*4882a593Smuzhiyun * identical refs to keep the overall count correct.
1348*4882a593Smuzhiyun * prelim_ref_insert() will merge only those refs
1349*4882a593Smuzhiyun * which compare identically. Any refs having
1350*4882a593Smuzhiyun * e.g. different offsets would not be merged,
1351*4882a593Smuzhiyun * and would retain their original ref->count < 0.
1352*4882a593Smuzhiyun */
1353*4882a593Smuzhiyun if (roots && ref->count && ref->root_id && ref->parent == 0) {
1354*4882a593Smuzhiyun if (sc && sc->root_objectid &&
1355*4882a593Smuzhiyun ref->root_id != sc->root_objectid) {
1356*4882a593Smuzhiyun ret = BACKREF_FOUND_SHARED;
1357*4882a593Smuzhiyun goto out;
1358*4882a593Smuzhiyun }
1359*4882a593Smuzhiyun
1360*4882a593Smuzhiyun /* no parent == root of tree */
1361*4882a593Smuzhiyun ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
1362*4882a593Smuzhiyun if (ret < 0)
1363*4882a593Smuzhiyun goto out;
1364*4882a593Smuzhiyun }
1365*4882a593Smuzhiyun if (ref->count && ref->parent) {
1366*4882a593Smuzhiyun if (extent_item_pos && !ref->inode_list &&
1367*4882a593Smuzhiyun ref->level == 0) {
1368*4882a593Smuzhiyun struct extent_buffer *eb;
1369*4882a593Smuzhiyun
1370*4882a593Smuzhiyun eb = read_tree_block(fs_info, ref->parent, 0,
1371*4882a593Smuzhiyun ref->level, NULL);
1372*4882a593Smuzhiyun if (IS_ERR(eb)) {
1373*4882a593Smuzhiyun ret = PTR_ERR(eb);
1374*4882a593Smuzhiyun goto out;
1375*4882a593Smuzhiyun } else if (!extent_buffer_uptodate(eb)) {
1376*4882a593Smuzhiyun free_extent_buffer(eb);
1377*4882a593Smuzhiyun ret = -EIO;
1378*4882a593Smuzhiyun goto out;
1379*4882a593Smuzhiyun }
1380*4882a593Smuzhiyun
1381*4882a593Smuzhiyun if (!path->skip_locking) {
1382*4882a593Smuzhiyun btrfs_tree_read_lock(eb);
1383*4882a593Smuzhiyun btrfs_set_lock_blocking_read(eb);
1384*4882a593Smuzhiyun }
1385*4882a593Smuzhiyun ret = find_extent_in_eb(eb, bytenr,
1386*4882a593Smuzhiyun *extent_item_pos, &eie, ignore_offset);
1387*4882a593Smuzhiyun if (!path->skip_locking)
1388*4882a593Smuzhiyun btrfs_tree_read_unlock_blocking(eb);
1389*4882a593Smuzhiyun free_extent_buffer(eb);
1390*4882a593Smuzhiyun if (ret < 0)
1391*4882a593Smuzhiyun goto out;
1392*4882a593Smuzhiyun ref->inode_list = eie;
1393*4882a593Smuzhiyun /*
1394*4882a593Smuzhiyun * We transferred the list ownership to the ref,
1395*4882a593Smuzhiyun * so set to NULL to avoid a double free in case
1396*4882a593Smuzhiyun * an error happens after this.
1397*4882a593Smuzhiyun */
1398*4882a593Smuzhiyun eie = NULL;
1399*4882a593Smuzhiyun }
1400*4882a593Smuzhiyun ret = ulist_add_merge_ptr(refs, ref->parent,
1401*4882a593Smuzhiyun ref->inode_list,
1402*4882a593Smuzhiyun (void **)&eie, GFP_NOFS);
1403*4882a593Smuzhiyun if (ret < 0)
1404*4882a593Smuzhiyun goto out;
1405*4882a593Smuzhiyun if (!ret && extent_item_pos) {
1406*4882a593Smuzhiyun /*
1407*4882a593Smuzhiyun * We've recorded that parent, so we must extend
1408*4882a593Smuzhiyun * its inode list here.
1409*4882a593Smuzhiyun *
1410*4882a593Smuzhiyun * However if there was corruption we may not
1411*4882a593Smuzhiyun * have found an eie, return an error in this
1412*4882a593Smuzhiyun * case.
1413*4882a593Smuzhiyun */
1414*4882a593Smuzhiyun ASSERT(eie);
1415*4882a593Smuzhiyun if (!eie) {
1416*4882a593Smuzhiyun ret = -EUCLEAN;
1417*4882a593Smuzhiyun goto out;
1418*4882a593Smuzhiyun }
1419*4882a593Smuzhiyun while (eie->next)
1420*4882a593Smuzhiyun eie = eie->next;
1421*4882a593Smuzhiyun eie->next = ref->inode_list;
1422*4882a593Smuzhiyun }
1423*4882a593Smuzhiyun eie = NULL;
1424*4882a593Smuzhiyun /*
1425*4882a593Smuzhiyun * We have transferred the inode list ownership from
1426*4882a593Smuzhiyun * this ref to the ref we added to the 'refs' ulist.
1427*4882a593Smuzhiyun * So set this ref's inode list to NULL to avoid
1428*4882a593Smuzhiyun * use-after-free when our caller uses it or double
1429*4882a593Smuzhiyun * frees in case an error happens before we return.
1430*4882a593Smuzhiyun */
1431*4882a593Smuzhiyun ref->inode_list = NULL;
1432*4882a593Smuzhiyun }
1433*4882a593Smuzhiyun cond_resched();
1434*4882a593Smuzhiyun }
1435*4882a593Smuzhiyun
1436*4882a593Smuzhiyun out:
1437*4882a593Smuzhiyun btrfs_free_path(path);
1438*4882a593Smuzhiyun
1439*4882a593Smuzhiyun prelim_release(&preftrees.direct);
1440*4882a593Smuzhiyun prelim_release(&preftrees.indirect);
1441*4882a593Smuzhiyun prelim_release(&preftrees.indirect_missing_keys);
1442*4882a593Smuzhiyun
1443*4882a593Smuzhiyun if (ret < 0)
1444*4882a593Smuzhiyun free_inode_elem_list(eie);
1445*4882a593Smuzhiyun return ret;
1446*4882a593Smuzhiyun }
1447*4882a593Smuzhiyun
1448*4882a593Smuzhiyun /*
1449*4882a593Smuzhiyun * Finds all leaves with a reference to the specified combination of bytenr and
1450*4882a593Smuzhiyun * offset. Each node of the *leafs ulist carries a leaf bytenr; if
1451*4882a593Smuzhiyun * extent_item_pos is given, its aux value points to the matching list of
1452*4882a593Smuzhiyun * extent_inode_elem entries. The ulist must be freed with free_leaf_list().
1453*4882a593Smuzhiyun *
1454*4882a593Smuzhiyun * returns 0 on success, <0 on error
1455*4882a593Smuzhiyun */
1456*4882a593Smuzhiyun int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
1457*4882a593Smuzhiyun struct btrfs_fs_info *fs_info, u64 bytenr,
1458*4882a593Smuzhiyun u64 time_seq, struct ulist **leafs,
1459*4882a593Smuzhiyun const u64 *extent_item_pos, bool ignore_offset)
1460*4882a593Smuzhiyun {
1461*4882a593Smuzhiyun int ret;
1462*4882a593Smuzhiyun
1463*4882a593Smuzhiyun *leafs = ulist_alloc(GFP_NOFS);
1464*4882a593Smuzhiyun if (!*leafs)
1465*4882a593Smuzhiyun return -ENOMEM;
1466*4882a593Smuzhiyun
1467*4882a593Smuzhiyun ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1468*4882a593Smuzhiyun *leafs, NULL, extent_item_pos, NULL, ignore_offset);
1469*4882a593Smuzhiyun if (ret < 0 && ret != -ENOENT) {
1470*4882a593Smuzhiyun free_leaf_list(*leafs);
1471*4882a593Smuzhiyun return ret;
1472*4882a593Smuzhiyun }
1473*4882a593Smuzhiyun
1474*4882a593Smuzhiyun return 0;
1475*4882a593Smuzhiyun }
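
/*
 * A minimal, hypothetical caller sketch (not taken from this file) showing the
 * expected btrfs_find_all_leafs() lifecycle: the returned ulist carries one
 * node per leaf and must be freed with free_leaf_list() so that any
 * extent_inode_elem lists hanging off the aux pointers are released too.
 *
 *	struct ulist *leafs = NULL;
 *	struct ulist_iterator uiter;
 *	struct ulist_node *node;
 *	int ret;
 *
 *	ret = btrfs_find_all_leafs(trans, fs_info, bytenr, time_seq,
 *				   &leafs, &extent_item_pos, false);
 *	if (ret < 0)
 *		return ret;
 *
 *	ULIST_ITER_INIT(&uiter);
 *	while ((node = ulist_next(leafs, &uiter)))
 *		;	/* node->val is a leaf bytenr, node->aux the inode list */
 *
 *	free_leaf_list(leafs);
 */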
1476*4882a593Smuzhiyun
1477*4882a593Smuzhiyun /*
1478*4882a593Smuzhiyun * walk all backrefs for a given extent to find all roots that reference this
1479*4882a593Smuzhiyun * extent. Walking a backref means finding all extents that reference this
1480*4882a593Smuzhiyun * extent and in turn walk the backrefs of those, too. Naturally this is a
1481*4882a593Smuzhiyun * recursive process, but here it is implemented in an iterative fashion: We
1482*4882a593Smuzhiyun * find all referencing extents for the extent in question and put them on a
1483*4882a593Smuzhiyun * list. In turn, we find all referencing extents for those, further appending
1484*4882a593Smuzhiyun * to the list. The way we iterate the list allows adding more elements after
1485*4882a593Smuzhiyun * the current while iterating. The process stops when we reach the end of the
1486*4882a593Smuzhiyun * list. Found roots are added to the roots list.
1487*4882a593Smuzhiyun *
1488*4882a593Smuzhiyun * returns 0 on success, < 0 on error.
1489*4882a593Smuzhiyun */
1490*4882a593Smuzhiyun static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
1491*4882a593Smuzhiyun struct btrfs_fs_info *fs_info, u64 bytenr,
1492*4882a593Smuzhiyun u64 time_seq, struct ulist **roots,
1493*4882a593Smuzhiyun bool ignore_offset)
1494*4882a593Smuzhiyun {
1495*4882a593Smuzhiyun struct ulist *tmp;
1496*4882a593Smuzhiyun struct ulist_node *node = NULL;
1497*4882a593Smuzhiyun struct ulist_iterator uiter;
1498*4882a593Smuzhiyun int ret;
1499*4882a593Smuzhiyun
1500*4882a593Smuzhiyun tmp = ulist_alloc(GFP_NOFS);
1501*4882a593Smuzhiyun if (!tmp)
1502*4882a593Smuzhiyun return -ENOMEM;
1503*4882a593Smuzhiyun *roots = ulist_alloc(GFP_NOFS);
1504*4882a593Smuzhiyun if (!*roots) {
1505*4882a593Smuzhiyun ulist_free(tmp);
1506*4882a593Smuzhiyun return -ENOMEM;
1507*4882a593Smuzhiyun }
1508*4882a593Smuzhiyun
1509*4882a593Smuzhiyun ULIST_ITER_INIT(&uiter);
1510*4882a593Smuzhiyun while (1) {
1511*4882a593Smuzhiyun ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1512*4882a593Smuzhiyun tmp, *roots, NULL, NULL, ignore_offset);
1513*4882a593Smuzhiyun if (ret < 0 && ret != -ENOENT) {
1514*4882a593Smuzhiyun ulist_free(tmp);
1515*4882a593Smuzhiyun ulist_free(*roots);
1516*4882a593Smuzhiyun *roots = NULL;
1517*4882a593Smuzhiyun return ret;
1518*4882a593Smuzhiyun }
1519*4882a593Smuzhiyun node = ulist_next(tmp, &uiter);
1520*4882a593Smuzhiyun if (!node)
1521*4882a593Smuzhiyun break;
1522*4882a593Smuzhiyun bytenr = node->val;
1523*4882a593Smuzhiyun cond_resched();
1524*4882a593Smuzhiyun }
1525*4882a593Smuzhiyun
1526*4882a593Smuzhiyun ulist_free(tmp);
1527*4882a593Smuzhiyun return 0;
1528*4882a593Smuzhiyun }
1529*4882a593Smuzhiyun
1530*4882a593Smuzhiyun int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1531*4882a593Smuzhiyun struct btrfs_fs_info *fs_info, u64 bytenr,
1532*4882a593Smuzhiyun u64 time_seq, struct ulist **roots,
1533*4882a593Smuzhiyun bool ignore_offset)
1534*4882a593Smuzhiyun {
1535*4882a593Smuzhiyun int ret;
1536*4882a593Smuzhiyun
1537*4882a593Smuzhiyun if (!trans)
1538*4882a593Smuzhiyun down_read(&fs_info->commit_root_sem);
1539*4882a593Smuzhiyun ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
1540*4882a593Smuzhiyun time_seq, roots, ignore_offset);
1541*4882a593Smuzhiyun if (!trans)
1542*4882a593Smuzhiyun up_read(&fs_info->commit_root_sem);
1543*4882a593Smuzhiyun return ret;
1544*4882a593Smuzhiyun }
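
/*
 * A minimal, hypothetical usage sketch for btrfs_find_all_roots(): resolve the
 * set of root ids referencing an extent and walk the resulting ulist. Variable
 * names here are illustrative only.
 *
 *	struct ulist *roots = NULL;
 *	struct ulist_iterator uiter;
 *	struct ulist_node *node;
 *	int ret;
 *
 *	ret = btrfs_find_all_roots(trans, fs_info, bytenr, time_seq,
 *				   &roots, false);
 *	if (ret < 0)
 *		return ret;
 *
 *	ULIST_ITER_INIT(&uiter);
 *	while ((node = ulist_next(roots, &uiter)))
 *		;	/* node->val is the objectid of a referencing root */
 *
 *	ulist_free(roots);
 */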
1545*4882a593Smuzhiyun
1546*4882a593Smuzhiyun /**
1547*4882a593Smuzhiyun * btrfs_check_shared - tell us whether an extent is shared
1548*4882a593Smuzhiyun *
1549*4882a593Smuzhiyun * btrfs_check_shared uses the backref walking code but will short
1550*4882a593Smuzhiyun * circuit as soon as it finds a root or inode that doesn't match the
1551*4882a593Smuzhiyun * one passed in. This provides a significant performance benefit for
1552*4882a593Smuzhiyun * callers (such as fiemap) which want to know whether the extent is
1553*4882a593Smuzhiyun * shared but do not need a ref count.
1554*4882a593Smuzhiyun *
1555*4882a593Smuzhiyun * This attempts to attach to the running transaction in order to account for
1556*4882a593Smuzhiyun * delayed refs, but continues on even when no running transaction exists.
1557*4882a593Smuzhiyun *
1558*4882a593Smuzhiyun * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1559*4882a593Smuzhiyun */
1560*4882a593Smuzhiyun int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
1561*4882a593Smuzhiyun struct ulist *roots, struct ulist *tmp)
1562*4882a593Smuzhiyun {
1563*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = root->fs_info;
1564*4882a593Smuzhiyun struct btrfs_trans_handle *trans;
1565*4882a593Smuzhiyun struct ulist_iterator uiter;
1566*4882a593Smuzhiyun struct ulist_node *node;
1567*4882a593Smuzhiyun struct seq_list elem = SEQ_LIST_INIT(elem);
1568*4882a593Smuzhiyun int ret = 0;
1569*4882a593Smuzhiyun struct share_check shared = {
1570*4882a593Smuzhiyun .root_objectid = root->root_key.objectid,
1571*4882a593Smuzhiyun .inum = inum,
1572*4882a593Smuzhiyun .share_count = 0,
1573*4882a593Smuzhiyun .have_delayed_delete_refs = false,
1574*4882a593Smuzhiyun };
1575*4882a593Smuzhiyun
1576*4882a593Smuzhiyun ulist_init(roots);
1577*4882a593Smuzhiyun ulist_init(tmp);
1578*4882a593Smuzhiyun
1579*4882a593Smuzhiyun trans = btrfs_join_transaction_nostart(root);
1580*4882a593Smuzhiyun if (IS_ERR(trans)) {
1581*4882a593Smuzhiyun if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1582*4882a593Smuzhiyun ret = PTR_ERR(trans);
1583*4882a593Smuzhiyun goto out;
1584*4882a593Smuzhiyun }
1585*4882a593Smuzhiyun trans = NULL;
1586*4882a593Smuzhiyun down_read(&fs_info->commit_root_sem);
1587*4882a593Smuzhiyun } else {
1588*4882a593Smuzhiyun btrfs_get_tree_mod_seq(fs_info, &elem);
1589*4882a593Smuzhiyun }
1590*4882a593Smuzhiyun
1591*4882a593Smuzhiyun ULIST_ITER_INIT(&uiter);
1592*4882a593Smuzhiyun while (1) {
1593*4882a593Smuzhiyun ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
1594*4882a593Smuzhiyun roots, NULL, &shared, false);
1595*4882a593Smuzhiyun if (ret == BACKREF_FOUND_SHARED) {
1596*4882a593Smuzhiyun /* this is the only condition under which we return 1 */
1597*4882a593Smuzhiyun ret = 1;
1598*4882a593Smuzhiyun break;
1599*4882a593Smuzhiyun }
1600*4882a593Smuzhiyun if (ret < 0 && ret != -ENOENT)
1601*4882a593Smuzhiyun break;
1602*4882a593Smuzhiyun ret = 0;
1603*4882a593Smuzhiyun node = ulist_next(tmp, &uiter);
1604*4882a593Smuzhiyun if (!node)
1605*4882a593Smuzhiyun break;
1606*4882a593Smuzhiyun bytenr = node->val;
1607*4882a593Smuzhiyun shared.share_count = 0;
1608*4882a593Smuzhiyun shared.have_delayed_delete_refs = false;
1609*4882a593Smuzhiyun cond_resched();
1610*4882a593Smuzhiyun }
1611*4882a593Smuzhiyun
1612*4882a593Smuzhiyun if (trans) {
1613*4882a593Smuzhiyun btrfs_put_tree_mod_seq(fs_info, &elem);
1614*4882a593Smuzhiyun btrfs_end_transaction(trans);
1615*4882a593Smuzhiyun } else {
1616*4882a593Smuzhiyun up_read(&fs_info->commit_root_sem);
1617*4882a593Smuzhiyun }
1618*4882a593Smuzhiyun out:
1619*4882a593Smuzhiyun ulist_release(roots);
1620*4882a593Smuzhiyun ulist_release(tmp);
1621*4882a593Smuzhiyun return ret;
1622*4882a593Smuzhiyun }
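
/*
 * A minimal, hypothetical sketch of how a fiemap-style caller might use
 * btrfs_check_shared(). The two ulists are only scratch space; the function
 * (re)initializes and releases them itself, so they can be reused across
 * calls.
 *
 *	struct ulist *roots, *tmp;
 *	int ret;
 *
 *	roots = ulist_alloc(GFP_KERNEL);
 *	tmp = ulist_alloc(GFP_KERNEL);
 *	if (!roots || !tmp) {
 *		ulist_free(roots);
 *		ulist_free(tmp);
 *		return -ENOMEM;
 *	}
 *
 *	ret = btrfs_check_shared(root, btrfs_ino(inode), disk_bytenr,
 *				 roots, tmp);
 *	if (ret == 1)
 *		;	\/\* extent is shared \*\/
 *	else if (ret < 0)
 *		;	\/\* error \*\/
 *
 *	ulist_free(roots);
 *	ulist_free(tmp);
 */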
1623*4882a593Smuzhiyun
1624*4882a593Smuzhiyun int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1625*4882a593Smuzhiyun u64 start_off, struct btrfs_path *path,
1626*4882a593Smuzhiyun struct btrfs_inode_extref **ret_extref,
1627*4882a593Smuzhiyun u64 *found_off)
1628*4882a593Smuzhiyun {
1629*4882a593Smuzhiyun int ret, slot;
1630*4882a593Smuzhiyun struct btrfs_key key;
1631*4882a593Smuzhiyun struct btrfs_key found_key;
1632*4882a593Smuzhiyun struct btrfs_inode_extref *extref;
1633*4882a593Smuzhiyun const struct extent_buffer *leaf;
1634*4882a593Smuzhiyun unsigned long ptr;
1635*4882a593Smuzhiyun
1636*4882a593Smuzhiyun key.objectid = inode_objectid;
1637*4882a593Smuzhiyun key.type = BTRFS_INODE_EXTREF_KEY;
1638*4882a593Smuzhiyun key.offset = start_off;
1639*4882a593Smuzhiyun
1640*4882a593Smuzhiyun ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1641*4882a593Smuzhiyun if (ret < 0)
1642*4882a593Smuzhiyun return ret;
1643*4882a593Smuzhiyun
1644*4882a593Smuzhiyun while (1) {
1645*4882a593Smuzhiyun leaf = path->nodes[0];
1646*4882a593Smuzhiyun slot = path->slots[0];
1647*4882a593Smuzhiyun if (slot >= btrfs_header_nritems(leaf)) {
1648*4882a593Smuzhiyun /*
1649*4882a593Smuzhiyun * If the item at offset is not found,
1650*4882a593Smuzhiyun * btrfs_search_slot will point us to the slot
1651*4882a593Smuzhiyun * where it should be inserted. In our case
1652*4882a593Smuzhiyun * that will be the slot directly before the
1653*4882a593Smuzhiyun * next BTRFS_INODE_EXTREF_KEY item. In the case
1654*4882a593Smuzhiyun * that we're pointing to the last slot in a
1655*4882a593Smuzhiyun * leaf, we must move one leaf over.
1656*4882a593Smuzhiyun */
1657*4882a593Smuzhiyun ret = btrfs_next_leaf(root, path);
1658*4882a593Smuzhiyun if (ret) {
1659*4882a593Smuzhiyun if (ret >= 1)
1660*4882a593Smuzhiyun ret = -ENOENT;
1661*4882a593Smuzhiyun break;
1662*4882a593Smuzhiyun }
1663*4882a593Smuzhiyun continue;
1664*4882a593Smuzhiyun }
1665*4882a593Smuzhiyun
1666*4882a593Smuzhiyun btrfs_item_key_to_cpu(leaf, &found_key, slot);
1667*4882a593Smuzhiyun
1668*4882a593Smuzhiyun /*
1669*4882a593Smuzhiyun * Check that we're still looking at an extended ref key for
1670*4882a593Smuzhiyun * this particular objectid. If we have different
1671*4882a593Smuzhiyun * objectid or type then there are no more to be found
1672*4882a593Smuzhiyun * in the tree and we can exit.
1673*4882a593Smuzhiyun */
1674*4882a593Smuzhiyun ret = -ENOENT;
1675*4882a593Smuzhiyun if (found_key.objectid != inode_objectid)
1676*4882a593Smuzhiyun break;
1677*4882a593Smuzhiyun if (found_key.type != BTRFS_INODE_EXTREF_KEY)
1678*4882a593Smuzhiyun break;
1679*4882a593Smuzhiyun
1680*4882a593Smuzhiyun ret = 0;
1681*4882a593Smuzhiyun ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1682*4882a593Smuzhiyun extref = (struct btrfs_inode_extref *)ptr;
1683*4882a593Smuzhiyun *ret_extref = extref;
1684*4882a593Smuzhiyun if (found_off)
1685*4882a593Smuzhiyun *found_off = found_key.offset;
1686*4882a593Smuzhiyun break;
1687*4882a593Smuzhiyun }
1688*4882a593Smuzhiyun
1689*4882a593Smuzhiyun return ret;
1690*4882a593Smuzhiyun }
1691*4882a593Smuzhiyun
1692*4882a593Smuzhiyun /*
1693*4882a593Smuzhiyun * This iterates to turn a name (from an iref/extref) into a full filesystem
1694*4882a593Smuzhiyun * path. Elements of the path are separated by '/' and the path is guaranteed
1695*4882a593Smuzhiyun * to be 0-terminated. The path is only given within the current file system,
1696*4882a593Smuzhiyun * so it never starts with a '/'. The caller is responsible for providing
1697*4882a593Smuzhiyun * "size" bytes in "dest". The dest buffer is filled backwards and the start
1698*4882a593Smuzhiyun * of the resulting string is returned; normally this pointer lies within
1699*4882a593Smuzhiyun * dest.
1700*4882a593Smuzhiyun * In case the path buffer would overflow, the pointer is decremented further
1701*4882a593Smuzhiyun * as if output had been written to the buffer, though no more output is
1702*4882a593Smuzhiyun * actually generated. That way, the caller can determine how much space
1703*4882a593Smuzhiyun * would be required for the path to fit into the buffer. In that case, the
1704*4882a593Smuzhiyun * returned value is smaller than dest and callers must check for this!
1705*4882a593Smuzhiyun */
1706*4882a593Smuzhiyun char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1707*4882a593Smuzhiyun u32 name_len, unsigned long name_off,
1708*4882a593Smuzhiyun struct extent_buffer *eb_in, u64 parent,
1709*4882a593Smuzhiyun char *dest, u32 size)
1710*4882a593Smuzhiyun {
1711*4882a593Smuzhiyun int slot;
1712*4882a593Smuzhiyun u64 next_inum;
1713*4882a593Smuzhiyun int ret;
1714*4882a593Smuzhiyun s64 bytes_left = ((s64)size) - 1;
1715*4882a593Smuzhiyun struct extent_buffer *eb = eb_in;
1716*4882a593Smuzhiyun struct btrfs_key found_key;
1717*4882a593Smuzhiyun int leave_spinning = path->leave_spinning;
1718*4882a593Smuzhiyun struct btrfs_inode_ref *iref;
1719*4882a593Smuzhiyun
1720*4882a593Smuzhiyun if (bytes_left >= 0)
1721*4882a593Smuzhiyun dest[bytes_left] = '\0';
1722*4882a593Smuzhiyun
1723*4882a593Smuzhiyun path->leave_spinning = 1;
1724*4882a593Smuzhiyun while (1) {
1725*4882a593Smuzhiyun bytes_left -= name_len;
1726*4882a593Smuzhiyun if (bytes_left >= 0)
1727*4882a593Smuzhiyun read_extent_buffer(eb, dest + bytes_left,
1728*4882a593Smuzhiyun name_off, name_len);
1729*4882a593Smuzhiyun if (eb != eb_in) {
1730*4882a593Smuzhiyun if (!path->skip_locking)
1731*4882a593Smuzhiyun btrfs_tree_read_unlock_blocking(eb);
1732*4882a593Smuzhiyun free_extent_buffer(eb);
1733*4882a593Smuzhiyun }
1734*4882a593Smuzhiyun ret = btrfs_find_item(fs_root, path, parent, 0,
1735*4882a593Smuzhiyun BTRFS_INODE_REF_KEY, &found_key);
1736*4882a593Smuzhiyun if (ret > 0)
1737*4882a593Smuzhiyun ret = -ENOENT;
1738*4882a593Smuzhiyun if (ret)
1739*4882a593Smuzhiyun break;
1740*4882a593Smuzhiyun
1741*4882a593Smuzhiyun next_inum = found_key.offset;
1742*4882a593Smuzhiyun
1743*4882a593Smuzhiyun /* regular exit ahead */
1744*4882a593Smuzhiyun if (parent == next_inum)
1745*4882a593Smuzhiyun break;
1746*4882a593Smuzhiyun
1747*4882a593Smuzhiyun slot = path->slots[0];
1748*4882a593Smuzhiyun eb = path->nodes[0];
1749*4882a593Smuzhiyun /* make sure we can use eb after releasing the path */
1750*4882a593Smuzhiyun if (eb != eb_in) {
1751*4882a593Smuzhiyun if (!path->skip_locking)
1752*4882a593Smuzhiyun btrfs_set_lock_blocking_read(eb);
1753*4882a593Smuzhiyun path->nodes[0] = NULL;
1754*4882a593Smuzhiyun path->locks[0] = 0;
1755*4882a593Smuzhiyun }
1756*4882a593Smuzhiyun btrfs_release_path(path);
1757*4882a593Smuzhiyun iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1758*4882a593Smuzhiyun
1759*4882a593Smuzhiyun name_len = btrfs_inode_ref_name_len(eb, iref);
1760*4882a593Smuzhiyun name_off = (unsigned long)(iref + 1);
1761*4882a593Smuzhiyun
1762*4882a593Smuzhiyun parent = next_inum;
1763*4882a593Smuzhiyun --bytes_left;
1764*4882a593Smuzhiyun if (bytes_left >= 0)
1765*4882a593Smuzhiyun dest[bytes_left] = '/';
1766*4882a593Smuzhiyun }
1767*4882a593Smuzhiyun
1768*4882a593Smuzhiyun btrfs_release_path(path);
1769*4882a593Smuzhiyun path->leave_spinning = leave_spinning;
1770*4882a593Smuzhiyun
1771*4882a593Smuzhiyun if (ret)
1772*4882a593Smuzhiyun return ERR_PTR(ret);
1773*4882a593Smuzhiyun
1774*4882a593Smuzhiyun return dest + bytes_left;
1775*4882a593Smuzhiyun }
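
/*
 * A minimal, hypothetical caller sketch for btrfs_ref_to_path(), illustrating
 * the backwards-filled buffer contract described above. The buffer size is
 * arbitrary here.
 *
 *	char buf[4096];
 *	char *name;
 *
 *	name = btrfs_ref_to_path(fs_root, path, name_len, name_off,
 *				 eb, parent, buf, sizeof(buf));
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	if (name < buf)
 *		return -ENAMETOOLONG;	\/\* too small, (buf - name) more bytes needed \*\/
 *	\/\* name now points to a 0-terminated path inside buf \*\/
 */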
1776*4882a593Smuzhiyun
1777*4882a593Smuzhiyun /*
1778*4882a593Smuzhiyun * this makes the path point to (logical EXTENT_ITEM *)
1779*4882a593Smuzhiyun * stores BTRFS_EXTENT_FLAG_DATA for data or BTRFS_EXTENT_FLAG_TREE_BLOCK for
1780*4882a593Smuzhiyun * tree blocks in *flags_ret; returns 0 on success and <0 on error.
1781*4882a593Smuzhiyun */
1782*4882a593Smuzhiyun int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1783*4882a593Smuzhiyun struct btrfs_path *path, struct btrfs_key *found_key,
1784*4882a593Smuzhiyun u64 *flags_ret)
1785*4882a593Smuzhiyun {
1786*4882a593Smuzhiyun int ret;
1787*4882a593Smuzhiyun u64 flags;
1788*4882a593Smuzhiyun u64 size = 0;
1789*4882a593Smuzhiyun u32 item_size;
1790*4882a593Smuzhiyun const struct extent_buffer *eb;
1791*4882a593Smuzhiyun struct btrfs_extent_item *ei;
1792*4882a593Smuzhiyun struct btrfs_key key;
1793*4882a593Smuzhiyun
1794*4882a593Smuzhiyun if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1795*4882a593Smuzhiyun key.type = BTRFS_METADATA_ITEM_KEY;
1796*4882a593Smuzhiyun else
1797*4882a593Smuzhiyun key.type = BTRFS_EXTENT_ITEM_KEY;
1798*4882a593Smuzhiyun key.objectid = logical;
1799*4882a593Smuzhiyun key.offset = (u64)-1;
1800*4882a593Smuzhiyun
1801*4882a593Smuzhiyun ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1802*4882a593Smuzhiyun if (ret < 0)
1803*4882a593Smuzhiyun return ret;
1804*4882a593Smuzhiyun
1805*4882a593Smuzhiyun ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1806*4882a593Smuzhiyun if (ret) {
1807*4882a593Smuzhiyun if (ret > 0)
1808*4882a593Smuzhiyun ret = -ENOENT;
1809*4882a593Smuzhiyun return ret;
1810*4882a593Smuzhiyun }
1811*4882a593Smuzhiyun btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1812*4882a593Smuzhiyun if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1813*4882a593Smuzhiyun size = fs_info->nodesize;
1814*4882a593Smuzhiyun else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1815*4882a593Smuzhiyun size = found_key->offset;
1816*4882a593Smuzhiyun
1817*4882a593Smuzhiyun if (found_key->objectid > logical ||
1818*4882a593Smuzhiyun found_key->objectid + size <= logical) {
1819*4882a593Smuzhiyun btrfs_debug(fs_info,
1820*4882a593Smuzhiyun "logical %llu is not within any extent", logical);
1821*4882a593Smuzhiyun return -ENOENT;
1822*4882a593Smuzhiyun }
1823*4882a593Smuzhiyun
1824*4882a593Smuzhiyun eb = path->nodes[0];
1825*4882a593Smuzhiyun item_size = btrfs_item_size_nr(eb, path->slots[0]);
1826*4882a593Smuzhiyun BUG_ON(item_size < sizeof(*ei));
1827*4882a593Smuzhiyun
1828*4882a593Smuzhiyun ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1829*4882a593Smuzhiyun flags = btrfs_extent_flags(eb, ei);
1830*4882a593Smuzhiyun
1831*4882a593Smuzhiyun btrfs_debug(fs_info,
1832*4882a593Smuzhiyun "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
1833*4882a593Smuzhiyun logical, logical - found_key->objectid, found_key->objectid,
1834*4882a593Smuzhiyun found_key->offset, flags, item_size);
1835*4882a593Smuzhiyun
1836*4882a593Smuzhiyun WARN_ON(!flags_ret);
1837*4882a593Smuzhiyun if (flags_ret) {
1838*4882a593Smuzhiyun if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1839*4882a593Smuzhiyun *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
1840*4882a593Smuzhiyun else if (flags & BTRFS_EXTENT_FLAG_DATA)
1841*4882a593Smuzhiyun *flags_ret = BTRFS_EXTENT_FLAG_DATA;
1842*4882a593Smuzhiyun else
1843*4882a593Smuzhiyun BUG();
1844*4882a593Smuzhiyun return 0;
1845*4882a593Smuzhiyun }
1846*4882a593Smuzhiyun
1847*4882a593Smuzhiyun return -EIO;
1848*4882a593Smuzhiyun }
1849*4882a593Smuzhiyun
1850*4882a593Smuzhiyun /*
1851*4882a593Smuzhiyun * helper function to iterate extent inline refs. ptr must point to a 0 value
1852*4882a593Smuzhiyun * for the first call and may be modified. it is used to track state.
1853*4882a593Smuzhiyun * if more refs exist, 0 is returned and the next call to
1854*4882a593Smuzhiyun * get_extent_inline_ref must pass the modified ptr parameter to get the
1855*4882a593Smuzhiyun * next ref. after the last ref was processed, 1 is returned.
1856*4882a593Smuzhiyun * returns <0 on error
1857*4882a593Smuzhiyun */
1858*4882a593Smuzhiyun static int get_extent_inline_ref(unsigned long *ptr,
1859*4882a593Smuzhiyun const struct extent_buffer *eb,
1860*4882a593Smuzhiyun const struct btrfs_key *key,
1861*4882a593Smuzhiyun const struct btrfs_extent_item *ei,
1862*4882a593Smuzhiyun u32 item_size,
1863*4882a593Smuzhiyun struct btrfs_extent_inline_ref **out_eiref,
1864*4882a593Smuzhiyun int *out_type)
1865*4882a593Smuzhiyun {
1866*4882a593Smuzhiyun unsigned long end;
1867*4882a593Smuzhiyun u64 flags;
1868*4882a593Smuzhiyun struct btrfs_tree_block_info *info;
1869*4882a593Smuzhiyun
1870*4882a593Smuzhiyun if (!*ptr) {
1871*4882a593Smuzhiyun /* first call */
1872*4882a593Smuzhiyun flags = btrfs_extent_flags(eb, ei);
1873*4882a593Smuzhiyun if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1874*4882a593Smuzhiyun if (key->type == BTRFS_METADATA_ITEM_KEY) {
1875*4882a593Smuzhiyun /* a skinny metadata extent */
1876*4882a593Smuzhiyun *out_eiref =
1877*4882a593Smuzhiyun (struct btrfs_extent_inline_ref *)(ei + 1);
1878*4882a593Smuzhiyun } else {
1879*4882a593Smuzhiyun WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1880*4882a593Smuzhiyun info = (struct btrfs_tree_block_info *)(ei + 1);
1881*4882a593Smuzhiyun *out_eiref =
1882*4882a593Smuzhiyun (struct btrfs_extent_inline_ref *)(info + 1);
1883*4882a593Smuzhiyun }
1884*4882a593Smuzhiyun } else {
1885*4882a593Smuzhiyun *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1886*4882a593Smuzhiyun }
1887*4882a593Smuzhiyun *ptr = (unsigned long)*out_eiref;
1888*4882a593Smuzhiyun if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
1889*4882a593Smuzhiyun return -ENOENT;
1890*4882a593Smuzhiyun }
1891*4882a593Smuzhiyun
1892*4882a593Smuzhiyun end = (unsigned long)ei + item_size;
1893*4882a593Smuzhiyun *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
1894*4882a593Smuzhiyun *out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
1895*4882a593Smuzhiyun BTRFS_REF_TYPE_ANY);
1896*4882a593Smuzhiyun if (*out_type == BTRFS_REF_TYPE_INVALID)
1897*4882a593Smuzhiyun return -EUCLEAN;
1898*4882a593Smuzhiyun
1899*4882a593Smuzhiyun *ptr += btrfs_extent_inline_ref_size(*out_type);
1900*4882a593Smuzhiyun WARN_ON(*ptr > end);
1901*4882a593Smuzhiyun if (*ptr == end)
1902*4882a593Smuzhiyun return 1; /* last */
1903*4882a593Smuzhiyun
1904*4882a593Smuzhiyun return 0;
1905*4882a593Smuzhiyun }
1906*4882a593Smuzhiyun
1907*4882a593Smuzhiyun /*
1908*4882a593Smuzhiyun * reads the tree block backref for an extent. tree level and root are returned
1909*4882a593Smuzhiyun * through out_level and out_root. ptr must point to a 0 value for the first
1910*4882a593Smuzhiyun * call and may be modified (see get_extent_inline_ref comment).
1911*4882a593Smuzhiyun * returns 0 if data was provided, 1 if there was no more data to provide or
1912*4882a593Smuzhiyun * <0 on error.
1913*4882a593Smuzhiyun */
1914*4882a593Smuzhiyun int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1915*4882a593Smuzhiyun struct btrfs_key *key, struct btrfs_extent_item *ei,
1916*4882a593Smuzhiyun u32 item_size, u64 *out_root, u8 *out_level)
1917*4882a593Smuzhiyun {
1918*4882a593Smuzhiyun int ret;
1919*4882a593Smuzhiyun int type;
1920*4882a593Smuzhiyun struct btrfs_extent_inline_ref *eiref;
1921*4882a593Smuzhiyun
1922*4882a593Smuzhiyun if (*ptr == (unsigned long)-1)
1923*4882a593Smuzhiyun return 1;
1924*4882a593Smuzhiyun
1925*4882a593Smuzhiyun while (1) {
1926*4882a593Smuzhiyun ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
1927*4882a593Smuzhiyun &eiref, &type);
1928*4882a593Smuzhiyun if (ret < 0)
1929*4882a593Smuzhiyun return ret;
1930*4882a593Smuzhiyun
1931*4882a593Smuzhiyun if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1932*4882a593Smuzhiyun type == BTRFS_SHARED_BLOCK_REF_KEY)
1933*4882a593Smuzhiyun break;
1934*4882a593Smuzhiyun
1935*4882a593Smuzhiyun if (ret == 1)
1936*4882a593Smuzhiyun return 1;
1937*4882a593Smuzhiyun }
1938*4882a593Smuzhiyun
1939*4882a593Smuzhiyun /* we can treat both ref types equally here */
1940*4882a593Smuzhiyun *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
1941*4882a593Smuzhiyun
1942*4882a593Smuzhiyun if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1943*4882a593Smuzhiyun struct btrfs_tree_block_info *info;
1944*4882a593Smuzhiyun
1945*4882a593Smuzhiyun info = (struct btrfs_tree_block_info *)(ei + 1);
1946*4882a593Smuzhiyun *out_level = btrfs_tree_block_level(eb, info);
1947*4882a593Smuzhiyun } else {
1948*4882a593Smuzhiyun ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1949*4882a593Smuzhiyun *out_level = (u8)key->offset;
1950*4882a593Smuzhiyun }
1951*4882a593Smuzhiyun
1952*4882a593Smuzhiyun if (ret == 1)
1953*4882a593Smuzhiyun *ptr = (unsigned long)-1;
1954*4882a593Smuzhiyun
1955*4882a593Smuzhiyun return 0;
1956*4882a593Smuzhiyun }
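
/*
 * A minimal, hypothetical sketch of the iteration protocol described above:
 * start with *ptr == 0 and call tree_backref_for_extent() until it returns 1
 * (no more data) or a negative error. On every 0 return, out_root/out_level
 * hold one tree block backref.
 *
 *	unsigned long ptr = 0;
 *	u64 ref_root;
 *	u8 ref_level;
 *	int ret;
 *
 *	while (1) {
 *		ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
 *					      item_size, &ref_root, &ref_level);
 *		if (ret < 0)
 *			return ret;
 *		if (ret == 1)
 *			break;		\/\* all backrefs consumed \*\/
 *		\/\* use ref_root / ref_level here \*\/
 *	}
 */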
1957*4882a593Smuzhiyun
1958*4882a593Smuzhiyun static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
1959*4882a593Smuzhiyun struct extent_inode_elem *inode_list,
1960*4882a593Smuzhiyun u64 root, u64 extent_item_objectid,
1961*4882a593Smuzhiyun iterate_extent_inodes_t *iterate, void *ctx)
1962*4882a593Smuzhiyun {
1963*4882a593Smuzhiyun struct extent_inode_elem *eie;
1964*4882a593Smuzhiyun int ret = 0;
1965*4882a593Smuzhiyun
1966*4882a593Smuzhiyun for (eie = inode_list; eie; eie = eie->next) {
1967*4882a593Smuzhiyun btrfs_debug(fs_info,
1968*4882a593Smuzhiyun "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu",
1969*4882a593Smuzhiyun extent_item_objectid, eie->inum,
1970*4882a593Smuzhiyun eie->offset, root);
1971*4882a593Smuzhiyun ret = iterate(eie->inum, eie->offset, root, ctx);
1972*4882a593Smuzhiyun if (ret) {
1973*4882a593Smuzhiyun btrfs_debug(fs_info,
1974*4882a593Smuzhiyun "stopping iteration for %llu due to ret=%d",
1975*4882a593Smuzhiyun extent_item_objectid, ret);
1976*4882a593Smuzhiyun break;
1977*4882a593Smuzhiyun }
1978*4882a593Smuzhiyun }
1979*4882a593Smuzhiyun
1980*4882a593Smuzhiyun return ret;
1981*4882a593Smuzhiyun }
1982*4882a593Smuzhiyun
1983*4882a593Smuzhiyun /*
1984*4882a593Smuzhiyun * calls iterate() for every inode that references the extent identified by
1985*4882a593Smuzhiyun * the given parameters.
1986*4882a593Smuzhiyun * when the iterator function returns a non-zero value, iteration stops.
1987*4882a593Smuzhiyun */
1988*4882a593Smuzhiyun int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1989*4882a593Smuzhiyun u64 extent_item_objectid, u64 extent_item_pos,
1990*4882a593Smuzhiyun int search_commit_root,
1991*4882a593Smuzhiyun iterate_extent_inodes_t *iterate, void *ctx,
1992*4882a593Smuzhiyun bool ignore_offset)
1993*4882a593Smuzhiyun {
1994*4882a593Smuzhiyun int ret;
1995*4882a593Smuzhiyun struct btrfs_trans_handle *trans = NULL;
1996*4882a593Smuzhiyun struct ulist *refs = NULL;
1997*4882a593Smuzhiyun struct ulist *roots = NULL;
1998*4882a593Smuzhiyun struct ulist_node *ref_node = NULL;
1999*4882a593Smuzhiyun struct ulist_node *root_node = NULL;
2000*4882a593Smuzhiyun struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
2001*4882a593Smuzhiyun struct ulist_iterator ref_uiter;
2002*4882a593Smuzhiyun struct ulist_iterator root_uiter;
2003*4882a593Smuzhiyun
2004*4882a593Smuzhiyun btrfs_debug(fs_info, "resolving all inodes for extent %llu",
2005*4882a593Smuzhiyun extent_item_objectid);
2006*4882a593Smuzhiyun
2007*4882a593Smuzhiyun if (!search_commit_root) {
2008*4882a593Smuzhiyun trans = btrfs_attach_transaction(fs_info->extent_root);
2009*4882a593Smuzhiyun if (IS_ERR(trans)) {
2010*4882a593Smuzhiyun if (PTR_ERR(trans) != -ENOENT &&
2011*4882a593Smuzhiyun PTR_ERR(trans) != -EROFS)
2012*4882a593Smuzhiyun return PTR_ERR(trans);
2013*4882a593Smuzhiyun trans = NULL;
2014*4882a593Smuzhiyun }
2015*4882a593Smuzhiyun }
2016*4882a593Smuzhiyun
2017*4882a593Smuzhiyun if (trans)
2018*4882a593Smuzhiyun btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2019*4882a593Smuzhiyun else
2020*4882a593Smuzhiyun down_read(&fs_info->commit_root_sem);
2021*4882a593Smuzhiyun
2022*4882a593Smuzhiyun ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
2023*4882a593Smuzhiyun tree_mod_seq_elem.seq, &refs,
2024*4882a593Smuzhiyun &extent_item_pos, ignore_offset);
2025*4882a593Smuzhiyun if (ret)
2026*4882a593Smuzhiyun goto out;
2027*4882a593Smuzhiyun
2028*4882a593Smuzhiyun ULIST_ITER_INIT(&ref_uiter);
2029*4882a593Smuzhiyun while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
2030*4882a593Smuzhiyun ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
2031*4882a593Smuzhiyun tree_mod_seq_elem.seq, &roots,
2032*4882a593Smuzhiyun ignore_offset);
2033*4882a593Smuzhiyun if (ret)
2034*4882a593Smuzhiyun break;
2035*4882a593Smuzhiyun ULIST_ITER_INIT(&root_uiter);
2036*4882a593Smuzhiyun while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
2037*4882a593Smuzhiyun btrfs_debug(fs_info,
2038*4882a593Smuzhiyun "root %llu references leaf %llu, data list %#llx",
2039*4882a593Smuzhiyun root_node->val, ref_node->val,
2040*4882a593Smuzhiyun ref_node->aux);
2041*4882a593Smuzhiyun ret = iterate_leaf_refs(fs_info,
2042*4882a593Smuzhiyun (struct extent_inode_elem *)
2043*4882a593Smuzhiyun (uintptr_t)ref_node->aux,
2044*4882a593Smuzhiyun root_node->val,
2045*4882a593Smuzhiyun extent_item_objectid,
2046*4882a593Smuzhiyun iterate, ctx);
2047*4882a593Smuzhiyun }
2048*4882a593Smuzhiyun ulist_free(roots);
2049*4882a593Smuzhiyun }
2050*4882a593Smuzhiyun
2051*4882a593Smuzhiyun free_leaf_list(refs);
2052*4882a593Smuzhiyun out:
2053*4882a593Smuzhiyun if (trans) {
2054*4882a593Smuzhiyun btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2055*4882a593Smuzhiyun btrfs_end_transaction(trans);
2056*4882a593Smuzhiyun } else {
2057*4882a593Smuzhiyun up_read(&fs_info->commit_root_sem);
2058*4882a593Smuzhiyun }
2059*4882a593Smuzhiyun
2060*4882a593Smuzhiyun return ret;
2061*4882a593Smuzhiyun }
2062*4882a593Smuzhiyun
2063*4882a593Smuzhiyun static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
2064*4882a593Smuzhiyun {
2065*4882a593Smuzhiyun struct btrfs_data_container *inodes = ctx;
2066*4882a593Smuzhiyun const size_t c = 3 * sizeof(u64);
2067*4882a593Smuzhiyun
2068*4882a593Smuzhiyun if (inodes->bytes_left >= c) {
2069*4882a593Smuzhiyun inodes->bytes_left -= c;
2070*4882a593Smuzhiyun inodes->val[inodes->elem_cnt] = inum;
2071*4882a593Smuzhiyun inodes->val[inodes->elem_cnt + 1] = offset;
2072*4882a593Smuzhiyun inodes->val[inodes->elem_cnt + 2] = root;
2073*4882a593Smuzhiyun inodes->elem_cnt += 3;
2074*4882a593Smuzhiyun } else {
2075*4882a593Smuzhiyun inodes->bytes_missing += c - inodes->bytes_left;
2076*4882a593Smuzhiyun inodes->bytes_left = 0;
2077*4882a593Smuzhiyun inodes->elem_missed += 3;
2078*4882a593Smuzhiyun }
2079*4882a593Smuzhiyun
2080*4882a593Smuzhiyun return 0;
2081*4882a593Smuzhiyun }
2082*4882a593Smuzhiyun
2083*4882a593Smuzhiyun int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2084*4882a593Smuzhiyun struct btrfs_path *path,
2085*4882a593Smuzhiyun void *ctx, bool ignore_offset)
2086*4882a593Smuzhiyun {
2087*4882a593Smuzhiyun int ret;
2088*4882a593Smuzhiyun u64 extent_item_pos;
2089*4882a593Smuzhiyun u64 flags = 0;
2090*4882a593Smuzhiyun struct btrfs_key found_key;
2091*4882a593Smuzhiyun int search_commit_root = path->search_commit_root;
2092*4882a593Smuzhiyun
2093*4882a593Smuzhiyun ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2094*4882a593Smuzhiyun btrfs_release_path(path);
2095*4882a593Smuzhiyun if (ret < 0)
2096*4882a593Smuzhiyun return ret;
2097*4882a593Smuzhiyun if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2098*4882a593Smuzhiyun return -EINVAL;
2099*4882a593Smuzhiyun
2100*4882a593Smuzhiyun extent_item_pos = logical - found_key.objectid;
2101*4882a593Smuzhiyun ret = iterate_extent_inodes(fs_info, found_key.objectid,
2102*4882a593Smuzhiyun extent_item_pos, search_commit_root,
2103*4882a593Smuzhiyun build_ino_list, ctx, ignore_offset);
2104*4882a593Smuzhiyun
2105*4882a593Smuzhiyun return ret;
2106*4882a593Smuzhiyun }
2107*4882a593Smuzhiyun
2108*4882a593Smuzhiyun typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
2109*4882a593Smuzhiyun struct extent_buffer *eb, void *ctx);
2110*4882a593Smuzhiyun
2111*4882a593Smuzhiyun static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
2112*4882a593Smuzhiyun struct btrfs_path *path,
2113*4882a593Smuzhiyun iterate_irefs_t *iterate, void *ctx)
2114*4882a593Smuzhiyun {
2115*4882a593Smuzhiyun int ret = 0;
2116*4882a593Smuzhiyun int slot;
2117*4882a593Smuzhiyun u32 cur;
2118*4882a593Smuzhiyun u32 len;
2119*4882a593Smuzhiyun u32 name_len;
2120*4882a593Smuzhiyun u64 parent = 0;
2121*4882a593Smuzhiyun int found = 0;
2122*4882a593Smuzhiyun struct extent_buffer *eb;
2123*4882a593Smuzhiyun struct btrfs_item *item;
2124*4882a593Smuzhiyun struct btrfs_inode_ref *iref;
2125*4882a593Smuzhiyun struct btrfs_key found_key;
2126*4882a593Smuzhiyun
2127*4882a593Smuzhiyun while (!ret) {
2128*4882a593Smuzhiyun ret = btrfs_find_item(fs_root, path, inum,
2129*4882a593Smuzhiyun parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2130*4882a593Smuzhiyun &found_key);
2131*4882a593Smuzhiyun
2132*4882a593Smuzhiyun if (ret < 0)
2133*4882a593Smuzhiyun break;
2134*4882a593Smuzhiyun if (ret) {
2135*4882a593Smuzhiyun ret = found ? 0 : -ENOENT;
2136*4882a593Smuzhiyun break;
2137*4882a593Smuzhiyun }
2138*4882a593Smuzhiyun ++found;
2139*4882a593Smuzhiyun
2140*4882a593Smuzhiyun parent = found_key.offset;
2141*4882a593Smuzhiyun slot = path->slots[0];
2142*4882a593Smuzhiyun eb = btrfs_clone_extent_buffer(path->nodes[0]);
2143*4882a593Smuzhiyun if (!eb) {
2144*4882a593Smuzhiyun ret = -ENOMEM;
2145*4882a593Smuzhiyun break;
2146*4882a593Smuzhiyun }
2147*4882a593Smuzhiyun btrfs_release_path(path);
2148*4882a593Smuzhiyun
2149*4882a593Smuzhiyun item = btrfs_item_nr(slot);
2150*4882a593Smuzhiyun iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2151*4882a593Smuzhiyun
2152*4882a593Smuzhiyun for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
2153*4882a593Smuzhiyun name_len = btrfs_inode_ref_name_len(eb, iref);
2154*4882a593Smuzhiyun /* path must be released before calling iterate()! */
2155*4882a593Smuzhiyun btrfs_debug(fs_root->fs_info,
2156*4882a593Smuzhiyun "following ref at offset %u for inode %llu in tree %llu",
2157*4882a593Smuzhiyun cur, found_key.objectid,
2158*4882a593Smuzhiyun fs_root->root_key.objectid);
2159*4882a593Smuzhiyun ret = iterate(parent, name_len,
2160*4882a593Smuzhiyun (unsigned long)(iref + 1), eb, ctx);
2161*4882a593Smuzhiyun if (ret)
2162*4882a593Smuzhiyun break;
2163*4882a593Smuzhiyun len = sizeof(*iref) + name_len;
2164*4882a593Smuzhiyun iref = (struct btrfs_inode_ref *)((char *)iref + len);
2165*4882a593Smuzhiyun }
2166*4882a593Smuzhiyun free_extent_buffer(eb);
2167*4882a593Smuzhiyun }
2168*4882a593Smuzhiyun
2169*4882a593Smuzhiyun btrfs_release_path(path);
2170*4882a593Smuzhiyun
2171*4882a593Smuzhiyun return ret;
2172*4882a593Smuzhiyun }
2173*4882a593Smuzhiyun
2174*4882a593Smuzhiyun static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
2175*4882a593Smuzhiyun struct btrfs_path *path,
2176*4882a593Smuzhiyun iterate_irefs_t *iterate, void *ctx)
2177*4882a593Smuzhiyun {
2178*4882a593Smuzhiyun int ret;
2179*4882a593Smuzhiyun int slot;
2180*4882a593Smuzhiyun u64 offset = 0;
2181*4882a593Smuzhiyun u64 parent;
2182*4882a593Smuzhiyun int found = 0;
2183*4882a593Smuzhiyun struct extent_buffer *eb;
2184*4882a593Smuzhiyun struct btrfs_inode_extref *extref;
2185*4882a593Smuzhiyun u32 item_size;
2186*4882a593Smuzhiyun u32 cur_offset;
2187*4882a593Smuzhiyun unsigned long ptr;
2188*4882a593Smuzhiyun
2189*4882a593Smuzhiyun while (1) {
2190*4882a593Smuzhiyun ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2191*4882a593Smuzhiyun &offset);
2192*4882a593Smuzhiyun if (ret < 0)
2193*4882a593Smuzhiyun break;
2194*4882a593Smuzhiyun if (ret) {
2195*4882a593Smuzhiyun ret = found ? 0 : -ENOENT;
2196*4882a593Smuzhiyun break;
2197*4882a593Smuzhiyun }
2198*4882a593Smuzhiyun ++found;
2199*4882a593Smuzhiyun
2200*4882a593Smuzhiyun slot = path->slots[0];
2201*4882a593Smuzhiyun eb = btrfs_clone_extent_buffer(path->nodes[0]);
2202*4882a593Smuzhiyun if (!eb) {
2203*4882a593Smuzhiyun ret = -ENOMEM;
2204*4882a593Smuzhiyun break;
2205*4882a593Smuzhiyun }
2206*4882a593Smuzhiyun btrfs_release_path(path);
2207*4882a593Smuzhiyun
2208*4882a593Smuzhiyun item_size = btrfs_item_size_nr(eb, slot);
2209*4882a593Smuzhiyun ptr = btrfs_item_ptr_offset(eb, slot);
2210*4882a593Smuzhiyun cur_offset = 0;
2211*4882a593Smuzhiyun
2212*4882a593Smuzhiyun while (cur_offset < item_size) {
2213*4882a593Smuzhiyun u32 name_len;
2214*4882a593Smuzhiyun
2215*4882a593Smuzhiyun extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2216*4882a593Smuzhiyun parent = btrfs_inode_extref_parent(eb, extref);
2217*4882a593Smuzhiyun name_len = btrfs_inode_extref_name_len(eb, extref);
2218*4882a593Smuzhiyun ret = iterate(parent, name_len,
2219*4882a593Smuzhiyun (unsigned long)&extref->name, eb, ctx);
2220*4882a593Smuzhiyun if (ret)
2221*4882a593Smuzhiyun break;
2222*4882a593Smuzhiyun
2223*4882a593Smuzhiyun cur_offset += btrfs_inode_extref_name_len(eb, extref);
2224*4882a593Smuzhiyun cur_offset += sizeof(*extref);
2225*4882a593Smuzhiyun }
2226*4882a593Smuzhiyun free_extent_buffer(eb);
2227*4882a593Smuzhiyun
2228*4882a593Smuzhiyun offset++;
2229*4882a593Smuzhiyun }
2230*4882a593Smuzhiyun
2231*4882a593Smuzhiyun btrfs_release_path(path);
2232*4882a593Smuzhiyun
2233*4882a593Smuzhiyun return ret;
2234*4882a593Smuzhiyun }
2235*4882a593Smuzhiyun
2236*4882a593Smuzhiyun static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
2237*4882a593Smuzhiyun struct btrfs_path *path, iterate_irefs_t *iterate,
2238*4882a593Smuzhiyun void *ctx)
2239*4882a593Smuzhiyun {
2240*4882a593Smuzhiyun int ret;
2241*4882a593Smuzhiyun int found_refs = 0;
2242*4882a593Smuzhiyun
2243*4882a593Smuzhiyun ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
2244*4882a593Smuzhiyun if (!ret)
2245*4882a593Smuzhiyun ++found_refs;
2246*4882a593Smuzhiyun else if (ret != -ENOENT)
2247*4882a593Smuzhiyun return ret;
2248*4882a593Smuzhiyun
2249*4882a593Smuzhiyun ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
2250*4882a593Smuzhiyun if (ret == -ENOENT && found_refs)
2251*4882a593Smuzhiyun return 0;
2252*4882a593Smuzhiyun
2253*4882a593Smuzhiyun return ret;
2254*4882a593Smuzhiyun }
2255*4882a593Smuzhiyun
2256*4882a593Smuzhiyun /*
2257*4882a593Smuzhiyun * returns 0 if the path could be dumped (possibly truncated)
2258*4882a593Smuzhiyun * returns <0 in case of an error
2259*4882a593Smuzhiyun */
2260*4882a593Smuzhiyun static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2261*4882a593Smuzhiyun struct extent_buffer *eb, void *ctx)
2262*4882a593Smuzhiyun {
2263*4882a593Smuzhiyun struct inode_fs_paths *ipath = ctx;
2264*4882a593Smuzhiyun char *fspath;
2265*4882a593Smuzhiyun char *fspath_min;
2266*4882a593Smuzhiyun int i = ipath->fspath->elem_cnt;
2267*4882a593Smuzhiyun const int s_ptr = sizeof(char *);
2268*4882a593Smuzhiyun u32 bytes_left;
2269*4882a593Smuzhiyun
2270*4882a593Smuzhiyun bytes_left = ipath->fspath->bytes_left > s_ptr ?
2271*4882a593Smuzhiyun ipath->fspath->bytes_left - s_ptr : 0;
2272*4882a593Smuzhiyun
2273*4882a593Smuzhiyun fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2274*4882a593Smuzhiyun fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2275*4882a593Smuzhiyun name_off, eb, inum, fspath_min, bytes_left);
2276*4882a593Smuzhiyun if (IS_ERR(fspath))
2277*4882a593Smuzhiyun return PTR_ERR(fspath);
2278*4882a593Smuzhiyun
2279*4882a593Smuzhiyun if (fspath > fspath_min) {
2280*4882a593Smuzhiyun ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2281*4882a593Smuzhiyun ++ipath->fspath->elem_cnt;
2282*4882a593Smuzhiyun ipath->fspath->bytes_left = fspath - fspath_min;
2283*4882a593Smuzhiyun } else {
2284*4882a593Smuzhiyun ++ipath->fspath->elem_missed;
2285*4882a593Smuzhiyun ipath->fspath->bytes_missing += fspath_min - fspath;
2286*4882a593Smuzhiyun ipath->fspath->bytes_left = 0;
2287*4882a593Smuzhiyun }
2288*4882a593Smuzhiyun
2289*4882a593Smuzhiyun return 0;
2290*4882a593Smuzhiyun }
2291*4882a593Smuzhiyun
2292*4882a593Smuzhiyun /*
2293*4882a593Smuzhiyun * This dumps all file system paths to the inode into the ipath struct, provided
2294*4882a593Smuzhiyun * it has been created large enough. Each path is zero-terminated and accessed
2295*4882a593Smuzhiyun * from ipath->fspath->val[i].
2296*4882a593Smuzhiyun * When it returns, there are ipath->fspath->elem_cnt paths available in
2297*4882a593Smuzhiyun * ipath->fspath->val[]. When the allocated space wasn't sufficient, the number
2298*4882a593Smuzhiyun * of missed paths is recorded in ipath->fspath->elem_missed, otherwise it's
2299*4882a593Smuzhiyun * zero. ipath->fspath->bytes_missing holds the number of bytes that would
2300*4882a593Smuzhiyun * have been needed to return all paths.
2301*4882a593Smuzhiyun */
2302*4882a593Smuzhiyun int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2303*4882a593Smuzhiyun {
2304*4882a593Smuzhiyun return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
2305*4882a593Smuzhiyun inode_to_path, ipath);
2306*4882a593Smuzhiyun }
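
/*
 * A minimal, hypothetical sketch of the ipath lifecycle around
 * paths_from_inode(): allocate with init_ipath(), resolve, read the
 * zero-terminated paths from ipath->fspath->val[], then free_ipath().
 * The 4096 byte container size is arbitrary.
 *
 *	struct inode_fs_paths *ipath;
 *	int ret, i;
 *
 *	ipath = init_ipath(4096, fs_root, path);
 *	if (IS_ERR(ipath))
 *		return PTR_ERR(ipath);
 *
 *	ret = paths_from_inode(inum, ipath);
 *	if (ret < 0)
 *		goto out;
 *
 *	for (i = 0; i < ipath->fspath->elem_cnt; i++)
 *		;	\/\* (char *)(unsigned long)ipath->fspath->val[i] is a path \*\/
 *
 * out:
 *	free_ipath(ipath);
 *	return ret;
 */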
2307*4882a593Smuzhiyun
2308*4882a593Smuzhiyun struct btrfs_data_container *init_data_container(u32 total_bytes)
2309*4882a593Smuzhiyun {
2310*4882a593Smuzhiyun struct btrfs_data_container *data;
2311*4882a593Smuzhiyun size_t alloc_bytes;
2312*4882a593Smuzhiyun
2313*4882a593Smuzhiyun alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2314*4882a593Smuzhiyun data = kvmalloc(alloc_bytes, GFP_KERNEL);
2315*4882a593Smuzhiyun if (!data)
2316*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
2317*4882a593Smuzhiyun
2318*4882a593Smuzhiyun if (total_bytes >= sizeof(*data)) {
2319*4882a593Smuzhiyun data->bytes_left = total_bytes - sizeof(*data);
2320*4882a593Smuzhiyun data->bytes_missing = 0;
2321*4882a593Smuzhiyun } else {
2322*4882a593Smuzhiyun data->bytes_missing = sizeof(*data) - total_bytes;
2323*4882a593Smuzhiyun data->bytes_left = 0;
2324*4882a593Smuzhiyun }
2325*4882a593Smuzhiyun
2326*4882a593Smuzhiyun data->elem_cnt = 0;
2327*4882a593Smuzhiyun data->elem_missed = 0;
2328*4882a593Smuzhiyun
2329*4882a593Smuzhiyun return data;
2330*4882a593Smuzhiyun }
2331*4882a593Smuzhiyun
2332*4882a593Smuzhiyun /*
2333*4882a593Smuzhiyun * Allocates space to return multiple file system paths for an inode.
2334*4882a593Smuzhiyun * total_bytes to allocate is passed; note that the space usable for actual
2335*4882a593Smuzhiyun * path information will be total_bytes - sizeof(struct btrfs_data_container).
2336*4882a593Smuzhiyun * The returned pointer must be freed with free_ipath() in the end.
2337*4882a593Smuzhiyun */
2338*4882a593Smuzhiyun struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2339*4882a593Smuzhiyun struct btrfs_path *path)
2340*4882a593Smuzhiyun {
2341*4882a593Smuzhiyun struct inode_fs_paths *ifp;
2342*4882a593Smuzhiyun struct btrfs_data_container *fspath;
2343*4882a593Smuzhiyun
2344*4882a593Smuzhiyun fspath = init_data_container(total_bytes);
2345*4882a593Smuzhiyun if (IS_ERR(fspath))
2346*4882a593Smuzhiyun return ERR_CAST(fspath);
2347*4882a593Smuzhiyun
2348*4882a593Smuzhiyun ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2349*4882a593Smuzhiyun if (!ifp) {
2350*4882a593Smuzhiyun kvfree(fspath);
2351*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
2352*4882a593Smuzhiyun }
2353*4882a593Smuzhiyun
2354*4882a593Smuzhiyun ifp->btrfs_path = path;
2355*4882a593Smuzhiyun ifp->fspath = fspath;
2356*4882a593Smuzhiyun ifp->fs_root = fs_root;
2357*4882a593Smuzhiyun
2358*4882a593Smuzhiyun return ifp;
2359*4882a593Smuzhiyun }
2360*4882a593Smuzhiyun
2361*4882a593Smuzhiyun void free_ipath(struct inode_fs_paths *ipath)
2362*4882a593Smuzhiyun {
2363*4882a593Smuzhiyun if (!ipath)
2364*4882a593Smuzhiyun return;
2365*4882a593Smuzhiyun kvfree(ipath->fspath);
2366*4882a593Smuzhiyun kfree(ipath);
2367*4882a593Smuzhiyun }
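
/*
 * Illustrative sketch of how the ipath API above is typically consumed
 * (not called from this file; error handling is trimmed, the 4096 byte
 * container size is an arbitrary choice and fs_root/inum are assumed to
 * be provided by the caller):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct inode_fs_paths *ipath;
 *	int i, ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ipath = init_ipath(4096, fs_root, path);
 *	if (IS_ERR(ipath)) {
 *		btrfs_free_path(path);
 *		return PTR_ERR(ipath);
 *	}
 *	ret = paths_from_inode(inum, ipath);
 *	for (i = 0; i < ipath->fspath->elem_cnt; i++)
 *		pr_info("path %d: %s\n", i,
 *			(char *)(unsigned long)ipath->fspath->val[i]);
 *	free_ipath(ipath);
 *	btrfs_free_path(path);
 */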
2368*4882a593Smuzhiyun
2369*4882a593Smuzhiyun struct btrfs_backref_iter *btrfs_backref_iter_alloc(
2370*4882a593Smuzhiyun struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
2371*4882a593Smuzhiyun {
2372*4882a593Smuzhiyun struct btrfs_backref_iter *ret;
2373*4882a593Smuzhiyun
2374*4882a593Smuzhiyun ret = kzalloc(sizeof(*ret), gfp_flag);
2375*4882a593Smuzhiyun if (!ret)
2376*4882a593Smuzhiyun return NULL;
2377*4882a593Smuzhiyun
2378*4882a593Smuzhiyun ret->path = btrfs_alloc_path();
2379*4882a593Smuzhiyun if (!ret->path) {
2380*4882a593Smuzhiyun kfree(ret);
2381*4882a593Smuzhiyun return NULL;
2382*4882a593Smuzhiyun }
2383*4882a593Smuzhiyun
2384*4882a593Smuzhiyun /* Current backref iterator only supports iteration in commit root */
2385*4882a593Smuzhiyun ret->path->search_commit_root = 1;
2386*4882a593Smuzhiyun ret->path->skip_locking = 1;
2387*4882a593Smuzhiyun ret->fs_info = fs_info;
2388*4882a593Smuzhiyun
2389*4882a593Smuzhiyun return ret;
2390*4882a593Smuzhiyun }
2391*4882a593Smuzhiyun
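/*
 * Position @iter at the first backref of tree block @bytenr.
 *
 * This searches the extent tree for the EXTENT_ITEM/METADATA_ITEM of
 * @bytenr and points the iterator at the first inline backref, or, if
 * there is none, at the first keyed SHARED_BLOCK_REF/TREE_BLOCK_REF item.
 *
 * Return 0 on success, -ENOENT if no backref exists for @bytenr,
 * -ENOTSUPP for data extents (only tree block backrefs are supported)
 * and -EUCLEAN if the extent tree is corrupted.
 */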
2392*4882a593Smuzhiyun int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
2393*4882a593Smuzhiyun {
2394*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = iter->fs_info;
2395*4882a593Smuzhiyun struct btrfs_path *path = iter->path;
2396*4882a593Smuzhiyun struct btrfs_extent_item *ei;
2397*4882a593Smuzhiyun struct btrfs_key key;
2398*4882a593Smuzhiyun int ret;
2399*4882a593Smuzhiyun
2400*4882a593Smuzhiyun key.objectid = bytenr;
2401*4882a593Smuzhiyun key.type = BTRFS_METADATA_ITEM_KEY;
2402*4882a593Smuzhiyun key.offset = (u64)-1;
2403*4882a593Smuzhiyun iter->bytenr = bytenr;
2404*4882a593Smuzhiyun
2405*4882a593Smuzhiyun ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
2406*4882a593Smuzhiyun if (ret < 0)
2407*4882a593Smuzhiyun return ret;
2408*4882a593Smuzhiyun if (ret == 0) {
2409*4882a593Smuzhiyun ret = -EUCLEAN;
2410*4882a593Smuzhiyun goto release;
2411*4882a593Smuzhiyun }
2412*4882a593Smuzhiyun if (path->slots[0] == 0) {
2413*4882a593Smuzhiyun WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
2414*4882a593Smuzhiyun ret = -EUCLEAN;
2415*4882a593Smuzhiyun goto release;
2416*4882a593Smuzhiyun }
2417*4882a593Smuzhiyun path->slots[0]--;
2418*4882a593Smuzhiyun
2419*4882a593Smuzhiyun btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2420*4882a593Smuzhiyun if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
2421*4882a593Smuzhiyun key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
2422*4882a593Smuzhiyun ret = -ENOENT;
2423*4882a593Smuzhiyun goto release;
2424*4882a593Smuzhiyun }
2425*4882a593Smuzhiyun memcpy(&iter->cur_key, &key, sizeof(key));
2426*4882a593Smuzhiyun iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2427*4882a593Smuzhiyun path->slots[0]);
2428*4882a593Smuzhiyun iter->end_ptr = (u32)(iter->item_ptr +
2429*4882a593Smuzhiyun btrfs_item_size_nr(path->nodes[0], path->slots[0]));
2430*4882a593Smuzhiyun ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2431*4882a593Smuzhiyun struct btrfs_extent_item);
2432*4882a593Smuzhiyun
2433*4882a593Smuzhiyun /*
2434*4882a593Smuzhiyun * Only iteration of tree block backrefs is supported for now.
2435*4882a593Smuzhiyun *
2436*4882a593Smuzhiyun * This is an extra precaution for non-skinny-metadata file systems, where
2437*4882a593Smuzhiyun * EXTENT_ITEM is also used for tree blocks, so the extent flags are the
2438*4882a593Smuzhiyun * only way to tell whether this is a tree block.
2439*4882a593Smuzhiyun */
2440*4882a593Smuzhiyun if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2441*4882a593Smuzhiyun ret = -ENOTSUPP;
2442*4882a593Smuzhiyun goto release;
2443*4882a593Smuzhiyun }
2444*4882a593Smuzhiyun iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2445*4882a593Smuzhiyun
2446*4882a593Smuzhiyun /* If there is no inline backref, go search for keyed backref */
2447*4882a593Smuzhiyun if (iter->cur_ptr >= iter->end_ptr) {
2448*4882a593Smuzhiyun ret = btrfs_next_item(fs_info->extent_root, path);
2449*4882a593Smuzhiyun
2450*4882a593Smuzhiyun /* No inline nor keyed ref */
2451*4882a593Smuzhiyun if (ret > 0) {
2452*4882a593Smuzhiyun ret = -ENOENT;
2453*4882a593Smuzhiyun goto release;
2454*4882a593Smuzhiyun }
2455*4882a593Smuzhiyun if (ret < 0)
2456*4882a593Smuzhiyun goto release;
2457*4882a593Smuzhiyun
2458*4882a593Smuzhiyun btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2459*4882a593Smuzhiyun path->slots[0]);
2460*4882a593Smuzhiyun if (iter->cur_key.objectid != bytenr ||
2461*4882a593Smuzhiyun (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2462*4882a593Smuzhiyun iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2463*4882a593Smuzhiyun ret = -ENOENT;
2464*4882a593Smuzhiyun goto release;
2465*4882a593Smuzhiyun }
2466*4882a593Smuzhiyun iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2467*4882a593Smuzhiyun path->slots[0]);
2468*4882a593Smuzhiyun iter->item_ptr = iter->cur_ptr;
2469*4882a593Smuzhiyun iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
2470*4882a593Smuzhiyun path->nodes[0], path->slots[0]));
2471*4882a593Smuzhiyun }
2472*4882a593Smuzhiyun
2473*4882a593Smuzhiyun return 0;
2474*4882a593Smuzhiyun release:
2475*4882a593Smuzhiyun btrfs_backref_iter_release(iter);
2476*4882a593Smuzhiyun return ret;
2477*4882a593Smuzhiyun }
2478*4882a593Smuzhiyun
2479*4882a593Smuzhiyun /*
2480*4882a593Smuzhiyun * Go to the next backref item of the current bytenr, which can be either
2481*4882a593Smuzhiyun * inlined or keyed.
2482*4882a593Smuzhiyun *
2483*4882a593Smuzhiyun * The caller needs to check whether it's an inline ref or not via iter->cur_key.
2484*4882a593Smuzhiyun *
2485*4882a593Smuzhiyun * Return 0 if we got the next backref without problem.
2486*4882a593Smuzhiyun * Return >0 if there is no extra backref for this bytenr.
2487*4882a593Smuzhiyun * Return <0 if something went wrong.
2488*4882a593Smuzhiyun */
2489*4882a593Smuzhiyun int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2490*4882a593Smuzhiyun {
2491*4882a593Smuzhiyun struct extent_buffer *eb = btrfs_backref_get_eb(iter);
2492*4882a593Smuzhiyun struct btrfs_path *path = iter->path;
2493*4882a593Smuzhiyun struct btrfs_extent_inline_ref *iref;
2494*4882a593Smuzhiyun int ret;
2495*4882a593Smuzhiyun u32 size;
2496*4882a593Smuzhiyun
2497*4882a593Smuzhiyun if (btrfs_backref_iter_is_inline_ref(iter)) {
2498*4882a593Smuzhiyun /* We're still inside the inline refs */
2499*4882a593Smuzhiyun ASSERT(iter->cur_ptr < iter->end_ptr);
2500*4882a593Smuzhiyun
2501*4882a593Smuzhiyun if (btrfs_backref_has_tree_block_info(iter)) {
2502*4882a593Smuzhiyun /* First tree block info */
2503*4882a593Smuzhiyun size = sizeof(struct btrfs_tree_block_info);
2504*4882a593Smuzhiyun } else {
2505*4882a593Smuzhiyun /* Use inline ref type to determine the size */
2506*4882a593Smuzhiyun int type;
2507*4882a593Smuzhiyun
2508*4882a593Smuzhiyun iref = (struct btrfs_extent_inline_ref *)
2509*4882a593Smuzhiyun ((unsigned long)iter->cur_ptr);
2510*4882a593Smuzhiyun type = btrfs_extent_inline_ref_type(eb, iref);
2511*4882a593Smuzhiyun
2512*4882a593Smuzhiyun size = btrfs_extent_inline_ref_size(type);
2513*4882a593Smuzhiyun }
2514*4882a593Smuzhiyun iter->cur_ptr += size;
2515*4882a593Smuzhiyun if (iter->cur_ptr < iter->end_ptr)
2516*4882a593Smuzhiyun return 0;
2517*4882a593Smuzhiyun
2518*4882a593Smuzhiyun /* All inline items iterated, fall through */
2519*4882a593Smuzhiyun }
2520*4882a593Smuzhiyun
2521*4882a593Smuzhiyun /* We're at keyed items, there is no inline item, go to the next one */
2522*4882a593Smuzhiyun ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
2523*4882a593Smuzhiyun if (ret)
2524*4882a593Smuzhiyun return ret;
2525*4882a593Smuzhiyun
2526*4882a593Smuzhiyun btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
2527*4882a593Smuzhiyun if (iter->cur_key.objectid != iter->bytenr ||
2528*4882a593Smuzhiyun (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
2529*4882a593Smuzhiyun iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
2530*4882a593Smuzhiyun return 1;
2531*4882a593Smuzhiyun iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2532*4882a593Smuzhiyun path->slots[0]);
2533*4882a593Smuzhiyun iter->cur_ptr = iter->item_ptr;
2534*4882a593Smuzhiyun iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
2535*4882a593Smuzhiyun path->slots[0]);
2536*4882a593Smuzhiyun return 0;
2537*4882a593Smuzhiyun }
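
/*
 * Illustrative sketch of the iteration pattern (assumes the
 * btrfs_backref_iter_free() helper from backref.h; error handling trimmed).
 * Inside the loop, iter->cur_key describes the current backref and
 * btrfs_backref_iter_is_inline_ref() tells inline refs from keyed ones:
 *
 *	struct btrfs_backref_iter *iter;
 *	int ret;
 *
 *	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
 *	if (!iter)
 *		return -ENOMEM;
 *	ret = btrfs_backref_iter_start(iter, bytenr);
 *	while (ret == 0)
 *		ret = btrfs_backref_iter_next(iter);
 *	btrfs_backref_iter_release(iter);
 *	btrfs_backref_iter_free(iter);
 */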
2538*4882a593Smuzhiyun
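/*
 * Initialize an empty backref cache: the rb tree and all the list heads are
 * set up, and @is_reloc records whether the cache is used by relocation,
 * which affects how reloc root backrefs are handled later.
 */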
2539*4882a593Smuzhiyun void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
2540*4882a593Smuzhiyun struct btrfs_backref_cache *cache, int is_reloc)
2541*4882a593Smuzhiyun {
2542*4882a593Smuzhiyun int i;
2543*4882a593Smuzhiyun
2544*4882a593Smuzhiyun cache->rb_root = RB_ROOT;
2545*4882a593Smuzhiyun for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2546*4882a593Smuzhiyun INIT_LIST_HEAD(&cache->pending[i]);
2547*4882a593Smuzhiyun INIT_LIST_HEAD(&cache->changed);
2548*4882a593Smuzhiyun INIT_LIST_HEAD(&cache->detached);
2549*4882a593Smuzhiyun INIT_LIST_HEAD(&cache->leaves);
2550*4882a593Smuzhiyun INIT_LIST_HEAD(&cache->pending_edge);
2551*4882a593Smuzhiyun INIT_LIST_HEAD(&cache->useless_node);
2552*4882a593Smuzhiyun cache->fs_info = fs_info;
2553*4882a593Smuzhiyun cache->is_reloc = is_reloc;
2554*4882a593Smuzhiyun }
2555*4882a593Smuzhiyun
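/*
 * Allocate a zeroed backref node for @bytenr at @level and account it in
 * @cache->nr_nodes; the matching btrfs_backref_alloc_edge() below accounts
 * edges in @cache->nr_edges so that btrfs_backref_release_cache() can assert
 * everything was freed.
 */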
2556*4882a593Smuzhiyun struct btrfs_backref_node *btrfs_backref_alloc_node(
2557*4882a593Smuzhiyun struct btrfs_backref_cache *cache, u64 bytenr, int level)
2558*4882a593Smuzhiyun {
2559*4882a593Smuzhiyun struct btrfs_backref_node *node;
2560*4882a593Smuzhiyun
2561*4882a593Smuzhiyun ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
2562*4882a593Smuzhiyun node = kzalloc(sizeof(*node), GFP_NOFS);
2563*4882a593Smuzhiyun if (!node)
2564*4882a593Smuzhiyun return node;
2565*4882a593Smuzhiyun
2566*4882a593Smuzhiyun INIT_LIST_HEAD(&node->list);
2567*4882a593Smuzhiyun INIT_LIST_HEAD(&node->upper);
2568*4882a593Smuzhiyun INIT_LIST_HEAD(&node->lower);
2569*4882a593Smuzhiyun RB_CLEAR_NODE(&node->rb_node);
2570*4882a593Smuzhiyun cache->nr_nodes++;
2571*4882a593Smuzhiyun node->level = level;
2572*4882a593Smuzhiyun node->bytenr = bytenr;
2573*4882a593Smuzhiyun
2574*4882a593Smuzhiyun return node;
2575*4882a593Smuzhiyun }
2576*4882a593Smuzhiyun
2577*4882a593Smuzhiyun struct btrfs_backref_edge *btrfs_backref_alloc_edge(
2578*4882a593Smuzhiyun struct btrfs_backref_cache *cache)
2579*4882a593Smuzhiyun {
2580*4882a593Smuzhiyun struct btrfs_backref_edge *edge;
2581*4882a593Smuzhiyun
2582*4882a593Smuzhiyun edge = kzalloc(sizeof(*edge), GFP_NOFS);
2583*4882a593Smuzhiyun if (edge)
2584*4882a593Smuzhiyun cache->nr_edges++;
2585*4882a593Smuzhiyun return edge;
2586*4882a593Smuzhiyun }
2587*4882a593Smuzhiyun
2588*4882a593Smuzhiyun /*
2589*4882a593Smuzhiyun * Drop the backref node from cache, also cleaning up all its
2590*4882a593Smuzhiyun * upper edges and any uncached nodes in the path.
2591*4882a593Smuzhiyun *
2592*4882a593Smuzhiyun * This cleanup happens bottom up, thus the node should either
2593*4882a593Smuzhiyun * be the lowest node in the cache or a detached node.
2594*4882a593Smuzhiyun */
2595*4882a593Smuzhiyun void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
2596*4882a593Smuzhiyun struct btrfs_backref_node *node)
2597*4882a593Smuzhiyun {
2598*4882a593Smuzhiyun struct btrfs_backref_node *upper;
2599*4882a593Smuzhiyun struct btrfs_backref_edge *edge;
2600*4882a593Smuzhiyun
2601*4882a593Smuzhiyun if (!node)
2602*4882a593Smuzhiyun return;
2603*4882a593Smuzhiyun
2604*4882a593Smuzhiyun BUG_ON(!node->lowest && !node->detached);
2605*4882a593Smuzhiyun while (!list_empty(&node->upper)) {
2606*4882a593Smuzhiyun edge = list_entry(node->upper.next, struct btrfs_backref_edge,
2607*4882a593Smuzhiyun list[LOWER]);
2608*4882a593Smuzhiyun upper = edge->node[UPPER];
2609*4882a593Smuzhiyun list_del(&edge->list[LOWER]);
2610*4882a593Smuzhiyun list_del(&edge->list[UPPER]);
2611*4882a593Smuzhiyun btrfs_backref_free_edge(cache, edge);
2612*4882a593Smuzhiyun
2613*4882a593Smuzhiyun /*
2614*4882a593Smuzhiyun * Add the node to the leaf node list if no other child block
2615*4882a593Smuzhiyun * is cached.
2616*4882a593Smuzhiyun */
2617*4882a593Smuzhiyun if (list_empty(&upper->lower)) {
2618*4882a593Smuzhiyun list_add_tail(&upper->lower, &cache->leaves);
2619*4882a593Smuzhiyun upper->lowest = 1;
2620*4882a593Smuzhiyun }
2621*4882a593Smuzhiyun }
2622*4882a593Smuzhiyun
2623*4882a593Smuzhiyun btrfs_backref_drop_node(cache, node);
2624*4882a593Smuzhiyun }
2625*4882a593Smuzhiyun
2626*4882a593Smuzhiyun /*
2627*4882a593Smuzhiyun * Release all nodes/edges from current cache
2628*4882a593Smuzhiyun */
2629*4882a593Smuzhiyun void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
2630*4882a593Smuzhiyun {
2631*4882a593Smuzhiyun struct btrfs_backref_node *node;
2632*4882a593Smuzhiyun int i;
2633*4882a593Smuzhiyun
2634*4882a593Smuzhiyun while (!list_empty(&cache->detached)) {
2635*4882a593Smuzhiyun node = list_entry(cache->detached.next,
2636*4882a593Smuzhiyun struct btrfs_backref_node, list);
2637*4882a593Smuzhiyun btrfs_backref_cleanup_node(cache, node);
2638*4882a593Smuzhiyun }
2639*4882a593Smuzhiyun
2640*4882a593Smuzhiyun while (!list_empty(&cache->leaves)) {
2641*4882a593Smuzhiyun node = list_entry(cache->leaves.next,
2642*4882a593Smuzhiyun struct btrfs_backref_node, lower);
2643*4882a593Smuzhiyun btrfs_backref_cleanup_node(cache, node);
2644*4882a593Smuzhiyun }
2645*4882a593Smuzhiyun
2646*4882a593Smuzhiyun cache->last_trans = 0;
2647*4882a593Smuzhiyun
2648*4882a593Smuzhiyun for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2649*4882a593Smuzhiyun ASSERT(list_empty(&cache->pending[i]));
2650*4882a593Smuzhiyun ASSERT(list_empty(&cache->pending_edge));
2651*4882a593Smuzhiyun ASSERT(list_empty(&cache->useless_node));
2652*4882a593Smuzhiyun ASSERT(list_empty(&cache->changed));
2653*4882a593Smuzhiyun ASSERT(list_empty(&cache->detached));
2654*4882a593Smuzhiyun ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
2655*4882a593Smuzhiyun ASSERT(!cache->nr_nodes);
2656*4882a593Smuzhiyun ASSERT(!cache->nr_edges);
2657*4882a593Smuzhiyun }
2658*4882a593Smuzhiyun
2659*4882a593Smuzhiyun /*
2660*4882a593Smuzhiyun * Handle direct tree backref
2661*4882a593Smuzhiyun *
2662*4882a593Smuzhiyun * A direct tree backref means the backref item records its parent bytenr
2663*4882a593Smuzhiyun * directly. This is for SHARED_BLOCK_REF backrefs (keyed or inlined).
2664*4882a593Smuzhiyun *
2665*4882a593Smuzhiyun * @ref_key: The converted backref key.
2666*4882a593Smuzhiyun * For keyed backref, it's the item key.
2667*4882a593Smuzhiyun * For inlined backref, objectid is the bytenr,
2668*4882a593Smuzhiyun * type is btrfs_inline_ref_type, offset is
2669*4882a593Smuzhiyun * btrfs_inline_ref_offset.
2670*4882a593Smuzhiyun */
2671*4882a593Smuzhiyun static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
2672*4882a593Smuzhiyun struct btrfs_key *ref_key,
2673*4882a593Smuzhiyun struct btrfs_backref_node *cur)
2674*4882a593Smuzhiyun {
2675*4882a593Smuzhiyun struct btrfs_backref_edge *edge;
2676*4882a593Smuzhiyun struct btrfs_backref_node *upper;
2677*4882a593Smuzhiyun struct rb_node *rb_node;
2678*4882a593Smuzhiyun
2679*4882a593Smuzhiyun ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
2680*4882a593Smuzhiyun
2681*4882a593Smuzhiyun /* Only reloc root uses backref pointing to itself */
2682*4882a593Smuzhiyun if (ref_key->objectid == ref_key->offset) {
2683*4882a593Smuzhiyun struct btrfs_root *root;
2684*4882a593Smuzhiyun
2685*4882a593Smuzhiyun cur->is_reloc_root = 1;
2686*4882a593Smuzhiyun /* Only reloc backref cache cares about a specific root */
2687*4882a593Smuzhiyun if (cache->is_reloc) {
2688*4882a593Smuzhiyun root = find_reloc_root(cache->fs_info, cur->bytenr);
2689*4882a593Smuzhiyun if (!root)
2690*4882a593Smuzhiyun return -ENOENT;
2691*4882a593Smuzhiyun cur->root = root;
2692*4882a593Smuzhiyun } else {
2693*4882a593Smuzhiyun /*
2694*4882a593Smuzhiyun * For generic purpose backref cache, reloc root node
2695*4882a593Smuzhiyun * is useless.
2696*4882a593Smuzhiyun */
2697*4882a593Smuzhiyun list_add(&cur->list, &cache->useless_node);
2698*4882a593Smuzhiyun }
2699*4882a593Smuzhiyun return 0;
2700*4882a593Smuzhiyun }
2701*4882a593Smuzhiyun
2702*4882a593Smuzhiyun edge = btrfs_backref_alloc_edge(cache);
2703*4882a593Smuzhiyun if (!edge)
2704*4882a593Smuzhiyun return -ENOMEM;
2705*4882a593Smuzhiyun
2706*4882a593Smuzhiyun rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
2707*4882a593Smuzhiyun if (!rb_node) {
2708*4882a593Smuzhiyun /* Parent node not yet cached */
2709*4882a593Smuzhiyun upper = btrfs_backref_alloc_node(cache, ref_key->offset,
2710*4882a593Smuzhiyun cur->level + 1);
2711*4882a593Smuzhiyun if (!upper) {
2712*4882a593Smuzhiyun btrfs_backref_free_edge(cache, edge);
2713*4882a593Smuzhiyun return -ENOMEM;
2714*4882a593Smuzhiyun }
2715*4882a593Smuzhiyun
2716*4882a593Smuzhiyun /*
2717*4882a593Smuzhiyun * The backrefs of the upper level block aren't cached yet, add the
2718*4882a593Smuzhiyun * block to the pending list
2719*4882a593Smuzhiyun */
2720*4882a593Smuzhiyun list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2721*4882a593Smuzhiyun } else {
2722*4882a593Smuzhiyun /* Parent node already cached */
2723*4882a593Smuzhiyun upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
2724*4882a593Smuzhiyun ASSERT(upper->checked);
2725*4882a593Smuzhiyun INIT_LIST_HEAD(&edge->list[UPPER]);
2726*4882a593Smuzhiyun }
2727*4882a593Smuzhiyun btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
2728*4882a593Smuzhiyun return 0;
2729*4882a593Smuzhiyun }
2730*4882a593Smuzhiyun
2731*4882a593Smuzhiyun /*
2732*4882a593Smuzhiyun * Handle indirect tree backref
2733*4882a593Smuzhiyun *
2734*4882a593Smuzhiyun * An indirect tree backref means we only know which tree the node belongs
2735*4882a593Smuzhiyun * to. We still need to do a tree search to find out its parents. This is
2736*4882a593Smuzhiyun * for TREE_BLOCK_REF backrefs (keyed or inlined).
2737*4882a593Smuzhiyun *
2738*4882a593Smuzhiyun * @ref_key: The same as @ref_key in handle_direct_tree_backref()
2739*4882a593Smuzhiyun * @tree_key: The first key of this tree block.
2740*4882a593Smuzhiyun * @path: A clean (released) path, to avoid allocating a path every time
2741*4882a593Smuzhiyun * the function gets called.
2742*4882a593Smuzhiyun */
2743*4882a593Smuzhiyun static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
2744*4882a593Smuzhiyun struct btrfs_path *path,
2745*4882a593Smuzhiyun struct btrfs_key *ref_key,
2746*4882a593Smuzhiyun struct btrfs_key *tree_key,
2747*4882a593Smuzhiyun struct btrfs_backref_node *cur)
2748*4882a593Smuzhiyun {
2749*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = cache->fs_info;
2750*4882a593Smuzhiyun struct btrfs_backref_node *upper;
2751*4882a593Smuzhiyun struct btrfs_backref_node *lower;
2752*4882a593Smuzhiyun struct btrfs_backref_edge *edge;
2753*4882a593Smuzhiyun struct extent_buffer *eb;
2754*4882a593Smuzhiyun struct btrfs_root *root;
2755*4882a593Smuzhiyun struct rb_node *rb_node;
2756*4882a593Smuzhiyun int level;
2757*4882a593Smuzhiyun bool need_check = true;
2758*4882a593Smuzhiyun int ret;
2759*4882a593Smuzhiyun
2760*4882a593Smuzhiyun root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
2761*4882a593Smuzhiyun if (IS_ERR(root))
2762*4882a593Smuzhiyun return PTR_ERR(root);
2763*4882a593Smuzhiyun if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2764*4882a593Smuzhiyun cur->cowonly = 1;
2765*4882a593Smuzhiyun
2766*4882a593Smuzhiyun if (btrfs_root_level(&root->root_item) == cur->level) {
2767*4882a593Smuzhiyun /* Tree root */
2768*4882a593Smuzhiyun ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
2769*4882a593Smuzhiyun /*
2770*4882a593Smuzhiyun * For reloc backref cache, we may ignore reloc root. But for
2771*4882a593Smuzhiyun * general purpose backref cache, we can't rely on
2772*4882a593Smuzhiyun * btrfs_should_ignore_reloc_root() as it may conflict with
2773*4882a593Smuzhiyun * current running relocation and lead to missing root.
2774*4882a593Smuzhiyun *
2775*4882a593Smuzhiyun * For general purpose backref cache, reloc root detection is
2776*4882a593Smuzhiyun * completely relying on direct backref (key->offset is parent
2777*4882a593Smuzhiyun * bytenr), thus only do such check for reloc cache.
2778*4882a593Smuzhiyun */
2779*4882a593Smuzhiyun if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
2780*4882a593Smuzhiyun btrfs_put_root(root);
2781*4882a593Smuzhiyun list_add(&cur->list, &cache->useless_node);
2782*4882a593Smuzhiyun } else {
2783*4882a593Smuzhiyun cur->root = root;
2784*4882a593Smuzhiyun }
2785*4882a593Smuzhiyun return 0;
2786*4882a593Smuzhiyun }
2787*4882a593Smuzhiyun
2788*4882a593Smuzhiyun level = cur->level + 1;
2789*4882a593Smuzhiyun
2790*4882a593Smuzhiyun /* Search the tree to find parent blocks referring to the block */
2791*4882a593Smuzhiyun path->search_commit_root = 1;
2792*4882a593Smuzhiyun path->skip_locking = 1;
2793*4882a593Smuzhiyun path->lowest_level = level;
2794*4882a593Smuzhiyun ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
2795*4882a593Smuzhiyun path->lowest_level = 0;
2796*4882a593Smuzhiyun if (ret < 0) {
2797*4882a593Smuzhiyun btrfs_put_root(root);
2798*4882a593Smuzhiyun return ret;
2799*4882a593Smuzhiyun }
2800*4882a593Smuzhiyun if (ret > 0 && path->slots[level] > 0)
2801*4882a593Smuzhiyun path->slots[level]--;
2802*4882a593Smuzhiyun
2803*4882a593Smuzhiyun eb = path->nodes[level];
2804*4882a593Smuzhiyun if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
2805*4882a593Smuzhiyun btrfs_err(fs_info,
2806*4882a593Smuzhiyun "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
2807*4882a593Smuzhiyun cur->bytenr, level - 1, root->root_key.objectid,
2808*4882a593Smuzhiyun tree_key->objectid, tree_key->type, tree_key->offset);
2809*4882a593Smuzhiyun btrfs_put_root(root);
2810*4882a593Smuzhiyun ret = -ENOENT;
2811*4882a593Smuzhiyun goto out;
2812*4882a593Smuzhiyun }
2813*4882a593Smuzhiyun lower = cur;
2814*4882a593Smuzhiyun
2815*4882a593Smuzhiyun /* Add all nodes and edges in the path */
2816*4882a593Smuzhiyun for (; level < BTRFS_MAX_LEVEL; level++) {
2817*4882a593Smuzhiyun if (!path->nodes[level]) {
2818*4882a593Smuzhiyun ASSERT(btrfs_root_bytenr(&root->root_item) ==
2819*4882a593Smuzhiyun lower->bytenr);
2820*4882a593Smuzhiyun /* Same as previous should_ignore_reloc_root() call */
2821*4882a593Smuzhiyun if (btrfs_should_ignore_reloc_root(root) &&
2822*4882a593Smuzhiyun cache->is_reloc) {
2823*4882a593Smuzhiyun btrfs_put_root(root);
2824*4882a593Smuzhiyun list_add(&lower->list, &cache->useless_node);
2825*4882a593Smuzhiyun } else {
2826*4882a593Smuzhiyun lower->root = root;
2827*4882a593Smuzhiyun }
2828*4882a593Smuzhiyun break;
2829*4882a593Smuzhiyun }
2830*4882a593Smuzhiyun
2831*4882a593Smuzhiyun edge = btrfs_backref_alloc_edge(cache);
2832*4882a593Smuzhiyun if (!edge) {
2833*4882a593Smuzhiyun btrfs_put_root(root);
2834*4882a593Smuzhiyun ret = -ENOMEM;
2835*4882a593Smuzhiyun goto out;
2836*4882a593Smuzhiyun }
2837*4882a593Smuzhiyun
2838*4882a593Smuzhiyun eb = path->nodes[level];
2839*4882a593Smuzhiyun rb_node = rb_simple_search(&cache->rb_root, eb->start);
2840*4882a593Smuzhiyun if (!rb_node) {
2841*4882a593Smuzhiyun upper = btrfs_backref_alloc_node(cache, eb->start,
2842*4882a593Smuzhiyun lower->level + 1);
2843*4882a593Smuzhiyun if (!upper) {
2844*4882a593Smuzhiyun btrfs_put_root(root);
2845*4882a593Smuzhiyun btrfs_backref_free_edge(cache, edge);
2846*4882a593Smuzhiyun ret = -ENOMEM;
2847*4882a593Smuzhiyun goto out;
2848*4882a593Smuzhiyun }
2849*4882a593Smuzhiyun upper->owner = btrfs_header_owner(eb);
2850*4882a593Smuzhiyun if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2851*4882a593Smuzhiyun upper->cowonly = 1;
2852*4882a593Smuzhiyun
2853*4882a593Smuzhiyun /*
2854*4882a593Smuzhiyun * If we know the block isn't shared we can avoid
2855*4882a593Smuzhiyun * checking its backrefs.
2856*4882a593Smuzhiyun */
2857*4882a593Smuzhiyun if (btrfs_block_can_be_shared(root, eb))
2858*4882a593Smuzhiyun upper->checked = 0;
2859*4882a593Smuzhiyun else
2860*4882a593Smuzhiyun upper->checked = 1;
2861*4882a593Smuzhiyun
2862*4882a593Smuzhiyun /*
2863*4882a593Smuzhiyun * Add the block to the pending list if we need to check its
2864*4882a593Smuzhiyun * backrefs. We only do this once while walking up a tree as
2865*4882a593Smuzhiyun * we will catch anything else later on.
2866*4882a593Smuzhiyun */
2867*4882a593Smuzhiyun if (!upper->checked && need_check) {
2868*4882a593Smuzhiyun need_check = false;
2869*4882a593Smuzhiyun list_add_tail(&edge->list[UPPER],
2870*4882a593Smuzhiyun &cache->pending_edge);
2871*4882a593Smuzhiyun } else {
2872*4882a593Smuzhiyun if (upper->checked)
2873*4882a593Smuzhiyun need_check = true;
2874*4882a593Smuzhiyun INIT_LIST_HEAD(&edge->list[UPPER]);
2875*4882a593Smuzhiyun }
2876*4882a593Smuzhiyun } else {
2877*4882a593Smuzhiyun upper = rb_entry(rb_node, struct btrfs_backref_node,
2878*4882a593Smuzhiyun rb_node);
2879*4882a593Smuzhiyun ASSERT(upper->checked);
2880*4882a593Smuzhiyun INIT_LIST_HEAD(&edge->list[UPPER]);
2881*4882a593Smuzhiyun if (!upper->owner)
2882*4882a593Smuzhiyun upper->owner = btrfs_header_owner(eb);
2883*4882a593Smuzhiyun }
2884*4882a593Smuzhiyun btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
2885*4882a593Smuzhiyun
2886*4882a593Smuzhiyun if (rb_node) {
2887*4882a593Smuzhiyun btrfs_put_root(root);
2888*4882a593Smuzhiyun break;
2889*4882a593Smuzhiyun }
2890*4882a593Smuzhiyun lower = upper;
2891*4882a593Smuzhiyun upper = NULL;
2892*4882a593Smuzhiyun }
2893*4882a593Smuzhiyun out:
2894*4882a593Smuzhiyun btrfs_release_path(path);
2895*4882a593Smuzhiyun return ret;
2896*4882a593Smuzhiyun }
2897*4882a593Smuzhiyun
2898*4882a593Smuzhiyun /*
2899*4882a593Smuzhiyun * Add backref node @cur into @cache.
2900*4882a593Smuzhiyun *
2901*4882a593Smuzhiyun * NOTE: Even if the function returns 0, @cur is not yet cached as its upper
2902*4882a593Smuzhiyun * links aren't yet bi-directional. The caller needs to finish such links by
2903*4882a593Smuzhiyun * calling btrfs_backref_finish_upper_links().
2904*4882a593Smuzhiyun *
2905*4882a593Smuzhiyun * @path: Released path for indirect tree backref lookup
2906*4882a593Smuzhiyun * @iter: Released backref iter for extent tree search
2907*4882a593Smuzhiyun * @node_key: The first key of the tree block
2908*4882a593Smuzhiyun */
2909*4882a593Smuzhiyun int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
2910*4882a593Smuzhiyun struct btrfs_path *path,
2911*4882a593Smuzhiyun struct btrfs_backref_iter *iter,
2912*4882a593Smuzhiyun struct btrfs_key *node_key,
2913*4882a593Smuzhiyun struct btrfs_backref_node *cur)
2914*4882a593Smuzhiyun {
2915*4882a593Smuzhiyun struct btrfs_fs_info *fs_info = cache->fs_info;
2916*4882a593Smuzhiyun struct btrfs_backref_edge *edge;
2917*4882a593Smuzhiyun struct btrfs_backref_node *exist;
2918*4882a593Smuzhiyun int ret;
2919*4882a593Smuzhiyun
2920*4882a593Smuzhiyun ret = btrfs_backref_iter_start(iter, cur->bytenr);
2921*4882a593Smuzhiyun if (ret < 0)
2922*4882a593Smuzhiyun return ret;
2923*4882a593Smuzhiyun /*
2924*4882a593Smuzhiyun * We skip the first btrfs_tree_block_info, as we don't use the key
2925*4882a593Smuzhiyun * stored in it, but fetch it from the tree block
2926*4882a593Smuzhiyun */
2927*4882a593Smuzhiyun if (btrfs_backref_has_tree_block_info(iter)) {
2928*4882a593Smuzhiyun ret = btrfs_backref_iter_next(iter);
2929*4882a593Smuzhiyun if (ret < 0)
2930*4882a593Smuzhiyun goto out;
2931*4882a593Smuzhiyun /* No extra backref? This means the tree block is corrupted */
2932*4882a593Smuzhiyun if (ret > 0) {
2933*4882a593Smuzhiyun ret = -EUCLEAN;
2934*4882a593Smuzhiyun goto out;
2935*4882a593Smuzhiyun }
2936*4882a593Smuzhiyun }
2937*4882a593Smuzhiyun WARN_ON(cur->checked);
2938*4882a593Smuzhiyun if (!list_empty(&cur->upper)) {
2939*4882a593Smuzhiyun /*
2940*4882a593Smuzhiyun * The backref was added previously when processing backref of
2941*4882a593Smuzhiyun * type BTRFS_TREE_BLOCK_REF_KEY
2942*4882a593Smuzhiyun */
2943*4882a593Smuzhiyun ASSERT(list_is_singular(&cur->upper));
2944*4882a593Smuzhiyun edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
2945*4882a593Smuzhiyun list[LOWER]);
2946*4882a593Smuzhiyun ASSERT(list_empty(&edge->list[UPPER]));
2947*4882a593Smuzhiyun exist = edge->node[UPPER];
2948*4882a593Smuzhiyun /*
2949*4882a593Smuzhiyun * Add the upper level block to the pending list if we need to
2950*4882a593Smuzhiyun * check its backrefs
2951*4882a593Smuzhiyun */
2952*4882a593Smuzhiyun if (!exist->checked)
2953*4882a593Smuzhiyun list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2954*4882a593Smuzhiyun } else {
2955*4882a593Smuzhiyun exist = NULL;
2956*4882a593Smuzhiyun }
2957*4882a593Smuzhiyun
2958*4882a593Smuzhiyun for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
2959*4882a593Smuzhiyun struct extent_buffer *eb;
2960*4882a593Smuzhiyun struct btrfs_key key;
2961*4882a593Smuzhiyun int type;
2962*4882a593Smuzhiyun
2963*4882a593Smuzhiyun cond_resched();
2964*4882a593Smuzhiyun eb = btrfs_backref_get_eb(iter);
2965*4882a593Smuzhiyun
2966*4882a593Smuzhiyun key.objectid = iter->bytenr;
2967*4882a593Smuzhiyun if (btrfs_backref_iter_is_inline_ref(iter)) {
2968*4882a593Smuzhiyun struct btrfs_extent_inline_ref *iref;
2969*4882a593Smuzhiyun
2970*4882a593Smuzhiyun /* Update key for inline backref */
2971*4882a593Smuzhiyun iref = (struct btrfs_extent_inline_ref *)
2972*4882a593Smuzhiyun ((unsigned long)iter->cur_ptr);
2973*4882a593Smuzhiyun type = btrfs_get_extent_inline_ref_type(eb, iref,
2974*4882a593Smuzhiyun BTRFS_REF_TYPE_BLOCK);
2975*4882a593Smuzhiyun if (type == BTRFS_REF_TYPE_INVALID) {
2976*4882a593Smuzhiyun ret = -EUCLEAN;
2977*4882a593Smuzhiyun goto out;
2978*4882a593Smuzhiyun }
2979*4882a593Smuzhiyun key.type = type;
2980*4882a593Smuzhiyun key.offset = btrfs_extent_inline_ref_offset(eb, iref);
2981*4882a593Smuzhiyun } else {
2982*4882a593Smuzhiyun key.type = iter->cur_key.type;
2983*4882a593Smuzhiyun key.offset = iter->cur_key.offset;
2984*4882a593Smuzhiyun }
2985*4882a593Smuzhiyun
2986*4882a593Smuzhiyun /*
2987*4882a593Smuzhiyun * Parent node found and it matches the current inline ref, no need
2988*4882a593Smuzhiyun * to rebuild this node for this inline ref
2989*4882a593Smuzhiyun */
2990*4882a593Smuzhiyun if (exist &&
2991*4882a593Smuzhiyun ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
2992*4882a593Smuzhiyun exist->owner == key.offset) ||
2993*4882a593Smuzhiyun (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
2994*4882a593Smuzhiyun exist->bytenr == key.offset))) {
2995*4882a593Smuzhiyun exist = NULL;
2996*4882a593Smuzhiyun continue;
2997*4882a593Smuzhiyun }
2998*4882a593Smuzhiyun
2999*4882a593Smuzhiyun /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
3000*4882a593Smuzhiyun if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
3001*4882a593Smuzhiyun ret = handle_direct_tree_backref(cache, &key, cur);
3002*4882a593Smuzhiyun if (ret < 0)
3003*4882a593Smuzhiyun goto out;
3004*4882a593Smuzhiyun continue;
3005*4882a593Smuzhiyun } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
3006*4882a593Smuzhiyun ret = -EINVAL;
3007*4882a593Smuzhiyun btrfs_print_v0_err(fs_info);
3008*4882a593Smuzhiyun btrfs_handle_fs_error(fs_info, ret, NULL);
3009*4882a593Smuzhiyun goto out;
3010*4882a593Smuzhiyun } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
3011*4882a593Smuzhiyun continue;
3012*4882a593Smuzhiyun }
3013*4882a593Smuzhiyun
3014*4882a593Smuzhiyun /*
3015*4882a593Smuzhiyun * key.type == BTRFS_TREE_BLOCK_REF_KEY, the inline ref offset
3016*4882a593Smuzhiyun * is the root objectid. We need to search the tree to get
3017*4882a593Smuzhiyun * its parent bytenr.
3018*4882a593Smuzhiyun */
3019*4882a593Smuzhiyun ret = handle_indirect_tree_backref(cache, path, &key, node_key,
3020*4882a593Smuzhiyun cur);
3021*4882a593Smuzhiyun if (ret < 0)
3022*4882a593Smuzhiyun goto out;
3023*4882a593Smuzhiyun }
3024*4882a593Smuzhiyun ret = 0;
3025*4882a593Smuzhiyun cur->checked = 1;
3026*4882a593Smuzhiyun WARN_ON(exist);
3027*4882a593Smuzhiyun out:
3028*4882a593Smuzhiyun btrfs_backref_iter_release(iter);
3029*4882a593Smuzhiyun return ret;
3030*4882a593Smuzhiyun }
3031*4882a593Smuzhiyun
3032*4882a593Smuzhiyun /*
3033*4882a593Smuzhiyun * Finish the upwards linkage created by btrfs_backref_add_tree_node()
3034*4882a593Smuzhiyun */
3035*4882a593Smuzhiyun int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
3036*4882a593Smuzhiyun struct btrfs_backref_node *start)
3037*4882a593Smuzhiyun {
3038*4882a593Smuzhiyun struct list_head *useless_node = &cache->useless_node;
3039*4882a593Smuzhiyun struct btrfs_backref_edge *edge;
3040*4882a593Smuzhiyun struct rb_node *rb_node;
3041*4882a593Smuzhiyun LIST_HEAD(pending_edge);
3042*4882a593Smuzhiyun
3043*4882a593Smuzhiyun ASSERT(start->checked);
3044*4882a593Smuzhiyun
3045*4882a593Smuzhiyun /* Insert this node to cache if it's not COW-only */
3046*4882a593Smuzhiyun if (!start->cowonly) {
3047*4882a593Smuzhiyun rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
3048*4882a593Smuzhiyun &start->rb_node);
3049*4882a593Smuzhiyun if (rb_node)
3050*4882a593Smuzhiyun btrfs_backref_panic(cache->fs_info, start->bytenr,
3051*4882a593Smuzhiyun -EEXIST);
3052*4882a593Smuzhiyun list_add_tail(&start->lower, &cache->leaves);
3053*4882a593Smuzhiyun }
3054*4882a593Smuzhiyun
3055*4882a593Smuzhiyun /*
3056*4882a593Smuzhiyun * Use breadth first search to iterate all related edges.
3057*4882a593Smuzhiyun *
3058*4882a593Smuzhiyun * The starting points are all the edges of this node
3059*4882a593Smuzhiyun */
3060*4882a593Smuzhiyun list_for_each_entry(edge, &start->upper, list[LOWER])
3061*4882a593Smuzhiyun list_add_tail(&edge->list[UPPER], &pending_edge);
3062*4882a593Smuzhiyun
3063*4882a593Smuzhiyun while (!list_empty(&pending_edge)) {
3064*4882a593Smuzhiyun struct btrfs_backref_node *upper;
3065*4882a593Smuzhiyun struct btrfs_backref_node *lower;
3066*4882a593Smuzhiyun
3067*4882a593Smuzhiyun edge = list_first_entry(&pending_edge,
3068*4882a593Smuzhiyun struct btrfs_backref_edge, list[UPPER]);
3069*4882a593Smuzhiyun list_del_init(&edge->list[UPPER]);
3070*4882a593Smuzhiyun upper = edge->node[UPPER];
3071*4882a593Smuzhiyun lower = edge->node[LOWER];
3072*4882a593Smuzhiyun
3073*4882a593Smuzhiyun /* Parent is detached, no need to keep any edges */
3074*4882a593Smuzhiyun if (upper->detached) {
3075*4882a593Smuzhiyun list_del(&edge->list[LOWER]);
3076*4882a593Smuzhiyun btrfs_backref_free_edge(cache, edge);
3077*4882a593Smuzhiyun
3078*4882a593Smuzhiyun /* Lower node is orphan, queue for cleanup */
3079*4882a593Smuzhiyun if (list_empty(&lower->upper))
3080*4882a593Smuzhiyun list_add(&lower->list, useless_node);
3081*4882a593Smuzhiyun continue;
3082*4882a593Smuzhiyun }
3083*4882a593Smuzhiyun
3084*4882a593Smuzhiyun /*
3085*4882a593Smuzhiyun * All new nodes added in current build_backref_tree() haven't
3086*4882a593Smuzhiyun * been linked to the cache rb tree.
3087*4882a593Smuzhiyun * So if we have upper->rb_node populated, this means a cache
3088*4882a593Smuzhiyun * hit. We only need to link the edge, as @upper and all its
3089*4882a593Smuzhiyun * parents have already been linked.
3090*4882a593Smuzhiyun */
3091*4882a593Smuzhiyun if (!RB_EMPTY_NODE(&upper->rb_node)) {
3092*4882a593Smuzhiyun if (upper->lowest) {
3093*4882a593Smuzhiyun list_del_init(&upper->lower);
3094*4882a593Smuzhiyun upper->lowest = 0;
3095*4882a593Smuzhiyun }
3096*4882a593Smuzhiyun
3097*4882a593Smuzhiyun list_add_tail(&edge->list[UPPER], &upper->lower);
3098*4882a593Smuzhiyun continue;
3099*4882a593Smuzhiyun }
3100*4882a593Smuzhiyun
3101*4882a593Smuzhiyun /* Sanity check, we shouldn't have any unchecked nodes */
3102*4882a593Smuzhiyun if (!upper->checked) {
3103*4882a593Smuzhiyun ASSERT(0);
3104*4882a593Smuzhiyun return -EUCLEAN;
3105*4882a593Smuzhiyun }
3106*4882a593Smuzhiyun
3107*4882a593Smuzhiyun /* Sanity check, COW-only node has non-COW-only parent */
3108*4882a593Smuzhiyun if (start->cowonly != upper->cowonly) {
3109*4882a593Smuzhiyun ASSERT(0);
3110*4882a593Smuzhiyun return -EUCLEAN;
3111*4882a593Smuzhiyun }
3112*4882a593Smuzhiyun
3113*4882a593Smuzhiyun /* Only cache non-COW-only (subvolume trees) tree blocks */
3114*4882a593Smuzhiyun if (!upper->cowonly) {
3115*4882a593Smuzhiyun rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3116*4882a593Smuzhiyun &upper->rb_node);
3117*4882a593Smuzhiyun if (rb_node) {
3118*4882a593Smuzhiyun btrfs_backref_panic(cache->fs_info,
3119*4882a593Smuzhiyun upper->bytenr, -EEXIST);
3120*4882a593Smuzhiyun return -EUCLEAN;
3121*4882a593Smuzhiyun }
3122*4882a593Smuzhiyun }
3123*4882a593Smuzhiyun
3124*4882a593Smuzhiyun list_add_tail(&edge->list[UPPER], &upper->lower);
3125*4882a593Smuzhiyun
3126*4882a593Smuzhiyun /*
3127*4882a593Smuzhiyun * Also queue all the parent edges of this uncached node
3128*4882a593Smuzhiyun * to finish the upper linkage
3129*4882a593Smuzhiyun */
3130*4882a593Smuzhiyun list_for_each_entry(edge, &upper->upper, list[LOWER])
3131*4882a593Smuzhiyun list_add_tail(&edge->list[UPPER], &pending_edge);
3132*4882a593Smuzhiyun }
3133*4882a593Smuzhiyun return 0;
3134*4882a593Smuzhiyun }
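
/*
 * Rough sketch of how the pieces above fit together when building a backref
 * tree for one block; this mirrors what the relocation code does, with error
 * handling and the iteration over cache->pending_edge trimmed:
 *
 *	node = btrfs_backref_alloc_node(cache, bytenr, level);
 *	if (!node)
 *		goto error;
 *	ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, node);
 *	if (ret < 0)
 *		goto error;
 *	ret = btrfs_backref_finish_upper_links(cache, node);
 *	if (ret < 0)
 *		goto error;
 *	...
 * error:
 *	btrfs_backref_error_cleanup(cache, node);
 */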
3135*4882a593Smuzhiyun
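/*
 * Clean up the cache after a failed backref tree build: drop everything left
 * on the useless_node and pending_edge lists, free the edges involved and
 * finally clean up @node (the node the failed build started from) itself.
 */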
3136*4882a593Smuzhiyun void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3137*4882a593Smuzhiyun struct btrfs_backref_node *node)
3138*4882a593Smuzhiyun {
3139*4882a593Smuzhiyun struct btrfs_backref_node *lower;
3140*4882a593Smuzhiyun struct btrfs_backref_node *upper;
3141*4882a593Smuzhiyun struct btrfs_backref_edge *edge;
3142*4882a593Smuzhiyun
3143*4882a593Smuzhiyun while (!list_empty(&cache->useless_node)) {
3144*4882a593Smuzhiyun lower = list_first_entry(&cache->useless_node,
3145*4882a593Smuzhiyun struct btrfs_backref_node, list);
3146*4882a593Smuzhiyun list_del_init(&lower->list);
3147*4882a593Smuzhiyun }
3148*4882a593Smuzhiyun while (!list_empty(&cache->pending_edge)) {
3149*4882a593Smuzhiyun edge = list_first_entry(&cache->pending_edge,
3150*4882a593Smuzhiyun struct btrfs_backref_edge, list[UPPER]);
3151*4882a593Smuzhiyun list_del(&edge->list[UPPER]);
3152*4882a593Smuzhiyun list_del(&edge->list[LOWER]);
3153*4882a593Smuzhiyun lower = edge->node[LOWER];
3154*4882a593Smuzhiyun upper = edge->node[UPPER];
3155*4882a593Smuzhiyun btrfs_backref_free_edge(cache, edge);
3156*4882a593Smuzhiyun
3157*4882a593Smuzhiyun /*
3158*4882a593Smuzhiyun * Lower is no longer linked to any upper backref nodes and
3159*4882a593Smuzhiyun * isn't in the cache, so we can free it ourselves.
3160*4882a593Smuzhiyun */
3161*4882a593Smuzhiyun if (list_empty(&lower->upper) &&
3162*4882a593Smuzhiyun RB_EMPTY_NODE(&lower->rb_node))
3163*4882a593Smuzhiyun list_add(&lower->list, &cache->useless_node);
3164*4882a593Smuzhiyun
3165*4882a593Smuzhiyun if (!RB_EMPTY_NODE(&upper->rb_node))
3166*4882a593Smuzhiyun continue;
3167*4882a593Smuzhiyun
3168*4882a593Smuzhiyun /* Add this guy's upper edges to the list to process */
3169*4882a593Smuzhiyun list_for_each_entry(edge, &upper->upper, list[LOWER])
3170*4882a593Smuzhiyun list_add_tail(&edge->list[UPPER],
3171*4882a593Smuzhiyun &cache->pending_edge);
3172*4882a593Smuzhiyun if (list_empty(&upper->upper))
3173*4882a593Smuzhiyun list_add(&upper->list, &cache->useless_node);
3174*4882a593Smuzhiyun }
3175*4882a593Smuzhiyun
3176*4882a593Smuzhiyun while (!list_empty(&cache->useless_node)) {
3177*4882a593Smuzhiyun lower = list_first_entry(&cache->useless_node,
3178*4882a593Smuzhiyun struct btrfs_backref_node, list);
3179*4882a593Smuzhiyun list_del_init(&lower->list);
3180*4882a593Smuzhiyun if (lower == node)
3181*4882a593Smuzhiyun node = NULL;
3182*4882a593Smuzhiyun btrfs_backref_drop_node(cache, lower);
3183*4882a593Smuzhiyun }
3184*4882a593Smuzhiyun
3185*4882a593Smuzhiyun btrfs_backref_cleanup_node(cache, node);
3186*4882a593Smuzhiyun ASSERT(list_empty(&cache->useless_node) &&
3187*4882a593Smuzhiyun list_empty(&cache->pending_edge));
3188*4882a593Smuzhiyun }
3189