Lines matching "lower", "-", and "case" (full-word matches) in fs/btrfs/backref.c

Each entry below is a matching source line, prefixed with its line number in the file and, where the search tool reported one, suffixed with the enclosing function.

1 // SPDX-License-Identifier: GPL-2.0
10 #include "disk-io.h"
14 #include "delayed-ref.h"
50 offset = extent_item_pos - data_offset; in check_extent_in_eb()
55 return -ENOMEM; in check_extent_in_eb()
57 e->next = *eie; in check_extent_in_eb()
58 e->inum = key->objectid; in check_extent_in_eb()
59 e->offset = key->offset + offset; in check_extent_in_eb()
70 eie_next = eie->next; in free_inode_elem_list()
132 * ref->count >0:
133 * - incremented when a ref->count transitions to >0
134 * - decremented when a ref->count transitions to <1
145 return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0; in extent_is_shared()
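The two fragments above fit together: the comment at 132-134 says share_count tracks how many prelim refs currently have a positive count, and line 145 reports an extent as shared once two such refs exist (the increment/decrement itself shows up in the update_share_count fragments at 215 and 217 below). A minimal self-contained sketch of that contract; all *_sketch names and the return value are illustrative, not kernel API:

    #include <stdio.h>

    #define BACKREF_FOUND_SHARED_SKETCH 6      /* illustrative value */

    struct share_check_sketch {
        int share_count;   /* number of refs whose count is > 0 */
    };

    /* Mirror the transition rules: bump on a transition to >0, drop on <1. */
    static void update_share_count_sketch(struct share_check_sketch *sc,
                                          int oldcount, int newcount)
    {
        if (!sc)
            return;
        if (oldcount > 0 && newcount < 1)
            sc->share_count--;
        else if (oldcount < 1 && newcount > 0)
            sc->share_count++;
    }

    static int extent_is_shared_sketch(struct share_check_sketch *sc)
    {
        return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED_SKETCH : 0;
    }

    int main(void)
    {
        struct share_check_sketch sc = { 0 };

        update_share_count_sketch(&sc, 0, 1);  /* first ref: 0 -> 1 */
        update_share_count_sketch(&sc, 0, 1);  /* second ref: 0 -> 1 */
        printf("shared: %d\n", extent_is_shared_sketch(&sc)); /* prints 6 */
        update_share_count_sketch(&sc, 1, 0);  /* one ref dropped to 0 */
        printf("shared: %d\n", extent_is_shared_sketch(&sc)); /* prints 0 */
        return 0;
    }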
158 return -ENOMEM; in btrfs_prelim_ref_init()
174 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
180 if (ref1->level < ref2->level) in prelim_ref_compare()
181 return -1; in prelim_ref_compare()
182 if (ref1->level > ref2->level) in prelim_ref_compare()
184 if (ref1->root_id < ref2->root_id) in prelim_ref_compare()
185 return -1; in prelim_ref_compare()
186 if (ref1->root_id > ref2->root_id) in prelim_ref_compare()
188 if (ref1->key_for_search.type < ref2->key_for_search.type) in prelim_ref_compare()
189 return -1; in prelim_ref_compare()
190 if (ref1->key_for_search.type > ref2->key_for_search.type) in prelim_ref_compare()
192 if (ref1->key_for_search.objectid < ref2->key_for_search.objectid) in prelim_ref_compare()
193 return -1; in prelim_ref_compare()
194 if (ref1->key_for_search.objectid > ref2->key_for_search.objectid) in prelim_ref_compare()
196 if (ref1->key_for_search.offset < ref2->key_for_search.offset) in prelim_ref_compare()
197 return -1; in prelim_ref_compare()
198 if (ref1->key_for_search.offset > ref2->key_for_search.offset) in prelim_ref_compare()
200 if (ref1->parent < ref2->parent) in prelim_ref_compare()
201 return -1; in prelim_ref_compare()
202 if (ref1->parent > ref2->parent) in prelim_ref_compare()
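prelim_ref_compare above is a classic multi-field comparator: fields are checked in priority order (level, root_id, key_for_search, parent) and the first difference decides the ordering, with -1 marking ref1 as the 'lower' block. A condensed sketch of the same pattern; the struct is illustrative and keeps only one key field:

    struct prelim_ref_sketch {
        int level;
        unsigned long long root_id;
        unsigned long long key_objectid;   /* stands in for key_for_search */
        unsigned long long parent;
    };

    static int cmp_u64_sketch(unsigned long long a, unsigned long long b)
    {
        if (a < b)
            return -1;
        if (a > b)
            return 1;
        return 0;
    }

    /* -1: r1 sorts as the 'lower' ref, 1: higher, 0: equal on all fields. */
    static int prelim_ref_compare_sketch(const struct prelim_ref_sketch *r1,
                                         const struct prelim_ref_sketch *r2)
    {
        int ret;

        if (r1->level != r2->level)
            return r1->level < r2->level ? -1 : 1;
        ret = cmp_u64_sketch(r1->root_id, r2->root_id);
        if (ret)
            return ret;
        ret = cmp_u64_sketch(r1->key_objectid, r2->key_objectid);
        if (ret)
            return ret;
        return cmp_u64_sketch(r1->parent, r2->parent);
    }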
215 sc->share_count--; in update_share_count()
217 sc->share_count++; in update_share_count()
237 root = &preftree->root; in prelim_ref_insert()
238 p = &root->rb_root.rb_node; in prelim_ref_insert()
245 p = &(*p)->rb_left; in prelim_ref_insert()
247 p = &(*p)->rb_right; in prelim_ref_insert()
251 struct extent_inode_elem *eie = ref->inode_list; in prelim_ref_insert()
253 while (eie && eie->next) in prelim_ref_insert()
254 eie = eie->next; in prelim_ref_insert()
257 ref->inode_list = newref->inode_list; in prelim_ref_insert()
259 eie->next = newref->inode_list; in prelim_ref_insert()
261 preftree->count); in prelim_ref_insert()
263 * A delayed ref can have newref->count < 0. in prelim_ref_insert()
264 * The ref->count is updated to follow any in prelim_ref_insert()
267 update_share_count(sc, ref->count, in prelim_ref_insert()
268 ref->count + newref->count); in prelim_ref_insert()
269 ref->count += newref->count; in prelim_ref_insert()
275 update_share_count(sc, 0, newref->count); in prelim_ref_insert()
276 preftree->count++; in prelim_ref_insert()
277 trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count); in prelim_ref_insert()
278 rb_link_node(&newref->rbnode, parent, p); in prelim_ref_insert()
279 rb_insert_color_cached(&newref->rbnode, root, leftmost); in prelim_ref_insert()
291 &preftree->root.rb_root, rbnode) { in prelim_release()
292 free_inode_elem_list(ref->inode_list); in prelim_release()
296 preftree->root = RB_ROOT_CACHED; in prelim_release()
297 preftree->count = 0; in prelim_release()
302 * - obtaining the parent is the goal
303 * - if you add a key, you must know that it is a correct key
304 * - if you cannot add the parent or a correct key, then we will look into the
311 * --------------------+--------+----------+--------+----------
312 *      parent logical |    y   |     -    |    -   |     -
313 *      key to resolve |    -   |     y    |    y   |     y
314 *  tree block logical |    -   |     -    |    -   |     -
317 * - column 1: we have the parent -> done
318 * - column 2, 3, 4: we use the key to find the parent
324 * --------------------+--------+----------+--------+----------
325 *      parent logical |    y   |     -    |    y   |     -
326 *      key to resolve |    -   |     -    |    -   |     y
328 *  root for resolving |    -   |     y    |    y   |     y
330 * - column 1, 3: we have the parent -> done
331 * - column 2: we take the first key from the block to find the parent
333 * - column 4: we use the key to find the parent
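The two tables above boil down to a small decision rule: a known parent ends the resolution immediately, a trustworthy key lets us search the owning root for the parent, and failing both we must read the block itself to recover its first key (the add_missing_keys path seen further down). A toy restatement of that rule, with illustrative names:

    enum resolve_action_sketch {
        RESOLVE_DONE,            /* parent logical known -> done */
        RESOLVE_BY_KEY,          /* search the root with the key */
        RESOLVE_READ_FIRST_KEY   /* read the block, take its first key */
    };

    static enum resolve_action_sketch classify_ref_sketch(int has_parent,
                                                          int has_key)
    {
        if (has_parent)
            return RESOLVE_DONE;
        if (has_key)
            return RESOLVE_BY_KEY;
        return RESOLVE_READ_FIRST_KEY;
    }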
351 return -ENOMEM; in add_prelim_ref()
353 ref->root_id = root_id; in add_prelim_ref()
355 ref->key_for_search = *key; in add_prelim_ref()
357 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search)); in add_prelim_ref()
359 ref->inode_list = NULL; in add_prelim_ref()
360 ref->level = level; in add_prelim_ref()
361 ref->count = count; in add_prelim_ref()
362 ref->parent = parent; in add_prelim_ref()
363 ref->wanted_disk_byte = wanted_disk_byte; in add_prelim_ref()
374 return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level, in add_direct_ref()
385 struct preftree *tree = &preftrees->indirect; in add_indirect_ref()
388 tree = &preftrees->indirect_missing_keys; in add_indirect_ref()
395 struct rb_node **p = &preftrees->direct.root.rb_root.rb_node; in is_shared_data_backref()
409 p = &(*p)->rb_left; in is_shared_data_backref()
411 p = &(*p)->rb_right; in is_shared_data_backref()
428 struct btrfs_key *key_for_search = &ref->key_for_search; in add_all_parents()
432 u64 wanted_disk_byte = ref->wanted_disk_byte; in add_all_parents()
437 eb = path->nodes[level]; in add_all_parents()
438 ret = ulist_add(parents, eb->start, 0, GFP_NOFS); in add_all_parents()
454 eb = path->nodes[0]; in add_all_parents()
455 if (path->slots[0] >= btrfs_header_nritems(eb) || in add_all_parents()
456 is_shared_data_backref(preftrees, eb->start) || in add_all_parents()
457 ref->root_id != btrfs_header_owner(eb)) { in add_all_parents()
464 while (!ret && count < ref->count) { in add_all_parents()
465 eb = path->nodes[0]; in add_all_parents()
466 slot = path->slots[0]; in add_all_parents()
470 if (key.objectid != key_for_search->objectid || in add_all_parents()
480 (is_shared_data_backref(preftrees, eb->start) || in add_all_parents()
481 ref->root_id != btrfs_header_owner(eb))) { in add_all_parents()
495 if (ref->key_for_search.offset == key.offset - data_offset) in add_all_parents()
508 ret = ulist_add_merge_ptr(parents, eb->start, in add_all_parents()
513 while (old->next) in add_all_parents()
514 old = old->next; in add_all_parents()
515 old->next = eie; in add_all_parents()
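The tail walk at 513-515 shows how inode lists are merged when ulist_add_merge_ptr finds the parent was already recorded: the new list is spliced onto the tail of the existing one (the same pattern recurs at 253-254 and 1419-1421). A standalone sketch with an illustrative element type:

    struct inode_elem_sketch {
        unsigned long long inum;
        struct inode_elem_sketch *next;
    };

    /* Append list 'eie' to the tail of the (non-empty) list at 'old'. */
    static void merge_inode_lists_sketch(struct inode_elem_sketch *old,
                                         struct inode_elem_sketch *eie)
    {
        while (old->next)
            old = old->next;
        old->next = eie;
    }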
547 int level = ref->level; in resolve_indirect_ref()
548 struct btrfs_key search_key = ref->key_for_search; in resolve_indirect_ref()
558 if (path->search_commit_root) in resolve_indirect_ref()
559 root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id); in resolve_indirect_ref()
561 root = btrfs_get_fs_root(fs_info, ref->root_id, false); in resolve_indirect_ref()
567 if (!path->search_commit_root && in resolve_indirect_ref()
568 test_bit(BTRFS_ROOT_DELETING, &root->state)) { in resolve_indirect_ref()
569 ret = -ENOENT; in resolve_indirect_ref()
574 ret = -ENOENT; in resolve_indirect_ref()
578 if (path->search_commit_root) in resolve_indirect_ref()
579 root_level = btrfs_header_level(root->commit_root); in resolve_indirect_ref()
581 root_level = btrfs_header_level(root->node); in resolve_indirect_ref()
595 * So if we detect such a case, we set the search key's offset to zero to in resolve_indirect_ref()
610 path->lowest_level = level; in resolve_indirect_ref()
618 ref->root_id, level, ref->count, ret, in resolve_indirect_ref()
619 ref->key_for_search.objectid, ref->key_for_search.type, in resolve_indirect_ref()
620 ref->key_for_search.offset); in resolve_indirect_ref()
624 eb = path->nodes[level]; in resolve_indirect_ref()
630 level--; in resolve_indirect_ref()
631 eb = path->nodes[level]; in resolve_indirect_ref()
639 path->lowest_level = 0; in resolve_indirect_ref()
649 return (struct extent_inode_elem *)(uintptr_t)node->aux; in unode_aux_to_inode_list()
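unode_aux_to_inode_list above relies on the ulist convention of stashing a pointer inside a u64 aux field; going through uintptr_t keeps the round trip well defined. A self-contained sketch of that round trip, with illustrative types:

    #include <stdint.h>

    struct ulist_node_sketch {
        unsigned long long val;
        unsigned long long aux;   /* opaque storage, here holding a pointer */
    };

    struct inode_list_sketch {
        unsigned long long inum;
        struct inode_list_sketch *next;
    };

    static void stash_list_sketch(struct ulist_node_sketch *node,
                                  struct inode_list_sketch *list)
    {
        node->aux = (unsigned long long)(uintptr_t)list;   /* pointer -> u64 */
    }

    static struct inode_list_sketch *
    unstash_list_sketch(const struct ulist_node_sketch *node)
    {
        return (struct inode_list_sketch *)(uintptr_t)node->aux;  /* u64 -> pointer */
    }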
695 return -ENOMEM; in resolve_indirect_refs()
703 while ((rnode = rb_first_cached(&preftrees->indirect.root))) { in resolve_indirect_refs()
707 if (WARN(ref->parent, in resolve_indirect_refs()
709 ret = -EINVAL; in resolve_indirect_refs()
713 rb_erase_cached(&ref->rbnode, &preftrees->indirect.root); in resolve_indirect_refs()
714 preftrees->indirect.count--; in resolve_indirect_refs()
716 if (ref->count == 0) { in resolve_indirect_refs()
721 if (sc && sc->root_objectid && in resolve_indirect_refs()
722 ref->root_id != sc->root_objectid) { in resolve_indirect_refs()
734 if (err == -ENOENT) { in resolve_indirect_refs()
735 prelim_ref_insert(fs_info, &preftrees->direct, ref, in resolve_indirect_refs()
747 ref->parent = node ? node->val : 0; in resolve_indirect_refs()
748 ref->inode_list = unode_aux_to_inode_list(node); in resolve_indirect_refs()
758 ret = -ENOMEM; in resolve_indirect_refs()
762 new_ref->parent = node->val; in resolve_indirect_refs()
763 new_ref->inode_list = unode_aux_to_inode_list(node); in resolve_indirect_refs()
764 prelim_ref_insert(fs_info, &preftrees->direct, in resolve_indirect_refs()
772 prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL); in resolve_indirect_refs()
794 struct preftree *tree = &preftrees->indirect_missing_keys; in add_missing_keys()
797 while ((node = rb_first_cached(&tree->root))) { in add_missing_keys()
799 rb_erase_cached(node, &tree->root); in add_missing_keys()
801 BUG_ON(ref->parent); /* should not be a direct ref */ in add_missing_keys()
802 BUG_ON(ref->key_for_search.type); in add_missing_keys()
803 BUG_ON(!ref->wanted_disk_byte); in add_missing_keys()
805 eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0, in add_missing_keys()
806 ref->level - 1, NULL); in add_missing_keys()
813 return -EIO; in add_missing_keys()
818 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0); in add_missing_keys()
820 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0); in add_missing_keys()
824 prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL); in add_missing_keys()
844 spin_lock(&head->lock); in add_delayed_refs()
845 for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) { in add_delayed_refs()
848 if (node->seq > seq) in add_delayed_refs()
851 switch (node->action) { in add_delayed_refs()
852 case BTRFS_ADD_DELAYED_EXTENT: in add_delayed_refs()
853 case BTRFS_UPDATE_DELAYED_HEAD: in add_delayed_refs()
856 case BTRFS_ADD_DELAYED_REF: in add_delayed_refs()
857 count = node->ref_mod; in add_delayed_refs()
859 case BTRFS_DROP_DELAYED_REF: in add_delayed_refs()
860 count = node->ref_mod * -1; in add_delayed_refs()
865 switch (node->type) { in add_delayed_refs()
866 case BTRFS_TREE_BLOCK_REF_KEY: { in add_delayed_refs()
871 if (head->extent_op && head->extent_op->update_key) { in add_delayed_refs()
872 btrfs_disk_key_to_cpu(&key, &head->extent_op->key); in add_delayed_refs()
877 ret = add_indirect_ref(fs_info, preftrees, ref->root, in add_delayed_refs()
878 key_ptr, ref->level + 1, in add_delayed_refs()
879 node->bytenr, count, sc, in add_delayed_refs()
883 case BTRFS_SHARED_BLOCK_REF_KEY: { in add_delayed_refs()
889 ret = add_direct_ref(fs_info, preftrees, ref->level + 1, in add_delayed_refs()
890 ref->parent, node->bytenr, count, in add_delayed_refs()
894 case BTRFS_EXTENT_DATA_REF_KEY: { in add_delayed_refs()
899 key.objectid = ref->objectid; in add_delayed_refs()
901 key.offset = ref->offset; in add_delayed_refs()
919 sc->have_delayed_delete_refs = true; in add_delayed_refs()
921 ret = add_indirect_ref(fs_info, preftrees, ref->root, in add_delayed_refs()
922 &key, 0, node->bytenr, count, sc, in add_delayed_refs()
926 case BTRFS_SHARED_DATA_REF_KEY: { in add_delayed_refs()
932 ret = add_direct_ref(fs_info, preftrees, 0, ref->parent, in add_delayed_refs()
933 node->bytenr, count, sc, in add_delayed_refs()
950 spin_unlock(&head->lock); in add_delayed_refs()
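The action switch near the top of add_delayed_refs (lines 851-860) reduces every delayed ref to a signed count: BTRFS_ADD_DELAYED_REF contributes +ref_mod, BTRFS_DROP_DELAYED_REF contributes -ref_mod, and extent/head updates are skipped. A hedged sketch of that mapping, with illustrative enum values:

    enum delayed_action_sketch {
        ADD_DELAYED_REF_SKETCH,
        DROP_DELAYED_REF_SKETCH,
        ADD_DELAYED_EXTENT_SKETCH,
        UPDATE_DELAYED_HEAD_SKETCH
    };

    /* Signed contribution of one delayed ref to the preliminary ref count. */
    static int delayed_ref_count_sketch(enum delayed_action_sketch action,
                                        int ref_mod)
    {
        switch (action) {
        case ADD_DELAYED_REF_SKETCH:
            return ref_mod;
        case DROP_DELAYED_REF_SKETCH:
            return -ref_mod;
        default:
            /* extent/head updates are not expected here; no contribution */
            return 0;
        }
    }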
978 leaf = path->nodes[0]; in add_inline_refs()
979 slot = path->slots[0]; in add_inline_refs()
1014 return -EUCLEAN; in add_inline_refs()
1019 case BTRFS_SHARED_BLOCK_REF_KEY: in add_inline_refs()
1024 case BTRFS_SHARED_DATA_REF_KEY: { in add_inline_refs()
1035 case BTRFS_TREE_BLOCK_REF_KEY: in add_inline_refs()
1040 case BTRFS_EXTENT_DATA_REF_KEY: { in add_inline_refs()
1045 dref = (struct btrfs_extent_data_ref *)(&iref->offset); in add_inline_refs()
1052 if (sc && sc->inum && key.objectid != sc->inum && in add_inline_refs()
1053 !sc->have_delayed_delete_refs) { in add_inline_refs()
1078 * add all non-inline backrefs for bytenr to the list
1087 struct btrfs_root *extent_root = fs_info->extent_root; in add_keyed_refs()
1102 slot = path->slots[0]; in add_keyed_refs()
1103 leaf = path->nodes[0]; in add_keyed_refs()
1114 case BTRFS_SHARED_BLOCK_REF_KEY: in add_keyed_refs()
1120 case BTRFS_SHARED_DATA_REF_KEY: { in add_keyed_refs()
1133 case BTRFS_TREE_BLOCK_REF_KEY: in add_keyed_refs()
1139 case BTRFS_EXTENT_DATA_REF_KEY: { in add_keyed_refs()
1153 if (sc && sc->inum && key.objectid != sc->inum && in add_keyed_refs()
1154 !sc->have_delayed_delete_refs) { in add_keyed_refs()
1183 * much like the trans == NULL case; the difference only lies in that it will not
1185 * The special case is for qgroup to search roots in commit_transaction().
1187 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
1220 key.offset = (u64)-1;
1228 return -ENOMEM;
1230 path->search_commit_root = 1;
1231 path->skip_locking = 1;
1235 path->skip_locking = 1;
1245 ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
1251 ret = -EUCLEAN;
1256 if (trans && likely(trans->type != __TRANS_DUMMY) &&
1265 delayed_refs = &trans->transaction->delayed_refs;
1266 spin_lock(&delayed_refs->lock);
1269 if (!mutex_trylock(&head->mutex)) {
1270 refcount_inc(&head->refs);
1271 spin_unlock(&delayed_refs->lock);
1279 mutex_lock(&head->mutex);
1280 mutex_unlock(&head->mutex);
1284 spin_unlock(&delayed_refs->lock);
1287 mutex_unlock(&head->mutex);
1291 spin_unlock(&delayed_refs->lock);
1295 if (path->slots[0]) {
1299 path->slots[0]--;
1300 leaf = path->nodes[0];
1301 slot = path->slots[0];
1319 ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
1342 node = rb_next(&ref->rbnode);
1344 * ref->count < 0 can happen here if there are delayed
1345 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
1351 * and would retain their original ref->count < 0.
1353 if (roots && ref->count && ref->root_id && ref->parent == 0) {
1354 if (sc && sc->root_objectid &&
1355 ref->root_id != sc->root_objectid) {
1361 ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
1365 if (ref->count && ref->parent) {
1366 if (extent_item_pos && !ref->inode_list &&
1367 ref->level == 0) {
1370 eb = read_tree_block(fs_info, ref->parent, 0,
1371 ref->level, NULL);
1377 ret = -EIO;
1381 if (!path->skip_locking) {
1387 if (!path->skip_locking)
1392 ref->inode_list = eie;
1395 * so set to NULL to avoid a double free in case
1400 ret = ulist_add_merge_ptr(refs, ref->parent,
1401 ref->inode_list,
1412 * case.
1416 ret = -EUCLEAN;
1419 while (eie->next)
1420 eie = eie->next;
1421 eie->next = ref->inode_list;
1428 * use-after-free when our caller uses it or double
1429 * frees in case an error happens before we return.
1431 ref->inode_list = NULL;
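The comment at 1344-1351 is easiest to see with a worked example: prelim_ref_insert adds the counts of merged refs (line 269), so an on-disk ref carrying +1 merged with a delayed BTRFS_DROP_DELAYED_REF carrying -1 nets to 0, and the 'if (roots && ref->count ...)' test at 1353 skips it; an unmerged delayed drop simply retains its negative count and is skipped the same way. The arithmetic, as a trivial sketch:

    #include <stdio.h>

    int main(void)
    {
        int on_disk = 1;          /* keyed/inline backref found on disk */
        int delayed_drop = -1;    /* BTRFS_DROP_DELAYED_REF contribution */
        int net = on_disk + delayed_drop;

        /* net == 0, so the final loop ignores this ref entirely */
        printf("net ref count: %d\n", net);
        return 0;
    }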
1465 return -ENOMEM;
1469 if (ret < 0 && ret != -ENOENT) {
1502 return -ENOMEM;
1506 return -ENOMEM;
1513 if (ret < 0 && ret != -ENOENT) {
1522 bytenr = node->val;
1538 down_read(&fs_info->commit_root_sem);
1542 up_read(&fs_info->commit_root_sem);
1547 * btrfs_check_shared - tell us whether an extent is shared
1563 struct btrfs_fs_info *fs_info = root->fs_info;
1570 .root_objectid = root->root_key.objectid,
1581 if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1586 down_read(&fs_info->commit_root_sem);
1600 if (ret < 0 && ret != -ENOENT)
1606 bytenr = node->val;
1616 up_read(&fs_info->commit_root_sem);
1645 leaf = path->nodes[0];
1646 slot = path->slots[0];
1651 * where it should be inserted. In our case
1653 * next INODE_REF_KEY_V2 item. In the case
1660 ret = -ENOENT;
1674 ret = -ENOENT;
1681 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1695 * 0-terminated. the path is only given within the current file system.
1700 * in case the path buffer would overflow, the pointer is decremented further
1703 * required for the path to fit into the buffer. in that case, the returned
1714 s64 bytes_left = ((s64)size) - 1;
1717 int leave_spinning = path->leave_spinning;
1723 path->leave_spinning = 1;
1725 bytes_left -= name_len;
1730 if (!path->skip_locking)
1737 ret = -ENOENT;
1747 slot = path->slots[0];
1748 eb = path->nodes[0];
1751 if (!path->skip_locking)
1753 path->nodes[0] = NULL;
1754 path->locks[0] = 0;
1763 --bytes_left;
1769 path->leave_spinning = leave_spinning;
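The doc comment at 1700-1703 describes btrfs_ref_to_path's backwards fill: names are prepended from the end of the buffer, and on overflow the cursor keeps moving (virtually) past the start so the caller can compute how much space would have been needed. A self-contained sketch of that scheme under simplified assumptions, using a flat name list instead of a tree walk:

    #include <stdio.h>
    #include <string.h>

    /* Prepend "/name" in front of 'pos'; write only while space remains. */
    static char *prepend_sketch(char *pos, long *bytes_left, const char *name)
    {
        long len = (long)strlen(name) + 1;   /* name plus '/' separator */

        *bytes_left -= len;
        pos -= len;
        if (*bytes_left >= 0) {
            pos[0] = '/';
            memcpy(pos + 1, name, len - 1);
        }
        return pos;   /* may point before the buffer when it was too small */
    }

    int main(void)
    {
        char dest[64];
        long bytes_left = sizeof(dest) - 1;
        char *pos = dest + bytes_left;

        dest[bytes_left] = '\0';
        pos = prepend_sketch(pos, &bytes_left, "file");
        pos = prepend_sketch(pos, &bytes_left, "dir");
        if (pos < dest)
            printf("buffer too small by %ld bytes\n", (long)(dest - pos));
        else
            printf("path: %s\n", pos + 1);   /* never starts with '/' */
        return 0;
    }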
1799 key.offset = (u64)-1;
1801 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1805 ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1808 ret = -ENOENT;
1811 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1812 if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1813 size = fs_info->nodesize;
1814 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1815 size = found_key->offset;
1817 if (found_key->objectid > logical ||
1818 found_key->objectid + size <= logical) {
1821 return -ENOENT;
1824 eb = path->nodes[0];
1825 item_size = btrfs_item_size_nr(eb, path->slots[0]);
1828 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1833 logical, logical - found_key->objectid, found_key->objectid,
1834 found_key->offset, flags, item_size);
1847 return -EIO;
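The test at 1817-1818 is a half-open interval check: a METADATA_ITEM covers nodesize bytes, an EXTENT_ITEM covers key->offset bytes (lines 1812-1815), and the extent item at found_key->objectid only matches if logical falls inside [objectid, objectid + size). The predicate, as a tiny sketch:

    /* 1 if 'logical' falls inside the extent [objectid, objectid + size). */
    static int extent_contains_sketch(unsigned long long objectid,
                                      unsigned long long size,
                                      unsigned long long logical)
    {
        return !(objectid > logical || objectid + size <= logical);
    }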
1874 if (key->type == BTRFS_METADATA_ITEM_KEY) {
1879 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1889 return -ENOENT;
1897 return -EUCLEAN;
1922 if (*ptr == (unsigned long)-1)
1942 if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1948 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1949 *out_level = (u8)key->offset;
1953 *ptr = (unsigned long)-1;
1966 for (eie = inode_list; eie; eie = eie->next) {
1969 extent_item_objectid, eie->inum,
1970 eie->offset, root);
1971 ret = iterate(eie->inum, eie->offset, root, ctx);
1986 * when the iterator function returns a non-zero value, iteration stops.
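Lines 1966-1971 and the comment at 1986 show the callback contract: the inode list is walked element by element, and a non-zero return from the iterator function stops the walk and is propagated. A self-contained sketch of that shape, with illustrative types:

    typedef int (*iterate_fn_sketch)(unsigned long long inum,
                                     unsigned long long offset,
                                     unsigned long long root, void *ctx);

    struct iter_elem_sketch {
        unsigned long long inum;
        unsigned long long offset;
        struct iter_elem_sketch *next;
    };

    /* Walk the list; a non-zero return from the callback stops the walk. */
    static int iterate_list_sketch(struct iter_elem_sketch *head,
                                   unsigned long long root,
                                   iterate_fn_sketch fn, void *ctx)
    {
        struct iter_elem_sketch *eie;
        int ret = 0;

        for (eie = head; eie; eie = eie->next) {
            ret = fn(eie->inum, eie->offset, root, ctx);
            if (ret)
                break;
        }
        return ret;
    }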
2008 trans = btrfs_attach_transaction(fs_info->extent_root);
2010 if (PTR_ERR(trans) != -ENOENT &&
2011 PTR_ERR(trans) != -EROFS)
2020 down_read(&fs_info->commit_root_sem);
2030 ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
2039 root_node->val, ref_node->val,
2040 ref_node->aux);
2043 (uintptr_t)ref_node->aux,
2044 root_node->val,
2057 up_read(&fs_info->commit_root_sem);
2068 if (inodes->bytes_left >= c) {
2069 inodes->bytes_left -= c;
2070 inodes->val[inodes->elem_cnt] = inum;
2071 inodes->val[inodes->elem_cnt + 1] = offset;
2072 inodes->val[inodes->elem_cnt + 2] = root;
2073 inodes->elem_cnt += 3;
2075 inodes->bytes_missing += c - inodes->bytes_left;
2076 inodes->bytes_left = 0;
2077 inodes->elem_missed += 3;
2091 int search_commit_root = path->search_commit_root;
2098 return -EINVAL;
2100 extent_item_pos = logical - found_key.objectid;
2135 ret = found ? 0 : -ENOENT;
2141 slot = path->slots[0];
2142 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2144 ret = -ENOMEM;
2155 btrfs_debug(fs_root->fs_info,
2158 fs_root->root_key.objectid);
2195 ret = found ? 0 : -ENOENT;
2200 slot = path->slots[0];
2201 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2203 ret = -ENOMEM;
2219 (unsigned long)&extref->name, eb, ctx);
2246 else if (ret != -ENOENT)
2250 if (ret == -ENOENT && found_refs)
2258 * returns <0 in case of an error
2266 int i = ipath->fspath->elem_cnt;
2270 bytes_left = ipath->fspath->bytes_left > s_ptr ?
2271 ipath->fspath->bytes_left - s_ptr : 0;
2273 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2274 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2280 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2281 ++ipath->fspath->elem_cnt;
2282 ipath->fspath->bytes_left = fspath - fspath_min;
2284 ++ipath->fspath->elem_missed;
2285 ipath->fspath->bytes_missing += fspath_min - fspath;
2286 ipath->fspath->bytes_left = 0;
2294 * it has been created large enough. each path is zero-terminated and accessed
2295 * from ipath->fspath->val[i].
2296 * when it returns, there are ipath->fspath->elem_cnt number of paths available
2297 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2298 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
2299 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2304 return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
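The contract spelled out in the comment above (2294-2299) suggests the consumer-side pattern below: read elem_cnt zero-terminated paths out of val[], then check elem_missed/bytes_missing to decide whether to retry with a bigger allocation. A hedged sketch with an illustrative container mirroring the fspath fields:

    #include <stdio.h>

    struct fspath_sketch {
        unsigned long long bytes_missing;   /* extra bytes that were needed */
        unsigned long elem_cnt;             /* paths actually stored */
        unsigned long elem_missed;          /* paths that did not fit */
        unsigned long long val[];           /* each entry holds a char * */
    };

    /* Returns 1 when the caller should retry with a larger buffer. */
    static int dump_paths_sketch(const struct fspath_sketch *fspath)
    {
        unsigned long i;

        for (i = 0; i < fspath->elem_cnt; i++)
            printf("%s\n", (const char *)(unsigned long)fspath->val[i]);

        if (fspath->elem_missed)
            printf("missed %lu paths, need %llu more bytes\n",
                   fspath->elem_missed, fspath->bytes_missing);
        return fspath->elem_missed != 0;
    }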
2316 return ERR_PTR(-ENOMEM);
2319 data->bytes_left = total_bytes - sizeof(*data);
2320 data->bytes_missing = 0;
2322 data->bytes_missing = sizeof(*data) - total_bytes;
2323 data->bytes_left = 0;
2326 data->elem_cnt = 0;
2327 data->elem_missed = 0;
2335 * information will be total_bytes - sizeof(struct inode_fs_paths).
2351 return ERR_PTR(-ENOMEM);
2354 ifp->btrfs_path = path;
2355 ifp->fspath = fspath;
2356 ifp->fs_root = fs_root;
2365 kvfree(ipath->fspath);
2378 ret->path = btrfs_alloc_path();
2379 if (!ret->path) {
2385 ret->path->search_commit_root = 1;
2386 ret->path->skip_locking = 1;
2387 ret->fs_info = fs_info;
2394 struct btrfs_fs_info *fs_info = iter->fs_info;
2395 struct btrfs_path *path = iter->path;
2402 key.offset = (u64)-1;
2403 iter->bytenr = bytenr;
2405 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
2409 ret = -EUCLEAN;
2412 if (path->slots[0] == 0) {
2414 ret = -EUCLEAN;
2417 path->slots[0]--;
2419 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2422 ret = -ENOENT;
2425 memcpy(&iter->cur_key, &key, sizeof(key));
2426 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2427 path->slots[0]);
2428 iter->end_ptr = (u32)(iter->item_ptr +
2429 btrfs_item_size_nr(path->nodes[0], path->slots[0]));
2430 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2436 * This is an extra precaution for non-skinny-metadata, where in btrfs_backref_iter_start()
2440 if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2441 ret = -ENOTSUPP;
2444 iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2447 if (iter->cur_ptr >= iter->end_ptr) {
2448 ret = btrfs_next_item(fs_info->extent_root, path);
2452 ret = -ENOENT;
2458 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2459 path->slots[0]);
2460 if (iter->cur_key.objectid != bytenr ||
2461 (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2462 iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2463 ret = -ENOENT;
2466 iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2467 path->slots[0]);
2468 iter->item_ptr = iter->cur_ptr;
2469 iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
2470 path->nodes[0], path->slots[0]));
2483 * Caller needs to check whether it's an inline ref or not by iter->cur_key.
2492 struct btrfs_path *path = iter->path;
2499 ASSERT(iter->cur_ptr < iter->end_ptr);
2509 ((unsigned long)iter->cur_ptr);
2514 iter->cur_ptr += size;
2515 if (iter->cur_ptr < iter->end_ptr)
2522 ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
2526 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
2527 if (iter->cur_key.objectid != iter->bytenr ||
2528 (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
2529 iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
2531 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2532 path->slots[0]);
2533 iter->cur_ptr = iter->item_ptr;
2534 iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
2535 path->slots[0]);
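btrfs_backref_iter_start and btrfs_backref_iter_next above follow a familiar cursor contract, and the caller inspects iter->cur_key to tell inline from keyed refs. A self-contained sketch of the same start/next shape over a plain array, assuming the common convention of 0 = advanced, >0 = exhausted, <0 = error (illustrative types and error value):

    #include <stdio.h>

    struct iter_sketch {
        const int *items;
        int count;
        int pos;
    };

    /* Position on the first item; -1 stands in for -ENOENT here. */
    static int iter_start_sketch(struct iter_sketch *it, const int *items,
                                 int count)
    {
        it->items = items;
        it->count = count;
        it->pos = 0;
        return count > 0 ? 0 : -1;
    }

    /* 0: advanced to the next item, >0: exhausted, <0 would be an error. */
    static int iter_next_sketch(struct iter_sketch *it)
    {
        if (it->pos + 1 >= it->count)
            return 1;
        it->pos++;
        return 0;
    }

    int main(void)
    {
        static const int refs[] = { 10, 20, 30 };
        struct iter_sketch it;
        int ret = iter_start_sketch(&it, refs, 3);

        while (ret == 0) {
            printf("ref %d\n", it.items[it.pos]);
            ret = iter_next_sketch(&it);
        }
        return ret < 0 ? 1 : 0;
    }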
2544 cache->rb_root = RB_ROOT;
2546 INIT_LIST_HEAD(&cache->pending[i]);
2547 INIT_LIST_HEAD(&cache->changed);
2548 INIT_LIST_HEAD(&cache->detached);
2549 INIT_LIST_HEAD(&cache->leaves);
2550 INIT_LIST_HEAD(&cache->pending_edge);
2551 INIT_LIST_HEAD(&cache->useless_node);
2552 cache->fs_info = fs_info;
2553 cache->is_reloc = is_reloc;
2566 INIT_LIST_HEAD(&node->list);
2567 INIT_LIST_HEAD(&node->upper);
2568 INIT_LIST_HEAD(&node->lower);
2569 RB_CLEAR_NODE(&node->rb_node);
2570 cache->nr_nodes++;
2571 node->level = level;
2572 node->bytenr = bytenr;
2584 cache->nr_edges++;
2604 BUG_ON(!node->lowest && !node->detached);
2605 while (!list_empty(&node->upper)) {
2606 edge = list_entry(node->upper.next, struct btrfs_backref_edge,
2607 list[LOWER]);
2608 upper = edge->node[UPPER];
2609 list_del(&edge->list[LOWER]);
2610 list_del(&edge->list[UPPER]);
2617 if (list_empty(&upper->lower)) {
2618 list_add_tail(&upper->lower, &cache->leaves);
2619 upper->lowest = 1;
2634 while (!list_empty(&cache->detached)) {
2635 node = list_entry(cache->detached.next,
2640 while (!list_empty(&cache->leaves)) {
2641 node = list_entry(cache->leaves.next,
2642 struct btrfs_backref_node, lower);
2646 cache->last_trans = 0;
2649 ASSERT(list_empty(&cache->pending[i]));
2650 ASSERT(list_empty(&cache->pending_edge));
2651 ASSERT(list_empty(&cache->useless_node));
2652 ASSERT(list_empty(&cache->changed));
2653 ASSERT(list_empty(&cache->detached));
2654 ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
2655 ASSERT(!cache->nr_nodes);
2656 ASSERT(!cache->nr_edges);
2679 ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
2682 if (ref_key->objectid == ref_key->offset) {
2685 cur->is_reloc_root = 1;
2687 if (cache->is_reloc) {
2688 root = find_reloc_root(cache->fs_info, cur->bytenr);
2690 return -ENOENT;
2691 cur->root = root;
2697 list_add(&cur->list, &cache->useless_node);
2704 return -ENOMEM;
2706 rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
2709 upper = btrfs_backref_alloc_node(cache, ref_key->offset,
2710 cur->level + 1);
2713 return -ENOMEM;
2720 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2724 ASSERT(upper->checked);
2725 INIT_LIST_HEAD(&edge->list[UPPER]);
2749 struct btrfs_fs_info *fs_info = cache->fs_info;
2751 struct btrfs_backref_node *lower;
2760 root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
2763 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2764 cur->cowonly = 1;
2766 if (btrfs_root_level(&root->root_item) == cur->level) {
2768 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
2776 * completely relying on direct backref (key->offset is parent
2779 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
2781 list_add(&cur->list, &cache->useless_node);
2783 cur->root = root;
2788 level = cur->level + 1;
2791 path->search_commit_root = 1;
2792 path->skip_locking = 1;
2793 path->lowest_level = level;
2795 path->lowest_level = 0;
2800 if (ret > 0 && path->slots[level] > 0)
2801 path->slots[level]--;
2803 eb = path->nodes[level];
2804 if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
2807 cur->bytenr, level - 1, root->root_key.objectid,
2808 tree_key->objectid, tree_key->type, tree_key->offset);
2810 ret = -ENOENT;
2813 lower = cur;
2817 if (!path->nodes[level]) {
2818 ASSERT(btrfs_root_bytenr(&root->root_item) ==
2819 lower->bytenr);
2822 cache->is_reloc) {
2824 list_add(&lower->list, &cache->useless_node);
2826 lower->root = root;
2834 ret = -ENOMEM;
2838 eb = path->nodes[level];
2839 rb_node = rb_simple_search(&cache->rb_root, eb->start);
2841 upper = btrfs_backref_alloc_node(cache, eb->start,
2842 lower->level + 1);
2846 ret = -ENOMEM;
2849 upper->owner = btrfs_header_owner(eb);
2850 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2851 upper->cowonly = 1;
2858 upper->checked = 0;
2860 upper->checked = 1;
2867 if (!upper->checked && need_check) {
2869 list_add_tail(&edge->list[UPPER],
2870 &cache->pending_edge);
2872 if (upper->checked)
2874 INIT_LIST_HEAD(&edge->list[UPPER]);
2879 ASSERT(upper->checked);
2880 INIT_LIST_HEAD(&edge->list[UPPER]);
2881 if (!upper->owner)
2882 upper->owner = btrfs_header_owner(eb);
2884 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
2890 lower = upper;
2902 * links aren't yet bi-directional. Such links still need to be finished.
2915 struct btrfs_fs_info *fs_info = cache->fs_info;
2920 ret = btrfs_backref_iter_start(iter, cur->bytenr);
2933 ret = -EUCLEAN;
2937 WARN_ON(cur->checked);
2938 if (!list_empty(&cur->upper)) {
2943 ASSERT(list_is_singular(&cur->upper));
2944 edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
2945 list[LOWER]);
2946 ASSERT(list_empty(&edge->list[UPPER]));
2947 exist = edge->node[UPPER];
2952 if (!exist->checked)
2953 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2966 key.objectid = iter->bytenr;
2972 ((unsigned long)iter->cur_ptr);
2976 ret = -EUCLEAN;
2982 key.type = iter->cur_key.type;
2983 key.offset = iter->cur_key.offset;
2992 exist->owner == key.offset) ||
2994 exist->bytenr == key.offset))) {
3006 ret = -EINVAL;
3025 cur->checked = 1;
3038 struct list_head *useless_node = &cache->useless_node;
3043 ASSERT(start->checked);
3045 /* Insert this node into the cache if it's not COW-only */
3046 if (!start->cowonly) {
3047 rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
3048 &start->rb_node);
3050 btrfs_backref_panic(cache->fs_info, start->bytenr,
3051 -EEXIST);
3052 list_add_tail(&start->lower, &cache->leaves);
3060 list_for_each_entry(edge, &start->upper, list[LOWER])
3061 list_add_tail(&edge->list[UPPER], &pending_edge);
3065 struct btrfs_backref_node *lower;
3069 list_del_init(&edge->list[UPPER]);
3070 upper = edge->node[UPPER];
3071 lower = edge->node[LOWER];
3074 if (upper->detached) {
3075 list_del(&edge->list[LOWER]);
3078 /* Lower node is orphan, queue for cleanup */
3079 if (list_empty(&lower->upper))
3080 list_add(&lower->list, useless_node);
3087 * So if we have upper->rb_node populated, this means a cache
3091 if (!RB_EMPTY_NODE(&upper->rb_node)) {
3092 if (upper->lowest) {
3093 list_del_init(&upper->lower);
3094 upper->lowest = 0;
3097 list_add_tail(&edge->list[UPPER], &upper->lower);
3102 if (!upper->checked) {
3104 return -EUCLEAN;
3107 /* Sanity check, COW-only node has non-COW-only parent */
3108 if (start->cowonly != upper->cowonly) {
3110 return -EUCLEAN;
3113 /* Only cache non-COW-only (subvolume trees) tree blocks */
3114 if (!upper->cowonly) {
3115 rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3116 &upper->rb_node);
3118 btrfs_backref_panic(cache->fs_info,
3119 upper->bytenr, -EEXIST);
3120 return -EUCLEAN;
3124 list_add_tail(&edge->list[UPPER], &upper->lower);
3130 list_for_each_entry(edge, &upper->upper, list[LOWER])
3131 list_add_tail(&edge->list[UPPER], &pending_edge);
3139 struct btrfs_backref_node *lower;
3143 while (!list_empty(&cache->useless_node)) {
3144 lower = list_first_entry(&cache->useless_node,
3146 list_del_init(&lower->list);
3148 while (!list_empty(&cache->pending_edge)) {
3149 edge = list_first_entry(&cache->pending_edge,
3151 list_del(&edge->list[UPPER]);
3152 list_del(&edge->list[LOWER]);
3153 lower = edge->node[LOWER];
3154 upper = edge->node[UPPER];
3158 * Lower is no longer linked to any upper backref nodes and
3161 if (list_empty(&lower->upper) &&
3162 RB_EMPTY_NODE(&lower->rb_node))
3163 list_add(&lower->list, &cache->useless_node);
3165 if (!RB_EMPTY_NODE(&upper->rb_node))
3169 list_for_each_entry(edge, &upper->upper, list[LOWER])
3170 list_add_tail(&edge->list[UPPER],
3171 &cache->pending_edge);
3172 if (list_empty(&upper->upper))
3173 list_add(&upper->list, &cache->useless_node);
3176 while (!list_empty(&cache->useless_node)) {
3177 lower = list_first_entry(&cache->useless_node,
3179 list_del_init(&lower->list);
3180 if (lower == node)
3182 btrfs_backref_drop_node(cache, lower);
3186 ASSERT(list_empty(&cache->useless_node) &&
3187 list_empty(&cache->pending_edge));