Lines matching "tightly-coupled" in the btrfs extent I/O code (fs/btrfs/extent_io.c, abridged listing)

1 // SPDX-License-Identifier: GPL-2.0
8 #include <linux/page-flags.h>
17 #include "extent-io-tree.h"
22 #include "check-integrity.h"
24 #include "rcu-string.h"
26 #include "disk-io.h"
34 return !RB_EMPTY_NODE(&state->rb_node); in extent_state_in_tree()
71 if (!fs_info->allocated_ebs.next) in btrfs_extent_buffer_leak_debug_check()
74 spin_lock_irqsave(&fs_info->eb_leak_lock, flags); in btrfs_extent_buffer_leak_debug_check()
75 while (!list_empty(&fs_info->allocated_ebs)) { in btrfs_extent_buffer_leak_debug_check()
76 eb = list_first_entry(&fs_info->allocated_ebs, in btrfs_extent_buffer_leak_debug_check()
80 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags, in btrfs_extent_buffer_leak_debug_check()
82 list_del(&eb->leak_list); in btrfs_extent_buffer_leak_debug_check()
85 spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags); in btrfs_extent_buffer_leak_debug_check()
95 state->start, state->end, state->state, in btrfs_extent_state_leak_debug_check()
97 refcount_read(&state->refs)); in btrfs_extent_state_leak_debug_check()
98 list_del(&state->leak_list); in btrfs_extent_state_leak_debug_check()
108 struct inode *inode = tree->private_data; in __btrfs_debug_check_extent_io_range()
115 if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) { in __btrfs_debug_check_extent_io_range()
116 btrfs_debug_rl(BTRFS_I(inode)->root->fs_info, in __btrfs_debug_check_extent_io_range()
153 if (set && (state->state & bits) == bits) in add_extent_changeset()
155 if (!set && (state->state & bits) == 0) in add_extent_changeset()
157 changeset->bytes_changed += state->end - state->start + 1; in add_extent_changeset()
158 ret = ulist_add(&changeset->range_changed, state->start, state->end, in add_extent_changeset()
167 struct extent_io_tree *tree = bio->bi_private; in submit_one_bio()
169 bio->bi_private = NULL; in submit_one_bio()
171 if (is_data_inode(tree->private_data)) in submit_one_bio()
172 ret = btrfs_submit_data_bio(tree->private_data, bio, mirror_num, in submit_one_bio()
175 ret = btrfs_submit_metadata_bio(tree->private_data, bio, in submit_one_bio()
184 if (epd->bio) { in end_write_bio()
185 epd->bio->bi_status = errno_to_blk_status(ret); in end_write_bio()
186 bio_endio(epd->bio); in end_write_bio()
187 epd->bio = NULL; in end_write_bio()
201 if (epd->bio) { in flush_write_bio()
202 ret = submit_one_bio(epd->bio, 0, 0); in flush_write_bio()
204 * Clean up of epd->bio is handled by its endio function. in flush_write_bio()
208 * to clean up epd->bio. in flush_write_bio()
210 epd->bio = NULL; in flush_write_bio()
221 return -ENOMEM; in extent_state_cache_init()
231 return -ENOMEM; in extent_io_init()
249 return -ENOMEM; in extent_io_init()
282 tree->fs_info = fs_info; in extent_io_tree_init()
283 tree->state = RB_ROOT; in extent_io_tree_init()
284 tree->dirty_bytes = 0; in extent_io_tree_init()
285 spin_lock_init(&tree->lock); in extent_io_tree_init()
286 tree->private_data = private_data; in extent_io_tree_init()
287 tree->owner = owner; in extent_io_tree_init()
289 lockdep_set_class(&tree->lock, &file_extent_tree_class); in extent_io_tree_init()
294 spin_lock(&tree->lock); in extent_io_tree_release()
301 while (!RB_EMPTY_ROOT(&tree->state)) { in extent_io_tree_release()
305 node = rb_first(&tree->state); in extent_io_tree_release()
307 rb_erase(&state->rb_node, &tree->state); in extent_io_tree_release()
308 RB_CLEAR_NODE(&state->rb_node); in extent_io_tree_release()
313 ASSERT(!waitqueue_active(&state->wq)); in extent_io_tree_release()
316 cond_resched_lock(&tree->lock); in extent_io_tree_release()
318 spin_unlock(&tree->lock); in extent_io_tree_release()
333 state->state = 0; in alloc_extent_state()
334 state->failrec = NULL; in alloc_extent_state()
335 RB_CLEAR_NODE(&state->rb_node); in alloc_extent_state()
336 btrfs_leak_debug_add(&leak_lock, &state->leak_list, &states); in alloc_extent_state()
337 refcount_set(&state->refs, 1); in alloc_extent_state()
338 init_waitqueue_head(&state->wq); in alloc_extent_state()
347 if (refcount_dec_and_test(&state->refs)) { in free_extent_state()
349 btrfs_leak_debug_del(&leak_lock, &state->leak_list); in free_extent_state()
372 p = search_start ? &search_start : &root->rb_node; in tree_insert()
377 if (offset < entry->start) in tree_insert()
378 p = &(*p)->rb_left; in tree_insert()
379 else if (offset > entry->end) in tree_insert()
380 p = &(*p)->rb_right; in tree_insert()
392 * __etree_search - search @tree for an entry that contains @offset. Such an
393 * entry would have entry->start <= offset && entry->end >= offset.
395 * @tree - the tree to search
396 * @offset - offset that should fall within an entry in @tree
397 * @next_ret - pointer to the first entry whose range ends after @offset
398 * @prev_ret - pointer to the first entry whose range begins before @offset
399 * @p_ret - pointer where new node should be anchored (used when inserting an
401 * @parent_ret - points to entry which would have been the parent of the entry,
415 struct rb_root *root = &tree->state; in __etree_search()
416 struct rb_node **n = &root->rb_node; in __etree_search()
427 if (offset < entry->start) in __etree_search()
428 n = &(*n)->rb_left; in __etree_search()
429 else if (offset > entry->end) in __etree_search()
430 n = &(*n)->rb_right; in __etree_search()
442 while (prev && offset > prev_entry->end) { in __etree_search()
452 while (prev && offset < prev_entry->start) { in __etree_search()
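
The __etree_search() comment above describes a range lookup: return the entry whose [start, end] interval contains @offset, and otherwise report the neighbouring entries so the caller knows where an insertion would go. As a rough illustration of that contract, here is a minimal userspace sketch over a sorted, non-overlapping array instead of the kernel rbtree (hypothetical names and types, not the btrfs code):

#include <stddef.h>

/* Hypothetical stand-in for the [start, end] part of struct extent_state. */
struct range {
	unsigned long long start;
	unsigned long long end;		/* inclusive */
};

/*
 * Return the index of the range containing @offset, or -1 if there is none.
 * On a miss, *next is the index of the first range starting after @offset
 * (or @n), which is roughly the information __etree_search() hands back
 * through its next/prev pointers.
 */
static long range_search(const struct range *r, size_t n,
			 unsigned long long offset, size_t *next)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (offset < r[mid].start)
			hi = mid;		/* containing range, if any, is to the left */
		else if (offset > r[mid].end)
			lo = mid + 1;		/* containing range, if any, is to the right */
		else
			return (long)mid;	/* r[mid].start <= offset <= r[mid].end */
	}
	if (next)
		*next = lo;			/* first range that begins after @offset */
	return -1;
}

The kernel version performs the same walk over rbtree nodes and additionally returns the insertion point (p_ret/parent_ret) so insert_state() can link a new node without searching again.
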
497 if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY)) in merge_state()
500 other_node = rb_prev(&state->rb_node); in merge_state()
503 if (other->end == state->start - 1 && in merge_state()
504 other->state == state->state) { in merge_state()
505 if (tree->private_data && in merge_state()
506 is_data_inode(tree->private_data)) in merge_state()
507 btrfs_merge_delalloc_extent(tree->private_data, in merge_state()
509 state->start = other->start; in merge_state()
510 rb_erase(&other->rb_node, &tree->state); in merge_state()
511 RB_CLEAR_NODE(&other->rb_node); in merge_state()
515 other_node = rb_next(&state->rb_node); in merge_state()
518 if (other->start == state->end + 1 && in merge_state()
519 other->state == state->state) { in merge_state()
520 if (tree->private_data && in merge_state()
521 is_data_inode(tree->private_data)) in merge_state()
522 btrfs_merge_delalloc_extent(tree->private_data, in merge_state()
524 state->end = other->end; in merge_state()
525 rb_erase(&other->rb_node, &tree->state); in merge_state()
526 RB_CLEAR_NODE(&other->rb_node); in merge_state()
540 * This may return -EEXIST if the extent is already there, in which case the
555 btrfs_err(tree->fs_info, in insert_state()
559 state->start = start; in insert_state()
560 state->end = end; in insert_state()
564 node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent); in insert_state()
568 btrfs_err(tree->fs_info, in insert_state()
570 found->start, found->end, start, end); in insert_state()
571 return -EEXIST; in insert_state()
583 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
585 * prealloc: [orig->start, split - 1]
586 * orig: [ split, orig->end ]
596 if (tree->private_data && is_data_inode(tree->private_data)) in split_state()
597 btrfs_split_delalloc_extent(tree->private_data, orig, split); in split_state()
599 prealloc->start = orig->start; in split_state()
600 prealloc->end = split - 1; in split_state()
601 prealloc->state = orig->state; in split_state()
602 orig->start = split; in split_state()
604 node = tree_insert(&tree->state, &orig->rb_node, prealloc->end, in split_state()
605 &prealloc->rb_node, NULL, NULL); in split_state()
608 return -EEXIST; in split_state()
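
As the comment before split_state() says, the split carves [orig->start, split - 1] off into the preallocated state and shrinks the original to [split, orig->end], with both halves keeping the same bits. Below is a standalone sketch of just that bookkeeping, with a hypothetical struct and without the tree insertion or -EEXIST handling:

/* Hypothetical stand-in for struct extent_state; only the split bookkeeping. */
struct ext_range {
	unsigned long long start;
	unsigned long long end;		/* inclusive */
	unsigned int bits;		/* state bits carried by the range */
};

/* Split @orig at @split: @left becomes [start, split - 1], @orig becomes [split, end]. */
static int ext_range_split(struct ext_range *orig, struct ext_range *left,
			   unsigned long long split)
{
	if (split <= orig->start || split > orig->end)
		return -1;		/* split point must fall inside the range, after its start */

	left->start = orig->start;
	left->end = split - 1;
	left->bits = orig->bits;	/* both halves keep the original bits */
	orig->start = split;
	return 0;
}

split_state() additionally links the preallocated node into the rbtree right before @orig and returns -EEXIST if that insertion finds a conflicting node.
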
615 struct rb_node *next = rb_next(&state->rb_node); in next_state()
638 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) { in clear_state_bit()
639 u64 range = state->end - state->start + 1; in clear_state_bit()
640 WARN_ON(range > tree->dirty_bytes); in clear_state_bit()
641 tree->dirty_bytes -= range; in clear_state_bit()
644 if (tree->private_data && is_data_inode(tree->private_data)) in clear_state_bit()
645 btrfs_clear_delalloc_extent(tree->private_data, state, bits); in clear_state_bit()
649 state->state &= ~bits_to_clear; in clear_state_bit()
651 wake_up(&state->wq); in clear_state_bit()
652 if (state->state == 0) { in clear_state_bit()
655 rb_erase(&state->rb_node, &tree->state); in clear_state_bit()
656 RB_CLEAR_NODE(&state->rb_node); in clear_state_bit()
679 btrfs_panic(tree->fs_info, err, in extent_io_tree_panic()
709 trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits); in __clear_extent_bit()
723 * up not needing the pre-allocated extent state at all, which in __clear_extent_bit()
731 spin_lock(&tree->lock); in __clear_extent_bit()
741 cached->start <= start && cached->end > start) { in __clear_extent_bit()
743 refcount_dec(&cached->refs); in __clear_extent_bit()
759 if (state->start > end) in __clear_extent_bit()
761 WARN_ON(state->end < start); in __clear_extent_bit()
762 last_end = state->end; in __clear_extent_bit()
765 if (!(state->state & bits)) { in __clear_extent_bit()
771 * | ---- desired range ---- | in __clear_extent_bit()
773 * | ------------- state -------------- | in __clear_extent_bit()
786 if (state->start < start) { in __clear_extent_bit()
796 if (state->end <= end) { in __clear_extent_bit()
804 * | ---- desired range ---- | in __clear_extent_bit()
809 if (state->start <= end && state->end > end) { in __clear_extent_bit()
817 wake_up(&state->wq); in __clear_extent_bit()
827 if (last_end == (u64)-1) in __clear_extent_bit()
836 spin_unlock(&tree->lock); in __clear_extent_bit()
842 spin_unlock(&tree->lock); in __clear_extent_bit()
852 __releases(tree->lock) in wait_on_state()
853 __acquires(tree->lock) in wait_on_state()
856 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE); in wait_on_state()
857 spin_unlock(&tree->lock); in wait_on_state()
859 spin_lock(&tree->lock); in wait_on_state()
860 finish_wait(&state->wq, &wait); in wait_on_state()
876 spin_lock(&tree->lock); in wait_extent_bit()
890 if (state->start > end) in wait_extent_bit()
893 if (state->state & bits) { in wait_extent_bit()
894 start = state->start; in wait_extent_bit()
895 refcount_inc(&state->refs); in wait_extent_bit()
900 start = state->end + 1; in wait_extent_bit()
905 if (!cond_resched_lock(&tree->lock)) { in wait_extent_bit()
911 spin_unlock(&tree->lock); in wait_extent_bit()
921 if (tree->private_data && is_data_inode(tree->private_data)) in set_state_bits()
922 btrfs_set_delalloc_extent(tree->private_data, state, bits); in set_state_bits()
924 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) { in set_state_bits()
925 u64 range = state->end - state->start + 1; in set_state_bits()
926 tree->dirty_bytes += range; in set_state_bits()
930 state->state |= bits_to_set; in set_state_bits()
938 if (!flags || (state->state & flags)) { in cache_state_if_flags()
940 refcount_inc(&state->refs); in cache_state_if_flags()
956 * If any of the exclusive bits are set, this will fail with -EEXIST if some
979 trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits); in __set_extent_bit()
985 * up not needing the pre-allocated extent state at all, which in __set_extent_bit()
993 spin_lock(&tree->lock); in __set_extent_bit()
996 if (state->start <= start && state->end > start && in __set_extent_bit()
998 node = &state->rb_node; in __set_extent_bit()
1021 last_start = state->start; in __set_extent_bit()
1022 last_end = state->end; in __set_extent_bit()
1025 * | ---- desired range ---- | in __set_extent_bit()
1030 if (state->start == start && state->end <= end) { in __set_extent_bit()
1031 if (state->state & exclusive_bits) { in __set_extent_bit()
1032 *failed_start = state->start; in __set_extent_bit()
1033 err = -EEXIST; in __set_extent_bit()
1040 if (last_end == (u64)-1) in __set_extent_bit()
1044 if (start < end && state && state->start == start && in __set_extent_bit()
1051 * | ---- desired range ---- | in __set_extent_bit()
1054 * | ------------- state -------------- | in __set_extent_bit()
1066 if (state->start < start) { in __set_extent_bit()
1067 if (state->state & exclusive_bits) { in __set_extent_bit()
1069 err = -EEXIST; in __set_extent_bit()
1077 if ((state->state & bits) == bits) { in __set_extent_bit()
1078 start = state->end + 1; in __set_extent_bit()
1092 if (state->end <= end) { in __set_extent_bit()
1096 if (last_end == (u64)-1) in __set_extent_bit()
1100 if (start < end && state && state->start == start && in __set_extent_bit()
1107 * | ---- desired range ---- | in __set_extent_bit()
1113 if (state->start > start) { in __set_extent_bit()
1118 this_end = last_start - 1; in __set_extent_bit()
1138 * | ---- desired range ---- | in __set_extent_bit()
1143 if (state->start <= end && state->end > end) { in __set_extent_bit()
1144 if (state->state & exclusive_bits) { in __set_extent_bit()
1146 err = -EEXIST; in __set_extent_bit()
1166 spin_unlock(&tree->lock); in __set_extent_bit()
1172 spin_unlock(&tree->lock); in __set_extent_bit()
1190 * convert_extent_bit - convert all bits in a given range from one bit to
1222 trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits, in convert_extent_bit()
1236 return -ENOMEM; in convert_extent_bit()
1239 spin_lock(&tree->lock); in convert_extent_bit()
1242 if (state->start <= start && state->end > start && in convert_extent_bit()
1244 node = &state->rb_node; in convert_extent_bit()
1257 err = -ENOMEM; in convert_extent_bit()
1270 last_start = state->start; in convert_extent_bit()
1271 last_end = state->end; in convert_extent_bit()
1274 * | ---- desired range ---- | in convert_extent_bit()
1279 if (state->start == start && state->end <= end) { in convert_extent_bit()
1283 if (last_end == (u64)-1) in convert_extent_bit()
1286 if (start < end && state && state->start == start && in convert_extent_bit()
1293 * | ---- desired range ---- | in convert_extent_bit()
1296 * | ------------- state -------------- | in convert_extent_bit()
1308 if (state->start < start) { in convert_extent_bit()
1311 err = -ENOMEM; in convert_extent_bit()
1320 if (state->end <= end) { in convert_extent_bit()
1325 if (last_end == (u64)-1) in convert_extent_bit()
1328 if (start < end && state && state->start == start && in convert_extent_bit()
1335 * | ---- desired range ---- | in convert_extent_bit()
1341 if (state->start > start) { in convert_extent_bit()
1346 this_end = last_start - 1; in convert_extent_bit()
1350 err = -ENOMEM; in convert_extent_bit()
1368 * | ---- desired range ---- | in convert_extent_bit()
1373 if (state->start <= end && state->end > end) { in convert_extent_bit()
1376 err = -ENOMEM; in convert_extent_bit()
1394 spin_unlock(&tree->lock); in convert_extent_bit()
1400 spin_unlock(&tree->lock); in convert_extent_bit()
1414 * either fail with -EEXIST or changeset will record the whole in set_record_extent_bits()
1465 if (err == -EEXIST) { in lock_extent_bits()
1482 if (err == -EEXIST) { in try_lock_extent()
1484 clear_extent_bit(tree, start, failed_start - 1, in try_lock_extent()
1498 page = find_get_page(inode->i_mapping, index); in extent_range_clear_dirty_for_io()
1513 page = find_get_page(inode->i_mapping, index); in extent_range_redirty_for_io()
1523 * return it. tree->lock must be held. NULL will be returned if
1543 if (state->end >= start && (state->state & bits)) in find_first_extent_bit_state()
1568 spin_lock(&tree->lock); in find_first_extent_bit()
1571 if (state->end == start - 1 && extent_state_in_tree(state)) { in find_first_extent_bit()
1573 if (state->state & bits) in find_first_extent_bit()
1588 *start_ret = state->start; in find_first_extent_bit()
1589 *end_ret = state->end; in find_first_extent_bit()
1593 spin_unlock(&tree->lock); in find_first_extent_bit()
1599 * @tree - io tree to check
1600 * @start - offset to start the search from
1601 * @start_ret - the first offset we found with the bits set
1602 * @end_ret - the final contiguous range of the bits that were set
1603 * @bits - bits to look for
1607 * will drop the tree->lock, so use this helper if you want to find the actual
1609 * then walk down the tree until we find a non-contiguous area. The area
1618 spin_lock(&tree->lock); in find_contiguous_extent_bit()
1621 *start_ret = state->start; in find_contiguous_extent_bit()
1622 *end_ret = state->end; in find_contiguous_extent_bit()
1624 if (state->start > (*end_ret + 1)) in find_contiguous_extent_bit()
1626 *end_ret = state->end; in find_contiguous_extent_bit()
1630 spin_unlock(&tree->lock); in find_contiguous_extent_bit()
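
The helper documented above first locates a range that has the requested bits set and then keeps extending @end_ret while the following ranges are exactly adjacent (each one starting at the previous end + 1). Reusing struct range from the earlier search sketch, a simplified version of that walk over a sorted array of set ranges could look like this (illustrative only):

/* Find the first set range at/after @start and extend it over adjacent ones. */
static int find_contiguous(const struct range *r, size_t n,
			   unsigned long long start,
			   unsigned long long *start_ret,
			   unsigned long long *end_ret)
{
	size_t i = 0;

	while (i < n && r[i].end < start)	/* skip ranges entirely before @start */
		i++;
	if (i == n)
		return -1;			/* nothing at or after @start */

	*start_ret = r[i].start;
	*end_ret = r[i].end;
	for (i++; i < n && r[i].start == *end_ret + 1; i++)
		*end_ret = r[i].end;		/* merge strictly adjacent ranges */
	return 0;
}
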
1635 * find_first_clear_extent_bit - find the first range that has @bits not set.
1638 * @tree - the tree to search
1639 * @start - the offset at/after which the found extent should start
1640 * @start_ret - records the beginning of the range
1641 * @end_ret - records the end of the range (inclusive)
1642 * @bits - the set of bits which must be unset
1645 * set, it's possible that @end_ret contains -1; this happens in case the range in find_first_clear_extent_bit()
1655 spin_lock(&tree->lock); in find_first_clear_extent_bit()
1666 *end_ret = -1; in find_first_clear_extent_bit()
1674 *start_ret = state->end + 1; in find_first_clear_extent_bit()
1675 *end_ret = -1; in find_first_clear_extent_bit()
1686 if (in_range(start, state->start, state->end - state->start + 1)) { in find_first_clear_extent_bit()
1687 if (state->state & bits) { in find_first_clear_extent_bit()
1689 * |--range with bits set--| in find_first_clear_extent_bit()
1693 start = state->end + 1; in find_first_clear_extent_bit()
1700 * |--range with bits cleared----| in find_first_clear_extent_bit()
1704 *start_ret = state->start; in find_first_clear_extent_bit()
1709 * |---prev range---|---hole/unset---|---node range---| in find_first_clear_extent_bit()
1715 * |---hole/unset--||--first node--| in find_first_clear_extent_bit()
1722 *start_ret = state->end + 1; in find_first_clear_extent_bit()
1736 if (state->end >= start && !(state->state & bits)) { in find_first_clear_extent_bit()
1737 *end_ret = state->end; in find_first_clear_extent_bit()
1739 *end_ret = state->start - 1; in find_first_clear_extent_bit()
1748 spin_unlock(&tree->lock); in find_first_clear_extent_bit()
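
find_first_clear_extent_bit() reports the opposite thing: the first gap at or after @start that no set range covers, leaving @end_ret at -1 when nothing bounds the gap on the right. A rough userspace equivalent over the same kind of sorted array (again only a sketch; the real code walks the rbtree and also has to handle ranges that exist but do not carry the bits):

/* Report the first hole at/after @start between entries of a sorted set-range array. */
static void find_first_clear(const struct range *r, size_t n,
			     unsigned long long start,
			     unsigned long long *start_ret,
			     unsigned long long *end_ret)
{
	size_t i;

	*start_ret = start;
	*end_ret = (unsigned long long)-1;	/* open-ended until bounded below */

	for (i = 0; i < n; i++) {
		if (r[i].end < start)		/* set range ends before @start */
			continue;
		if (start >= r[i].start) {	/* @start sits inside a set range */
			start = r[i].end + 1;	/* the hole can only begin past it */
			*start_ret = start;
			continue;
		}
		*end_ret = r[i].start - 1;	/* next set range bounds the hole */
		break;
	}
}
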
1767 spin_lock(&tree->lock); in btrfs_find_delalloc_range()
1775 *end = (u64)-1; in btrfs_find_delalloc_range()
1781 if (found && (state->start != cur_start || in btrfs_find_delalloc_range()
1782 (state->state & EXTENT_BOUNDARY))) { in btrfs_find_delalloc_range()
1785 if (!(state->state & EXTENT_DELALLOC)) { in btrfs_find_delalloc_range()
1787 *end = state->end; in btrfs_find_delalloc_range()
1791 *start = state->start; in btrfs_find_delalloc_range()
1793 refcount_inc(&state->refs); in btrfs_find_delalloc_range()
1796 *end = state->end; in btrfs_find_delalloc_range()
1797 cur_start = state->end + 1; in btrfs_find_delalloc_range()
1799 total_bytes += state->end - state->start + 1; in btrfs_find_delalloc_range()
1806 spin_unlock(&tree->lock); in btrfs_find_delalloc_range()
1823 if (index == locked_page->index && end_index == index) in __unlock_for_delalloc()
1826 __process_pages_contig(inode->i_mapping, locked_page, index, end_index, in __unlock_for_delalloc()
1841 if (index == locked_page->index && index == end_index) in lock_delalloc_pages()
1844 ret = __process_pages_contig(inode->i_mapping, locked_page, index, in lock_delalloc_pages()
1846 if (ret == -EAGAIN) in lock_delalloc_pages()
1864 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; in find_lock_delalloc_range()
1897 if (delalloc_end + 1 - delalloc_start > max_bytes) in find_lock_delalloc_range()
1898 delalloc_end = delalloc_start + max_bytes - 1; in find_lock_delalloc_range()
1903 ASSERT(!ret || ret == -EAGAIN); in find_lock_delalloc_range()
1904 if (ret == -EAGAIN) { in find_lock_delalloc_range()
1946 unsigned long nr_pages = end_index - start_index + 1; in __process_pages_contig()
1960 mapping_set_error(mapping, -EIO); in __process_pages_contig()
1972 err = -EAGAIN; in __process_pages_contig()
1998 pages[i]->mapping != mapping) { in __process_pages_contig()
2002 err = -EAGAIN; in __process_pages_contig()
2009 nr_pages -= ret; in __process_pages_contig()
2015 *index_ret = start_index + pages_locked - 1; in __process_pages_contig()
2024 clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL); in extent_clear_unlock_delalloc()
2026 __process_pages_contig(inode->vfs_inode.i_mapping, locked_page, in extent_clear_unlock_delalloc()
2050 spin_lock(&tree->lock); in count_range_bits()
2052 total_bytes = tree->dirty_bytes; in count_range_bits()
2065 if (state->start > search_end) in count_range_bits()
2067 if (contig && found && state->start > last + 1) in count_range_bits()
2069 if (state->end >= cur_start && (state->state & bits) == bits) { in count_range_bits()
2070 total_bytes += min(search_end, state->end) + 1 - in count_range_bits()
2071 max(cur_start, state->start); in count_range_bits()
2075 *start = max(cur_start, state->start); in count_range_bits()
2078 last = state->end; in count_range_bits()
2087 spin_unlock(&tree->lock); in count_range_bits()
2102 spin_lock(&tree->lock); in set_state_failrec()
2109 ret = -ENOENT; in set_state_failrec()
2113 if (state->start != start) { in set_state_failrec()
2114 ret = -ENOENT; in set_state_failrec()
2117 state->failrec = failrec; in set_state_failrec()
2119 spin_unlock(&tree->lock); in set_state_failrec()
2129 spin_lock(&tree->lock); in get_state_failrec()
2136 failrec = ERR_PTR(-ENOENT); in get_state_failrec()
2140 if (state->start != start) { in get_state_failrec()
2141 failrec = ERR_PTR(-ENOENT); in get_state_failrec()
2145 failrec = state->failrec; in get_state_failrec()
2147 spin_unlock(&tree->lock); in get_state_failrec()
2164 spin_lock(&tree->lock); in test_range_bit()
2165 if (cached && extent_state_in_tree(cached) && cached->start <= start && in test_range_bit()
2166 cached->end > start) in test_range_bit()
2167 node = &cached->rb_node; in test_range_bit()
2173 if (filled && state->start > start) { in test_range_bit()
2178 if (state->start > end) in test_range_bit()
2181 if (state->state & bits) { in test_range_bit()
2190 if (state->end == (u64)-1) in test_range_bit()
2193 start = state->end + 1; in test_range_bit()
2203 spin_unlock(&tree->lock); in test_range_bit()
2214 u64 end = start + PAGE_SIZE - 1; in check_page_uptodate()
2226 set_state_failrec(failure_tree, rec->start, NULL); in free_io_failure()
2227 ret = clear_extent_bits(failure_tree, rec->start, in free_io_failure()
2228 rec->start + rec->len - 1, in free_io_failure()
2233 ret = clear_extent_bits(io_tree, rec->start, in free_io_failure()
2234 rec->start + rec->len - 1, in free_io_failure()
2264 ASSERT(!(fs_info->sb->s_flags & SB_RDONLY)); in repair_io_failure()
2268 bio->bi_iter.bi_size = 0; in repair_io_failure()
2289 return -EIO; in repair_io_failure()
2291 ASSERT(bbio->mirror_num == 1); in repair_io_failure()
2298 return -EIO; in repair_io_failure()
2300 BUG_ON(mirror_num != bbio->mirror_num); in repair_io_failure()
2303 sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9; in repair_io_failure()
2304 bio->bi_iter.bi_sector = sector; in repair_io_failure()
2305 dev = bbio->stripes[bbio->mirror_num - 1].dev; in repair_io_failure()
2307 if (!dev || !dev->bdev || in repair_io_failure()
2308 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) { in repair_io_failure()
2311 return -EIO; in repair_io_failure()
2313 bio_set_dev(bio, dev->bdev); in repair_io_failure()
2314 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC; in repair_io_failure()
2322 return -EIO; in repair_io_failure()
2328 rcu_str_deref(dev->name), sector); in repair_io_failure()
2336 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_repair_eb_io_failure()
2337 u64 start = eb->start; in btrfs_repair_eb_io_failure()
2341 if (sb_rdonly(fs_info->sb)) in btrfs_repair_eb_io_failure()
2342 return -EROFS; in btrfs_repair_eb_io_failure()
2345 struct page *p = eb->pages[i]; in btrfs_repair_eb_io_failure()
2348 start - page_offset(p), mirror_num); in btrfs_repair_eb_io_failure()
2373 ret = count_range_bits(failure_tree, &private, (u64)-1, 1, in clean_io_failure()
2382 BUG_ON(!failrec->this_mirror); in clean_io_failure()
2384 if (failrec->in_validation) { in clean_io_failure()
2388 failrec->start); in clean_io_failure()
2391 if (sb_rdonly(fs_info->sb)) in clean_io_failure()
2394 spin_lock(&io_tree->lock); in clean_io_failure()
2396 failrec->start, in clean_io_failure()
2398 spin_unlock(&io_tree->lock); in clean_io_failure()
2400 if (state && state->start <= failrec->start && in clean_io_failure()
2401 state->end >= failrec->start + failrec->len - 1) { in clean_io_failure()
2402 num_copies = btrfs_num_copies(fs_info, failrec->logical, in clean_io_failure()
2403 failrec->len); in clean_io_failure()
2405 repair_io_failure(fs_info, ino, start, failrec->len, in clean_io_failure()
2406 failrec->logical, page, pg_offset, in clean_io_failure()
2407 failrec->failed_mirror); in clean_io_failure()
2419 * - hold extent lock
2420 * - under ordered extent
2421 * - the inode is freeing
2425 struct extent_io_tree *failure_tree = &inode->io_failure_tree; in btrfs_free_io_failure_record()
2429 if (RB_EMPTY_ROOT(&failure_tree->state)) in btrfs_free_io_failure_record()
2432 spin_lock(&failure_tree->lock); in btrfs_free_io_failure_record()
2435 if (state->start > end) in btrfs_free_io_failure_record()
2438 ASSERT(state->end <= end); in btrfs_free_io_failure_record()
2442 failrec = state->failrec; in btrfs_free_io_failure_record()
2448 spin_unlock(&failure_tree->lock); in btrfs_free_io_failure_record()
2454 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_get_io_failure_record()
2457 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; in btrfs_get_io_failure_record()
2458 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; in btrfs_get_io_failure_record()
2459 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; in btrfs_get_io_failure_record()
2467 failrec->logical, failrec->start, failrec->len, in btrfs_get_io_failure_record()
2468 failrec->in_validation); in btrfs_get_io_failure_record()
2480 return ERR_PTR(-ENOMEM); in btrfs_get_io_failure_record()
2482 failrec->start = start; in btrfs_get_io_failure_record()
2483 failrec->len = end - start + 1; in btrfs_get_io_failure_record()
2484 failrec->this_mirror = 0; in btrfs_get_io_failure_record()
2485 failrec->bio_flags = 0; in btrfs_get_io_failure_record()
2486 failrec->in_validation = 0; in btrfs_get_io_failure_record()
2488 read_lock(&em_tree->lock); in btrfs_get_io_failure_record()
2489 em = lookup_extent_mapping(em_tree, start, failrec->len); in btrfs_get_io_failure_record()
2491 read_unlock(&em_tree->lock); in btrfs_get_io_failure_record()
2493 return ERR_PTR(-EIO); in btrfs_get_io_failure_record()
2496 if (em->start > start || em->start + em->len <= start) { in btrfs_get_io_failure_record()
2500 read_unlock(&em_tree->lock); in btrfs_get_io_failure_record()
2503 return ERR_PTR(-EIO); in btrfs_get_io_failure_record()
2506 logical = start - em->start; in btrfs_get_io_failure_record()
2507 logical = em->block_start + logical; in btrfs_get_io_failure_record()
2508 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { in btrfs_get_io_failure_record()
2509 logical = em->block_start; in btrfs_get_io_failure_record()
2510 failrec->bio_flags = EXTENT_BIO_COMPRESSED; in btrfs_get_io_failure_record()
2511 extent_set_compress_type(&failrec->bio_flags, em->compress_type); in btrfs_get_io_failure_record()
2516 logical, start, failrec->len); in btrfs_get_io_failure_record()
2518 failrec->logical = logical; in btrfs_get_io_failure_record()
2540 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_check_repairable()
2543 num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len); in btrfs_check_repairable()
2552 num_copies, failrec->this_mirror, failed_mirror); in btrfs_check_repairable()
2570 BUG_ON(failrec->in_validation); in btrfs_check_repairable()
2571 failrec->in_validation = 1; in btrfs_check_repairable()
2572 failrec->this_mirror = failed_mirror; in btrfs_check_repairable()
2579 if (failrec->in_validation) { in btrfs_check_repairable()
2580 BUG_ON(failrec->this_mirror != failed_mirror); in btrfs_check_repairable()
2581 failrec->in_validation = 0; in btrfs_check_repairable()
2582 failrec->this_mirror = 0; in btrfs_check_repairable()
2584 failrec->failed_mirror = failed_mirror; in btrfs_check_repairable()
2585 failrec->this_mirror++; in btrfs_check_repairable()
2586 if (failrec->this_mirror == failed_mirror) in btrfs_check_repairable()
2587 failrec->this_mirror++; in btrfs_check_repairable()
2590 if (failrec->this_mirror > num_copies) { in btrfs_check_repairable()
2593 num_copies, failrec->this_mirror, failed_mirror); in btrfs_check_repairable()
2603 const u32 blocksize = inode->i_sb->s_blocksize; in btrfs_io_needs_validation()
2610 if (bio->bi_status == BLK_STS_OK) in btrfs_io_needs_validation()
2623 * btrfs_io_bio->iter; for non-cloned bios (cases 1 and 3), we can get in btrfs_io_needs_validation()
2627 if (btrfs_io_bio(bio)->iter.bi_size > blocksize) in btrfs_io_needs_validation()
2634 len += bvec->bv_len; in btrfs_io_needs_validation()
2649 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_submit_read_repair()
2650 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; in btrfs_submit_read_repair()
2651 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; in btrfs_submit_read_repair()
2653 const int icsum = phy_offset >> inode->i_sb->s_blocksize_bits; in btrfs_submit_read_repair()
2678 repair_bio->bi_opf = REQ_OP_READ; in btrfs_submit_read_repair()
2680 repair_bio->bi_opf |= REQ_FAILFAST_DEV; in btrfs_submit_read_repair()
2681 repair_bio->bi_end_io = failed_bio->bi_end_io; in btrfs_submit_read_repair()
2682 repair_bio->bi_iter.bi_sector = failrec->logical >> 9; in btrfs_submit_read_repair()
2683 repair_bio->bi_private = failed_bio->bi_private; in btrfs_submit_read_repair()
2685 if (failed_io_bio->csum) { in btrfs_submit_read_repair()
2686 const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); in btrfs_submit_read_repair()
2688 repair_io_bio->csum = repair_io_bio->csum_inline; in btrfs_submit_read_repair()
2689 memcpy(repair_io_bio->csum, in btrfs_submit_read_repair()
2690 failed_io_bio->csum + csum_size * icsum, csum_size); in btrfs_submit_read_repair()
2693 bio_add_page(repair_bio, page, failrec->len, pgoff); in btrfs_submit_read_repair()
2694 repair_io_bio->logical = failrec->start; in btrfs_submit_read_repair()
2695 repair_io_bio->iter = repair_bio->bi_iter; in btrfs_submit_read_repair()
2697 btrfs_debug(btrfs_sb(inode->i_sb), in btrfs_submit_read_repair()
2699 failrec->this_mirror, failrec->in_validation); in btrfs_submit_read_repair()
2701 status = submit_bio_hook(inode, repair_bio, failrec->this_mirror, in btrfs_submit_read_repair()
2702 failrec->bio_flags); in btrfs_submit_read_repair()
2722 ret = err < 0 ? err : -EIO; in end_extent_writepage()
2723 mapping_set_error(page->mapping, ret); in end_extent_writepage()
2738 int error = blk_status_to_errno(bio->bi_status); in end_bio_extent_writepage()
2746 struct page *page = bvec->bv_page; in end_bio_extent_writepage()
2747 struct inode *inode = page->mapping->host; in end_bio_extent_writepage()
2748 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in end_bio_extent_writepage()
2750 /* We always issue full-page reads, but if some block in end_bio_extent_writepage()
2755 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) { in end_bio_extent_writepage()
2756 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE) in end_bio_extent_writepage()
2759 bvec->bv_offset, bvec->bv_len); in end_bio_extent_writepage()
2763 bvec->bv_offset, bvec->bv_len); in end_bio_extent_writepage()
2767 end = start + bvec->bv_offset + bvec->bv_len - 1; in end_bio_extent_writepage()
2781 u64 end = start + len - 1; in endio_readpage_release_extent()
2783 if (uptodate && tree->track_uptodate) in endio_readpage_release_extent()
2802 int uptodate = !bio->bi_status; in end_bio_extent_readpage()
2817 struct page *page = bvec->bv_page; in end_bio_extent_readpage()
2818 struct inode *inode = page->mapping->host; in end_bio_extent_readpage()
2819 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in end_bio_extent_readpage()
2823 (u64)bio->bi_iter.bi_sector, bio->bi_status, in end_bio_extent_readpage()
2824 io_bio->mirror_num); in end_bio_extent_readpage()
2825 tree = &BTRFS_I(inode)->io_tree; in end_bio_extent_readpage()
2826 failure_tree = &BTRFS_I(inode)->io_failure_tree; in end_bio_extent_readpage()
2828 /* We always issue full-page reads, but if some block in end_bio_extent_readpage()
2833 if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) { in end_bio_extent_readpage()
2834 if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE) in end_bio_extent_readpage()
2837 bvec->bv_offset, bvec->bv_len); in end_bio_extent_readpage()
2841 bvec->bv_offset, bvec->bv_len); in end_bio_extent_readpage()
2845 end = start + bvec->bv_offset + bvec->bv_len - 1; in end_bio_extent_readpage()
2846 len = bvec->bv_len; in end_bio_extent_readpage()
2848 mirror = io_bio->mirror_num; in end_bio_extent_readpage()
2859 clean_io_failure(BTRFS_I(inode)->root->fs_info, in end_bio_extent_readpage()
2877 * If it can't handle the error it will return -EIO and in end_bio_extent_readpage()
2881 start - page_offset(page), in end_bio_extent_readpage()
2884 uptodate = !bio->bi_status; in end_bio_extent_readpage()
2891 eb = (struct extent_buffer *)page->private; in end_bio_extent_readpage()
2892 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); in end_bio_extent_readpage()
2893 eb->read_mirror = mirror; in end_bio_extent_readpage()
2894 atomic_dec(&eb->io_pages); in end_bio_extent_readpage()
2896 &eb->bflags)) in end_bio_extent_readpage()
2897 btree_readahead_hook(eb, -EIO); in end_bio_extent_readpage()
2907 if (page->index == end_index && off) in end_bio_extent_readpage()
2926 end - start + 1, 0); in end_bio_extent_readpage()
2929 extent_len = end + 1 - start; in end_bio_extent_readpage()
2931 extent_len += end + 1 - start; in end_bio_extent_readpage()
2936 extent_len = end + 1 - start; in end_bio_extent_readpage()
2967 bio->bi_iter.bi_sector = first_byte >> 9; in btrfs_bio_alloc()
2981 btrfs_bio->iter = bio->bi_iter; in btrfs_bio_clone()
3008 btrfs_bio->iter = bio->bi_iter; in btrfs_bio_clone_partial()
3041 struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree; in submit_extent_page()
3051 contig = bio->bi_iter.bi_sector == sector; in submit_extent_page()
3076 bio->bi_end_io = end_io_func; in submit_extent_page()
3077 bio->bi_private = tree; in submit_extent_page()
3078 bio->bi_write_hint = page->mapping->host->i_write_hint; in submit_extent_page()
3079 bio->bi_opf = opf; in submit_extent_page()
3083 bdev = BTRFS_I(page->mapping->host)->root->fs_info->fs_devices->latest_bdev; in submit_extent_page()
3100 WARN_ON(page->private != (unsigned long)eb); in attach_extent_buffer_page()
3117 if (extent_map_in_tree(em) && start >= em->start && in __get_extent_map()
3119 refcount_inc(&em->refs); in __get_extent_map()
3130 refcount_inc(&em->refs); in __get_extent_map()
3146 struct inode *inode = page->mapping->host; in btrfs_do_readpage()
3148 const u64 end = start + PAGE_SIZE - 1; in btrfs_do_readpage()
3160 size_t blocksize = inode->i_sb->s_blocksize; in btrfs_do_readpage()
3162 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; in btrfs_do_readpage()
3174 if (page->index == last_byte >> PAGE_SHIFT) { in btrfs_do_readpage()
3179 iosize = PAGE_SIZE - zero_offset; in btrfs_do_readpage()
3194 iosize = PAGE_SIZE - pg_offset; in btrfs_do_readpage()
3199 set_extent_uptodate(tree, cur, cur + iosize - 1, in btrfs_do_readpage()
3202 cur + iosize - 1, &cached); in btrfs_do_readpage()
3206 end - cur + 1, em_cached); in btrfs_do_readpage()
3212 extent_offset = cur - em->start; in btrfs_do_readpage()
3216 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { in btrfs_do_readpage()
3219 em->compress_type); in btrfs_do_readpage()
3222 iosize = min(extent_map_end(em) - cur, end - cur + 1); in btrfs_do_readpage()
3223 cur_end = min(extent_map_end(em) - 1, end); in btrfs_do_readpage()
3226 disk_io_size = em->block_len; in btrfs_do_readpage()
3227 offset = em->block_start; in btrfs_do_readpage()
3229 offset = em->block_start + extent_offset; in btrfs_do_readpage()
3232 block_start = em->block_start; in btrfs_do_readpage()
3233 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) in btrfs_do_readpage()
3247 * [0 - 8K] [8K - 24K] in btrfs_do_readpage()
3263 * for both ranges because each compressed bio is tightly in btrfs_do_readpage()
3264 * coupled with a single extent map, and each range can have in btrfs_do_readpage()
3268 * non-optimal behavior (submitting 2 bios for the same extent). in btrfs_do_readpage()
3270 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) && in btrfs_do_readpage()
3271 prev_em_start && *prev_em_start != (u64)-1 && in btrfs_do_readpage()
3272 *prev_em_start != em->start) in btrfs_do_readpage()
3276 *prev_em_start = em->start; in btrfs_do_readpage()
3291 set_extent_uptodate(tree, cur, cur + iosize - 1, in btrfs_do_readpage()
3294 cur + iosize - 1, &cached); in btrfs_do_readpage()
3303 unlock_extent(tree, cur, cur + iosize - 1); in btrfs_do_readpage()
3313 unlock_extent(tree, cur, cur + iosize - 1); in btrfs_do_readpage()
3331 unlock_extent(tree, cur, cur + iosize - 1); in btrfs_do_readpage()
3353 struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host); in contiguous_readpages()
3368 wbc->nr_to_write -= nr_written; in update_nr_written()
3385 u64 page_end = delalloc_start + PAGE_SIZE - 1; in writepage_delalloc()
3394 found = find_lock_delalloc_range(&inode->vfs_inode, page, in writepage_delalloc()
3411 return ret < 0 ? ret : -EIO; in writepage_delalloc()
3417 delalloc_to_write += (delalloc_end - delalloc_start + in writepage_delalloc()
3421 if (wbc->nr_to_write < delalloc_to_write) { in writepage_delalloc()
3426 wbc->nr_to_write = min_t(u64, delalloc_to_write, in writepage_delalloc()
3439 wbc->nr_to_write -= *nr_written; in writepage_delalloc()
3462 struct extent_io_tree *tree = &inode->io_tree; in __extent_writepage_io()
3464 u64 page_end = start + PAGE_SIZE - 1; in __extent_writepage_io()
3494 blocksize = inode->vfs_inode.i_sb->s_blocksize; in __extent_writepage_io()
3505 em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1); in __extent_writepage_io()
3512 extent_offset = cur - em->start; in __extent_writepage_io()
3516 iosize = min(em_end - cur, end - cur + 1); in __extent_writepage_io()
3518 offset = em->block_start + extent_offset; in __extent_writepage_io()
3519 block_start = em->block_start; in __extent_writepage_io()
3520 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags); in __extent_writepage_io()
3534 cur + iosize - 1, 1); in __extent_writepage_io()
3540 btrfs_set_range_writeback(tree, cur, cur + iosize - 1); in __extent_writepage_io()
3542 btrfs_err(inode->root->fs_info, in __extent_writepage_io()
3544 page->index, cur, end); in __extent_writepage_io()
3549 &epd->bio, in __extent_writepage_io()
3578 struct inode *inode = page->mapping->host; in __extent_writepage()
3580 u64 page_end = start + PAGE_SIZE - 1; in __extent_writepage()
3595 if (page->index > end_index || in __extent_writepage()
3596 (page->index == end_index && !pg_offset)) { in __extent_writepage()
3597 page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); in __extent_writepage()
3602 if (page->index == end_index) { in __extent_writepage()
3607 PAGE_SIZE - pg_offset); in __extent_writepage()
3614 if (!epd->extent_locked) { in __extent_writepage()
3635 ret = ret < 0 ? ret : -EIO; in __extent_writepage()
3645 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK, in wait_on_extent_buffer_writeback()
3651 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in end_extent_buffer_writeback()
3653 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); in end_extent_buffer_writeback()
3666 struct btrfs_fs_info *fs_info = eb->fs_info; in lock_extent_buffer_for_io()
3679 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { in lock_extent_buffer_for_io()
3681 if (!epd->sync_io) in lock_extent_buffer_for_io()
3692 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) in lock_extent_buffer_for_io()
3703 spin_lock(&eb->refs_lock); in lock_extent_buffer_for_io()
3704 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { in lock_extent_buffer_for_io()
3705 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in lock_extent_buffer_for_io()
3706 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
3708 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, in lock_extent_buffer_for_io()
3709 -eb->len, in lock_extent_buffer_for_io()
3710 fs_info->dirty_metadata_batch); in lock_extent_buffer_for_io()
3713 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
3723 struct page *p = eb->pages[i]; in lock_extent_buffer_for_io()
3745 unlock_page(eb->pages[i]); in lock_extent_buffer_for_io()
3752 spin_lock(&eb->refs_lock); in lock_extent_buffer_for_io()
3753 set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); in lock_extent_buffer_for_io()
3755 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
3756 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len, in lock_extent_buffer_for_io()
3757 fs_info->dirty_metadata_batch); in lock_extent_buffer_for_io()
3765 struct extent_buffer *eb = (struct extent_buffer *)page->private; in set_btree_ioerr()
3769 if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) in set_btree_ioerr()
3776 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_btree_ioerr()
3782 fs_info = eb->fs_info; in set_btree_ioerr()
3783 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, in set_btree_ioerr()
3784 eb->len, fs_info->dirty_metadata_batch); in set_btree_ioerr()
3788 * failed, increment the counter transaction->eb_write_errors. in set_btree_ioerr()
3792 * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it in set_btree_ioerr()
3801 * btree nodes/leafs whose content on disk is invalid - either garbage in set_btree_ioerr()
3806 * not be enough - we need to distinguish between log tree extents vs in set_btree_ioerr()
3807 * non-log tree extents, and the next filemap_fdatawait_range() call in set_btree_ioerr()
3808 * will catch and clear such errors in the mapping - and that call might in set_btree_ioerr()
3811 * not done and would not be reliable - the eb might have been released in set_btree_ioerr()
3819 * with errors - because we were not using AS_EIO/AS_ENOSPC, in set_btree_ioerr()
3824 switch (eb->log_index) { in set_btree_ioerr()
3825 case -1: in set_btree_ioerr()
3826 set_bit(BTRFS_FS_BTREE_ERR, &eb->fs_info->flags); in set_btree_ioerr()
3829 set_bit(BTRFS_FS_LOG1_ERR, &eb->fs_info->flags); in set_btree_ioerr()
3832 set_bit(BTRFS_FS_LOG2_ERR, &eb->fs_info->flags); in set_btree_ioerr()
3848 struct page *page = bvec->bv_page; in end_bio_extent_buffer_writepage()
3850 eb = (struct extent_buffer *)page->private; in end_bio_extent_buffer_writepage()
3852 done = atomic_dec_and_test(&eb->io_pages); in end_bio_extent_buffer_writepage()
3854 if (bio->bi_status || in end_bio_extent_buffer_writepage()
3855 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) { in end_bio_extent_buffer_writepage()
3875 u64 offset = eb->start; in write_one_eb()
3882 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); in write_one_eb()
3884 atomic_set(&eb->io_pages, num_pages); in write_one_eb()
3891 memzero_extent_buffer(eb, end, eb->len - end); in write_one_eb()
3899 memzero_extent_buffer(eb, start, end - start); in write_one_eb()
3903 struct page *p = eb->pages[i]; in write_one_eb()
3909 &epd->bio, in write_one_eb()
3916 if (atomic_sub_and_test(num_pages - i, &eb->io_pages)) in write_one_eb()
3918 ret = -EIO; in write_one_eb()
3928 struct page *p = eb->pages[i]; in write_one_eb()
3944 .sync_io = wbc->sync_mode == WB_SYNC_ALL, in btree_write_cache_pages()
3946 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info; in btree_write_cache_pages()
3958 if (wbc->range_cyclic) { in btree_write_cache_pages()
3959 index = mapping->writeback_index; /* Start from prev offset */ in btree_write_cache_pages()
3960 end = -1; in btree_write_cache_pages()
3967 index = wbc->range_start >> PAGE_SHIFT; in btree_write_cache_pages()
3968 end = wbc->range_end >> PAGE_SHIFT; in btree_write_cache_pages()
3971 if (wbc->sync_mode == WB_SYNC_ALL) in btree_write_cache_pages()
3976 if (wbc->sync_mode == WB_SYNC_ALL) in btree_write_cache_pages()
3989 spin_lock(&mapping->private_lock); in btree_write_cache_pages()
3991 spin_unlock(&mapping->private_lock); in btree_write_cache_pages()
3995 eb = (struct extent_buffer *)page->private; in btree_write_cache_pages()
4003 spin_unlock(&mapping->private_lock); in btree_write_cache_pages()
4008 spin_unlock(&mapping->private_lock); in btree_write_cache_pages()
4012 ret = atomic_inc_not_zero(&eb->refs); in btree_write_cache_pages()
4013 spin_unlock(&mapping->private_lock); in btree_write_cache_pages()
4041 nr_to_write_done = wbc->nr_to_write <= 0; in btree_write_cache_pages()
4064 * This would prevent use-after-free if we had dirty pages not in btree_write_cache_pages()
4067 * - Bad extent tree in btree_write_cache_pages()
4070 * - Log tree operations in btree_write_cache_pages()
4072 * generation, then get cleaned in tree re-balance. in btree_write_cache_pages()
4078 * - Offending tree block gets re-dirtied from its original owner in btree_write_cache_pages()
4087 if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { in btree_write_cache_pages()
4090 ret = -EROFS; in btree_write_cache_pages()
4097 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
4099 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
4103 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
4104 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
4106 * the call was made get new I/O started against them. If wbc->sync_mode is
4114 struct inode *inode = mapping->host; in extent_write_cache_pages()
4140 if (wbc->range_cyclic) { in extent_write_cache_pages()
4141 index = mapping->writeback_index; /* Start from prev offset */ in extent_write_cache_pages()
4142 end = -1; in extent_write_cache_pages()
4149 index = wbc->range_start >> PAGE_SHIFT; in extent_write_cache_pages()
4150 end = wbc->range_end >> PAGE_SHIFT; in extent_write_cache_pages()
4151 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) in extent_write_cache_pages()
4163 if (range_whole && wbc->nr_to_write == LONG_MAX && in extent_write_cache_pages()
4165 &BTRFS_I(inode)->runtime_flags)) in extent_write_cache_pages()
4166 wbc->tagged_writepages = 1; in extent_write_cache_pages()
4168 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) in extent_write_cache_pages()
4173 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) in extent_write_cache_pages()
4184 done_index = page->index + 1; in extent_write_cache_pages()
4188 * invalidated (changing page->mapping to NULL), in extent_write_cache_pages()
4198 if (unlikely(page->mapping != mapping)) { in extent_write_cache_pages()
4203 if (wbc->sync_mode != WB_SYNC_NONE) { in extent_write_cache_pages()
4228 nr_to_write_done = wbc->nr_to_write <= 0; in extent_write_cache_pages()
4252 if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole)) in extent_write_cache_pages()
4253 mapping->writeback_index = done_index; in extent_write_cache_pages()
4265 .sync_io = wbc->sync_mode == WB_SYNC_ALL, in extent_write_full_page()
4284 struct address_space *mapping = inode->i_mapping; in extent_write_locked_range()
4286 unsigned long nr_pages = (end - start + PAGE_SIZE) >> in extent_write_locked_range()
4311 start + PAGE_SIZE - 1, 1); in extent_write_locked_range()
4335 .sync_io = wbc->sync_mode == WB_SYNC_ALL, in extent_writepages()
4354 u64 prev_em_start = (u64)-1; in extent_readahead()
4359 u64 contig_end = page_offset(pagepool[nr - 1]) + PAGE_SIZE - 1; in extent_readahead()
4361 ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end); in extent_readahead()
4386 u64 end = start + PAGE_SIZE - 1; in extent_invalidatepage()
4387 size_t blocksize = page->mapping->host->i_sb->s_blocksize; in extent_invalidatepage()
4409 u64 end = start + PAGE_SIZE - 1; in try_release_extent_state()
4443 u64 end = start + PAGE_SIZE - 1; in try_release_extent_mapping()
4444 struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host); in try_release_extent_mapping()
4445 struct extent_io_tree *tree = &btrfs_inode->io_tree; in try_release_extent_mapping()
4446 struct extent_map_tree *map = &btrfs_inode->extent_tree; in try_release_extent_mapping()
4449 page->mapping->host->i_size > SZ_16M) { in try_release_extent_mapping()
4455 len = end - start + 1; in try_release_extent_mapping()
4456 write_lock(&map->lock); in try_release_extent_mapping()
4459 write_unlock(&map->lock); in try_release_extent_mapping()
4462 if (test_bit(EXTENT_FLAG_PINNED, &em->flags) || in try_release_extent_mapping()
4463 em->start != start) { in try_release_extent_mapping()
4464 write_unlock(&map->lock); in try_release_extent_mapping()
4468 if (test_range_bit(tree, em->start, in try_release_extent_mapping()
4469 extent_map_end(em) - 1, in try_release_extent_mapping()
4478 if (list_empty(&em->list) || in try_release_extent_mapping()
4479 test_bit(EXTENT_FLAG_LOGGING, &em->flags)) in try_release_extent_mapping()
4488 fs_info = btrfs_inode->root->fs_info; in try_release_extent_mapping()
4489 spin_lock(&fs_info->trans_lock); in try_release_extent_mapping()
4490 cur_gen = fs_info->generation; in try_release_extent_mapping()
4491 spin_unlock(&fs_info->trans_lock); in try_release_extent_mapping()
4492 if (em->generation >= cur_gen) in try_release_extent_mapping()
4508 write_unlock(&map->lock); in try_release_extent_mapping()
4513 cond_resched(); /* Allow large-extent preemption. */ in try_release_extent_mapping()
4534 len = last - offset; in get_extent_skip_holes()
4543 if (em->block_start != EXTENT_MAP_HOLE) in get_extent_skip_holes()
4584 if (!cache->cached) in emit_fiemap_extent()
4594 if (cache->offset + cache->len > offset) { in emit_fiemap_extent()
4596 return -EINVAL; in emit_fiemap_extent()
4610 if (cache->offset + cache->len == offset && in emit_fiemap_extent()
4611 cache->phys + cache->len == phys && in emit_fiemap_extent()
4612 (cache->flags & ~FIEMAP_EXTENT_LAST) == in emit_fiemap_extent()
4614 cache->len += len; in emit_fiemap_extent()
4615 cache->flags |= flags; in emit_fiemap_extent()
4620 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys, in emit_fiemap_extent()
4621 cache->len, cache->flags); in emit_fiemap_extent()
4622 cache->cached = false; in emit_fiemap_extent()
4626 cache->cached = true; in emit_fiemap_extent()
4627 cache->offset = offset; in emit_fiemap_extent()
4628 cache->phys = phys; in emit_fiemap_extent()
4629 cache->len = len; in emit_fiemap_extent()
4630 cache->flags = flags; in emit_fiemap_extent()
4632 if (cache->flags & FIEMAP_EXTENT_LAST) { in emit_fiemap_extent()
4633 ret = fiemap_fill_next_extent(fieinfo, cache->offset, in emit_fiemap_extent()
4634 cache->phys, cache->len, cache->flags); in emit_fiemap_extent()
4635 cache->cached = false; in emit_fiemap_extent()
4645 * |<- Fiemap range ->|
4646 * |<------------ First extent ----------->|
4656 if (!cache->cached) in emit_last_fiemap_cache()
4659 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys, in emit_last_fiemap_cache()
4660 cache->len, cache->flags); in emit_last_fiemap_cache()
4661 cache->cached = false; in emit_last_fiemap_cache()
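
emit_fiemap_extent() above batches results through a one-entry cache: a new extent is merged into the cached one only when it is logically and physically contiguous and carries the same flags apart from FIEMAP_EXTENT_LAST; otherwise the cached entry is flushed to fiemap_fill_next_extent() first. A compact userspace illustration of just the merge test, with a hypothetical struct mirroring the kernel's fiemap_cache:

#include <stdbool.h>

#define FM_EXTENT_LAST 0x00000001u	/* stands in for FIEMAP_EXTENT_LAST */

/* Hypothetical mirror of struct fiemap_cache: one buffered fiemap extent. */
struct fm_cache {
	bool cached;
	unsigned long long offset;	/* logical start */
	unsigned long long phys;	/* physical start */
	unsigned long long len;
	unsigned int flags;
};

/* True if an extent at (@offset, @phys) with @flags can extend the cached one. */
static bool fm_can_merge(const struct fm_cache *c, unsigned long long offset,
			 unsigned long long phys, unsigned int flags)
{
	return c->cached &&
	       c->offset + c->len == offset &&			/* logically contiguous */
	       c->phys + c->len == phys &&			/* physically contiguous */
	       (c->flags & ~FM_EXTENT_LAST) == (flags & ~FM_EXTENT_LAST);
}

On a merge the kernel simply does cache->len += len and ORs in the new flags; on a mismatch it emits the cached extent and starts a new one, which is also why emit_last_fiemap_cache() has to flush whatever is left once the loop in extent_fiemap() finishes.
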
4678 u64 isize = i_size_read(&inode->vfs_inode); in extent_fiemap()
4683 struct btrfs_root *root = inode->root; in extent_fiemap()
4693 return -EINVAL; in extent_fiemap()
4697 return -ENOMEM; in extent_fiemap()
4698 path->leave_spinning = 1; in extent_fiemap()
4703 ret = -ENOMEM; in extent_fiemap()
4713 len = round_up(max, btrfs_inode_sectorsize(inode)) - start; in extent_fiemap()
4719 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1, in extent_fiemap()
4729 path->slots[0]--; in extent_fiemap()
4730 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); in extent_fiemap()
4737 last = (u64)-1; in extent_fiemap()
4756 last = (u64)-1; in extent_fiemap()
4760 lock_extent_bits(&inode->io_tree, start, start + len - 1, in extent_fiemap()
4775 if (em->start >= max || extent_map_end(em) < off) in extent_fiemap()
4784 em_start = max(em->start, off); in extent_fiemap()
4792 if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) in extent_fiemap()
4793 offset_in_extent = em_start - em->start; in extent_fiemap()
4795 em_len = em_end - em_start; in extent_fiemap()
4797 if (em->block_start < EXTENT_MAP_LAST_BYTE) in extent_fiemap()
4798 disko = em->block_start + offset_in_extent; in extent_fiemap()
4809 if (em->block_start == EXTENT_MAP_LAST_BYTE) { in extent_fiemap()
4812 } else if (em->block_start == EXTENT_MAP_INLINE) { in extent_fiemap()
4815 } else if (em->block_start == EXTENT_MAP_DELALLOC) { in extent_fiemap()
4818 } else if (fieinfo->fi_extents_max) { in extent_fiemap()
4819 u64 bytenr = em->block_start - in extent_fiemap()
4820 (em->start - em->orig_start); in extent_fiemap()
4837 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) in extent_fiemap()
4839 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) in extent_fiemap()
4844 if ((em_start >= last) || em_len == (u64)-1 || in extent_fiemap()
4845 (last == (u64)-1 && isize <= em_end)) { in extent_fiemap()
4873 unlock_extent_cached(&inode->io_tree, start, start + len - 1, in extent_fiemap()
4890 return (atomic_read(&eb->io_pages) || in extent_buffer_under_io()
4891 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || in extent_buffer_under_io()
4892 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in extent_buffer_under_io()
4902 int mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in btrfs_release_extent_buffer_pages()
4908 struct page *page = eb->pages[i]; in btrfs_release_extent_buffer_pages()
4913 spin_lock(&page->mapping->private_lock); in btrfs_release_extent_buffer_pages()
4922 page->private == (unsigned long)eb) { in btrfs_release_extent_buffer_pages()
4923 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in btrfs_release_extent_buffer_pages()
4934 spin_unlock(&page->mapping->private_lock); in btrfs_release_extent_buffer_pages()
4947 btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list); in btrfs_release_extent_buffer()
4958 eb->start = start; in __alloc_extent_buffer()
4959 eb->len = len; in __alloc_extent_buffer()
4960 eb->fs_info = fs_info; in __alloc_extent_buffer()
4961 eb->bflags = 0; in __alloc_extent_buffer()
4962 rwlock_init(&eb->lock); in __alloc_extent_buffer()
4963 atomic_set(&eb->blocking_readers, 0); in __alloc_extent_buffer()
4964 eb->blocking_writers = 0; in __alloc_extent_buffer()
4965 eb->lock_recursed = false; in __alloc_extent_buffer()
4966 init_waitqueue_head(&eb->write_lock_wq); in __alloc_extent_buffer()
4967 init_waitqueue_head(&eb->read_lock_wq); in __alloc_extent_buffer()
4969 btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list, in __alloc_extent_buffer()
4970 &fs_info->allocated_ebs); in __alloc_extent_buffer()
4972 spin_lock_init(&eb->refs_lock); in __alloc_extent_buffer()
4973 atomic_set(&eb->refs, 1); in __alloc_extent_buffer()
4974 atomic_set(&eb->io_pages, 0); in __alloc_extent_buffer()
4984 eb->spinning_writers = 0; in __alloc_extent_buffer()
4985 atomic_set(&eb->spinning_readers, 0); in __alloc_extent_buffer()
4986 atomic_set(&eb->read_locks, 0); in __alloc_extent_buffer()
4987 eb->write_locks = 0; in __alloc_extent_buffer()
5000 new = __alloc_extent_buffer(src->fs_info, src->start, src->len); in btrfs_clone_extent_buffer()
5013 new->pages[i] = p; in btrfs_clone_extent_buffer()
5014 copy_page(page_address(p), page_address(src->pages[i])); in btrfs_clone_extent_buffer()
5017 set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags); in btrfs_clone_extent_buffer()
5018 set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags); in btrfs_clone_extent_buffer()
5036 eb->pages[i] = alloc_page(GFP_NOFS); in __alloc_dummy_extent_buffer()
5037 if (!eb->pages[i]) in __alloc_dummy_extent_buffer()
5042 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in __alloc_dummy_extent_buffer()
5046 for (; i > 0; i--) in __alloc_dummy_extent_buffer()
5047 __free_page(eb->pages[i - 1]); in __alloc_dummy_extent_buffer()
5055 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize); in alloc_dummy_extent_buffer()
5066 * It is only cleared in two cases: freeing the last non-tree in check_buffer_tree_ref()
5080 * which trigger io after they set eb->io_pages. Note that once io is in check_buffer_tree_ref()
5084 refs = atomic_read(&eb->refs); in check_buffer_tree_ref()
5085 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
5088 spin_lock(&eb->refs_lock); in check_buffer_tree_ref()
5089 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
5090 atomic_inc(&eb->refs); in check_buffer_tree_ref()
5091 spin_unlock(&eb->refs_lock); in check_buffer_tree_ref()
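
check_buffer_tree_ref() above takes the extra "tree" reference at most once: a lockless fast path when the buffer is clearly already referenced and flagged, then test_and_set_bit() under eb->refs_lock so only the caller that flips the bit bumps the refcount. A minimal sketch of that pattern follows; struct tracked_obj, OBJ_TREE_REF and take_tree_ref_once() are illustrative names, not the btrfs ones, and lock initialisation is omitted.

/* Illustrative only: not the btrfs structures. */
#define OBJ_TREE_REF	0

struct tracked_obj {
	atomic_t refs;
	unsigned long flags;		/* OBJ_TREE_REF lives here */
	spinlock_t refs_lock;
};

static void take_tree_ref_once(struct tracked_obj *obj)
{
	/* Fast path: already referenced and flagged, nothing to do. */
	if (atomic_read(&obj->refs) >= 2 &&
	    test_bit(OBJ_TREE_REF, &obj->flags))
		return;

	/* Slow path: only the caller that flips the bit takes the ref. */
	spin_lock(&obj->refs_lock);
	if (!test_and_set_bit(OBJ_TREE_REF, &obj->flags))
		atomic_inc(&obj->refs);
	spin_unlock(&obj->refs_lock);
}
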
5103 struct page *p = eb->pages[i]; in mark_extent_buffer_accessed()
5116 eb = radix_tree_lookup(&fs_info->buffer_radix, in find_extent_buffer()
5118 if (eb && atomic_inc_not_zero(&eb->refs)) { in find_extent_buffer()
5125 * eb->refs == 2, that the buffer isn't under IO (dirty and in find_extent_buffer()
5135 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) { in find_extent_buffer()
5136 spin_lock(&eb->refs_lock); in find_extent_buffer()
5137 spin_unlock(&eb->refs_lock); in find_extent_buffer()
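
find_extent_buffer() pairs an RCU-protected radix-tree lookup with atomic_inc_not_zero(), so a buffer whose refcount has already reached zero can be seen but never revived. A hedged sketch of that lookup-and-pin step, reusing the illustrative struct tracked_obj from the sketch above:

/* Pin an object found in the cache unless its refcount already hit zero. */
static struct tracked_obj *lookup_and_pin(struct radix_tree_root *cache,
					  unsigned long index)
{
	struct tracked_obj *obj;

	rcu_read_lock();
	obj = radix_tree_lookup(cache, index);
	if (obj && !atomic_inc_not_zero(&obj->refs))
		obj = NULL;	/* dying object: pretend we never saw it */
	rcu_read_unlock();

	return obj;
}
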
5159 return ERR_PTR(-ENOMEM); in alloc_test_extent_buffer()
5160 eb->fs_info = fs_info; in alloc_test_extent_buffer()
5167 spin_lock(&fs_info->buffer_lock); in alloc_test_extent_buffer()
5168 ret = radix_tree_insert(&fs_info->buffer_radix, in alloc_test_extent_buffer()
5170 spin_unlock(&fs_info->buffer_lock); in alloc_test_extent_buffer()
5172 if (ret == -EEXIST) { in alloc_test_extent_buffer()
5180 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); in alloc_test_extent_buffer()
5192 unsigned long len = fs_info->nodesize; in alloc_extent_buffer()
5199 struct address_space *mapping = fs_info->btree_inode->i_mapping; in alloc_extent_buffer()
5203 if (!IS_ALIGNED(start, fs_info->sectorsize)) { in alloc_extent_buffer()
5205 return ERR_PTR(-EINVAL); in alloc_extent_buffer()
5214 return ERR_PTR(-ENOMEM); in alloc_extent_buffer()
5220 exists = ERR_PTR(-ENOMEM); in alloc_extent_buffer()
5224 spin_lock(&mapping->private_lock); in alloc_extent_buffer()
5231 * overwrite page->private. in alloc_extent_buffer()
5233 exists = (struct extent_buffer *)p->private; in alloc_extent_buffer()
5234 if (atomic_inc_not_zero(&exists->refs)) { in alloc_extent_buffer()
5235 spin_unlock(&mapping->private_lock); in alloc_extent_buffer()
5252 spin_unlock(&mapping->private_lock); in alloc_extent_buffer()
5254 eb->pages[i] = p; in alloc_extent_buffer()
5267 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in alloc_extent_buffer()
5275 spin_lock(&fs_info->buffer_lock); in alloc_extent_buffer()
5276 ret = radix_tree_insert(&fs_info->buffer_radix, in alloc_extent_buffer()
5278 spin_unlock(&fs_info->buffer_lock); in alloc_extent_buffer()
5280 if (ret == -EEXIST) { in alloc_extent_buffer()
5289 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); in alloc_extent_buffer()
5297 unlock_page(eb->pages[i]); in alloc_extent_buffer()
5301 WARN_ON(!atomic_dec_and_test(&eb->refs)); in alloc_extent_buffer()
5303 if (eb->pages[i]) in alloc_extent_buffer()
5304 unlock_page(eb->pages[i]); in alloc_extent_buffer()
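
alloc_extent_buffer() handles the allocation race by inserting the freshly built buffer into the radix tree and, on -EEXIST, pinning whichever copy won instead. A simplified sketch of that insert-or-reuse shape, reusing the illustrative types and the lookup_and_pin() helper from the earlier sketches; the real function loops and retries when the winner disappears again, which is only hinted at here.

/* Insert "new" at index; if someone beat us to it, hand back their copy. */
static struct tracked_obj *insert_or_reuse(struct radix_tree_root *cache,
					   spinlock_t *cache_lock,
					   struct tracked_obj *new,
					   unsigned long index)
{
	struct tracked_obj *existing;
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ERR_PTR(ret);

	spin_lock(cache_lock);
	ret = radix_tree_insert(cache, index, new);
	spin_unlock(cache_lock);
	radix_tree_preload_end();

	if (ret == -EEXIST) {
		/* Lost the race: pin the winner (lookup_and_pin() above). */
		existing = lookup_and_pin(cache, index);
		if (existing)
			return existing;
		/* Winner vanished again; the real code loops and retries. */
		return ERR_PTR(-EAGAIN);
	}

	return ret ? ERR_PTR(ret) : new;
}
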
5320 __releases(&eb->refs_lock) in release_extent_buffer()
5322 lockdep_assert_held(&eb->refs_lock); in release_extent_buffer()
5324 WARN_ON(atomic_read(&eb->refs) == 0); in release_extent_buffer()
5325 if (atomic_dec_and_test(&eb->refs)) { in release_extent_buffer()
5326 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) { in release_extent_buffer()
5327 struct btrfs_fs_info *fs_info = eb->fs_info; in release_extent_buffer()
5329 spin_unlock(&eb->refs_lock); in release_extent_buffer()
5331 spin_lock(&fs_info->buffer_lock); in release_extent_buffer()
5332 radix_tree_delete(&fs_info->buffer_radix, in release_extent_buffer()
5333 eb->start >> PAGE_SHIFT); in release_extent_buffer()
5334 spin_unlock(&fs_info->buffer_lock); in release_extent_buffer()
5336 spin_unlock(&eb->refs_lock); in release_extent_buffer()
5339 btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list); in release_extent_buffer()
5343 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) { in release_extent_buffer()
5348 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); in release_extent_buffer()
5351 spin_unlock(&eb->refs_lock); in release_extent_buffer()
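
release_extent_buffer() drops the last reference by unhashing the buffer under fs_info->buffer_lock and then freeing it through call_rcu(), which is what lets the lookup side get away with only rcu_read_lock(). A hedged, generic sketch of that teardown ordering; the struct and function names are illustrative, not btrfs's.

/* Illustrative teardown: unhash, then free after a grace period. */
struct cached_obj {
	atomic_t refs;
	unsigned long index;
	struct rcu_head rcu_head;
};

static void cached_obj_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct cached_obj, rcu_head));
}

static void drop_last_ref(struct radix_tree_root *cache,
			  spinlock_t *cache_lock, struct cached_obj *obj)
{
	if (!atomic_dec_and_test(&obj->refs))
		return;

	spin_lock(cache_lock);
	radix_tree_delete(cache, obj->index);
	spin_unlock(cache_lock);

	/* Readers under rcu_read_lock() may still hold a pointer to obj. */
	call_rcu(&obj->rcu_head, cached_obj_free_rcu);
}
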
5364 refs = atomic_read(&eb->refs); in free_extent_buffer()
5365 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3) in free_extent_buffer()
5366 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && in free_extent_buffer()
5369 old = atomic_cmpxchg(&eb->refs, refs, refs - 1); in free_extent_buffer()
5374 spin_lock(&eb->refs_lock); in free_extent_buffer()
5375 if (atomic_read(&eb->refs) == 2 && in free_extent_buffer()
5376 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) && in free_extent_buffer()
5378 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer()
5379 atomic_dec(&eb->refs); in free_extent_buffer()
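
free_extent_buffer() avoids eb->refs_lock entirely while the refcount is comfortably above the value that needs the STALE/TREE_REF checks, using an atomic_cmpxchg() retry loop. A small sketch of just that fast path; put_ref_lockless() and the threshold parameter are illustrative, and the caller is expected to fall back to the locked release path when it returns false.

/* Drop one reference without the lock while refs stays above "threshold". */
static bool put_ref_lockless(atomic_t *refs, int threshold)
{
	int cur = atomic_read(refs);

	while (cur > threshold) {
		int old = atomic_cmpxchg(refs, cur, cur - 1);

		if (old == cur)
			return true;	/* dropped locklessly */
		cur = old;		/* raced: re-evaluate  */
	}

	return false;	/* caller must take the locked release path */
}
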
5393 spin_lock(&eb->refs_lock); in free_extent_buffer_stale()
5394 set_bit(EXTENT_BUFFER_STALE, &eb->bflags); in free_extent_buffer_stale()
5396 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) && in free_extent_buffer_stale()
5397 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer_stale()
5398 atomic_dec(&eb->refs); in free_extent_buffer_stale()
5411 page = eb->pages[i]; in clear_extent_buffer_dirty()
5419 xa_lock_irq(&page->mapping->i_pages); in clear_extent_buffer_dirty()
5421 __xa_clear_mark(&page->mapping->i_pages, in clear_extent_buffer_dirty()
5423 xa_unlock_irq(&page->mapping->i_pages); in clear_extent_buffer_dirty()
5427 WARN_ON(atomic_read(&eb->refs) == 0); in clear_extent_buffer_dirty()
5438 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); in set_extent_buffer_dirty()
5441 WARN_ON(atomic_read(&eb->refs) == 0); in set_extent_buffer_dirty()
5442 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)); in set_extent_buffer_dirty()
5446 set_page_dirty(eb->pages[i]); in set_extent_buffer_dirty()
5450 ASSERT(PageDirty(eb->pages[i])); in set_extent_buffer_dirty()
5462 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in clear_extent_buffer_uptodate()
5465 page = eb->pages[i]; in clear_extent_buffer_uptodate()
5477 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_extent_buffer_uptodate()
5480 page = eb->pages[i]; in set_extent_buffer_uptodate()
5498 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) in read_extent_buffer_pages()
5503 page = eb->pages[i]; in read_extent_buffer_pages()
5518 page = eb->pages[i]; in read_extent_buffer_pages()
5526 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in read_extent_buffer_pages()
5530 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); in read_extent_buffer_pages()
5531 eb->read_mirror = 0; in read_extent_buffer_pages()
5532 atomic_set(&eb->io_pages, num_reads); in read_extent_buffer_pages()
5539 page = eb->pages[i]; in read_extent_buffer_pages()
5543 atomic_dec(&eb->io_pages); in read_extent_buffer_pages()
5562 atomic_dec(&eb->io_pages); in read_extent_buffer_pages()
5579 page = eb->pages[i]; in read_extent_buffer_pages()
5582 ret = -EIO; in read_extent_buffer_pages()
5589 locked_pages--; in read_extent_buffer_pages()
5590 page = eb->pages[locked_pages]; in read_extent_buffer_pages()
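
read_extent_buffer_pages() counts how many pages actually need IO, stores that in eb->io_pages, and lets each read completion decrement it so only the final completion marks the whole buffer uptodate. A minimal sketch of that completion-counting idea with illustrative helper names:

/* Count the pages that still need reading and arm the completion counter. */
static int start_page_reads(struct page **pages, int num_pages,
			    atomic_t *io_pages)
{
	int i, num_reads = 0;

	for (i = 0; i < num_pages; i++)
		if (!PageUptodate(pages[i]))
			num_reads++;

	atomic_set(io_pages, num_reads);
	return num_reads;
}

/* Called once per completed page read; true only for the last one. */
static bool finish_one_page_read(atomic_t *io_pages)
{
	return atomic_dec_and_test(io_pages);
}
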
5599 btrfs_warn(eb->fs_info, in report_eb_range()
5601 eb->start, eb->len, start, len); in report_eb_range()
5619 /* start, start + len should not go beyond eb->len nor overflow */ in check_eb_range()
5620 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len)) in check_eb_range()
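
check_eb_range() rejects a range when start + len either overflows or runs past eb->len, using check_add_overflow() so the wraparound case cannot slip through. A small sketch of the same check in isolation (range_ok() is an illustrative name):

/* Reject [start, start + len) if it overflows or runs past buf_len. */
static bool range_ok(unsigned long start, unsigned long len,
		     unsigned long buf_len)
{
	unsigned long end;

	return !check_add_overflow(start, len, &end) && end <= buf_len;
}
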
5642 page = eb->pages[i]; in read_extent_buffer()
5644 cur = min(len, (PAGE_SIZE - offset)); in read_extent_buffer()
5649 len -= cur; in read_extent_buffer()
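
read_extent_buffer() and the later write/compare/zero helpers all share the same walk: convert the byte offset into a page index plus an in-page offset, then copy at most up to the next page boundary per iteration. A hedged sketch of that loop, ignoring the first-page start offset the real helpers account for; copy_from_page_array() is an illustrative name.

/* Copy "len" bytes starting at byte "start" out of an array of pages. */
static void copy_from_page_array(char *dst, struct page **pages,
				 unsigned long start, unsigned long len)
{
	unsigned long i = start >> PAGE_SHIFT;
	unsigned long offset = offset_in_page(start);

	while (len > 0) {
		unsigned long cur = min(len, PAGE_SIZE - offset);
		char *kaddr = page_address(pages[i]);

		memcpy(dst, kaddr + offset, cur);

		dst += cur;
		len -= cur;
		offset = 0;	/* pages after the first start at offset 0 */
		i++;
	}
}
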
5667 WARN_ON(start > eb->len); in read_extent_buffer_to_user_nofault()
5668 WARN_ON(start + len > eb->start + eb->len); in read_extent_buffer_to_user_nofault()
5673 page = eb->pages[i]; in read_extent_buffer_to_user_nofault()
5675 cur = min(len, (PAGE_SIZE - offset)); in read_extent_buffer_to_user_nofault()
5678 ret = -EFAULT; in read_extent_buffer_to_user_nofault()
5683 len -= cur; in read_extent_buffer_to_user_nofault()
5703 return -EINVAL; in memcmp_extent_buffer()
5708 page = eb->pages[i]; in memcmp_extent_buffer()
5710 cur = min(len, (PAGE_SIZE - offset)); in memcmp_extent_buffer()
5718 len -= cur; in memcmp_extent_buffer()
5730 WARN_ON(!PageUptodate(eb->pages[0])); in write_extent_buffer_chunk_tree_uuid()
5731 kaddr = page_address(eb->pages[0]); in write_extent_buffer_chunk_tree_uuid()
5740 WARN_ON(!PageUptodate(eb->pages[0])); in write_extent_buffer_fsid()
5741 kaddr = page_address(eb->pages[0]); in write_extent_buffer_fsid()
5762 page = eb->pages[i]; in write_extent_buffer()
5765 cur = min(len, PAGE_SIZE - offset); in write_extent_buffer()
5770 len -= cur; in write_extent_buffer()
5791 page = eb->pages[i]; in memzero_extent_buffer()
5794 cur = min(len, PAGE_SIZE - offset); in memzero_extent_buffer()
5798 len -= cur; in memzero_extent_buffer()
5810 ASSERT(dst->len == src->len); in copy_extent_buffer_full()
5814 copy_page(page_address(dst->pages[i]), in copy_extent_buffer_full()
5815 page_address(src->pages[i])); in copy_extent_buffer_full()
5823 u64 dst_len = dst->len; in copy_extent_buffer()
5834 WARN_ON(src->len != dst_len); in copy_extent_buffer()
5839 page = dst->pages[i]; in copy_extent_buffer()
5842 cur = min(len, (unsigned long)(PAGE_SIZE - offset)); in copy_extent_buffer()
5848 len -= cur; in copy_extent_buffer()
5855 * eb_bitmap_offset() - calculate the page and offset of the byte containing the
5887 * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
5901 page = eb->pages[i]; in extent_buffer_test_bit()
5904 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1))); in extent_buffer_test_bit()
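
extent_buffer_test_bit() addresses bits the way the fragment above shows: byte nr / 8 holds the bit, and within that byte the bit index is nr % 8. A tiny self-contained version of that arithmetic (test_bit_in_bytes() is an illustrative name):

/* Little-endian bitmap addressing: bit nr lives in byte nr / 8, bit nr % 8. */
static int test_bit_in_bytes(const unsigned char *bytes, unsigned long nr)
{
	unsigned long byte = nr / BITS_PER_BYTE;
	unsigned int bit = nr & (BITS_PER_BYTE - 1);

	return 1U & (bytes[byte] >> bit);
}
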
5908 * extent_buffer_bitmap_set - set an area of a bitmap
5922 int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE); in extent_buffer_bitmap_set()
5926 page = eb->pages[i]; in extent_buffer_bitmap_set()
5932 len -= bits_to_set; in extent_buffer_bitmap_set()
5937 page = eb->pages[++i]; in extent_buffer_bitmap_set()
5950 * extent_buffer_bitmap_clear - clear an area of a bitmap
5965 int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE); in extent_buffer_bitmap_clear()
5969 page = eb->pages[i]; in extent_buffer_bitmap_clear()
5975 len -= bits_to_clear; in extent_buffer_bitmap_clear()
5980 page = eb->pages[++i]; in extent_buffer_bitmap_clear()
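
extent_buffer_bitmap_set() and extent_buffer_bitmap_clear() work a byte at a time: a mask for the partial first byte, whole bytes in the middle, and a trimmed mask for the partial last byte, plus the bookkeeping for crossing page boundaries. A sketch of the set side on a flat byte array, with the page handling omitted; clearing is the same loop with &= ~mask. bitmap_set_bytes() is an illustrative name.

/* Set "len" bits starting at bit "pos" in a flat byte array. */
static void bitmap_set_bytes(unsigned char *bytes, unsigned long pos,
			     unsigned long len)
{
	const unsigned long size = pos + len;	/* first bit past the range */
	unsigned long byte = pos / BITS_PER_BYTE;
	unsigned int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	unsigned char mask = 0xff << (pos % BITS_PER_BYTE);	/* first byte */

	while (len >= bits_to_set) {
		bytes[byte++] |= mask;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask = 0xff;				/* whole middle bytes */
	}
	if (len) {
		/* size % BITS_PER_BYTE is nonzero whenever bits remain */
		mask &= 0xff >> (BITS_PER_BYTE - size % BITS_PER_BYTE);
		bytes[byte] |= mask;
	}
}
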
5993 unsigned long distance = (src > dst) ? src - dst : dst - src; in areas_overlap()
6040 cur = min(len, (unsigned long)(PAGE_SIZE - src_off_in_page)); in memcpy_extent_buffer()
6043 cur = min_t(unsigned long, cur, (unsigned long)(PAGE_SIZE - dst_off_in_page)); in memcpy_extent_buffer()
6045 copy_pages(dst->pages[dst_i], dst->pages[src_i], in memcpy_extent_buffer()
6050 len -= cur; in memcpy_extent_buffer()
6061 unsigned long dst_end = dst_offset + len - 1; in memmove_extent_buffer()
6062 unsigned long src_end = src_offset + len - 1; in memmove_extent_buffer()
6082 copy_pages(dst->pages[dst_i], dst->pages[src_i], in memmove_extent_buffer()
6083 dst_off_in_page - cur + 1, in memmove_extent_buffer()
6084 src_off_in_page - cur + 1, cur); in memmove_extent_buffer()
6086 dst_end -= cur; in memmove_extent_buffer()
6087 src_end -= cur; in memmove_extent_buffer()
6088 len -= cur; in memmove_extent_buffer()
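
memmove_extent_buffer() first checks whether the two ranges overlap (the distance test shown in areas_overlap() above) and, when the destination sits ahead of an overlapping source, copies from the end backwards so no byte is overwritten before it has been read. A compact sketch of that decision on a flat buffer; overlap_safe_copy() is an illustrative name.

/* Move bytes within one buffer, choosing the direction that is safe. */
static void overlap_safe_copy(unsigned char *base, unsigned long dst_off,
			      unsigned long src_off, unsigned long len)
{
	unsigned long distance = (src_off > dst_off) ? src_off - dst_off
						     : dst_off - src_off;

	if (dst_off > src_off && distance < len) {
		/* Overlapping, dst ahead of src: copy from the end back. */
		while (len--)
			base[dst_off + len] = base[src_off + len];
	} else {
		unsigned long i;

		for (i = 0; i < len; i++)
			base[dst_off + i] = base[src_off + i];
	}
}
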
6100 spin_lock(&page->mapping->private_lock); in try_release_extent_buffer()
6102 spin_unlock(&page->mapping->private_lock); in try_release_extent_buffer()
6106 eb = (struct extent_buffer *)page->private; in try_release_extent_buffer()
6114 spin_lock(&eb->refs_lock); in try_release_extent_buffer()
6115 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { in try_release_extent_buffer()
6116 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()
6117 spin_unlock(&page->mapping->private_lock); in try_release_extent_buffer()
6120 spin_unlock(&page->mapping->private_lock); in try_release_extent_buffer()
6126 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { in try_release_extent_buffer()
6127 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()