Lines Matching refs:fs_info
157 struct btrfs_fs_info *fs_info; member
215 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
245 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
255 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
256 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
278 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) in __scrub_blocked_if_needed() argument
280 while (atomic_read(&fs_info->scrub_pause_req)) { in __scrub_blocked_if_needed()
281 mutex_unlock(&fs_info->scrub_lock); in __scrub_blocked_if_needed()
282 wait_event(fs_info->scrub_pause_wait, in __scrub_blocked_if_needed()
283 atomic_read(&fs_info->scrub_pause_req) == 0); in __scrub_blocked_if_needed()
284 mutex_lock(&fs_info->scrub_lock); in __scrub_blocked_if_needed()
288 static void scrub_pause_on(struct btrfs_fs_info *fs_info) in scrub_pause_on() argument
290 atomic_inc(&fs_info->scrubs_paused); in scrub_pause_on()
291 wake_up(&fs_info->scrub_pause_wait); in scrub_pause_on()
294 static void scrub_pause_off(struct btrfs_fs_info *fs_info) in scrub_pause_off() argument
296 mutex_lock(&fs_info->scrub_lock); in scrub_pause_off()
297 __scrub_blocked_if_needed(fs_info); in scrub_pause_off()
298 atomic_dec(&fs_info->scrubs_paused); in scrub_pause_off()
299 mutex_unlock(&fs_info->scrub_lock); in scrub_pause_off()
301 wake_up(&fs_info->scrub_pause_wait); in scrub_pause_off()
304 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) in scrub_blocked_if_needed() argument
306 scrub_pause_on(fs_info); in scrub_blocked_if_needed()
307 scrub_pause_off(fs_info); in scrub_blocked_if_needed()
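
The matches between source lines 278 and 307 happen to cover the scrub pause helpers almost in full, because nearly every statement in them touches fs_info. Assembled from those matched lines, a minimal sketch of how the helpers fit together; the braces, blank lines and comments are added here for readability and are not part of the listing:

        /* Sketch reconstructed from the matched lines above. Caller of
         * __scrub_blocked_if_needed() holds fs_info->scrub_lock and drops it
         * while a pause request is pending. */
        static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
        {
                while (atomic_read(&fs_info->scrub_pause_req)) {
                        mutex_unlock(&fs_info->scrub_lock);
                        wait_event(fs_info->scrub_pause_wait,
                                   atomic_read(&fs_info->scrub_pause_req) == 0);
                        mutex_lock(&fs_info->scrub_lock);
                }
        }

        static void scrub_pause_on(struct btrfs_fs_info *fs_info)
        {
                /* Announce that this scrub has reached a pause point. */
                atomic_inc(&fs_info->scrubs_paused);
                wake_up(&fs_info->scrub_pause_wait);
        }

        static void scrub_pause_off(struct btrfs_fs_info *fs_info)
        {
                /* Wait out any pause request before leaving the paused state. */
                mutex_lock(&fs_info->scrub_lock);
                __scrub_blocked_if_needed(fs_info);
                atomic_dec(&fs_info->scrubs_paused);
                mutex_unlock(&fs_info->scrub_lock);

                wake_up(&fs_info->scrub_pause_wait);
        }

        static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
        {
                scrub_pause_on(fs_info);
                scrub_pause_off(fs_info);
        }
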
423 static int lock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, in lock_full_stripe() argument
433 bg_cache = btrfs_lookup_block_group(fs_info, bytenr); in lock_full_stripe()
470 static int unlock_full_stripe(struct btrfs_fs_info *fs_info, u64 bytenr, in unlock_full_stripe() argument
484 bg_cache = btrfs_lookup_block_group(fs_info, bytenr); in unlock_full_stripe()
507 btrfs_warn(fs_info, "full stripe lock at %llu refcount underflow", in unlock_full_stripe()
576 struct btrfs_fs_info *fs_info, int is_dev_replace) in scrub_setup_ctx() argument
588 sctx->fs_info = fs_info; in scrub_setup_ctx()
613 sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy); in scrub_setup_ctx()
623 WARN_ON(!fs_info->dev_replace.tgtdev); in scrub_setup_ctx()
625 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev; in scrub_setup_ctx()
647 struct btrfs_fs_info *fs_info = swarn->dev->fs_info; in scrub_print_warning_inode() local
652 local_root = btrfs_get_fs_root(fs_info, root, true); in scrub_print_warning_inode()
703 btrfs_warn_in_rcu(fs_info, in scrub_print_warning_inode()
717 btrfs_warn_in_rcu(fs_info, in scrub_print_warning_inode()
731 struct btrfs_fs_info *fs_info; in scrub_print_warning() local
747 fs_info = sblock->sctx->fs_info; in scrub_print_warning()
758 ret = extent_from_logical(fs_info, swarn.logical, path, &found_key, in scrub_print_warning()
775 btrfs_warn_in_rcu(fs_info, in scrub_print_warning()
789 iterate_extent_inodes(fs_info, found_key.objectid, in scrub_print_warning()
803 static inline void scrub_put_recover(struct btrfs_fs_info *fs_info, in scrub_put_recover() argument
807 btrfs_bio_counter_dec(fs_info); in scrub_put_recover()
825 struct btrfs_fs_info *fs_info; in scrub_handle_errored_block() local
842 fs_info = sctx->fs_info; in scrub_handle_errored_block()
879 ret = lock_full_stripe(fs_info, logical, &full_stripe_locked); in scrub_handle_errored_block()
946 scrub_recheck_block(fs_info, sblock_bad, 1); in scrub_handle_errored_block()
1048 scrub_recheck_block(fs_info, sblock_other, 0); in scrub_handle_errored_block()
1142 &fs_info->dev_replace.num_write_errors); in scrub_handle_errored_block()
1167 scrub_recheck_block(fs_info, sblock_bad, 1); in scrub_handle_errored_block()
1180 btrfs_err_rl_in_rcu(fs_info, in scrub_handle_errored_block()
1189 btrfs_err_rl_in_rcu(fs_info, in scrub_handle_errored_block()
1208 scrub_put_recover(fs_info, recover); in scrub_handle_errored_block()
1218 ret = unlock_full_stripe(fs_info, logical, full_stripe_locked); in scrub_handle_errored_block()
1269 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_setup_recheck_block() local
1301 btrfs_bio_counter_inc_blocked(fs_info); in scrub_setup_recheck_block()
1302 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, in scrub_setup_recheck_block()
1306 btrfs_bio_counter_dec(fs_info); in scrub_setup_recheck_block()
1313 btrfs_bio_counter_dec(fs_info); in scrub_setup_recheck_block()
1339 scrub_put_recover(fs_info, recover); in scrub_setup_recheck_block()
1381 scrub_put_recover(fs_info, recover); in scrub_setup_recheck_block()
1395 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, in scrub_submit_raid56_bio_wait() argument
1408 ret = raid56_parity_recover(fs_info, bio, page->recover->bbio, in scrub_submit_raid56_bio_wait()
1418 static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info, in scrub_recheck_block_on_raid56() argument
1440 if (scrub_submit_raid56_bio_wait(fs_info, bio, first_page)) { in scrub_recheck_block_on_raid56()
1464 static void scrub_recheck_block(struct btrfs_fs_info *fs_info, in scrub_recheck_block() argument
1474 return scrub_recheck_block_on_raid56(fs_info, sblock); in scrub_recheck_block()
1553 struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info; in scrub_repair_page_from_good_copy() local
1563 btrfs_warn_rl(fs_info, in scrub_repair_page_from_good_copy()
1582 atomic64_inc(&fs_info->dev_replace.num_write_errors); in scrub_repair_page_from_good_copy()
1594 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; in scrub_write_block_to_dev_replace() local
1609 atomic64_inc(&fs_info->dev_replace.num_write_errors); in scrub_write_block_to_dev_replace()
1713 struct btrfs_fs_info *fs_info = sbio->dev->fs_info; in scrub_wr_bio_end_io() local
1719 btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); in scrub_wr_bio_end_io()
1731 &sbio->sctx->fs_info->dev_replace; in scrub_wr_bio_end_io_worker()
1786 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_checksum_data() local
1787 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); in scrub_checksum_data()
1799 shash->tfm = fs_info->csum_shash; in scrub_checksum_data()
1813 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_checksum_tree_block() local
1814 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); in scrub_checksum_tree_block()
1817 const int num_pages = sctx->fs_info->nodesize >> PAGE_SHIFT; in scrub_checksum_tree_block()
1844 if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, in scrub_checksum_tree_block()
1848 shash->tfm = fs_info->csum_shash; in scrub_checksum_tree_block()
1869 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_checksum_super() local
1870 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); in scrub_checksum_super()
1891 shash->tfm = fs_info->csum_shash; in scrub_checksum_super()
2041 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; in scrub_missing_raid56_end_io() local
2048 btrfs_queue_work(fs_info->scrub_workers, &sblock->work); in scrub_missing_raid56_end_io()
2055 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_missing_raid56_worker() local
2069 btrfs_err_rl_in_rcu(fs_info, in scrub_missing_raid56_worker()
2076 btrfs_err_rl_in_rcu(fs_info, in scrub_missing_raid56_worker()
2096 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_missing_raid56_pages() local
2105 btrfs_bio_counter_inc_blocked(fs_info); in scrub_missing_raid56_pages()
2106 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical, in scrub_missing_raid56_pages()
2127 rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length); in scrub_missing_raid56_pages()
2146 btrfs_bio_counter_dec(fs_info); in scrub_missing_raid56_pages()
2246 struct btrfs_fs_info *fs_info = sbio->dev->fs_info; in scrub_bio_end_io() local
2251 btrfs_queue_work(fs_info->scrub_workers, &sbio->work); in scrub_bio_end_io()
2303 int sectorsize = sparity->sctx->fs_info->sectorsize; in __scrub_mark_bitmap()
2389 index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize); in scrub_find_csum()
2392 num_sectors = sum->len / sctx->fs_info->sectorsize; in scrub_find_csum()
2415 blocksize = sctx->fs_info->sectorsize; in scrub_extent()
2424 blocksize = sctx->fs_info->nodesize; in scrub_extent()
2430 blocksize = sctx->fs_info->sectorsize; in scrub_extent()
2561 blocksize = sctx->fs_info->sectorsize; in scrub_extent_for_parity()
2667 struct btrfs_fs_info *fs_info = sparity->sctx->fs_info; in scrub_parity_bio_endio() local
2677 btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work); in scrub_parity_bio_endio()
2683 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_parity_check_and_repair() local
2696 btrfs_bio_counter_inc_blocked(fs_info); in scrub_parity_check_and_repair()
2697 ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start, in scrub_parity_check_and_repair()
2707 rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio, in scrub_parity_check_and_repair()
2721 btrfs_bio_counter_dec(fs_info); in scrub_parity_check_and_repair()
2757 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_raid56_parity() local
2758 struct btrfs_root *root = fs_info->extent_root; in scrub_raid56_parity()
2759 struct btrfs_root *csum_root = fs_info->csum_root; in scrub_raid56_parity()
2779 nsectors = div_u64(map->stripe_len, fs_info->sectorsize); in scrub_raid56_parity()
2803 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) in scrub_raid56_parity()
2850 bytes = fs_info->nodesize; in scrub_raid56_parity()
2874 btrfs_err(fs_info, in scrub_raid56_parity()
2901 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, in scrub_raid56_parity()
2982 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_stripe() local
2983 struct btrfs_root *root = fs_info->extent_root; in scrub_stripe()
2984 struct btrfs_root *csum_root = fs_info->csum_root; in scrub_stripe()
3076 scrub_blocked_if_needed(fs_info); in scrub_stripe()
3115 if (atomic_read(&fs_info->scrub_cancel_req) || in scrub_stripe()
3123 if (atomic_read(&fs_info->scrub_pause_req)) { in scrub_stripe()
3133 scrub_blocked_if_needed(fs_info); in scrub_stripe()
3154 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) in scrub_stripe()
3203 bytes = fs_info->nodesize; in scrub_stripe()
3240 btrfs_err(fs_info, in scrub_stripe()
3270 scrub_remap_extent(fs_info, extent_logical, in scrub_stripe()
3370 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_chunk() local
3371 struct extent_map_tree *map_tree = &fs_info->mapping_tree; in scrub_chunk()
3422 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_enumerate_chunks() local
3423 struct btrfs_root *root = fs_info->dev_root; in scrub_enumerate_chunks()
3433 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; in scrub_enumerate_chunks()
3495 cache = btrfs_lookup_block_group(fs_info, chunk_offset); in scrub_enumerate_chunks()
3527 scrub_pause_on(fs_info); in scrub_enumerate_chunks()
3572 btrfs_warn(fs_info, in scrub_enumerate_chunks()
3575 scrub_pause_off(fs_info); in scrub_enumerate_chunks()
3579 btrfs_warn(fs_info, in scrub_enumerate_chunks()
3583 scrub_pause_off(fs_info); in scrub_enumerate_chunks()
3594 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, in scrub_enumerate_chunks()
3598 scrub_pause_off(fs_info); in scrub_enumerate_chunks()
3627 scrub_pause_on(fs_info); in scrub_enumerate_chunks()
3638 scrub_pause_off(fs_info); in scrub_enumerate_chunks()
3659 if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) in scrub_enumerate_chunks()
3660 btrfs_discard_queue_work(&fs_info->discard_ctl, in scrub_enumerate_chunks()
3698 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_supers() local
3700 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) in scrub_supers()
3704 if (scrub_dev->fs_devices != fs_info->fs_devices) in scrub_supers()
3707 gen = fs_info->last_trans_committed; in scrub_supers()
3726 static void scrub_workers_put(struct btrfs_fs_info *fs_info) in scrub_workers_put() argument
3728 if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt, in scrub_workers_put()
3729 &fs_info->scrub_lock)) { in scrub_workers_put()
3734 scrub_workers = fs_info->scrub_workers; in scrub_workers_put()
3735 scrub_wr_comp = fs_info->scrub_wr_completion_workers; in scrub_workers_put()
3736 scrub_parity = fs_info->scrub_parity_workers; in scrub_workers_put()
3738 fs_info->scrub_workers = NULL; in scrub_workers_put()
3739 fs_info->scrub_wr_completion_workers = NULL; in scrub_workers_put()
3740 fs_info->scrub_parity_workers = NULL; in scrub_workers_put()
3741 mutex_unlock(&fs_info->scrub_lock); in scrub_workers_put()
3752 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, in scrub_workers_get() argument
3759 int max_active = fs_info->thread_pool_size; in scrub_workers_get()
3762 if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt)) in scrub_workers_get()
3765 scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags, in scrub_workers_get()
3770 scrub_wr_comp = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags, in scrub_workers_get()
3775 scrub_parity = btrfs_alloc_workqueue(fs_info, "scrubparity", flags, in scrub_workers_get()
3780 mutex_lock(&fs_info->scrub_lock); in scrub_workers_get()
3781 if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) { in scrub_workers_get()
3782 ASSERT(fs_info->scrub_workers == NULL && in scrub_workers_get()
3783 fs_info->scrub_wr_completion_workers == NULL && in scrub_workers_get()
3784 fs_info->scrub_parity_workers == NULL); in scrub_workers_get()
3785 fs_info->scrub_workers = scrub_workers; in scrub_workers_get()
3786 fs_info->scrub_wr_completion_workers = scrub_wr_comp; in scrub_workers_get()
3787 fs_info->scrub_parity_workers = scrub_parity; in scrub_workers_get()
3788 refcount_set(&fs_info->scrub_workers_refcnt, 1); in scrub_workers_get()
3789 mutex_unlock(&fs_info->scrub_lock); in scrub_workers_get()
3793 refcount_inc(&fs_info->scrub_workers_refcnt); in scrub_workers_get()
3794 mutex_unlock(&fs_info->scrub_lock); in scrub_workers_get()
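
The matches between source lines 3726 and 3794 outline the refcounted lifetime of the scrub workqueues. Below is a condensed sketch assembled from those matched lines: the btrfs_destroy_workqueue() calls, the WQ_FREEZABLE | WQ_UNBOUND flags, and the trailing btrfs_alloc_workqueue() arguments are filled in from context (the listing truncates those lines), and allocation-failure handling is omitted entirely.

        static void scrub_workers_put(struct btrfs_fs_info *fs_info)
        {
                /* Last reference: detach the workqueues under scrub_lock ... */
                if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
                                                &fs_info->scrub_lock)) {
                        struct btrfs_workqueue *scrub_workers;
                        struct btrfs_workqueue *scrub_wr_comp;
                        struct btrfs_workqueue *scrub_parity;

                        scrub_workers = fs_info->scrub_workers;
                        scrub_wr_comp = fs_info->scrub_wr_completion_workers;
                        scrub_parity = fs_info->scrub_parity_workers;

                        fs_info->scrub_workers = NULL;
                        fs_info->scrub_wr_completion_workers = NULL;
                        fs_info->scrub_parity_workers = NULL;
                        mutex_unlock(&fs_info->scrub_lock);

                        /* ... and destroy them outside the lock (assumed). */
                        btrfs_destroy_workqueue(scrub_workers);
                        btrfs_destroy_workqueue(scrub_wr_comp);
                        btrfs_destroy_workqueue(scrub_parity);
                }
        }

        static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
                                                        int is_dev_replace)
        {
                struct btrfs_workqueue *scrub_workers;
                struct btrfs_workqueue *scrub_wr_comp;
                struct btrfs_workqueue *scrub_parity;
                unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;    /* assumed */
                int max_active = fs_info->thread_pool_size;

                /* Fast path: workqueues already exist, just take a reference. */
                if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
                        return 0;

                /* Trailing arguments assumed; error handling omitted. */
                scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags,
                                                      is_dev_replace ? 1 : max_active, 4);
                scrub_wr_comp = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
                                                      max_active, 2);
                scrub_parity = btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
                                                     max_active, 2);

                mutex_lock(&fs_info->scrub_lock);
                if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
                        /* We won the race: publish the new workqueues. */
                        ASSERT(fs_info->scrub_workers == NULL &&
                               fs_info->scrub_wr_completion_workers == NULL &&
                               fs_info->scrub_parity_workers == NULL);
                        fs_info->scrub_workers = scrub_workers;
                        fs_info->scrub_wr_completion_workers = scrub_wr_comp;
                        fs_info->scrub_parity_workers = scrub_parity;
                        refcount_set(&fs_info->scrub_workers_refcnt, 1);
                        mutex_unlock(&fs_info->scrub_lock);
                        return 0;
                }
                /* Lost the race: take a reference, drop our duplicates (assumed). */
                refcount_inc(&fs_info->scrub_workers_refcnt);
                mutex_unlock(&fs_info->scrub_lock);

                btrfs_destroy_workqueue(scrub_workers);
                btrfs_destroy_workqueue(scrub_wr_comp);
                btrfs_destroy_workqueue(scrub_parity);
                return 0;
        }
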
3806 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, in btrfs_scrub_dev() argument
3816 if (btrfs_fs_closing(fs_info)) in btrfs_scrub_dev()
3819 if (fs_info->nodesize > BTRFS_STRIPE_LEN) { in btrfs_scrub_dev()
3825 btrfs_err(fs_info, in btrfs_scrub_dev()
3827 fs_info->nodesize, in btrfs_scrub_dev()
3832 if (fs_info->sectorsize != PAGE_SIZE) { in btrfs_scrub_dev()
3834 btrfs_err_rl(fs_info, in btrfs_scrub_dev()
3836 fs_info->sectorsize, PAGE_SIZE); in btrfs_scrub_dev()
3840 if (fs_info->nodesize > in btrfs_scrub_dev()
3842 fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) { in btrfs_scrub_dev()
3847 btrfs_err(fs_info, in btrfs_scrub_dev()
3849 fs_info->nodesize, in btrfs_scrub_dev()
3851 fs_info->sectorsize, in btrfs_scrub_dev()
3857 sctx = scrub_setup_ctx(fs_info, is_dev_replace); in btrfs_scrub_dev()
3861 ret = scrub_workers_get(fs_info, is_dev_replace); in btrfs_scrub_dev()
3865 mutex_lock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3866 dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true); in btrfs_scrub_dev()
3869 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3876 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3877 btrfs_err_in_rcu(fs_info, in btrfs_scrub_dev()
3884 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_dev()
3887 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
3888 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3893 down_read(&fs_info->dev_replace.rwsem); in btrfs_scrub_dev()
3896 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) { in btrfs_scrub_dev()
3897 up_read(&fs_info->dev_replace.rwsem); in btrfs_scrub_dev()
3898 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
3899 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3903 up_read(&fs_info->dev_replace.rwsem); in btrfs_scrub_dev()
3907 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3913 __scrub_blocked_if_needed(fs_info); in btrfs_scrub_dev()
3914 atomic_inc(&fs_info->scrubs_running); in btrfs_scrub_dev()
3915 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
3934 btrfs_info(fs_info, "scrub: started on devid %llu", devid); in btrfs_scrub_dev()
3939 mutex_lock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3941 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_dev()
3959 atomic_dec(&fs_info->scrubs_running); in btrfs_scrub_dev()
3960 wake_up(&fs_info->scrub_pause_wait); in btrfs_scrub_dev()
3968 btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d", in btrfs_scrub_dev()
3971 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_dev()
3973 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_dev()
3975 scrub_workers_put(fs_info); in btrfs_scrub_dev()
3985 trans = btrfs_start_transaction(fs_info->tree_root, 0); in btrfs_scrub_dev()
3988 btrfs_err(fs_info, in btrfs_scrub_dev()
3994 btrfs_err(fs_info, in btrfs_scrub_dev()
3999 scrub_workers_put(fs_info); in btrfs_scrub_dev()
4006 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info) in btrfs_scrub_pause() argument
4008 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_pause()
4009 atomic_inc(&fs_info->scrub_pause_req); in btrfs_scrub_pause()
4010 while (atomic_read(&fs_info->scrubs_paused) != in btrfs_scrub_pause()
4011 atomic_read(&fs_info->scrubs_running)) { in btrfs_scrub_pause()
4012 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_pause()
4013 wait_event(fs_info->scrub_pause_wait, in btrfs_scrub_pause()
4014 atomic_read(&fs_info->scrubs_paused) == in btrfs_scrub_pause()
4015 atomic_read(&fs_info->scrubs_running)); in btrfs_scrub_pause()
4016 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_pause()
4018 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_pause()
4021 void btrfs_scrub_continue(struct btrfs_fs_info *fs_info) in btrfs_scrub_continue() argument
4023 atomic_dec(&fs_info->scrub_pause_req); in btrfs_scrub_continue()
4024 wake_up(&fs_info->scrub_pause_wait); in btrfs_scrub_continue()
4027 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info) in btrfs_scrub_cancel() argument
4029 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
4030 if (!atomic_read(&fs_info->scrubs_running)) { in btrfs_scrub_cancel()
4031 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
4035 atomic_inc(&fs_info->scrub_cancel_req); in btrfs_scrub_cancel()
4036 while (atomic_read(&fs_info->scrubs_running)) { in btrfs_scrub_cancel()
4037 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
4038 wait_event(fs_info->scrub_pause_wait, in btrfs_scrub_cancel()
4039 atomic_read(&fs_info->scrubs_running) == 0); in btrfs_scrub_cancel()
4040 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
4042 atomic_dec(&fs_info->scrub_cancel_req); in btrfs_scrub_cancel()
4043 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel()
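
The matches between source lines 4006 and 4043 cover btrfs_scrub_pause(), btrfs_scrub_continue() and btrfs_scrub_cancel() nearly line for line. A sketch of the pause/cancel protocol as it can be read off those matches; the return values in the cancel path are an assumption, since the lines carrying them do not reference fs_info and therefore do not appear in the listing:

        /* Block until every running scrub has parked at a pause point. */
        void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
        {
                mutex_lock(&fs_info->scrub_lock);
                atomic_inc(&fs_info->scrub_pause_req);
                while (atomic_read(&fs_info->scrubs_paused) !=
                       atomic_read(&fs_info->scrubs_running)) {
                        mutex_unlock(&fs_info->scrub_lock);
                        wait_event(fs_info->scrub_pause_wait,
                                   atomic_read(&fs_info->scrubs_paused) ==
                                   atomic_read(&fs_info->scrubs_running));
                        mutex_lock(&fs_info->scrub_lock);
                }
                mutex_unlock(&fs_info->scrub_lock);
        }

        void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
        {
                atomic_dec(&fs_info->scrub_pause_req);
                wake_up(&fs_info->scrub_pause_wait);
        }

        /* Ask all running scrubs to stop and wait until they have exited. */
        int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
        {
                mutex_lock(&fs_info->scrub_lock);
                if (!atomic_read(&fs_info->scrubs_running)) {
                        mutex_unlock(&fs_info->scrub_lock);
                        return -ENOTCONN;       /* assumed error code */
                }

                atomic_inc(&fs_info->scrub_cancel_req);
                while (atomic_read(&fs_info->scrubs_running)) {
                        mutex_unlock(&fs_info->scrub_lock);
                        wait_event(fs_info->scrub_pause_wait,
                                   atomic_read(&fs_info->scrubs_running) == 0);
                        mutex_lock(&fs_info->scrub_lock);
                }
                atomic_dec(&fs_info->scrub_cancel_req);
                mutex_unlock(&fs_info->scrub_lock);

                return 0;       /* assumed */
        }
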
4050 struct btrfs_fs_info *fs_info = dev->fs_info; in btrfs_scrub_cancel_dev() local
4053 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
4056 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
4061 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
4062 wait_event(fs_info->scrub_pause_wait, in btrfs_scrub_cancel_dev()
4064 mutex_lock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
4066 mutex_unlock(&fs_info->scrub_lock); in btrfs_scrub_cancel_dev()
4071 int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid, in btrfs_scrub_progress() argument
4077 mutex_lock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_progress()
4078 dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true); in btrfs_scrub_progress()
4083 mutex_unlock(&fs_info->fs_devices->device_list_mutex); in btrfs_scrub_progress()
4088 static void scrub_remap_extent(struct btrfs_fs_info *fs_info, in scrub_remap_extent() argument
4099 ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical, in scrub_remap_extent()