Lines matching refs:sctx — identifier cross-reference for the scrub context pointer (struct scrub_ctx *sctx) in fs/btrfs/scrub.c. Each entry gives the source line number, the matching line, and the enclosing function; the trailing member, local, and argument tags mark where sctx is declared as a struct member, a local variable, or a function parameter.
86 struct scrub_ctx *sctx; member
107 struct scrub_ctx *sctx; member
124 struct scrub_ctx *sctx; member
210 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
211 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
236 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
238 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
250 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
252 static void scrub_wr_submit(struct scrub_ctx *sctx);
257 static void scrub_put_ctx(struct scrub_ctx *sctx);
265 static void scrub_pending_bio_inc(struct scrub_ctx *sctx) in scrub_pending_bio_inc() argument
267 refcount_inc(&sctx->refs); in scrub_pending_bio_inc()
268 atomic_inc(&sctx->bios_in_flight); in scrub_pending_bio_inc()
271 static void scrub_pending_bio_dec(struct scrub_ctx *sctx) in scrub_pending_bio_dec() argument
273 atomic_dec(&sctx->bios_in_flight); in scrub_pending_bio_dec()
274 wake_up(&sctx->list_wait); in scrub_pending_bio_dec()
275 scrub_put_ctx(sctx); in scrub_pending_bio_dec()
527 static void scrub_free_csums(struct scrub_ctx *sctx) in scrub_free_csums() argument
529 while (!list_empty(&sctx->csum_list)) { in scrub_free_csums()
531 sum = list_first_entry(&sctx->csum_list, in scrub_free_csums()
538 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx) in scrub_free_ctx() argument
542 if (!sctx) in scrub_free_ctx()
546 if (sctx->curr != -1) { in scrub_free_ctx()
547 struct scrub_bio *sbio = sctx->bios[sctx->curr]; in scrub_free_ctx()
557 struct scrub_bio *sbio = sctx->bios[i]; in scrub_free_ctx()
564 kfree(sctx->wr_curr_bio); in scrub_free_ctx()
565 scrub_free_csums(sctx); in scrub_free_ctx()
566 kfree(sctx); in scrub_free_ctx()
569 static void scrub_put_ctx(struct scrub_ctx *sctx) in scrub_put_ctx() argument
571 if (refcount_dec_and_test(&sctx->refs)) in scrub_put_ctx()
572 scrub_free_ctx(sctx); in scrub_put_ctx()
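
The routines above form the context lifetime protocol: scrub_pending_bio_inc() takes one reference on the scrub_ctx and bumps bios_in_flight for every bio put in flight; scrub_pending_bio_dec() reverses both on completion and wakes waiters on list_wait; the final scrub_put_ctx() then frees the context through scrub_free_ctx(). A minimal userspace sketch of the same pattern, using C11 atomics where the kernel uses refcount_t, atomic_t, and a waitqueue (names here are illustrative, not kernel APIs):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct ctx {
        atomic_int refs;            /* plays the role of refcount_t sctx->refs */
        atomic_int bios_in_flight;  /* plays the role of sctx->bios_in_flight */
    };

    /* like scrub_put_ctx(): free the context on the last reference */
    static void ctx_put(struct ctx *c)
    {
        if (atomic_fetch_sub(&c->refs, 1) == 1)
            free(c);
    }

    /* like scrub_pending_bio_inc(): a bio in flight pins the context */
    static void pending_bio_inc(struct ctx *c)
    {
        atomic_fetch_add(&c->refs, 1);
        atomic_fetch_add(&c->bios_in_flight, 1);
    }

    /* like scrub_pending_bio_dec(): completion unpins the context; the
     * kernel also does wake_up(&sctx->list_wait) here so that waiters
     * blocked until bios_in_flight == 0 can recheck */
    static void pending_bio_dec(struct ctx *c)
    {
        atomic_fetch_sub(&c->bios_in_flight, 1);
        ctx_put(c);
    }
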
578 struct scrub_ctx *sctx; in scrub_setup_ctx() local
581 sctx = kzalloc(sizeof(*sctx), GFP_KERNEL); in scrub_setup_ctx()
582 if (!sctx) in scrub_setup_ctx()
584 refcount_set(&sctx->refs, 1); in scrub_setup_ctx()
585 sctx->is_dev_replace = is_dev_replace; in scrub_setup_ctx()
586 sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO; in scrub_setup_ctx()
587 sctx->curr = -1; in scrub_setup_ctx()
588 sctx->fs_info = fs_info; in scrub_setup_ctx()
589 INIT_LIST_HEAD(&sctx->csum_list); in scrub_setup_ctx()
596 sctx->bios[i] = sbio; in scrub_setup_ctx()
599 sbio->sctx = sctx; in scrub_setup_ctx()
605 sctx->bios[i]->next_free = i + 1; in scrub_setup_ctx()
607 sctx->bios[i]->next_free = -1; in scrub_setup_ctx()
609 sctx->first_free = 0; in scrub_setup_ctx()
610 atomic_set(&sctx->bios_in_flight, 0); in scrub_setup_ctx()
611 atomic_set(&sctx->workers_pending, 0); in scrub_setup_ctx()
612 atomic_set(&sctx->cancel_req, 0); in scrub_setup_ctx()
613 sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy); in scrub_setup_ctx()
615 spin_lock_init(&sctx->list_lock); in scrub_setup_ctx()
616 spin_lock_init(&sctx->stat_lock); in scrub_setup_ctx()
617 init_waitqueue_head(&sctx->list_wait); in scrub_setup_ctx()
619 WARN_ON(sctx->wr_curr_bio != NULL); in scrub_setup_ctx()
620 mutex_init(&sctx->wr_lock); in scrub_setup_ctx()
621 sctx->wr_curr_bio = NULL; in scrub_setup_ctx()
624 sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO; in scrub_setup_ctx()
625 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev; in scrub_setup_ctx()
626 sctx->flush_all_writes = false; in scrub_setup_ctx()
629 return sctx; in scrub_setup_ctx()
632 scrub_free_ctx(sctx); in scrub_setup_ctx()
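
scrub_setup_ctx() threads the bios[] array into a free list through integer next_free indices, -1 terminated, so that the submit path can pop a slot and the completion path can push it back under list_lock without any extra list nodes. A hedged userspace sketch of that initialization (NBIOS and the struct layouts are simplified stand-ins for SCRUB_BIOS_PER_SCTX and the real scrub_bio/scrub_ctx):

    #include <stdlib.h>

    #define NBIOS 4  /* stand-in for SCRUB_BIOS_PER_SCTX */

    struct sctx;  /* forward declaration */

    struct sbio {
        int index;
        int next_free;       /* index of the next free slot, -1 ends the list */
        struct sctx *sctx;   /* back-pointer, as in sbio->sctx = sctx */
    };

    struct sctx {
        struct sbio *bios[NBIOS];
        int first_free;      /* head of the free list */
        int curr;            /* bio currently being filled, -1 if none */
    };

    /* like scrub_setup_ctx(): allocate everything, then chain the slots */
    static struct sctx *setup_ctx(void)
    {
        struct sctx *c = calloc(1, sizeof(*c));

        if (!c)
            return NULL;
        c->curr = -1;
        for (int i = 0; i < NBIOS; i++) {
            struct sbio *b = calloc(1, sizeof(*b));

            if (!b)
                goto fail;
            b->index = i;
            b->sctx = c;
            b->next_free = (i + 1 < NBIOS) ? i + 1 : -1;
            c->bios[i] = b;
        }
        c->first_free = 0;
        return c;
    fail:
        for (int i = 0; i < NBIOS; i++)
            free(c->bios[i]);
        free(c);
        return NULL;
    }
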
747 fs_info = sblock->sctx->fs_info; in scrub_print_warning()
823 struct scrub_ctx *sctx = sblock_to_check->sctx; in scrub_handle_errored_block() local
842 fs_info = sctx->fs_info; in scrub_handle_errored_block()
849 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
850 ++sctx->stat.super_errors; in scrub_handle_errored_block()
851 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
882 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
884 sctx->stat.malloc_errors++; in scrub_handle_errored_block()
885 sctx->stat.read_errors++; in scrub_handle_errored_block()
886 sctx->stat.uncorrectable_errors++; in scrub_handle_errored_block()
887 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
923 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
924 sctx->stat.malloc_errors++; in scrub_handle_errored_block()
925 sctx->stat.read_errors++; in scrub_handle_errored_block()
926 sctx->stat.uncorrectable_errors++; in scrub_handle_errored_block()
927 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
935 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
936 sctx->stat.read_errors++; in scrub_handle_errored_block()
937 sctx->stat.uncorrectable_errors++; in scrub_handle_errored_block()
938 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
958 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
959 sctx->stat.unverified_errors++; in scrub_handle_errored_block()
961 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
963 if (sctx->is_dev_replace) in scrub_handle_errored_block()
969 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
970 sctx->stat.read_errors++; in scrub_handle_errored_block()
971 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
976 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
977 sctx->stat.csum_errors++; in scrub_handle_errored_block()
978 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
984 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
985 sctx->stat.verify_errors++; in scrub_handle_errored_block()
986 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
998 if (sctx->readonly) { in scrub_handle_errored_block()
999 ASSERT(!sctx->is_dev_replace); in scrub_handle_errored_block()
1053 if (sctx->is_dev_replace) { in scrub_handle_errored_block()
1065 if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace) in scrub_handle_errored_block()
1099 if (!page_bad->io_error && !sctx->is_dev_replace) in scrub_handle_errored_block()
1128 if (sctx->is_dev_replace) { in scrub_handle_errored_block()
1156 if (success && !sctx->is_dev_replace) { in scrub_handle_errored_block()
1176 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1177 sctx->stat.corrected_errors++; in scrub_handle_errored_block()
1179 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1186 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1187 sctx->stat.uncorrectable_errors++; in scrub_handle_errored_block()
1188 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
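
Every counter bump in scrub_handle_errored_block() takes stat_lock, so that readers like btrfs_scrub_progress() can copy a consistent snapshot of sctx->stat at any moment. The same pattern in a userspace sketch, with a pthread mutex standing in for the spinlock (names illustrative):

    #include <pthread.h>

    struct scrub_stat {
        unsigned long read_errors;
        unsigned long csum_errors;
        unsigned long uncorrectable_errors;
    };

    struct stats_ctx {
        pthread_mutex_t stat_lock;  /* plays the role of sctx->stat_lock */
        struct scrub_stat stat;
    };

    /* the counter pattern repeated throughout scrub_handle_errored_block() */
    static void count_unrepairable_read_error(struct stats_ctx *s)
    {
        pthread_mutex_lock(&s->stat_lock);
        s->stat.read_errors++;
        s->stat.uncorrectable_errors++;
        pthread_mutex_unlock(&s->stat_lock);
    }
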
1268 struct scrub_ctx *sctx = original_sblock->sctx; in scrub_setup_recheck_block() local
1269 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_setup_recheck_block()
1331 sblock->sctx = sctx; in scrub_setup_recheck_block()
1336 spin_lock(&sctx->stat_lock); in scrub_setup_recheck_block()
1337 sctx->stat.malloc_errors++; in scrub_setup_recheck_block()
1338 spin_unlock(&sctx->stat_lock); in scrub_setup_recheck_block()
1352 sctx->csum_size); in scrub_setup_recheck_block()
1553 struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info; in scrub_repair_page_from_good_copy()
1594 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; in scrub_write_block_to_dev_replace()
1622 return scrub_add_page_to_wr_bio(sblock->sctx, spage); in scrub_write_page_to_dev_replace()
1625 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, in scrub_add_page_to_wr_bio() argument
1631 mutex_lock(&sctx->wr_lock); in scrub_add_page_to_wr_bio()
1633 if (!sctx->wr_curr_bio) { in scrub_add_page_to_wr_bio()
1634 sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio), in scrub_add_page_to_wr_bio()
1636 if (!sctx->wr_curr_bio) { in scrub_add_page_to_wr_bio()
1637 mutex_unlock(&sctx->wr_lock); in scrub_add_page_to_wr_bio()
1640 sctx->wr_curr_bio->sctx = sctx; in scrub_add_page_to_wr_bio()
1641 sctx->wr_curr_bio->page_count = 0; in scrub_add_page_to_wr_bio()
1643 sbio = sctx->wr_curr_bio; in scrub_add_page_to_wr_bio()
1649 sbio->dev = sctx->wr_tgtdev; in scrub_add_page_to_wr_bio()
1652 bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio); in scrub_add_page_to_wr_bio()
1666 scrub_wr_submit(sctx); in scrub_add_page_to_wr_bio()
1675 mutex_unlock(&sctx->wr_lock); in scrub_add_page_to_wr_bio()
1678 scrub_wr_submit(sctx); in scrub_add_page_to_wr_bio()
1685 if (sbio->page_count == sctx->pages_per_wr_bio) in scrub_add_page_to_wr_bio()
1686 scrub_wr_submit(sctx); in scrub_add_page_to_wr_bio()
1687 mutex_unlock(&sctx->wr_lock); in scrub_add_page_to_wr_bio()
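
scrub_add_page_to_wr_bio() batches dev-replace writes: the current write bio is allocated lazily under wr_lock, pages are appended one at a time, and the bio is submitted as soon as page_count reaches pages_per_wr_bio. A simplified sketch of that batching logic, with the kernel's bio plumbing replaced by a placeholder (all names illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGES_PER_WR_BIO 32  /* stand-in for sctx->pages_per_wr_bio */

    struct wr_bio {
        int page_count;
        void *pages[PAGES_PER_WR_BIO];
    };

    struct wctx {
        struct wr_bio *wr_curr_bio;  /* bio being filled, NULL if none */
    };

    /* like scrub_wr_submit(): hand the full bio to the block layer */
    static void wr_submit(struct wctx *c)
    {
        if (!c->wr_curr_bio)
            return;
        printf("submitting %d pages\n", c->wr_curr_bio->page_count);
        free(c->wr_curr_bio);
        c->wr_curr_bio = NULL;
    }

    /* like scrub_add_page_to_wr_bio(): lazy-allocate, append, submit when full */
    static int add_page_to_wr_bio(struct wctx *c, void *page)
    {
        if (!c->wr_curr_bio) {
            c->wr_curr_bio = calloc(1, sizeof(*c->wr_curr_bio));
            if (!c->wr_curr_bio)
                return -1;  /* the kernel returns -ENOMEM here */
        }
        c->wr_curr_bio->pages[c->wr_curr_bio->page_count++] = page;
        if (c->wr_curr_bio->page_count == PAGES_PER_WR_BIO)
            wr_submit(c);
        return 0;
    }
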
1692 static void scrub_wr_submit(struct scrub_ctx *sctx) in scrub_wr_submit() argument
1696 if (!sctx->wr_curr_bio) in scrub_wr_submit()
1699 sbio = sctx->wr_curr_bio; in scrub_wr_submit()
1700 sctx->wr_curr_bio = NULL; in scrub_wr_submit()
1702 scrub_pending_bio_inc(sctx); in scrub_wr_submit()
1725 struct scrub_ctx *sctx = sbio->sctx; in scrub_wr_bio_end_io_worker() local
1731 &sbio->sctx->fs_info->dev_replace; in scrub_wr_bio_end_io_worker()
1746 scrub_pending_bio_dec(sctx); in scrub_wr_bio_end_io_worker()
1785 struct scrub_ctx *sctx = sblock->sctx; in scrub_checksum_data() local
1786 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_checksum_data()
1803 if (memcmp(csum, spage->csum, sctx->csum_size)) in scrub_checksum_data()
1811 struct scrub_ctx *sctx = sblock->sctx; in scrub_checksum_tree_block() local
1813 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_checksum_tree_block()
1817 const int num_pages = sctx->fs_info->nodesize >> PAGE_SHIFT; in scrub_checksum_tree_block()
1826 memcpy(on_disk_csum, h->csum, sctx->csum_size); in scrub_checksum_tree_block()
1859 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size)) in scrub_checksum_tree_block()
1868 struct scrub_ctx *sctx = sblock->sctx; in scrub_checksum_super() local
1869 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_checksum_super()
1896 if (memcmp(calculated_csum, s->csum, sctx->csum_size)) in scrub_checksum_super()
1905 spin_lock(&sctx->stat_lock); in scrub_checksum_super()
1906 ++sctx->stat.super_errors; in scrub_checksum_super()
1907 spin_unlock(&sctx->stat_lock); in scrub_checksum_super()
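
scrub_checksum_data(), scrub_checksum_tree_block(), and scrub_checksum_super() share one shape: recompute the checksum over the block's pages, then memcmp() it against the stored value over sctx->csum_size bytes, bumping the matching error counter on mismatch. The verification step in isolation, as a sketch (the checksum function below is a toy stand-in, not the crc32c the kernel uses):

    #include <stdint.h>
    #include <string.h>

    #define CSUM_SIZE 4  /* stand-in for sctx->csum_size */

    /* toy checksum, purely illustrative */
    static void compute_csum(const uint8_t *data, size_t len,
                             uint8_t out[CSUM_SIZE])
    {
        uint32_t sum = 0;

        for (size_t i = 0; i < len; i++)
            sum = sum * 31 + data[i];
        memcpy(out, &sum, CSUM_SIZE);
    }

    /* like scrub_checksum_data(): recompute and compare against the
     * checksum that came with the extent; nonzero means corruption */
    static int verify_block(const uint8_t *data, size_t len,
                            const uint8_t expect[CSUM_SIZE])
    {
        uint8_t calc[CSUM_SIZE];

        compute_csum(data, len, calc);
        return memcmp(calc, expect, CSUM_SIZE) ? 1 : 0;
    }
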
1952 static void scrub_submit(struct scrub_ctx *sctx) in scrub_submit() argument
1956 if (sctx->curr == -1) in scrub_submit()
1959 sbio = sctx->bios[sctx->curr]; in scrub_submit()
1960 sctx->curr = -1; in scrub_submit()
1961 scrub_pending_bio_inc(sctx); in scrub_submit()
1965 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, in scrub_add_page_to_rd_bio() argument
1976 while (sctx->curr == -1) { in scrub_add_page_to_rd_bio()
1977 spin_lock(&sctx->list_lock); in scrub_add_page_to_rd_bio()
1978 sctx->curr = sctx->first_free; in scrub_add_page_to_rd_bio()
1979 if (sctx->curr != -1) { in scrub_add_page_to_rd_bio()
1980 sctx->first_free = sctx->bios[sctx->curr]->next_free; in scrub_add_page_to_rd_bio()
1981 sctx->bios[sctx->curr]->next_free = -1; in scrub_add_page_to_rd_bio()
1982 sctx->bios[sctx->curr]->page_count = 0; in scrub_add_page_to_rd_bio()
1983 spin_unlock(&sctx->list_lock); in scrub_add_page_to_rd_bio()
1985 spin_unlock(&sctx->list_lock); in scrub_add_page_to_rd_bio()
1986 wait_event(sctx->list_wait, sctx->first_free != -1); in scrub_add_page_to_rd_bio()
1989 sbio = sctx->bios[sctx->curr]; in scrub_add_page_to_rd_bio()
1998 bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio); in scrub_add_page_to_rd_bio()
2013 scrub_submit(sctx); in scrub_add_page_to_rd_bio()
2025 scrub_submit(sctx); in scrub_add_page_to_rd_bio()
2032 if (sbio->page_count == sctx->pages_per_rd_bio) in scrub_add_page_to_rd_bio()
2033 scrub_submit(sctx); in scrub_add_page_to_rd_bio()
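
scrub_add_page_to_rd_bio() starts by popping a free bio slot, sleeping on list_wait while first_free is -1, i.e. while every bio is in flight; the pop itself happens under list_lock. A userspace analogue of that acquire loop using a condition variable (struct layout simplified, names illustrative):

    #include <pthread.h>

    #define NBIOS 4

    struct rctx {
        pthread_mutex_t list_lock;
        pthread_cond_t  list_wait;
        int first_free;          /* head of the index free list, -1 if empty */
        int next_free[NBIOS];
        int page_count[NBIOS];
    };

    /* like the while (sctx->curr == -1) loop in scrub_add_page_to_rd_bio():
     * pop a free slot, sleeping until a completion returns one */
    static int get_free_bio(struct rctx *c)
    {
        int curr;

        pthread_mutex_lock(&c->list_lock);
        while (c->first_free == -1)
            pthread_cond_wait(&c->list_wait, &c->list_lock);
        curr = c->first_free;
        c->first_free = c->next_free[curr];
        c->next_free[curr] = -1;
        c->page_count[curr] = 0;
        pthread_mutex_unlock(&c->list_lock);
        return curr;
    }
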
2041 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info; in scrub_missing_raid56_end_io()
2054 struct scrub_ctx *sctx = sblock->sctx; in scrub_missing_raid56_worker() local
2055 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_missing_raid56_worker()
2066 spin_lock(&sctx->stat_lock); in scrub_missing_raid56_worker()
2067 sctx->stat.read_errors++; in scrub_missing_raid56_worker()
2068 spin_unlock(&sctx->stat_lock); in scrub_missing_raid56_worker()
2073 spin_lock(&sctx->stat_lock); in scrub_missing_raid56_worker()
2074 sctx->stat.uncorrectable_errors++; in scrub_missing_raid56_worker()
2075 spin_unlock(&sctx->stat_lock); in scrub_missing_raid56_worker()
2083 if (sctx->is_dev_replace && sctx->flush_all_writes) { in scrub_missing_raid56_worker()
2084 mutex_lock(&sctx->wr_lock); in scrub_missing_raid56_worker()
2085 scrub_wr_submit(sctx); in scrub_missing_raid56_worker()
2086 mutex_unlock(&sctx->wr_lock); in scrub_missing_raid56_worker()
2090 scrub_pending_bio_dec(sctx); in scrub_missing_raid56_worker()
2095 struct scrub_ctx *sctx = sblock->sctx; in scrub_missing_raid56_pages() local
2096 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_missing_raid56_pages()
2111 if (WARN_ON(!sctx->is_dev_replace || in scrub_missing_raid56_pages()
2139 scrub_pending_bio_inc(sctx); in scrub_missing_raid56_pages()
2148 spin_lock(&sctx->stat_lock); in scrub_missing_raid56_pages()
2149 sctx->stat.malloc_errors++; in scrub_missing_raid56_pages()
2150 spin_unlock(&sctx->stat_lock); in scrub_missing_raid56_pages()
2153 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, in scrub_pages() argument
2163 spin_lock(&sctx->stat_lock); in scrub_pages()
2164 sctx->stat.malloc_errors++; in scrub_pages()
2165 spin_unlock(&sctx->stat_lock); in scrub_pages()
2172 sblock->sctx = sctx; in scrub_pages()
2182 spin_lock(&sctx->stat_lock); in scrub_pages()
2183 sctx->stat.malloc_errors++; in scrub_pages()
2184 spin_unlock(&sctx->stat_lock); in scrub_pages()
2201 memcpy(spage->csum, csum, sctx->csum_size); in scrub_pages()
2227 ret = scrub_add_page_to_rd_bio(sctx, spage); in scrub_pages()
2235 scrub_submit(sctx); in scrub_pages()
2257 struct scrub_ctx *sctx = sbio->sctx; in scrub_bio_end_io_worker() local
2282 spin_lock(&sctx->list_lock); in scrub_bio_end_io_worker()
2283 sbio->next_free = sctx->first_free; in scrub_bio_end_io_worker()
2284 sctx->first_free = sbio->index; in scrub_bio_end_io_worker()
2285 spin_unlock(&sctx->list_lock); in scrub_bio_end_io_worker()
2287 if (sctx->is_dev_replace && sctx->flush_all_writes) { in scrub_bio_end_io_worker()
2288 mutex_lock(&sctx->wr_lock); in scrub_bio_end_io_worker()
2289 scrub_wr_submit(sctx); in scrub_bio_end_io_worker()
2290 mutex_unlock(&sctx->wr_lock); in scrub_bio_end_io_worker()
2293 scrub_pending_bio_dec(sctx); in scrub_bio_end_io_worker()
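
scrub_bio_end_io_worker() is the producer's counterpart: it pushes the finished bio's index back onto the free list head under list_lock, which is what eventually satisfies the wait in the acquire loop. Extending the struct rctx sketch shown after scrub_add_page_to_rd_bio() above (illustrative, not kernel code):

    /* like the tail of scrub_bio_end_io_worker(): return the slot and
     * wake anyone blocked in get_free_bio() */
    static void put_free_bio(struct rctx *c, int index)
    {
        pthread_mutex_lock(&c->list_lock);
        c->next_free[index] = c->first_free;
        c->first_free = index;
        pthread_cond_signal(&c->list_wait);
        pthread_mutex_unlock(&c->list_lock);
    }
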
2303 int sectorsize = sparity->sctx->fs_info->sectorsize; in __scrub_mark_bitmap()
2353 if (!corrupted && sblock->sctx->is_dev_replace) in scrub_block_complete()
2367 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum) in scrub_find_csum() argument
2373 while (!list_empty(&sctx->csum_list)) { in scrub_find_csum()
2374 sum = list_first_entry(&sctx->csum_list, in scrub_find_csum()
2381 ++sctx->stat.csum_discards; in scrub_find_csum()
2389 index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize); in scrub_find_csum()
2392 num_sectors = sum->len / sctx->fs_info->sectorsize; in scrub_find_csum()
2393 memcpy(csum, sum->sums + index * sctx->csum_size, sctx->csum_size); in scrub_find_csum()
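
scrub_find_csum() walks csum_list, discarding items that end before the requested logical address, then indexes the flat sums[] array by the sector offset within the matching item. The index arithmetic in isolation, as a hedged sketch (the struct loosely mirrors btrfs_ordered_sum):

    #include <stdint.h>
    #include <string.h>

    #define CSUM_SIZE 4  /* stand-in for sctx->csum_size */

    struct ordered_sum {
        uint64_t bytenr;   /* logical start covered by this item */
        uint32_t len;      /* bytes covered */
        uint8_t  sums[];   /* one checksum per sector, packed */
    };

    /* like the tail of scrub_find_csum(): 1 if a checksum was copied out */
    static int lookup_csum(const struct ordered_sum *sum, uint32_t sectorsize,
                           uint64_t logical, uint8_t *csum)
    {
        if (logical < sum->bytenr || logical >= sum->bytenr + sum->len)
            return 0;
        uint64_t index = (logical - sum->bytenr) / sectorsize;
        memcpy(csum, sum->sums + index * CSUM_SIZE, CSUM_SIZE);
        return 1;
    }
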
2402 static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map, in scrub_extent() argument
2415 blocksize = sctx->fs_info->sectorsize; in scrub_extent()
2416 spin_lock(&sctx->stat_lock); in scrub_extent()
2417 sctx->stat.data_extents_scrubbed++; in scrub_extent()
2418 sctx->stat.data_bytes_scrubbed += len; in scrub_extent()
2419 spin_unlock(&sctx->stat_lock); in scrub_extent()
2424 blocksize = sctx->fs_info->nodesize; in scrub_extent()
2425 spin_lock(&sctx->stat_lock); in scrub_extent()
2426 sctx->stat.tree_extents_scrubbed++; in scrub_extent()
2427 sctx->stat.tree_bytes_scrubbed += len; in scrub_extent()
2428 spin_unlock(&sctx->stat_lock); in scrub_extent()
2430 blocksize = sctx->fs_info->sectorsize; in scrub_extent()
2440 have_csum = scrub_find_csum(sctx, logical, csum); in scrub_extent()
2442 ++sctx->stat.no_csum; in scrub_extent()
2444 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen, in scrub_extent()
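
scrub_extent() picks the checksum granularity from the extent flags: sectorsize for data extents, nodesize for tree blocks, and a sectorsize fallback otherwise, while accounting the scrubbed bytes in the matching stat counters. The dispatch alone, in sketch form (the flag bits are stand-ins for the BTRFS_EXTENT_FLAG_* values):

    #include <stdint.h>

    #define FLAG_DATA        (1u << 0)  /* stand-in for BTRFS_EXTENT_FLAG_DATA */
    #define FLAG_TREE_BLOCK  (1u << 1)  /* stand-in for ..._FLAG_TREE_BLOCK */

    /* like the top of scrub_extent(): data is checksummed per sector,
     * metadata per node */
    static uint32_t scrub_blocksize(uint64_t flags, uint32_t sectorsize,
                                    uint32_t nodesize)
    {
        if (flags & FLAG_DATA)
            return sectorsize;
        if (flags & FLAG_TREE_BLOCK)
            return nodesize;
        return sectorsize;  /* fallback path in the original */
    }
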
2462 struct scrub_ctx *sctx = sparity->sctx; in scrub_pages_for_parity() local
2468 spin_lock(&sctx->stat_lock); in scrub_pages_for_parity()
2469 sctx->stat.malloc_errors++; in scrub_pages_for_parity()
2470 spin_unlock(&sctx->stat_lock); in scrub_pages_for_parity()
2477 sblock->sctx = sctx; in scrub_pages_for_parity()
2489 spin_lock(&sctx->stat_lock); in scrub_pages_for_parity()
2490 sctx->stat.malloc_errors++; in scrub_pages_for_parity()
2491 spin_unlock(&sctx->stat_lock); in scrub_pages_for_parity()
2511 memcpy(spage->csum, csum, sctx->csum_size); in scrub_pages_for_parity()
2529 ret = scrub_add_page_to_rd_bio(sctx, spage); in scrub_pages_for_parity()
2546 struct scrub_ctx *sctx = sparity->sctx; in scrub_extent_for_parity() local
2561 blocksize = sctx->fs_info->sectorsize; in scrub_extent_for_parity()
2571 have_csum = scrub_find_csum(sctx, logical, csum); in scrub_extent_for_parity()
2634 struct scrub_ctx *sctx = sparity->sctx; in scrub_free_parity() local
2640 spin_lock(&sctx->stat_lock); in scrub_free_parity()
2641 sctx->stat.read_errors += nbits; in scrub_free_parity()
2642 sctx->stat.uncorrectable_errors += nbits; in scrub_free_parity()
2643 spin_unlock(&sctx->stat_lock); in scrub_free_parity()
2658 struct scrub_ctx *sctx = sparity->sctx; in scrub_parity_bio_endio_worker() local
2661 scrub_pending_bio_dec(sctx); in scrub_parity_bio_endio_worker()
2667 struct btrfs_fs_info *fs_info = sparity->sctx->fs_info; in scrub_parity_bio_endio()
2682 struct scrub_ctx *sctx = sparity->sctx; in scrub_parity_check_and_repair() local
2683 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_parity_check_and_repair()
2714 scrub_pending_bio_inc(sctx); in scrub_parity_check_and_repair()
2725 spin_lock(&sctx->stat_lock); in scrub_parity_check_and_repair()
2726 sctx->stat.malloc_errors++; in scrub_parity_check_and_repair()
2727 spin_unlock(&sctx->stat_lock); in scrub_parity_check_and_repair()
2750 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx, in scrub_raid56_parity() argument
2757 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_raid56_parity()
2784 spin_lock(&sctx->stat_lock); in scrub_raid56_parity()
2785 sctx->stat.malloc_errors++; in scrub_raid56_parity()
2786 spin_unlock(&sctx->stat_lock); in scrub_raid56_parity()
2792 sparity->sctx = sctx; in scrub_raid56_parity()
2877 spin_lock(&sctx->stat_lock); in scrub_raid56_parity()
2878 sctx->stat.uncorrectable_errors++; in scrub_raid56_parity()
2879 spin_unlock(&sctx->stat_lock); in scrub_raid56_parity()
2920 &sctx->csum_list, 1); in scrub_raid56_parity()
2931 scrub_free_csums(sctx); in scrub_raid56_parity()
2966 scrub_submit(sctx); in scrub_raid56_parity()
2967 mutex_lock(&sctx->wr_lock); in scrub_raid56_parity()
2968 scrub_wr_submit(sctx); in scrub_raid56_parity()
2969 mutex_unlock(&sctx->wr_lock); in scrub_raid56_parity()
2975 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, in scrub_stripe() argument
2982 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_stripe()
3074 wait_event(sctx->list_wait, in scrub_stripe()
3075 atomic_read(&sctx->bios_in_flight) == 0); in scrub_stripe()
3116 atomic_read(&sctx->cancel_req)) { in scrub_stripe()
3125 sctx->flush_all_writes = true; in scrub_stripe()
3126 scrub_submit(sctx); in scrub_stripe()
3127 mutex_lock(&sctx->wr_lock); in scrub_stripe()
3128 scrub_wr_submit(sctx); in scrub_stripe()
3129 mutex_unlock(&sctx->wr_lock); in scrub_stripe()
3130 wait_event(sctx->list_wait, in scrub_stripe()
3131 atomic_read(&sctx->bios_in_flight) == 0); in scrub_stripe()
3132 sctx->flush_all_writes = false; in scrub_stripe()
3145 ret = scrub_raid56_parity(sctx, map, scrub_dev, in scrub_stripe()
3243 spin_lock(&sctx->stat_lock); in scrub_stripe()
3244 sctx->stat.uncorrectable_errors++; in scrub_stripe()
3245 spin_unlock(&sctx->stat_lock); in scrub_stripe()
3269 if (sctx->is_dev_replace) in scrub_stripe()
3279 &sctx->csum_list, 1); in scrub_stripe()
3284 ret = scrub_extent(sctx, map, extent_logical, extent_len, in scrub_stripe()
3289 scrub_free_csums(sctx); in scrub_stripe()
3312 ret = scrub_raid56_parity(sctx, in scrub_stripe()
3341 spin_lock(&sctx->stat_lock); in scrub_stripe()
3343 sctx->stat.last_physical = map->stripes[num].physical + in scrub_stripe()
3346 sctx->stat.last_physical = physical; in scrub_stripe()
3347 spin_unlock(&sctx->stat_lock); in scrub_stripe()
3353 scrub_submit(sctx); in scrub_stripe()
3354 mutex_lock(&sctx->wr_lock); in scrub_stripe()
3355 scrub_wr_submit(sctx); in scrub_stripe()
3356 mutex_unlock(&sctx->wr_lock); in scrub_stripe()
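
scrub_stripe() and scrub_enumerate_chunks() share a drain barrier: set flush_all_writes so completion workers submit write bios immediately instead of batching, push out everything already queued via scrub_submit() and scrub_wr_submit(), then wait until bios_in_flight drops to zero before clearing the flag and moving on. A compact sketch of the barrier, busy-waiting where the kernel sleeps on list_wait (the submit helpers are hypothetical stubs):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct fctx {
        atomic_int  bios_in_flight;
        atomic_bool flush_all_writes;
    };

    /* hypothetical stand-ins for scrub_submit() and scrub_wr_submit() */
    static void submit_reads(struct fctx *c)  { (void)c; }
    static void submit_writes(struct fctx *c) { (void)c; }

    /* the drain pattern from scrub_stripe()/scrub_enumerate_chunks() */
    static void drain_io(struct fctx *c)
    {
        atomic_store(&c->flush_all_writes, true);
        submit_reads(c);
        submit_writes(c);
        while (atomic_load(&c->bios_in_flight) != 0)
            ;  /* wait_event(sctx->list_wait, ...) in the original */
        atomic_store(&c->flush_all_writes, false);
    }
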
3364 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, in scrub_chunk() argument
3370 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_chunk()
3404 ret = scrub_stripe(sctx, map, scrub_dev, i, in scrub_chunk()
3417 int scrub_enumerate_chunks(struct scrub_ctx *sctx, in scrub_enumerate_chunks() argument
3422 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_enumerate_chunks()
3559 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace); in scrub_enumerate_chunks()
3562 } else if (ret == -ENOSPC && !sctx->is_dev_replace) { in scrub_enumerate_chunks()
3592 if (sctx->is_dev_replace) { in scrub_enumerate_chunks()
3605 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, in scrub_enumerate_chunks()
3618 sctx->flush_all_writes = true; in scrub_enumerate_chunks()
3619 scrub_submit(sctx); in scrub_enumerate_chunks()
3620 mutex_lock(&sctx->wr_lock); in scrub_enumerate_chunks()
3621 scrub_wr_submit(sctx); in scrub_enumerate_chunks()
3622 mutex_unlock(&sctx->wr_lock); in scrub_enumerate_chunks()
3624 wait_event(sctx->list_wait, in scrub_enumerate_chunks()
3625 atomic_read(&sctx->bios_in_flight) == 0); in scrub_enumerate_chunks()
3634 wait_event(sctx->list_wait, in scrub_enumerate_chunks()
3635 atomic_read(&sctx->workers_pending) == 0); in scrub_enumerate_chunks()
3636 sctx->flush_all_writes = false; in scrub_enumerate_chunks()
3672 if (sctx->is_dev_replace && in scrub_enumerate_chunks()
3677 if (sctx->stat.malloc_errors > 0) { in scrub_enumerate_chunks()
3691 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, in scrub_supers() argument
3698 struct btrfs_fs_info *fs_info = sctx->fs_info; in scrub_supers()
3715 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr, in scrub_supers()
3721 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); in scrub_supers()
3810 struct scrub_ctx *sctx; in btrfs_scrub_dev() local
3857 sctx = scrub_setup_ctx(fs_info, is_dev_replace); in btrfs_scrub_dev()
3858 if (IS_ERR(sctx)) in btrfs_scrub_dev()
3859 return PTR_ERR(sctx); in btrfs_scrub_dev()
3905 sctx->readonly = readonly; in btrfs_scrub_dev()
3906 dev->scrub_ctx = sctx; in btrfs_scrub_dev()
3930 spin_lock(&sctx->stat_lock); in btrfs_scrub_dev()
3931 old_super_errors = sctx->stat.super_errors; in btrfs_scrub_dev()
3932 spin_unlock(&sctx->stat_lock); in btrfs_scrub_dev()
3940 ret = scrub_supers(sctx, dev); in btrfs_scrub_dev()
3943 spin_lock(&sctx->stat_lock); in btrfs_scrub_dev()
3949 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly) in btrfs_scrub_dev()
3951 spin_unlock(&sctx->stat_lock); in btrfs_scrub_dev()
3955 ret = scrub_enumerate_chunks(sctx, dev, start, end); in btrfs_scrub_dev()
3958 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); in btrfs_scrub_dev()
3962 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0); in btrfs_scrub_dev()
3965 memcpy(progress, &sctx->stat, sizeof(*progress)); in btrfs_scrub_dev()
3976 scrub_put_ctx(sctx); in btrfs_scrub_dev()
4001 scrub_free_ctx(sctx); in btrfs_scrub_dev()
4051 struct scrub_ctx *sctx; in btrfs_scrub_cancel_dev() local
4054 sctx = dev->scrub_ctx; in btrfs_scrub_cancel_dev()
4055 if (!sctx) { in btrfs_scrub_cancel_dev()
4059 atomic_inc(&sctx->cancel_req); in btrfs_scrub_cancel_dev()
4075 struct scrub_ctx *sctx = NULL; in btrfs_scrub_progress() local
4080 sctx = dev->scrub_ctx; in btrfs_scrub_progress()
4081 if (sctx) in btrfs_scrub_progress()
4082 memcpy(progress, &sctx->stat, sizeof(*progress)); in btrfs_scrub_progress()
4085 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; in btrfs_scrub_progress()
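
btrfs_scrub_progress() ends with a two-level error convention: -ENODEV when the device cannot be found at all, -ENOTCONN when the device exists but no scrub context is attached, and 0 with sctx->stat copied into the caller's buffer otherwise. A self-contained sketch of that convention (struct shapes are illustrative):

    #include <errno.h>
    #include <string.h>

    struct scrub_stat { unsigned long csum_errors; };

    struct device {
        struct scrub_stat *scrub_stat;  /* NULL while no scrub is running */
    };

    /* like the tail of btrfs_scrub_progress() */
    static int scrub_progress(struct device *dev, struct scrub_stat *progress)
    {
        if (!dev)
            return -ENODEV;
        if (!dev->scrub_stat)
            return -ENOTCONN;
        memcpy(progress, dev->scrub_stat, sizeof(*progress));
        return 0;
    }
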