Lines Matching refs: rbio (references to struct btrfs_raid_bio in fs/btrfs/raid56.c)

177 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
178 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
181 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
182 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
183 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
184 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
185 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
187 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
191 static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func) in start_async_work() argument
193 btrfs_init_work(&rbio->work, work_func, NULL, NULL); in start_async_work()
194 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work); in start_async_work()
250 static void cache_rbio_pages(struct btrfs_raid_bio *rbio) in cache_rbio_pages() argument
257 ret = alloc_rbio_pages(rbio); in cache_rbio_pages()
261 for (i = 0; i < rbio->nr_pages; i++) { in cache_rbio_pages()
262 if (!rbio->bio_pages[i]) in cache_rbio_pages()
265 s = kmap(rbio->bio_pages[i]); in cache_rbio_pages()
266 d = kmap(rbio->stripe_pages[i]); in cache_rbio_pages()
270 kunmap(rbio->bio_pages[i]); in cache_rbio_pages()
271 kunmap(rbio->stripe_pages[i]); in cache_rbio_pages()
272 SetPageUptodate(rbio->stripe_pages[i]); in cache_rbio_pages()
274 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in cache_rbio_pages()
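
Only the lines that mention rbio survive the ref filter, so the actual byte copy between the two kmapped pages is missing above. A sketch of the loop body, assuming the copy is a full-page copy (copy_page(), or an equivalent memcpy of PAGE_SIZE bytes):

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		copy_page(d, s);		/* assumed: whole-page copy */

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
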
280 static int rbio_bucket(struct btrfs_raid_bio *rbio) in rbio_bucket() argument
282 u64 num = rbio->bbio->raid_map[0]; in rbio_bucket()
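
The return statement of rbio_bucket() does not mention rbio, so it is filtered out; the bucket comes from hashing the first logical address of the full stripe. Roughly (BTRFS_STRIPE_HASH_TABLE_BITS is the constant that sizes the stripe hash table, defined elsewhere in the raid56 code):

	static int rbio_bucket(struct btrfs_raid_bio *rbio)
	{
		u64 num = rbio->bbio->raid_map[0];

		/*
		 * The low bits of a byte address inside a full stripe are
		 * mostly zero, which upsets hash_64; shift them away first.
		 */
		return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
	}
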
346 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio) in __remove_rbio_from_cache() argument
348 int bucket = rbio_bucket(rbio); in __remove_rbio_from_cache()
356 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in __remove_rbio_from_cache()
359 table = rbio->fs_info->stripe_hash_table; in __remove_rbio_from_cache()
371 spin_lock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
373 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) { in __remove_rbio_from_cache()
374 list_del_init(&rbio->stripe_cache); in __remove_rbio_from_cache()
387 if (bio_list_empty(&rbio->bio_list)) { in __remove_rbio_from_cache()
388 if (!list_empty(&rbio->hash_list)) { in __remove_rbio_from_cache()
389 list_del_init(&rbio->hash_list); in __remove_rbio_from_cache()
390 refcount_dec(&rbio->refs); in __remove_rbio_from_cache()
391 BUG_ON(!list_empty(&rbio->plug_list)); in __remove_rbio_from_cache()
396 spin_unlock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
400 __free_raid_bio(rbio); in __remove_rbio_from_cache()
406 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio) in remove_rbio_from_cache() argument
411 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in remove_rbio_from_cache()
414 table = rbio->fs_info->stripe_hash_table; in remove_rbio_from_cache()
417 __remove_rbio_from_cache(rbio); in remove_rbio_from_cache()
428 struct btrfs_raid_bio *rbio; in btrfs_clear_rbio_cache() local
434 rbio = list_entry(table->stripe_cache.next, in btrfs_clear_rbio_cache()
437 __remove_rbio_from_cache(rbio); in btrfs_clear_rbio_cache()
466 static void cache_rbio(struct btrfs_raid_bio *rbio) in cache_rbio() argument
471 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags)) in cache_rbio()
474 table = rbio->fs_info->stripe_hash_table; in cache_rbio()
477 spin_lock(&rbio->bio_list_lock); in cache_rbio()
480 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags)) in cache_rbio()
481 refcount_inc(&rbio->refs); in cache_rbio()
483 if (!list_empty(&rbio->stripe_cache)){ in cache_rbio()
484 list_move(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
486 list_add(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
490 spin_unlock(&rbio->bio_list_lock); in cache_rbio()
499 if (found != rbio) in cache_rbio()
530 static int rbio_is_full(struct btrfs_raid_bio *rbio) in rbio_is_full() argument
533 unsigned long size = rbio->bio_list_bytes; in rbio_is_full()
536 spin_lock_irqsave(&rbio->bio_list_lock, flags); in rbio_is_full()
537 if (size != rbio->nr_data * rbio->stripe_len) in rbio_is_full()
539 BUG_ON(size > rbio->nr_data * rbio->stripe_len); in rbio_is_full()
540 spin_unlock_irqrestore(&rbio->bio_list_lock, flags); in rbio_is_full()
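
The declarations and return paths of rbio_is_full() are hidden by the filter. The whole helper, as a sketch, answers "does the bio list already cover every data page of the full stripe", which decides between the full-stripe write path and the read-modify-write path:

	static int rbio_is_full(struct btrfs_raid_bio *rbio)
	{
		unsigned long flags;
		unsigned long size = rbio->bio_list_bytes;
		int ret = 1;

		spin_lock_irqsave(&rbio->bio_list_lock, flags);
		if (size != rbio->nr_data * rbio->stripe_len)
			ret = 0;	/* partial stripe: RMW will be needed */
		BUG_ON(size > rbio->nr_data * rbio->stripe_len);
		spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

		return ret;
	}
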
616 static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe, in rbio_stripe_page_index() argument
619 return stripe * rbio->stripe_npages + index; in rbio_stripe_page_index()
626 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, in rbio_stripe_page() argument
629 return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)]; in rbio_stripe_page()
635 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index) in rbio_pstripe_page() argument
637 return rbio_stripe_page(rbio, rbio->nr_data, index); in rbio_pstripe_page()
644 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index) in rbio_qstripe_page() argument
646 if (rbio->nr_data + 1 == rbio->real_stripes) in rbio_qstripe_page()
648 return rbio_stripe_page(rbio, rbio->nr_data + 1, index); in rbio_qstripe_page()
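
Taken together these helpers encode the flat page layout: stripe_pages[] holds the nr_data data stripes first, then P, then (for RAID6) Q, each stripe_npages long. The early return hidden from the listing makes rbio_qstripe_page() yield NULL on RAID5, roughly:

	static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
	{
		/* nr_data + 1 == real_stripes means only a P stripe exists (RAID5) */
		if (rbio->nr_data + 1 == rbio->real_stripes)
			return NULL;
		return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
	}
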
673 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) in lock_stripe_add() argument
683 h = rbio->fs_info->stripe_hash_table->table + rbio_bucket(rbio); in lock_stripe_add()
687 if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0]) in lock_stripe_add()
700 steal_rbio(cur, rbio); in lock_stripe_add()
708 if (rbio_can_merge(cur, rbio)) { in lock_stripe_add()
709 merge_rbio(cur, rbio); in lock_stripe_add()
711 freeit = rbio; in lock_stripe_add()
723 if (rbio_can_merge(pending, rbio)) { in lock_stripe_add()
724 merge_rbio(pending, rbio); in lock_stripe_add()
726 freeit = rbio; in lock_stripe_add()
736 list_add_tail(&rbio->plug_list, &cur->plug_list); in lock_stripe_add()
742 refcount_inc(&rbio->refs); in lock_stripe_add()
743 list_add(&rbio->hash_list, &h->hash_list); in lock_stripe_add()
757 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio) in unlock_stripe() argument
764 bucket = rbio_bucket(rbio); in unlock_stripe()
765 h = rbio->fs_info->stripe_hash_table->table + bucket; in unlock_stripe()
767 if (list_empty(&rbio->plug_list)) in unlock_stripe()
768 cache_rbio(rbio); in unlock_stripe()
771 spin_lock(&rbio->bio_list_lock); in unlock_stripe()
773 if (!list_empty(&rbio->hash_list)) { in unlock_stripe()
779 if (list_empty(&rbio->plug_list) && in unlock_stripe()
780 test_bit(RBIO_CACHE_BIT, &rbio->flags)) { in unlock_stripe()
782 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in unlock_stripe()
783 BUG_ON(!bio_list_empty(&rbio->bio_list)); in unlock_stripe()
787 list_del_init(&rbio->hash_list); in unlock_stripe()
788 refcount_dec(&rbio->refs); in unlock_stripe()
795 if (!list_empty(&rbio->plug_list)) { in unlock_stripe()
797 struct list_head *head = rbio->plug_list.next; in unlock_stripe()
802 list_del_init(&rbio->plug_list); in unlock_stripe()
806 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
812 steal_rbio(rbio, next); in unlock_stripe()
815 steal_rbio(rbio, next); in unlock_stripe()
818 steal_rbio(rbio, next); in unlock_stripe()
826 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
831 remove_rbio_from_cache(rbio); in unlock_stripe()
834 static void __free_raid_bio(struct btrfs_raid_bio *rbio) in __free_raid_bio() argument
838 if (!refcount_dec_and_test(&rbio->refs)) in __free_raid_bio()
841 WARN_ON(!list_empty(&rbio->stripe_cache)); in __free_raid_bio()
842 WARN_ON(!list_empty(&rbio->hash_list)); in __free_raid_bio()
843 WARN_ON(!bio_list_empty(&rbio->bio_list)); in __free_raid_bio()
845 for (i = 0; i < rbio->nr_pages; i++) { in __free_raid_bio()
846 if (rbio->stripe_pages[i]) { in __free_raid_bio()
847 __free_page(rbio->stripe_pages[i]); in __free_raid_bio()
848 rbio->stripe_pages[i] = NULL; in __free_raid_bio()
852 btrfs_put_bbio(rbio->bbio); in __free_raid_bio()
853 kfree(rbio); in __free_raid_bio()
873 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err) in rbio_orig_end_io() argument
875 struct bio *cur = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
878 if (rbio->generic_bio_cnt) in rbio_orig_end_io()
879 btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt); in rbio_orig_end_io()
885 bitmap_clear(rbio->dbitmap, 0, rbio->stripe_npages); in rbio_orig_end_io()
895 unlock_stripe(rbio); in rbio_orig_end_io()
896 extra = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
897 __free_raid_bio(rbio); in rbio_orig_end_io()
910 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_write_end_io() local
915 fail_bio_stripe(rbio, bio); in raid_write_end_io()
919 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_write_end_io()
925 max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ? in raid_write_end_io()
926 0 : rbio->bbio->max_errors; in raid_write_end_io()
927 if (atomic_read(&rbio->error) > max_errors) in raid_write_end_io()
930 rbio_orig_end_io(rbio, err); in raid_write_end_io()
949 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio, in page_in_rbio() argument
955 chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr; in page_in_rbio()
957 spin_lock_irq(&rbio->bio_list_lock); in page_in_rbio()
958 p = rbio->bio_pages[chunk_page]; in page_in_rbio()
959 spin_unlock_irq(&rbio->bio_list_lock); in page_in_rbio()
964 return rbio->stripe_pages[chunk_page]; in page_in_rbio()
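
The filtered-out lines are what give page_in_rbio() its meaning: a page supplied by the bio list takes priority, and the cached stripe page is only a fallback (or skipped entirely when the caller asked for bio-list pages only). A sketch:

	static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
					 int index, int pagenr, int bio_list_only)
	{
		int chunk_page;
		struct page *p = NULL;

		chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

		spin_lock_irq(&rbio->bio_list_lock);
		p = rbio->bio_pages[chunk_page];
		spin_unlock_irq(&rbio->bio_list_lock);

		if (p || bio_list_only)
			return p;

		return rbio->stripe_pages[chunk_page];
	}
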
984 struct btrfs_raid_bio *rbio; in alloc_rbio() local
991 rbio = kzalloc(sizeof(*rbio) + in alloc_rbio()
992 sizeof(*rbio->stripe_pages) * num_pages + in alloc_rbio()
993 sizeof(*rbio->bio_pages) * num_pages + in alloc_rbio()
994 sizeof(*rbio->finish_pointers) * real_stripes + in alloc_rbio()
995 sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) + in alloc_rbio()
996 sizeof(*rbio->finish_pbitmap) * in alloc_rbio()
999 if (!rbio) in alloc_rbio()
1002 bio_list_init(&rbio->bio_list); in alloc_rbio()
1003 INIT_LIST_HEAD(&rbio->plug_list); in alloc_rbio()
1004 spin_lock_init(&rbio->bio_list_lock); in alloc_rbio()
1005 INIT_LIST_HEAD(&rbio->stripe_cache); in alloc_rbio()
1006 INIT_LIST_HEAD(&rbio->hash_list); in alloc_rbio()
1007 rbio->bbio = bbio; in alloc_rbio()
1008 rbio->fs_info = fs_info; in alloc_rbio()
1009 rbio->stripe_len = stripe_len; in alloc_rbio()
1010 rbio->nr_pages = num_pages; in alloc_rbio()
1011 rbio->real_stripes = real_stripes; in alloc_rbio()
1012 rbio->stripe_npages = stripe_npages; in alloc_rbio()
1013 rbio->faila = -1; in alloc_rbio()
1014 rbio->failb = -1; in alloc_rbio()
1015 refcount_set(&rbio->refs, 1); in alloc_rbio()
1016 atomic_set(&rbio->error, 0); in alloc_rbio()
1017 atomic_set(&rbio->stripes_pending, 0); in alloc_rbio()
1023 p = rbio + 1; in alloc_rbio()
1028 CONSUME_ALLOC(rbio->stripe_pages, num_pages); in alloc_rbio()
1029 CONSUME_ALLOC(rbio->bio_pages, num_pages); in alloc_rbio()
1030 CONSUME_ALLOC(rbio->finish_pointers, real_stripes); in alloc_rbio()
1031 CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages)); in alloc_rbio()
1032 CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages)); in alloc_rbio()
1042 rbio->nr_data = nr_data; in alloc_rbio()
1043 return rbio; in alloc_rbio()
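
The CONSUME_ALLOC() lines carve the per-rbio arrays out of the single kzalloc() at source line 991; the macro itself never mentions rbio, so it is absent from the listing. It is essentially a bump pointer over the memory placed right behind the struct, along these lines:

	/* p was set to rbio + 1, the start of the trailing allocation */
#define CONSUME_ALLOC(ptr, count)	do {				\
		ptr = p;						\
		p = (unsigned char *)p + sizeof(*(ptr)) * (count);	\
	} while (0)
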
1047 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_pages() argument
1052 for (i = 0; i < rbio->nr_pages; i++) { in alloc_rbio_pages()
1053 if (rbio->stripe_pages[i]) in alloc_rbio_pages()
1058 rbio->stripe_pages[i] = page; in alloc_rbio_pages()
1064 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_parity_pages() argument
1069 i = rbio_stripe_page_index(rbio, rbio->nr_data, 0); in alloc_rbio_parity_pages()
1071 for (; i < rbio->nr_pages; i++) { in alloc_rbio_parity_pages()
1072 if (rbio->stripe_pages[i]) in alloc_rbio_parity_pages()
1077 rbio->stripe_pages[i] = page; in alloc_rbio_parity_pages()
1087 static int rbio_add_io_page(struct btrfs_raid_bio *rbio, in rbio_add_io_page() argument
1100 stripe = &rbio->bbio->stripes[stripe_nr]; in rbio_add_io_page()
1105 return fail_rbio_index(rbio, stripe_nr); in rbio_add_io_page()
1144 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio) in validate_rbio_for_rmw() argument
1146 if (rbio->faila >= 0 || rbio->failb >= 0) { in validate_rbio_for_rmw()
1147 BUG_ON(rbio->faila == rbio->real_stripes - 1); in validate_rbio_for_rmw()
1148 __raid56_parity_recover(rbio); in validate_rbio_for_rmw()
1150 finish_rmw(rbio); in validate_rbio_for_rmw()
1162 static void index_rbio_pages(struct btrfs_raid_bio *rbio) in index_rbio_pages() argument
1169 spin_lock_irq(&rbio->bio_list_lock); in index_rbio_pages()
1170 bio_list_for_each(bio, &rbio->bio_list) { in index_rbio_pages()
1176 stripe_offset = start - rbio->bbio->raid_map[0]; in index_rbio_pages()
1183 rbio->bio_pages[page_index + i] = bvec.bv_page; in index_rbio_pages()
1187 spin_unlock_irq(&rbio->bio_list_lock); in index_rbio_pages()
1198 static noinline void finish_rmw(struct btrfs_raid_bio *rbio) in finish_rmw() argument
1200 struct btrfs_bio *bbio = rbio->bbio; in finish_rmw()
1201 void **pointers = rbio->finish_pointers; in finish_rmw()
1202 int nr_data = rbio->nr_data; in finish_rmw()
1212 if (rbio->real_stripes - rbio->nr_data == 1) in finish_rmw()
1214 else if (rbio->real_stripes - rbio->nr_data == 2) in finish_rmw()
1220 ASSERT(bitmap_weight(rbio->dbitmap, rbio->stripe_npages)); in finish_rmw()
1230 spin_lock_irq(&rbio->bio_list_lock); in finish_rmw()
1231 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in finish_rmw()
1232 spin_unlock_irq(&rbio->bio_list_lock); in finish_rmw()
1234 atomic_set(&rbio->error, 0); in finish_rmw()
1245 index_rbio_pages(rbio); in finish_rmw()
1246 if (!rbio_is_full(rbio)) in finish_rmw()
1247 cache_rbio_pages(rbio); in finish_rmw()
1249 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_rmw()
1251 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in finish_rmw()
1255 p = page_in_rbio(rbio, stripe, pagenr, 0); in finish_rmw()
1260 p = rbio_pstripe_page(rbio, pagenr); in finish_rmw()
1270 p = rbio_qstripe_page(rbio, pagenr); in finish_rmw()
1274 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, in finish_rmw()
1283 for (stripe = 0; stripe < rbio->real_stripes; stripe++) in finish_rmw()
1284 kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); in finish_rmw()
1292 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in finish_rmw()
1293 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in finish_rmw()
1297 if (!test_bit(pagenr, rbio->dbitmap)) in finish_rmw()
1300 if (stripe < rbio->nr_data) { in finish_rmw()
1301 page = page_in_rbio(rbio, stripe, pagenr, 1); in finish_rmw()
1305 page = rbio_stripe_page(rbio, stripe, pagenr); in finish_rmw()
1308 ret = rbio_add_io_page(rbio, &bio_list, in finish_rmw()
1309 page, stripe, pagenr, rbio->stripe_len); in finish_rmw()
1318 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in finish_rmw()
1322 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in finish_rmw()
1326 if (!test_bit(pagenr, rbio->dbitmap)) in finish_rmw()
1329 if (stripe < rbio->nr_data) { in finish_rmw()
1330 page = page_in_rbio(rbio, stripe, pagenr, 1); in finish_rmw()
1334 page = rbio_stripe_page(rbio, stripe, pagenr); in finish_rmw()
1337 ret = rbio_add_io_page(rbio, &bio_list, page, in finish_rmw()
1338 rbio->bbio->tgtdev_map[stripe], in finish_rmw()
1339 pagenr, rbio->stripe_len); in finish_rmw()
1346 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list)); in finish_rmw()
1347 BUG_ON(atomic_read(&rbio->stripes_pending) == 0); in finish_rmw()
1350 bio->bi_private = rbio; in finish_rmw()
1359 rbio_orig_end_io(rbio, BLK_STS_IOERR); in finish_rmw()
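
Between source lines 1251 and 1284 the filter keeps only the rbio references, which hides the actual parity math. The elided core is roughly: map one page from every data stripe for this pagenr, then either XOR them into P (RAID5) or hand all of them to the raid6 library to produce P and Q. In this sketch has_qstripe stands for the flag presumably set by the two conditions at lines 1212/1214:

		/* pointers[0 .. nr_data-1] were filled via page_in_rbio() above */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (has_qstripe) {
			/* RAID6: generate P and Q in one pass */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);
			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* RAID5: P is the XOR of all the data pages */
			copy_page(pointers[nr_data], pointers[0]);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}
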
1370 static int find_bio_stripe(struct btrfs_raid_bio *rbio, in find_bio_stripe() argument
1379 for (i = 0; i < rbio->bbio->num_stripes; i++) { in find_bio_stripe()
1380 stripe = &rbio->bbio->stripes[i]; in find_bio_stripe()
1381 if (in_range(physical, stripe->physical, rbio->stripe_len) && in find_bio_stripe()
1396 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, in find_logical_bio_stripe() argument
1402 for (i = 0; i < rbio->nr_data; i++) { in find_logical_bio_stripe()
1403 u64 stripe_start = rbio->bbio->raid_map[i]; in find_logical_bio_stripe()
1405 if (in_range(logical, stripe_start, rbio->stripe_len)) in find_logical_bio_stripe()
1414 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed) in fail_rbio_index() argument
1419 spin_lock_irqsave(&rbio->bio_list_lock, flags); in fail_rbio_index()
1422 if (rbio->faila == failed || rbio->failb == failed) in fail_rbio_index()
1425 if (rbio->faila == -1) { in fail_rbio_index()
1427 rbio->faila = failed; in fail_rbio_index()
1428 atomic_inc(&rbio->error); in fail_rbio_index()
1429 } else if (rbio->failb == -1) { in fail_rbio_index()
1431 rbio->failb = failed; in fail_rbio_index()
1432 atomic_inc(&rbio->error); in fail_rbio_index()
1437 spin_unlock_irqrestore(&rbio->bio_list_lock, flags); in fail_rbio_index()
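
The listing drops the third branch of fail_rbio_index(): once faila and failb are both set, a further failure cannot be recorded and the helper reports an error, since an rbio tracks at most two failed stripes. A sketch of the tail (ret and the out label are reconstructed):

		} else if (rbio->failb == -1) {
			/* second failure on this rbio */
			rbio->failb = failed;
			atomic_inc(&rbio->error);
		} else {
			/* already have two failures: nothing more is repairable */
			ret = -EIO;
		}
	out:
		spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
		return ret;
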
1446 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, in fail_bio_stripe() argument
1449 int failed = find_bio_stripe(rbio, bio); in fail_bio_stripe()
1454 return fail_rbio_index(rbio, failed); in fail_bio_stripe()
1482 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_rmw_end_io() local
1485 fail_bio_stripe(rbio, bio); in raid_rmw_end_io()
1491 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_rmw_end_io()
1494 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in raid_rmw_end_io()
1502 validate_rbio_for_rmw(rbio); in raid_rmw_end_io()
1507 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid_rmw_end_io()
1514 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) in raid56_rmw_stripe() argument
1525 ret = alloc_rbio_pages(rbio); in raid56_rmw_stripe()
1529 index_rbio_pages(rbio); in raid56_rmw_stripe()
1531 atomic_set(&rbio->error, 0); in raid56_rmw_stripe()
1536 for (stripe = 0; stripe < rbio->nr_data; stripe++) { in raid56_rmw_stripe()
1537 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in raid56_rmw_stripe()
1545 page = page_in_rbio(rbio, stripe, pagenr, 1); in raid56_rmw_stripe()
1549 page = rbio_stripe_page(rbio, stripe, pagenr); in raid56_rmw_stripe()
1557 ret = rbio_add_io_page(rbio, &bio_list, page, in raid56_rmw_stripe()
1558 stripe, pagenr, rbio->stripe_len); in raid56_rmw_stripe()
1579 atomic_set(&rbio->stripes_pending, bios_to_read); in raid56_rmw_stripe()
1581 bio->bi_private = rbio; in raid56_rmw_stripe()
1585 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); in raid56_rmw_stripe()
1593 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid56_rmw_stripe()
1601 validate_rbio_for_rmw(rbio); in raid56_rmw_stripe()
1609 static int full_stripe_write(struct btrfs_raid_bio *rbio) in full_stripe_write() argument
1613 ret = alloc_rbio_parity_pages(rbio); in full_stripe_write()
1615 __free_raid_bio(rbio); in full_stripe_write()
1619 ret = lock_stripe_add(rbio); in full_stripe_write()
1621 finish_rmw(rbio); in full_stripe_write()
1630 static int partial_stripe_write(struct btrfs_raid_bio *rbio) in partial_stripe_write() argument
1634 ret = lock_stripe_add(rbio); in partial_stripe_write()
1636 start_async_work(rbio, rmw_work); in partial_stripe_write()
1646 static int __raid56_parity_write(struct btrfs_raid_bio *rbio) in __raid56_parity_write() argument
1649 if (!rbio_is_full(rbio)) in __raid56_parity_write()
1650 return partial_stripe_write(rbio); in __raid56_parity_write()
1651 return full_stripe_write(rbio); in __raid56_parity_write()
1754 static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio) in rbio_add_bio() argument
1756 const struct btrfs_fs_info *fs_info = rbio->fs_info; in rbio_add_bio()
1758 const u64 full_stripe_start = rbio->bbio->raid_map[0]; in rbio_add_bio()
1765 rbio->nr_data * rbio->stripe_len); in rbio_add_bio()
1767 bio_list_add(&rbio->bio_list, orig_bio); in rbio_add_bio()
1768 rbio->bio_list_bytes += orig_bio->bi_iter.bi_size; in rbio_add_bio()
1774 PAGE_SHIFT) % rbio->stripe_npages; in rbio_add_bio()
1776 set_bit(bit, rbio->dbitmap); in rbio_add_bio()
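
Source lines 1774/1776 are the tail of a loop over the bio's logical range that the filter otherwise removes: every sector the bio touches gets its bit set in dbitmap, so finish_rmw() only writes vertical stripes that actually carry data. Roughly (the variable names are guesses; the step is one sector, which this pre-subpage raid56 code assumes equals PAGE_SIZE):

	const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
	const u32 orig_len = orig_bio->bi_iter.bi_size;
	u64 cur_logical;

	/* mark every sector covered by this bio in the data bitmap */
	for (cur_logical = orig_logical;
	     cur_logical < orig_logical + orig_len;
	     cur_logical += PAGE_SIZE) {
		int bit = ((u32)(cur_logical - full_stripe_start) >>
			   PAGE_SHIFT) % rbio->stripe_npages;

		set_bit(bit, rbio->dbitmap);
	}
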
1786 struct btrfs_raid_bio *rbio; in raid56_parity_write() local
1791 rbio = alloc_rbio(fs_info, bbio, stripe_len); in raid56_parity_write()
1792 if (IS_ERR(rbio)) { in raid56_parity_write()
1794 return PTR_ERR(rbio); in raid56_parity_write()
1796 rbio->operation = BTRFS_RBIO_WRITE; in raid56_parity_write()
1797 rbio_add_bio(rbio, bio); in raid56_parity_write()
1800 rbio->generic_bio_cnt = 1; in raid56_parity_write()
1806 if (rbio_is_full(rbio)) { in raid56_parity_write()
1807 ret = full_stripe_write(rbio); in raid56_parity_write()
1820 list_add_tail(&rbio->plug_list, &plug->rbio_list); in raid56_parity_write()
1823 ret = __raid56_parity_write(rbio); in raid56_parity_write()
1835 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) in __raid_recover_end_io() argument
1844 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in __raid_recover_end_io()
1850 faila = rbio->faila; in __raid_recover_end_io()
1851 failb = rbio->failb; in __raid_recover_end_io()
1853 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1854 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { in __raid_recover_end_io()
1855 spin_lock_irq(&rbio->bio_list_lock); in __raid_recover_end_io()
1856 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in __raid_recover_end_io()
1857 spin_unlock_irq(&rbio->bio_list_lock); in __raid_recover_end_io()
1860 index_rbio_pages(rbio); in __raid_recover_end_io()
1862 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in __raid_recover_end_io()
1867 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && in __raid_recover_end_io()
1868 !test_bit(pagenr, rbio->dbitmap)) in __raid_recover_end_io()
1874 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid_recover_end_io()
1879 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1880 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && in __raid_recover_end_io()
1882 page = page_in_rbio(rbio, stripe, pagenr, 0); in __raid_recover_end_io()
1884 page = rbio_stripe_page(rbio, stripe, pagenr); in __raid_recover_end_io()
1890 if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) { in __raid_recover_end_io()
1896 if (faila == rbio->nr_data) { in __raid_recover_end_io()
1922 if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) { in __raid_recover_end_io()
1923 if (rbio->bbio->raid_map[faila] == in __raid_recover_end_io()
1935 if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) { in __raid_recover_end_io()
1936 raid6_datap_recov(rbio->real_stripes, in __raid_recover_end_io()
1939 raid6_2data_recov(rbio->real_stripes, in __raid_recover_end_io()
1950 copy_page(pointers[faila], pointers[rbio->nr_data]); in __raid_recover_end_io()
1954 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++) in __raid_recover_end_io()
1956 pointers[rbio->nr_data - 1] = p; in __raid_recover_end_io()
1959 run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE); in __raid_recover_end_io()
1967 if (rbio->operation == BTRFS_RBIO_WRITE) { in __raid_recover_end_io()
1968 for (i = 0; i < rbio->stripe_npages; i++) { in __raid_recover_end_io()
1970 page = rbio_stripe_page(rbio, faila, i); in __raid_recover_end_io()
1974 page = rbio_stripe_page(rbio, failb, i); in __raid_recover_end_io()
1979 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid_recover_end_io()
1984 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1985 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && in __raid_recover_end_io()
1987 page = page_in_rbio(rbio, stripe, pagenr, 0); in __raid_recover_end_io()
1989 page = rbio_stripe_page(rbio, stripe, pagenr); in __raid_recover_end_io()
2005 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
2006 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { in __raid_recover_end_io()
2022 if (err == BLK_STS_OK && rbio->failb < 0) in __raid_recover_end_io()
2023 cache_rbio_pages(rbio); in __raid_recover_end_io()
2025 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in __raid_recover_end_io()
2027 rbio_orig_end_io(rbio, err); in __raid_recover_end_io()
2029 rbio->faila = -1; in __raid_recover_end_io()
2030 rbio->failb = -1; in __raid_recover_end_io()
2032 if (rbio->operation == BTRFS_RBIO_WRITE) in __raid_recover_end_io()
2033 finish_rmw(rbio); in __raid_recover_end_io()
2034 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) in __raid_recover_end_io()
2035 finish_parity_scrub(rbio, 0); in __raid_recover_end_io()
2039 rbio_orig_end_io(rbio, err); in __raid_recover_end_io()
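
The lines between 1896 and 1939 keep only the rbio references, which obscures the RAID6 double-failure decision tree. As a sketch: if Q itself is one of the failures, the rebuild degenerates to the RAID5 XOR path (or is abandoned when the other failure is P); if P is the other failure, raid6_datap_recov() rebuilds the data from Q; otherwise two data stripes are rebuilt with raid6_2data_recov(). Label names (cleanup, pstripe) are reconstructed:

		if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
			if (rbio->bbio->raid_map[faila] == RAID5_P_STRIPE) {
				/* both parity stripes failed: nothing usable */
				err = BLK_STS_IOERR;
				goto cleanup;
			}
			/* one data stripe + Q failed: plain RAID5 rebuild via P */
			goto pstripe;
		}

		if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
			/* one data stripe + P failed: recover the data from Q */
			raid6_datap_recov(rbio->real_stripes, PAGE_SIZE,
					  faila, pointers);
		} else {
			/* two data stripes failed */
			raid6_2data_recov(rbio->real_stripes, PAGE_SIZE,
					  faila, failb, pointers);
		}
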
2049 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_recover_end_io() local
2056 fail_bio_stripe(rbio, bio); in raid_recover_end_io()
2061 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_recover_end_io()
2064 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in raid_recover_end_io()
2065 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid_recover_end_io()
2067 __raid_recover_end_io(rbio); in raid_recover_end_io()
2078 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) in __raid56_parity_recover() argument
2089 ret = alloc_rbio_pages(rbio); in __raid56_parity_recover()
2093 atomic_set(&rbio->error, 0); in __raid56_parity_recover()
2103 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid56_parity_recover()
2104 if (rbio->faila == stripe || rbio->failb == stripe) { in __raid56_parity_recover()
2105 atomic_inc(&rbio->error); in __raid56_parity_recover()
2109 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { in __raid56_parity_recover()
2110 ret = rbio_add_io_page(rbio, &bio_list, in __raid56_parity_recover()
2111 rbio_stripe_page(rbio, stripe, pagenr), in __raid56_parity_recover()
2112 stripe, pagenr, rbio->stripe_len); in __raid56_parity_recover()
2125 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) { in __raid56_parity_recover()
2126 __raid_recover_end_io(rbio); in __raid56_parity_recover()
2137 atomic_set(&rbio->stripes_pending, bios_to_read); in __raid56_parity_recover()
2139 bio->bi_private = rbio; in __raid56_parity_recover()
2143 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); in __raid56_parity_recover()
2151 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid56_parity_recover()
2152 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) in __raid56_parity_recover()
2153 rbio_orig_end_io(rbio, BLK_STS_IOERR); in __raid56_parity_recover()
2171 struct btrfs_raid_bio *rbio; in raid56_parity_recover() local
2179 rbio = alloc_rbio(fs_info, bbio, stripe_len); in raid56_parity_recover()
2180 if (IS_ERR(rbio)) { in raid56_parity_recover()
2183 return PTR_ERR(rbio); in raid56_parity_recover()
2186 rbio->operation = BTRFS_RBIO_READ_REBUILD; in raid56_parity_recover()
2187 rbio_add_bio(rbio, bio); in raid56_parity_recover()
2189 rbio->faila = find_logical_bio_stripe(rbio, bio); in raid56_parity_recover()
2190 if (rbio->faila == -1) { in raid56_parity_recover()
2197 kfree(rbio); in raid56_parity_recover()
2203 rbio->generic_bio_cnt = 1; in raid56_parity_recover()
2219 rbio->failb = rbio->real_stripes - (mirror_num - 1); in raid56_parity_recover()
2220 ASSERT(rbio->failb > 0); in raid56_parity_recover()
2221 if (rbio->failb <= rbio->faila) in raid56_parity_recover()
2222 rbio->failb--; in raid56_parity_recover()
2225 ret = lock_stripe_add(rbio); in raid56_parity_recover()
2235 __raid56_parity_recover(rbio); in raid56_parity_recover()
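
The failb assignment at source line 2219 only makes sense with its guard, which the filter drops because it does not mention rbio: retries with mirror_num > 2 deliberately fail one extra stripe per attempt so that every parity combination gets tried. A sketch of the surrounding block:

	/*
	 * mirror 1 is the regular read, mirror 2 rebuilds from the other
	 * stripes; mirror_num > 2 additionally marks one stripe as failed
	 * per retry (3 = fail P and rebuild from Q, > 3 = fail a data stripe).
	 */
	if (mirror_num > 2) {
		rbio->failb = rbio->real_stripes - (mirror_num - 1);
		ASSERT(rbio->failb > 0);
		if (rbio->failb <= rbio->faila)
			rbio->failb--;
	}
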
2247 struct btrfs_raid_bio *rbio; in rmw_work() local
2249 rbio = container_of(work, struct btrfs_raid_bio, work); in rmw_work()
2250 raid56_rmw_stripe(rbio); in rmw_work()
2255 struct btrfs_raid_bio *rbio; in read_rebuild_work() local
2257 rbio = container_of(work, struct btrfs_raid_bio, work); in read_rebuild_work()
2258 __raid56_parity_recover(rbio); in read_rebuild_work()
2277 struct btrfs_raid_bio *rbio; in raid56_parity_alloc_scrub_rbio() local
2280 rbio = alloc_rbio(fs_info, bbio, stripe_len); in raid56_parity_alloc_scrub_rbio()
2281 if (IS_ERR(rbio)) in raid56_parity_alloc_scrub_rbio()
2283 bio_list_add(&rbio->bio_list, bio); in raid56_parity_alloc_scrub_rbio()
2289 rbio->operation = BTRFS_RBIO_PARITY_SCRUB; in raid56_parity_alloc_scrub_rbio()
2296 for (i = rbio->nr_data; i < rbio->real_stripes; i++) { in raid56_parity_alloc_scrub_rbio()
2298 rbio->scrubp = i; in raid56_parity_alloc_scrub_rbio()
2302 ASSERT(i < rbio->real_stripes); in raid56_parity_alloc_scrub_rbio()
2306 ASSERT(rbio->stripe_npages == stripe_nsectors); in raid56_parity_alloc_scrub_rbio()
2307 bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors); in raid56_parity_alloc_scrub_rbio()
2313 rbio->generic_bio_cnt = 1; in raid56_parity_alloc_scrub_rbio()
2315 return rbio; in raid56_parity_alloc_scrub_rbio()
2319 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page, in raid56_add_scrub_pages() argument
2325 ASSERT(logical >= rbio->bbio->raid_map[0]); in raid56_add_scrub_pages()
2326 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] + in raid56_add_scrub_pages()
2327 rbio->stripe_len * rbio->nr_data); in raid56_add_scrub_pages()
2328 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]); in raid56_add_scrub_pages()
2330 rbio->bio_pages[index] = page; in raid56_add_scrub_pages()
2337 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_essential_pages() argument
2344 for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) { in alloc_rbio_essential_pages()
2345 for (i = 0; i < rbio->real_stripes; i++) { in alloc_rbio_essential_pages()
2346 index = i * rbio->stripe_npages + bit; in alloc_rbio_essential_pages()
2347 if (rbio->stripe_pages[index]) in alloc_rbio_essential_pages()
2353 rbio->stripe_pages[index] = page; in alloc_rbio_essential_pages()
2359 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, in finish_parity_scrub() argument
2362 struct btrfs_bio *bbio = rbio->bbio; in finish_parity_scrub()
2363 void **pointers = rbio->finish_pointers; in finish_parity_scrub()
2364 unsigned long *pbitmap = rbio->finish_pbitmap; in finish_parity_scrub()
2365 int nr_data = rbio->nr_data; in finish_parity_scrub()
2378 if (rbio->real_stripes - rbio->nr_data == 1) in finish_parity_scrub()
2380 else if (rbio->real_stripes - rbio->nr_data == 2) in finish_parity_scrub()
2385 if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) { in finish_parity_scrub()
2387 bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages); in finish_parity_scrub()
2395 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_parity_scrub()
2413 pointers[rbio->real_stripes - 1] = kmap(q_page); in finish_parity_scrub()
2416 atomic_set(&rbio->error, 0); in finish_parity_scrub()
2421 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { in finish_parity_scrub()
2426 p = page_in_rbio(rbio, stripe, pagenr, 0); in finish_parity_scrub()
2432 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, in finish_parity_scrub()
2441 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr); in finish_parity_scrub()
2443 if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE)) in finish_parity_scrub()
2444 copy_page(parity, pointers[rbio->scrubp]); in finish_parity_scrub()
2447 bitmap_clear(rbio->dbitmap, pagenr, 1); in finish_parity_scrub()
2451 kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); in finish_parity_scrub()
2467 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { in finish_parity_scrub()
2470 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr); in finish_parity_scrub()
2471 ret = rbio_add_io_page(rbio, &bio_list, in finish_parity_scrub()
2472 page, rbio->scrubp, pagenr, rbio->stripe_len); in finish_parity_scrub()
2480 for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) { in finish_parity_scrub()
2483 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr); in finish_parity_scrub()
2484 ret = rbio_add_io_page(rbio, &bio_list, page, in finish_parity_scrub()
2485 bbio->tgtdev_map[rbio->scrubp], in finish_parity_scrub()
2486 pagenr, rbio->stripe_len); in finish_parity_scrub()
2495 rbio_orig_end_io(rbio, BLK_STS_OK); in finish_parity_scrub()
2499 atomic_set(&rbio->stripes_pending, nr_data); in finish_parity_scrub()
2502 bio->bi_private = rbio; in finish_parity_scrub()
2511 rbio_orig_end_io(rbio, BLK_STS_IOERR); in finish_parity_scrub()
2517 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe) in is_data_stripe() argument
2519 if (stripe >= 0 && stripe < rbio->nr_data) in is_data_stripe()
2531 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio) in validate_rbio_for_parity_scrub() argument
2533 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in validate_rbio_for_parity_scrub()
2536 if (rbio->faila >= 0 || rbio->failb >= 0) { in validate_rbio_for_parity_scrub()
2539 if (is_data_stripe(rbio, rbio->faila)) in validate_rbio_for_parity_scrub()
2541 else if (is_parity_stripe(rbio->faila)) in validate_rbio_for_parity_scrub()
2542 failp = rbio->faila; in validate_rbio_for_parity_scrub()
2544 if (is_data_stripe(rbio, rbio->failb)) in validate_rbio_for_parity_scrub()
2546 else if (is_parity_stripe(rbio->failb)) in validate_rbio_for_parity_scrub()
2547 failp = rbio->failb; in validate_rbio_for_parity_scrub()
2554 if (dfail > rbio->bbio->max_errors - 1) in validate_rbio_for_parity_scrub()
2562 finish_parity_scrub(rbio, 0); in validate_rbio_for_parity_scrub()
2572 if (failp != rbio->scrubp) in validate_rbio_for_parity_scrub()
2575 __raid_recover_end_io(rbio); in validate_rbio_for_parity_scrub()
2577 finish_parity_scrub(rbio, 1); in validate_rbio_for_parity_scrub()
2582 rbio_orig_end_io(rbio, BLK_STS_IOERR); in validate_rbio_for_parity_scrub()
2595 struct btrfs_raid_bio *rbio = bio->bi_private; in raid56_parity_scrub_end_io() local
2598 fail_bio_stripe(rbio, bio); in raid56_parity_scrub_end_io()
2604 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid56_parity_scrub_end_io()
2612 validate_rbio_for_parity_scrub(rbio); in raid56_parity_scrub_end_io()
2615 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio) in raid56_parity_scrub_stripe() argument
2626 ret = alloc_rbio_essential_pages(rbio); in raid56_parity_scrub_stripe()
2630 atomic_set(&rbio->error, 0); in raid56_parity_scrub_stripe()
2635 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in raid56_parity_scrub_stripe()
2636 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { in raid56_parity_scrub_stripe()
2644 page = page_in_rbio(rbio, stripe, pagenr, 1); in raid56_parity_scrub_stripe()
2648 page = rbio_stripe_page(rbio, stripe, pagenr); in raid56_parity_scrub_stripe()
2656 ret = rbio_add_io_page(rbio, &bio_list, page, in raid56_parity_scrub_stripe()
2657 stripe, pagenr, rbio->stripe_len); in raid56_parity_scrub_stripe()
2678 atomic_set(&rbio->stripes_pending, bios_to_read); in raid56_parity_scrub_stripe()
2680 bio->bi_private = rbio; in raid56_parity_scrub_stripe()
2684 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); in raid56_parity_scrub_stripe()
2692 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid56_parity_scrub_stripe()
2700 validate_rbio_for_parity_scrub(rbio); in raid56_parity_scrub_stripe()
2705 struct btrfs_raid_bio *rbio; in scrub_parity_work() local
2707 rbio = container_of(work, struct btrfs_raid_bio, work); in scrub_parity_work()
2708 raid56_parity_scrub_stripe(rbio); in scrub_parity_work()
2711 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio) in raid56_parity_submit_scrub_rbio() argument
2713 if (!lock_stripe_add(rbio)) in raid56_parity_submit_scrub_rbio()
2714 start_async_work(rbio, scrub_parity_work); in raid56_parity_submit_scrub_rbio()
2723 struct btrfs_raid_bio *rbio; in raid56_alloc_missing_rbio() local
2725 rbio = alloc_rbio(fs_info, bbio, length); in raid56_alloc_missing_rbio()
2726 if (IS_ERR(rbio)) in raid56_alloc_missing_rbio()
2729 rbio->operation = BTRFS_RBIO_REBUILD_MISSING; in raid56_alloc_missing_rbio()
2730 bio_list_add(&rbio->bio_list, bio); in raid56_alloc_missing_rbio()
2737 rbio->faila = find_logical_bio_stripe(rbio, bio); in raid56_alloc_missing_rbio()
2738 if (rbio->faila == -1) { in raid56_alloc_missing_rbio()
2740 kfree(rbio); in raid56_alloc_missing_rbio()
2748 rbio->generic_bio_cnt = 1; in raid56_alloc_missing_rbio()
2750 return rbio; in raid56_alloc_missing_rbio()
2753 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio) in raid56_submit_missing_rbio() argument
2755 if (!lock_stripe_add(rbio)) in raid56_submit_missing_rbio()
2756 start_async_work(rbio, read_rebuild_work); in raid56_submit_missing_rbio()