Lines matching references to bbio (a hedged sketch of the bbio fields these references rely on follows the listing)
63 struct btrfs_bio *bbio; member
282 u64 num = rbio->bbio->raid_map[0]; in rbio_bucket()
573 if (last->bbio->raid_map[0] != in rbio_can_merge()
574 cur->bbio->raid_map[0]) in rbio_can_merge()
687 if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0]) in lock_stripe_add()
852 btrfs_put_bbio(rbio->bbio); in __free_raid_bio()
926 0 : rbio->bbio->max_errors; in raid_write_end_io()
981 struct btrfs_bio *bbio, in alloc_rbio() argument
986 int real_stripes = bbio->num_stripes - bbio->num_tgtdevs; in alloc_rbio()
1007 rbio->bbio = bbio; in alloc_rbio()
1035 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5) in alloc_rbio()
1037 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) in alloc_rbio()
1100 stripe = &rbio->bbio->stripes[stripe_nr]; in rbio_add_io_page()
1176 stripe_offset = start - rbio->bbio->raid_map[0]; in index_rbio_pages()
1200 struct btrfs_bio *bbio = rbio->bbio; in finish_rmw() local
1315 if (likely(!bbio->num_tgtdevs)) in finish_rmw()
1319 if (!bbio->tgtdev_map[stripe]) in finish_rmw()
1338 rbio->bbio->tgtdev_map[stripe], in finish_rmw()
1379 for (i = 0; i < rbio->bbio->num_stripes; i++) { in find_bio_stripe()
1380 stripe = &rbio->bbio->stripes[i]; in find_bio_stripe()
1403 u64 stripe_start = rbio->bbio->raid_map[i]; in find_logical_bio_stripe()
1494 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in raid_rmw_end_io()
1758 const u64 full_stripe_start = rbio->bbio->raid_map[0]; in rbio_add_bio()
1784 struct btrfs_bio *bbio, u64 stripe_len) in raid56_parity_write() argument
1791 rbio = alloc_rbio(fs_info, bbio, stripe_len); in raid56_parity_write()
1793 btrfs_put_bbio(bbio); in raid56_parity_write()
1890 if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) { in __raid_recover_end_io()
1922 if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) { in __raid_recover_end_io()
1923 if (rbio->bbio->raid_map[faila] == in __raid_recover_end_io()
1935 if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) { in __raid_recover_end_io()
2064 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in raid_recover_end_io()
2125 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) { in __raid56_parity_recover()
2168 struct btrfs_bio *bbio, u64 stripe_len, in raid56_parity_recover() argument
2175 ASSERT(bbio->mirror_num == mirror_num); in raid56_parity_recover()
2179 rbio = alloc_rbio(fs_info, bbio, stripe_len); in raid56_parity_recover()
2182 btrfs_put_bbio(bbio); in raid56_parity_recover()
2194 (u64)bio->bi_iter.bi_size, bbio->map_type); in raid56_parity_recover()
2196 btrfs_put_bbio(bbio); in raid56_parity_recover()
2205 btrfs_get_bbio(bbio); in raid56_parity_recover()
2273 struct btrfs_bio *bbio, u64 stripe_len, in raid56_parity_alloc_scrub_rbio() argument
2280 rbio = alloc_rbio(fs_info, bbio, stripe_len); in raid56_parity_alloc_scrub_rbio()
2297 if (bbio->stripes[i].dev == scrub_dev) { in raid56_parity_alloc_scrub_rbio()
2325 ASSERT(logical >= rbio->bbio->raid_map[0]); in raid56_add_scrub_pages()
2326 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] + in raid56_add_scrub_pages()
2328 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]); in raid56_add_scrub_pages()
2362 struct btrfs_bio *bbio = rbio->bbio; in finish_parity_scrub() local
2385 if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) { in finish_parity_scrub()
2485 bbio->tgtdev_map[rbio->scrubp], in finish_parity_scrub()
2533 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in validate_rbio_for_parity_scrub()
2554 if (dfail > rbio->bbio->max_errors - 1) in validate_rbio_for_parity_scrub()
2721 struct btrfs_bio *bbio, u64 length) in raid56_alloc_missing_rbio() argument
2725 rbio = alloc_rbio(fs_info, bbio, length); in raid56_alloc_missing_rbio()
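
Taken together, the references above touch only a handful of btrfs_bio fields: raid_map, num_stripes, num_tgtdevs, tgtdev_map, max_errors, map_type, mirror_num and the per-stripe array. The C sketch below is reconstructed from those references alone, as a reading aid; it is not the kernel's actual definition (in the kernels this listing comes from, the real struct btrfs_bio lives in fs/btrfs/volumes.h and carries additional members), and the sketch_* names are placeholders.

    #include <stdint.h>

    struct sketch_btrfs_device;   /* opaque stand-in for the kernel's struct btrfs_device */

    struct sketch_btrfs_bio_stripe {
            struct sketch_btrfs_device *dev;   /* device holding this stripe */
            uint64_t physical;                 /* physical byte offset on that device */
    };

    struct sketch_btrfs_bio {
            uint64_t map_type;     /* block group flags, tested for RAID5/RAID6 in alloc_rbio() */
            int mirror_num;        /* asserted against the caller's value in raid56_parity_recover() */
            int max_errors;        /* failed stripes the profile tolerates before giving up */
            int num_stripes;       /* all stripes, including any dev-replace targets */
            int num_tgtdevs;       /* number of dev-replace target stripes */
            int *tgtdev_map;       /* per-source-stripe target index; 0 means no target (finish_rmw) */
            uint64_t *raid_map;    /* logical start per stripe, with RAID5_P_STRIPE/RAID6_Q_STRIPE
                                      sentinels for parity; raid_map[0] is the full-stripe start */
            struct sketch_btrfs_bio_stripe stripes[];   /* flexible array, one entry per stripe */
    };

    /*
     * rbio_bucket(), rbio_can_merge() and lock_stripe_add() in the listing all
     * key off raid_map[0], i.e. the logical start of the full stripe.
     */
    static inline uint64_t sketch_full_stripe_start(const struct sketch_btrfs_bio *bbio)
    {
            return bbio->raid_map[0];
    }

The end-io and validation paths in the listing (raid_write_end_io, raid_rmw_end_io, raid_recover_end_io, validate_rbio_for_parity_scrub) compare the rbio's accumulated error count against max_errors, which is why that field shows up so often here.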