References to the symbol devs in drivers/md/raid10.c (source line number, matching line, and enclosing function):
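Nearly all of these hits dereference r10_bio->devs[...], the flexible array member at the end of struct r10bio: one slot per copy of the data, recording which device the copy lives on, where on that device, and the bio used to reach it. For context, a condensed sketch paraphrased from struct r10bio in drivers/md/raid10.h (the comments are my own summary, not the original ones):

	struct r10bio {
		atomic_t	remaining;	/* copies still in flight */
		sector_t	sector;		/* virtual sector in the array */
		int		sectors;
		unsigned long	state;
		struct mddev	*mddev;
		struct bio	*master_bio;	/* original bio to /dev/mdX */
		int		read_slot;	/* slot a READ was issued from */
		struct list_head retry_list;
		struct r10dev {
			struct bio	*bio;
			union {
				struct bio	*repl_bio; /* writes/resync: bio to the replacement */
				struct md_rdev	*rdev;	   /* reads: device the read targets */
			};
			sector_t	addr;	/* physical sector on that device */
			int		devnum;	/* index into conf->mirrors[] */
		} devs[];	/* conf->copies entries, sized at allocation */
	};

Note that the two hits in raid10_takeover_raid0() (marked "argument" below) are an unrelated function parameter that happens to share the name.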
94 int size = offsetof(struct r10bio, devs[conf->copies]); in r10bio_pool_alloc()
151 r10_bio->devs[j].bio = bio; in r10buf_pool_alloc()
157 r10_bio->devs[j].repl_bio = bio; in r10buf_pool_alloc()
164 struct bio *rbio = r10_bio->devs[j].repl_bio; in r10buf_pool_alloc()
171 bio = r10_bio->devs[j].bio; in r10buf_pool_alloc()
199 if (r10_bio->devs[j].bio) in r10buf_pool_alloc()
200 bio_put(r10_bio->devs[j].bio); in r10buf_pool_alloc()
201 if (r10_bio->devs[j].repl_bio) in r10buf_pool_alloc()
202 bio_put(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
218 struct bio *bio = r10bio->devs[j].bio; in r10buf_pool_free()
226 bio = r10bio->devs[j].repl_bio; in r10buf_pool_free()
242 struct bio **bio = & r10_bio->devs[i].bio; in put_all_bios()
246 bio = &r10_bio->devs[i].repl_bio; in put_all_bios()
317 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
318 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
331 if (r10_bio->devs[slot].bio == bio) in find_bio_disk()
333 if (r10_bio->devs[slot].repl_bio == bio) { in find_bio_disk()
346 return r10_bio->devs[slot].devnum; in find_bio_disk()
358 rdev = r10_bio->devs[slot].rdev; in raid10_end_read_request()
479 r10_bio->devs[slot].bio = NULL; in raid10_end_write_request()
511 r10_bio->devs[slot].addr, in raid10_end_write_request()
516 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; in raid10_end_write_request()
518 r10_bio->devs[slot].bio = IO_MADE_GOOD; in raid10_end_write_request()
594 r10bio->devs[slot].devnum = d; in __raid10_find_phys()
595 r10bio->devs[slot].addr = s; in __raid10_find_phys()
612 r10bio->devs[slot].devnum = d; in __raid10_find_phys()
613 r10bio->devs[slot].addr = s; in __raid10_find_phys()
749 if (r10_bio->devs[slot].bio == IO_BLOCKED) in read_balance()
751 disk = r10_bio->devs[slot].devnum; in read_balance()
754 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
760 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
763 dev_sector = r10_bio->devs[slot].addr; in read_balance()
819 new_distance = r10_bio->devs[slot].addr; in read_balance()
821 new_distance = abs(r10_bio->devs[slot].addr - in read_balance()
1131 if (slot >= 0 && r10_bio->devs[slot].rdev) { in raid10_read_request()
1147 disk = r10_bio->devs[slot].devnum; in raid10_read_request()
1154 err_rdev = r10_bio->devs[slot].rdev; in raid10_read_request()
1190 r10_bio->devs[slot].bio = read_bio; in raid10_read_request()
1191 r10_bio->devs[slot].rdev = rdev; in raid10_read_request()
1193 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + in raid10_read_request()
1223 int devnum = r10_bio->devs[n_copy].devnum; in raid10_write_one_disk()
1238 r10_bio->devs[n_copy].repl_bio = mbio; in raid10_write_one_disk()
1240 r10_bio->devs[n_copy].bio = mbio; in raid10_write_one_disk()
1242 mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr + in raid10_write_one_disk()
1348 int d = r10_bio->devs[i].devnum; in raid10_write_request()
1369 r10_bio->devs[i].bio = NULL; in raid10_write_request()
1370 r10_bio->devs[i].repl_bio = NULL; in raid10_write_request()
1378 sector_t dev_sector = r10_bio->devs[i].addr; in raid10_write_request()
1418 r10_bio->devs[i].bio = bio; in raid10_write_request()
1422 r10_bio->devs[i].repl_bio = bio; in raid10_write_request()
1434 if (r10_bio->devs[j].bio) { in raid10_write_request()
1435 d = r10_bio->devs[j].devnum; in raid10_write_request()
1438 if (r10_bio->devs[j].repl_bio) { in raid10_write_request()
1440 d = r10_bio->devs[j].devnum; in raid10_write_request()
1475 if (r10_bio->devs[i].bio) in raid10_write_request()
1477 if (r10_bio->devs[i].repl_bio) in raid10_write_request()
1497 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies); in __make_request()
1969 r10_bio->devs[slot].addr, in end_sync_write()
2007 if (!r10_bio->devs[i].bio->bi_status) in sync_request_write()
2014 fbio = r10_bio->devs[i].bio; in sync_request_write()
2026 tbio = r10_bio->devs[i].bio; in sync_request_write()
2034 d = r10_bio->devs[i].devnum; in sync_request_write()
2036 if (!r10_bio->devs[i].bio->bi_status) { in sync_request_write()
2075 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; in sync_request_write()
2098 tbio = r10_bio->devs[i].repl_bio; in sync_request_write()
2101 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write in sync_request_write()
2102 && r10_bio->devs[i].bio != fbio) in sync_request_write()
2104 d = r10_bio->devs[i].devnum; in sync_request_write()
2139 struct bio *bio = r10_bio->devs[0].bio; in fix_recovery_read_error()
2143 int dr = r10_bio->devs[0].devnum; in fix_recovery_read_error()
2144 int dw = r10_bio->devs[1].devnum; in fix_recovery_read_error()
2157 addr = r10_bio->devs[0].addr + sect, in fix_recovery_read_error()
2165 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2189 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2227 d = r10_bio->devs[1].devnum; in recovery_request_write()
2228 wbio = r10_bio->devs[1].bio; in recovery_request_write()
2229 wbio2 = r10_bio->devs[1].repl_bio; in recovery_request_write()
2323 int d = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2347 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; in fix_read_error()
2365 d = r10_bio->devs[sl].devnum; in fix_read_error()
2370 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, in fix_read_error()
2375 r10_bio->devs[sl].addr + in fix_read_error()
2396 int dn = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2401 r10_bio->devs[r10_bio->read_slot].addr in fix_read_error()
2405 r10_bio->devs[r10_bio->read_slot].bio in fix_read_error()
2420 d = r10_bio->devs[sl].devnum; in fix_read_error()
2430 r10_bio->devs[sl].addr + in fix_read_error()
2456 d = r10_bio->devs[sl].devnum; in fix_read_error()
2466 r10_bio->devs[sl].addr + in fix_read_error()
2507 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2543 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); in narrow_write_error()
2568 struct md_rdev *rdev = r10_bio->devs[slot].rdev; in handle_read_error()
2578 bio = r10_bio->devs[slot].bio; in handle_read_error()
2580 r10_bio->devs[slot].bio = NULL; in handle_read_error()
2583 r10_bio->devs[slot].bio = IO_BLOCKED; in handle_read_error()
2611 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2613 if (r10_bio->devs[m].bio == NULL || in handle_write_completed()
2614 r10_bio->devs[m].bio->bi_end_io == NULL) in handle_write_completed()
2616 if (!r10_bio->devs[m].bio->bi_status) { in handle_write_completed()
2619 r10_bio->devs[m].addr, in handle_write_completed()
2624 r10_bio->devs[m].addr, in handle_write_completed()
2629 if (r10_bio->devs[m].repl_bio == NULL || in handle_write_completed()
2630 r10_bio->devs[m].repl_bio->bi_end_io == NULL) in handle_write_completed()
2633 if (!r10_bio->devs[m].repl_bio->bi_status) { in handle_write_completed()
2636 r10_bio->devs[m].addr, in handle_write_completed()
2641 r10_bio->devs[m].addr, in handle_write_completed()
2650 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2651 struct bio *bio = r10_bio->devs[m].bio; in handle_write_completed()
2656 r10_bio->devs[m].addr, in handle_write_completed()
2668 bio = r10_bio->devs[m].repl_bio; in handle_write_completed()
2673 r10_bio->devs[m].addr, in handle_write_completed()
2805 bio = r10bio->devs[i].bio; in raid10_alloc_init_r10buf()
2809 bio = r10bio->devs[i].repl_bio; in raid10_alloc_init_r10buf()
3127 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3138 sector = r10_bio->devs[j].addr; in raid10_sync_request()
3152 bio = r10_bio->devs[0].bio; in raid10_sync_request()
3159 from_addr = r10_bio->devs[j].addr; in raid10_sync_request()
3167 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3170 to_addr = r10_bio->devs[k].addr; in raid10_sync_request()
3171 r10_bio->devs[0].devnum = d; in raid10_sync_request()
3172 r10_bio->devs[0].addr = from_addr; in raid10_sync_request()
3173 r10_bio->devs[1].devnum = i; in raid10_sync_request()
3174 r10_bio->devs[1].addr = to_addr; in raid10_sync_request()
3177 bio = r10_bio->devs[1].bio; in raid10_sync_request()
3187 r10_bio->devs[1].bio->bi_end_io = NULL; in raid10_sync_request()
3190 bio = r10_bio->devs[1].repl_bio; in raid10_sync_request()
3219 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3225 r10_bio->devs[k].addr, in raid10_sync_request()
3231 r10_bio->devs[k].addr, in raid10_sync_request()
3255 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) { in raid10_sync_request()
3262 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3269 r10_bio->devs[0].bio->bi_opf in raid10_sync_request()
3322 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3327 if (r10_bio->devs[i].repl_bio) in raid10_sync_request()
3328 r10_bio->devs[i].repl_bio->bi_end_io = NULL; in raid10_sync_request()
3330 bio = r10_bio->devs[i].bio; in raid10_sync_request()
3338 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3371 bio = r10_bio->devs[i].repl_bio; in raid10_sync_request()
3374 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3389 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3390 if (r10_bio->devs[i].bio->bi_end_io) in raid10_sync_request()
3393 if (r10_bio->devs[i].repl_bio && in raid10_sync_request()
3394 r10_bio->devs[i].repl_bio->bi_end_io) in raid10_sync_request()
3989 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) in raid10_takeover_raid0() argument
3999 sector_div(size, devs); in raid10_takeover_raid0()
4542 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr in reshape_request()
4552 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; in reshape_request()
4589 int d = r10_bio->devs[s/2].devnum; in reshape_request()
4593 b = r10_bio->devs[s/2].repl_bio; in reshape_request()
4596 b = r10_bio->devs[s/2].bio; in reshape_request()
4602 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + in reshape_request()
4613 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in reshape_request()
4680 int d = r10_bio->devs[s/2].devnum; in reshape_request_write()
4685 b = r10_bio->devs[s/2].repl_bio; in reshape_request_write()
4688 b = r10_bio->devs[s/2].bio; in reshape_request_write()
4746 r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO); in handle_reshape_read_error()
4753 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in handle_reshape_read_error()
4768 int d = r10b->devs[slot].devnum; in handle_reshape_read_error()
4776 addr = r10b->devs[slot].addr + idx * PAGE_SIZE; in handle_reshape_read_error()
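The flexible array is sized in several of the hits above: r10bio_pool_alloc() computes the allocation size as offsetof(struct r10bio, devs[conf->copies]), __make_request() zeroes it with sizeof(r10_bio->devs[0]) * conf->copies, and handle_reshape_read_error() uses the struct_size() helper. A minimal sketch of the modern struct_size() spelling (hypothetical helper name; kzalloc stands in for the mempool allocation the driver actually uses):

	#include <linux/overflow.h>	/* struct_size() */
	#include <linux/slab.h>

	static struct r10bio *alloc_r10bio(int copies, gfp_t gfp)
	{
		struct r10bio *r10b;

		/* struct_size() is sizeof(*r10b) plus 'copies' trailing r10dev
		 * slots, with overflow checking; it matches the older
		 * offsetof(struct r10bio, devs[copies]) spelling up to any
		 * padding at the end of the fixed part of the struct.
		 */
		r10b = kzalloc(struct_size(r10b, devs, copies), gfp);
		return r10b;	/* NULL on allocation failure */
	}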