Lines matching refs: r10_bio (drivers/md/raid10.c)
73 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
119 struct r10bio *r10_bio; in r10buf_pool_alloc() local
125 r10_bio = r10bio_pool_alloc(gfp_flags, conf); in r10buf_pool_alloc()
126 if (!r10_bio) in r10buf_pool_alloc()
151 r10_bio->devs[j].bio = bio; in r10buf_pool_alloc()
157 r10_bio->devs[j].repl_bio = bio; in r10buf_pool_alloc()
164 struct bio *rbio = r10_bio->devs[j].repl_bio; in r10buf_pool_alloc()
171 bio = r10_bio->devs[j].bio; in r10buf_pool_alloc()
182 rp->raid_bio = r10_bio; in r10buf_pool_alloc()
190 return r10_bio; in r10buf_pool_alloc()
199 if (r10_bio->devs[j].bio) in r10buf_pool_alloc()
200 bio_put(r10_bio->devs[j].bio); in r10buf_pool_alloc()
201 if (r10_bio->devs[j].repl_bio) in r10buf_pool_alloc()
202 bio_put(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
206 rbio_pool_free(r10_bio, conf); in r10buf_pool_alloc()
237 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio) in put_all_bios() argument
242 struct bio **bio = & r10_bio->devs[i].bio; in put_all_bios()
246 bio = &r10_bio->devs[i].repl_bio; in put_all_bios()
247 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio)) in put_all_bios()
253 static void free_r10bio(struct r10bio *r10_bio) in free_r10bio() argument
255 struct r10conf *conf = r10_bio->mddev->private; in free_r10bio()
257 put_all_bios(conf, r10_bio); in free_r10bio()
258 mempool_free(r10_bio, &conf->r10bio_pool); in free_r10bio()
261 static void put_buf(struct r10bio *r10_bio) in put_buf() argument
263 struct r10conf *conf = r10_bio->mddev->private; in put_buf()
265 mempool_free(r10_bio, &conf->r10buf_pool); in put_buf()
270 static void reschedule_retry(struct r10bio *r10_bio) in reschedule_retry() argument
273 struct mddev *mddev = r10_bio->mddev; in reschedule_retry()
277 list_add(&r10_bio->retry_list, &conf->retry_list); in reschedule_retry()
292 static void raid_end_bio_io(struct r10bio *r10_bio) in raid_end_bio_io() argument
294 struct bio *bio = r10_bio->master_bio; in raid_end_bio_io()
295 struct r10conf *conf = r10_bio->mddev->private; in raid_end_bio_io()
297 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) in raid_end_bio_io()
307 free_r10bio(r10_bio); in raid_end_bio_io()
313 static inline void update_head_pos(int slot, struct r10bio *r10_bio) in update_head_pos() argument
315 struct r10conf *conf = r10_bio->mddev->private; in update_head_pos()
317 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
318 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
324 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, in find_bio_disk() argument
331 if (r10_bio->devs[slot].bio == bio) in find_bio_disk()
333 if (r10_bio->devs[slot].repl_bio == bio) { in find_bio_disk()
340 update_head_pos(slot, r10_bio); in find_bio_disk()
346 return r10_bio->devs[slot].devnum; in find_bio_disk()
352 struct r10bio *r10_bio = bio->bi_private; in raid10_end_read_request() local
355 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_read_request()
357 slot = r10_bio->read_slot; in raid10_end_read_request()
358 rdev = r10_bio->devs[slot].rdev; in raid10_end_read_request()
362 update_head_pos(slot, r10_bio); in raid10_end_read_request()
374 set_bit(R10BIO_Uptodate, &r10_bio->state); in raid10_end_read_request()
381 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state), in raid10_end_read_request()
386 raid_end_bio_io(r10_bio); in raid10_end_read_request()
396 (unsigned long long)r10_bio->sector); in raid10_end_read_request()
397 set_bit(R10BIO_ReadError, &r10_bio->state); in raid10_end_read_request()
398 reschedule_retry(r10_bio); in raid10_end_read_request()
402 static void close_write(struct r10bio *r10_bio) in close_write() argument
405 md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, in close_write()
406 r10_bio->sectors, in close_write()
407 !test_bit(R10BIO_Degraded, &r10_bio->state), in close_write()
409 md_write_end(r10_bio->mddev); in close_write()
412 static void one_write_done(struct r10bio *r10_bio) in one_write_done() argument
414 if (atomic_dec_and_test(&r10_bio->remaining)) { in one_write_done()
415 if (test_bit(R10BIO_WriteError, &r10_bio->state)) in one_write_done()
416 reschedule_retry(r10_bio); in one_write_done()
418 close_write(r10_bio); in one_write_done()
419 if (test_bit(R10BIO_MadeGood, &r10_bio->state)) in one_write_done()
420 reschedule_retry(r10_bio); in one_write_done()
422 raid_end_bio_io(r10_bio); in one_write_done()
429 struct r10bio *r10_bio = bio->bi_private; in raid10_end_write_request() local
432 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_write_request()
440 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in raid10_end_write_request()
475 set_bit(R10BIO_WriteError, &r10_bio->state); in raid10_end_write_request()
478 set_bit(R10BIO_Degraded, &r10_bio->state); in raid10_end_write_request()
479 r10_bio->devs[slot].bio = NULL; in raid10_end_write_request()
507 set_bit(R10BIO_Uptodate, &r10_bio->state); in raid10_end_write_request()
511 r10_bio->devs[slot].addr, in raid10_end_write_request()
512 r10_bio->sectors, in raid10_end_write_request()
516 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; in raid10_end_write_request()
518 r10_bio->devs[slot].bio = IO_MADE_GOOD; in raid10_end_write_request()
520 set_bit(R10BIO_MadeGood, &r10_bio->state); in raid10_end_write_request()
529 one_write_done(r10_bio); in raid10_end_write_request()
704 struct r10bio *r10_bio, in read_balance() argument
707 const sector_t this_sector = r10_bio->sector; in read_balance()
709 int sectors = r10_bio->sectors; in read_balance()
719 raid10_find_phys(conf, r10_bio); in read_balance()
728 clear_bit(R10BIO_FailFast, &r10_bio->state); in read_balance()
749 if (r10_bio->devs[slot].bio == IO_BLOCKED) in read_balance()
751 disk = r10_bio->devs[slot].devnum; in read_balance()
754 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
760 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
763 dev_sector = r10_bio->devs[slot].addr; in read_balance()
809 set_bit(R10BIO_FailFast, &r10_bio->state); in read_balance()
819 new_distance = r10_bio->devs[slot].addr; in read_balance()
821 new_distance = abs(r10_bio->devs[slot].addr - in read_balance()
842 r10_bio->read_slot = slot; in read_balance()
1036 static sector_t choose_data_offset(struct r10bio *r10_bio, in choose_data_offset() argument
1040 test_bit(R10BIO_Previous, &r10_bio->state)) in choose_data_offset()
1118 struct r10bio *r10_bio) in raid10_read_request() argument
1127 int slot = r10_bio->read_slot; in raid10_read_request()
1131 if (slot >= 0 && r10_bio->devs[slot].rdev) { in raid10_read_request()
1147 disk = r10_bio->devs[slot].devnum; in raid10_read_request()
1154 err_rdev = r10_bio->devs[slot].rdev; in raid10_read_request()
1159 regular_request_wait(mddev, conf, bio, r10_bio->sectors); in raid10_read_request()
1160 rdev = read_balance(conf, r10_bio, &max_sectors); in raid10_read_request()
1165 (unsigned long long)r10_bio->sector); in raid10_read_request()
1167 raid_end_bio_io(r10_bio); in raid10_read_request()
1174 (unsigned long long)r10_bio->sector); in raid10_read_request()
1183 r10_bio->master_bio = bio; in raid10_read_request()
1184 r10_bio->sectors = max_sectors; in raid10_read_request()
1186 slot = r10_bio->read_slot; in raid10_read_request()
1190 r10_bio->devs[slot].bio = read_bio; in raid10_read_request()
1191 r10_bio->devs[slot].rdev = rdev; in raid10_read_request()
1193 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + in raid10_read_request()
1194 choose_data_offset(r10_bio, rdev); in raid10_read_request()
1199 test_bit(R10BIO_FailFast, &r10_bio->state)) in raid10_read_request()
1201 read_bio->bi_private = r10_bio; in raid10_read_request()
1206 r10_bio->sector); in raid10_read_request()
1211 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, in raid10_write_one_disk() argument
1223 int devnum = r10_bio->devs[n_copy].devnum; in raid10_write_one_disk()
1238 r10_bio->devs[n_copy].repl_bio = mbio; in raid10_write_one_disk()
1240 r10_bio->devs[n_copy].bio = mbio; in raid10_write_one_disk()
1242 mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr + in raid10_write_one_disk()
1243 choose_data_offset(r10_bio, rdev)); in raid10_write_one_disk()
1251 mbio->bi_private = r10_bio; in raid10_write_one_disk()
1256 r10_bio->sector); in raid10_write_one_disk()
1260 atomic_inc(&r10_bio->remaining); in raid10_write_one_disk()
1280 struct r10bio *r10_bio) in raid10_write_request() argument
1304 sectors = r10_bio->sectors; in raid10_write_request()
1340 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */ in raid10_write_request()
1341 raid10_find_phys(conf, r10_bio); in raid10_write_request()
1345 max_sectors = r10_bio->sectors; in raid10_write_request()
1348 int d = r10_bio->devs[i].devnum; in raid10_write_request()
1369 r10_bio->devs[i].bio = NULL; in raid10_write_request()
1370 r10_bio->devs[i].repl_bio = NULL; in raid10_write_request()
1373 set_bit(R10BIO_Degraded, &r10_bio->state); in raid10_write_request()
1378 sector_t dev_sector = r10_bio->devs[i].addr; in raid10_write_request()
1418 r10_bio->devs[i].bio = bio; in raid10_write_request()
1422 r10_bio->devs[i].repl_bio = bio; in raid10_write_request()
1434 if (r10_bio->devs[j].bio) { in raid10_write_request()
1435 d = r10_bio->devs[j].devnum; in raid10_write_request()
1438 if (r10_bio->devs[j].repl_bio) { in raid10_write_request()
1440 d = r10_bio->devs[j].devnum; in raid10_write_request()
1457 if (max_sectors < r10_bio->sectors) in raid10_write_request()
1458 r10_bio->sectors = max_sectors; in raid10_write_request()
1460 if (r10_bio->sectors < bio_sectors(bio)) { in raid10_write_request()
1461 struct bio *split = bio_split(bio, r10_bio->sectors, in raid10_write_request()
1468 r10_bio->master_bio = bio; in raid10_write_request()
1471 atomic_set(&r10_bio->remaining, 1); in raid10_write_request()
1472 md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); in raid10_write_request()
1475 if (r10_bio->devs[i].bio) in raid10_write_request()
1476 raid10_write_one_disk(mddev, r10_bio, bio, false, i); in raid10_write_request()
1477 if (r10_bio->devs[i].repl_bio) in raid10_write_request()
1478 raid10_write_one_disk(mddev, r10_bio, bio, true, i); in raid10_write_request()
1480 one_write_done(r10_bio); in raid10_write_request()
1486 struct r10bio *r10_bio; in __make_request() local
1488 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO); in __make_request()
1490 r10_bio->master_bio = bio; in __make_request()
1491 r10_bio->sectors = sectors; in __make_request()
1493 r10_bio->mddev = mddev; in __make_request()
1494 r10_bio->sector = bio->bi_iter.bi_sector; in __make_request()
1495 r10_bio->state = 0; in __make_request()
1496 r10_bio->read_slot = -1; in __make_request()
1497 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies); in __make_request()
1500 raid10_read_request(mddev, bio, r10_bio); in __make_request()
1502 raid10_write_request(mddev, bio, r10_bio); in __make_request()
1870 static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d) in __end_sync_read() argument
1872 struct r10conf *conf = r10_bio->mddev->private; in __end_sync_read()
1875 set_bit(R10BIO_Uptodate, &r10_bio->state); in __end_sync_read()
1880 atomic_add(r10_bio->sectors, in __end_sync_read()
1887 if (test_bit(R10BIO_IsRecover, &r10_bio->state) || in __end_sync_read()
1888 atomic_dec_and_test(&r10_bio->remaining)) { in __end_sync_read()
1892 reschedule_retry(r10_bio); in __end_sync_read()
1898 struct r10bio *r10_bio = get_resync_r10bio(bio); in end_sync_read() local
1899 struct r10conf *conf = r10_bio->mddev->private; in end_sync_read()
1900 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); in end_sync_read()
1902 __end_sync_read(r10_bio, bio, d); in end_sync_read()
1908 struct r10bio *r10_bio = bio->bi_private; in end_reshape_read() local
1910 __end_sync_read(r10_bio, bio, r10_bio->read_slot); in end_reshape_read()
1913 static void end_sync_request(struct r10bio *r10_bio) in end_sync_request() argument
1915 struct mddev *mddev = r10_bio->mddev; in end_sync_request()
1917 while (atomic_dec_and_test(&r10_bio->remaining)) { in end_sync_request()
1918 if (r10_bio->master_bio == NULL) { in end_sync_request()
1920 sector_t s = r10_bio->sectors; in end_sync_request()
1921 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || in end_sync_request()
1922 test_bit(R10BIO_WriteError, &r10_bio->state)) in end_sync_request()
1923 reschedule_retry(r10_bio); in end_sync_request()
1925 put_buf(r10_bio); in end_sync_request()
1929 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio; in end_sync_request()
1930 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || in end_sync_request()
1931 test_bit(R10BIO_WriteError, &r10_bio->state)) in end_sync_request()
1932 reschedule_retry(r10_bio); in end_sync_request()
1934 put_buf(r10_bio); in end_sync_request()
1935 r10_bio = r10_bio2; in end_sync_request()
1942 struct r10bio *r10_bio = get_resync_r10bio(bio); in end_sync_write() local
1943 struct mddev *mddev = r10_bio->mddev; in end_sync_write()
1952 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_sync_write()
1966 set_bit(R10BIO_WriteError, &r10_bio->state); in end_sync_write()
1969 r10_bio->devs[slot].addr, in end_sync_write()
1970 r10_bio->sectors, in end_sync_write()
1972 set_bit(R10BIO_MadeGood, &r10_bio->state); in end_sync_write()
1976 end_sync_request(r10_bio); in end_sync_write()
1995 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) in sync_request_write() argument
2003 atomic_set(&r10_bio->remaining, 1); in sync_request_write()
2007 if (!r10_bio->devs[i].bio->bi_status) in sync_request_write()
2014 fbio = r10_bio->devs[i].bio; in sync_request_write()
2015 fbio->bi_iter.bi_size = r10_bio->sectors << 9; in sync_request_write()
2019 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); in sync_request_write()
2026 tbio = r10_bio->devs[i].bio; in sync_request_write()
2034 d = r10_bio->devs[i].devnum; in sync_request_write()
2036 if (!r10_bio->devs[i].bio->bi_status) { in sync_request_write()
2041 int sectors = r10_bio->sectors; in sync_request_write()
2054 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); in sync_request_write()
2073 rp->raid_bio = r10_bio; in sync_request_write()
2075 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; in sync_request_write()
2082 atomic_inc(&r10_bio->remaining); in sync_request_write()
2098 tbio = r10_bio->devs[i].repl_bio; in sync_request_write()
2101 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write in sync_request_write()
2102 && r10_bio->devs[i].bio != fbio) in sync_request_write()
2104 d = r10_bio->devs[i].devnum; in sync_request_write()
2105 atomic_inc(&r10_bio->remaining); in sync_request_write()
2112 if (atomic_dec_and_test(&r10_bio->remaining)) { in sync_request_write()
2113 md_done_sync(mddev, r10_bio->sectors, 1); in sync_request_write()
2114 put_buf(r10_bio); in sync_request_write()
2128 static void fix_recovery_read_error(struct r10bio *r10_bio) in fix_recovery_read_error() argument
2137 struct mddev *mddev = r10_bio->mddev; in fix_recovery_read_error()
2139 struct bio *bio = r10_bio->devs[0].bio; in fix_recovery_read_error()
2141 int sectors = r10_bio->sectors; in fix_recovery_read_error()
2143 int dr = r10_bio->devs[0].devnum; in fix_recovery_read_error()
2144 int dw = r10_bio->devs[1].devnum; in fix_recovery_read_error()
2157 addr = r10_bio->devs[0].addr + sect, in fix_recovery_read_error()
2165 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2189 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2211 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) in recovery_request_write() argument
2217 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) { in recovery_request_write()
2218 fix_recovery_read_error(r10_bio); in recovery_request_write()
2219 end_sync_request(r10_bio); in recovery_request_write()
2227 d = r10_bio->devs[1].devnum; in recovery_request_write()
2228 wbio = r10_bio->devs[1].bio; in recovery_request_write()
2229 wbio2 = r10_bio->devs[1].repl_bio; in recovery_request_write()
2317 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) in fix_read_error() argument
2320 int sectors = r10_bio->sectors; in fix_read_error()
2323 int d = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2347 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; in fix_read_error()
2353 int sl = r10_bio->read_slot; in fix_read_error()
2365 d = r10_bio->devs[sl].devnum; in fix_read_error()
2370 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, in fix_read_error()
2375 r10_bio->devs[sl].addr + in fix_read_error()
2388 } while (!success && sl != r10_bio->read_slot); in fix_read_error()
2396 int dn = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2401 r10_bio->devs[r10_bio->read_slot].addr in fix_read_error()
2405 r10_bio->devs[r10_bio->read_slot].bio in fix_read_error()
2414 while (sl != r10_bio->read_slot) { in fix_read_error()
2420 d = r10_bio->devs[sl].devnum; in fix_read_error()
2430 r10_bio->devs[sl].addr + in fix_read_error()
2439 choose_data_offset(r10_bio, in fix_read_error()
2450 while (sl != r10_bio->read_slot) { in fix_read_error()
2456 d = r10_bio->devs[sl].devnum; in fix_read_error()
2466 r10_bio->devs[sl].addr + in fix_read_error()
2476 choose_data_offset(r10_bio, rdev)), in fix_read_error()
2487 choose_data_offset(r10_bio, rdev)), in fix_read_error()
2502 static int narrow_write_error(struct r10bio *r10_bio, int i) in narrow_write_error() argument
2504 struct bio *bio = r10_bio->master_bio; in narrow_write_error()
2505 struct mddev *mddev = r10_bio->mddev; in narrow_write_error()
2507 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2522 int sect_to_write = r10_bio->sectors; in narrow_write_error()
2530 sector = r10_bio->sector; in narrow_write_error()
2531 sectors = ((r10_bio->sector + block_sectors) in narrow_write_error()
2543 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); in narrow_write_error()
2545 choose_data_offset(r10_bio, rdev); in narrow_write_error()
2563 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) in handle_read_error() argument
2565 int slot = r10_bio->read_slot; in handle_read_error()
2568 struct md_rdev *rdev = r10_bio->devs[slot].rdev; in handle_read_error()
2578 bio = r10_bio->devs[slot].bio; in handle_read_error()
2580 r10_bio->devs[slot].bio = NULL; in handle_read_error()
2583 r10_bio->devs[slot].bio = IO_BLOCKED; in handle_read_error()
2586 fix_read_error(conf, mddev, r10_bio); in handle_read_error()
2593 r10_bio->state = 0; in handle_read_error()
2594 raid10_read_request(mddev, r10_bio->master_bio, r10_bio); in handle_read_error()
2597 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) in handle_write_completed() argument
2608 if (test_bit(R10BIO_IsSync, &r10_bio->state) || in handle_write_completed()
2609 test_bit(R10BIO_IsRecover, &r10_bio->state)) { in handle_write_completed()
2611 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2613 if (r10_bio->devs[m].bio == NULL || in handle_write_completed()
2614 r10_bio->devs[m].bio->bi_end_io == NULL) in handle_write_completed()
2616 if (!r10_bio->devs[m].bio->bi_status) { in handle_write_completed()
2619 r10_bio->devs[m].addr, in handle_write_completed()
2620 r10_bio->sectors, 0); in handle_write_completed()
2624 r10_bio->devs[m].addr, in handle_write_completed()
2625 r10_bio->sectors, 0)) in handle_write_completed()
2629 if (r10_bio->devs[m].repl_bio == NULL || in handle_write_completed()
2630 r10_bio->devs[m].repl_bio->bi_end_io == NULL) in handle_write_completed()
2633 if (!r10_bio->devs[m].repl_bio->bi_status) { in handle_write_completed()
2636 r10_bio->devs[m].addr, in handle_write_completed()
2637 r10_bio->sectors, 0); in handle_write_completed()
2641 r10_bio->devs[m].addr, in handle_write_completed()
2642 r10_bio->sectors, 0)) in handle_write_completed()
2646 put_buf(r10_bio); in handle_write_completed()
2650 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2651 struct bio *bio = r10_bio->devs[m].bio; in handle_write_completed()
2656 r10_bio->devs[m].addr, in handle_write_completed()
2657 r10_bio->sectors, 0); in handle_write_completed()
2661 if (!narrow_write_error(r10_bio, m)) { in handle_write_completed()
2664 &r10_bio->state); in handle_write_completed()
2668 bio = r10_bio->devs[m].repl_bio; in handle_write_completed()
2673 r10_bio->devs[m].addr, in handle_write_completed()
2674 r10_bio->sectors, 0); in handle_write_completed()
2680 list_add(&r10_bio->retry_list, &conf->bio_end_io_list); in handle_write_completed()
2691 &r10_bio->state)) in handle_write_completed()
2692 close_write(r10_bio); in handle_write_completed()
2693 raid_end_bio_io(r10_bio); in handle_write_completed()
2701 struct r10bio *r10_bio; in raid10d() local
2721 r10_bio = list_first_entry(&tmp, struct r10bio, in raid10d()
2723 list_del(&r10_bio->retry_list); in raid10d()
2725 set_bit(R10BIO_Degraded, &r10_bio->state); in raid10d()
2728 &r10_bio->state)) in raid10d()
2729 close_write(r10_bio); in raid10d()
2730 raid_end_bio_io(r10_bio); in raid10d()
2744 r10_bio = list_entry(head->prev, struct r10bio, retry_list); in raid10d()
2749 mddev = r10_bio->mddev; in raid10d()
2751 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || in raid10d()
2752 test_bit(R10BIO_WriteError, &r10_bio->state)) in raid10d()
2753 handle_write_completed(conf, r10_bio); in raid10d()
2754 else if (test_bit(R10BIO_IsReshape, &r10_bio->state)) in raid10d()
2755 reshape_request_write(mddev, r10_bio); in raid10d()
2756 else if (test_bit(R10BIO_IsSync, &r10_bio->state)) in raid10d()
2757 sync_request_write(mddev, r10_bio); in raid10d()
2758 else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) in raid10d()
2759 recovery_request_write(mddev, r10_bio); in raid10d()
2760 else if (test_bit(R10BIO_ReadError, &r10_bio->state)) in raid10d()
2761 handle_read_error(mddev, r10_bio); in raid10d()
2892 struct r10bio *r10_bio; in raid10_sync_request() local
3028 r10_bio = NULL; in raid10_sync_request()
3060 rb2 = r10_bio; in raid10_sync_request()
3094 r10_bio = raid10_alloc_init_r10buf(conf); in raid10_sync_request()
3095 r10_bio->state = 0; in raid10_sync_request()
3097 atomic_set(&r10_bio->remaining, 0); in raid10_sync_request()
3099 r10_bio->master_bio = (struct bio*)rb2; in raid10_sync_request()
3102 r10_bio->mddev = mddev; in raid10_sync_request()
3103 set_bit(R10BIO_IsRecover, &r10_bio->state); in raid10_sync_request()
3104 r10_bio->sector = sect; in raid10_sync_request()
3106 raid10_find_phys(conf, r10_bio); in raid10_sync_request()
3127 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3138 sector = r10_bio->devs[j].addr; in raid10_sync_request()
3152 bio = r10_bio->devs[0].bio; in raid10_sync_request()
3159 from_addr = r10_bio->devs[j].addr; in raid10_sync_request()
3167 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3170 to_addr = r10_bio->devs[k].addr; in raid10_sync_request()
3171 r10_bio->devs[0].devnum = d; in raid10_sync_request()
3172 r10_bio->devs[0].addr = from_addr; in raid10_sync_request()
3173 r10_bio->devs[1].devnum = i; in raid10_sync_request()
3174 r10_bio->devs[1].addr = to_addr; in raid10_sync_request()
3177 bio = r10_bio->devs[1].bio; in raid10_sync_request()
3185 atomic_inc(&r10_bio->remaining); in raid10_sync_request()
3187 r10_bio->devs[1].bio->bi_end_io = NULL; in raid10_sync_request()
3190 bio = r10_bio->devs[1].repl_bio; in raid10_sync_request()
3206 atomic_inc(&r10_bio->remaining); in raid10_sync_request()
3219 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3225 r10_bio->devs[k].addr, in raid10_sync_request()
3231 r10_bio->devs[k].addr, in raid10_sync_request()
3243 put_buf(r10_bio); in raid10_sync_request()
3246 r10_bio = rb2; in raid10_sync_request()
3255 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) { in raid10_sync_request()
3262 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3269 r10_bio->devs[0].bio->bi_opf in raid10_sync_request()
3274 while (r10_bio) { in raid10_sync_request()
3275 struct r10bio *rb2 = r10_bio; in raid10_sync_request()
3276 r10_bio = (struct r10bio*) rb2->master_bio; in raid10_sync_request()
3307 r10_bio = raid10_alloc_init_r10buf(conf); in raid10_sync_request()
3308 r10_bio->state = 0; in raid10_sync_request()
3310 r10_bio->mddev = mddev; in raid10_sync_request()
3311 atomic_set(&r10_bio->remaining, 0); in raid10_sync_request()
3315 r10_bio->master_bio = NULL; in raid10_sync_request()
3316 r10_bio->sector = sector_nr; in raid10_sync_request()
3317 set_bit(R10BIO_IsSync, &r10_bio->state); in raid10_sync_request()
3318 raid10_find_phys(conf, r10_bio); in raid10_sync_request()
3319 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; in raid10_sync_request()
3322 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3327 if (r10_bio->devs[i].repl_bio) in raid10_sync_request()
3328 r10_bio->devs[i].repl_bio->bi_end_io = NULL; in raid10_sync_request()
3330 bio = r10_bio->devs[i].bio; in raid10_sync_request()
3338 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3352 atomic_inc(&r10_bio->remaining); in raid10_sync_request()
3371 bio = r10_bio->devs[i].repl_bio; in raid10_sync_request()
3374 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3389 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3390 if (r10_bio->devs[i].bio->bi_end_io) in raid10_sync_request()
3393 if (r10_bio->devs[i].repl_bio && in raid10_sync_request()
3394 r10_bio->devs[i].repl_bio->bi_end_io) in raid10_sync_request()
3399 put_buf(r10_bio); in raid10_sync_request()
3427 r10_bio->sectors = nr_sectors; in raid10_sync_request()
3480 r10_bio = get_resync_r10bio(bio); in raid10_sync_request()
3481 r10_bio->sectors = nr_sectors; in raid10_sync_request()
4413 struct r10bio *r10_bio; in reshape_request() local
4518 r10_bio = raid10_alloc_init_r10buf(conf); in reshape_request()
4519 r10_bio->state = 0; in reshape_request()
4521 atomic_set(&r10_bio->remaining, 0); in reshape_request()
4522 r10_bio->mddev = mddev; in reshape_request()
4523 r10_bio->sector = sector_nr; in reshape_request()
4524 set_bit(R10BIO_IsReshape, &r10_bio->state); in reshape_request()
4525 r10_bio->sectors = last - sector_nr + 1; in reshape_request()
4526 rdev = read_balance(conf, r10_bio, &max_sectors); in reshape_request()
4527 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state)); in reshape_request()
4534 mempool_free(r10_bio, &conf->r10buf_pool); in reshape_request()
4542 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr in reshape_request()
4544 read_bio->bi_private = r10_bio; in reshape_request()
4551 r10_bio->master_bio = read_bio; in reshape_request()
4552 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; in reshape_request()
4581 __raid10_find_phys(&conf->geo, r10_bio); in reshape_request()
4589 int d = r10_bio->devs[s/2].devnum; in reshape_request()
4593 b = r10_bio->devs[s/2].repl_bio; in reshape_request()
4596 b = r10_bio->devs[s/2].bio; in reshape_request()
4602 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + in reshape_request()
4613 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in reshape_request()
4630 r10_bio->sectors = nr_sectors; in reshape_request()
4633 md_sync_acct_bio(read_bio, r10_bio->sectors); in reshape_request()
4634 atomic_inc(&r10_bio->remaining); in reshape_request()
4654 static void end_reshape_request(struct r10bio *r10_bio);
4656 struct r10bio *r10_bio);
4657 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) in reshape_request_write() argument
4667 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) in reshape_request_write()
4668 if (handle_reshape_read_error(mddev, r10_bio) < 0) { in reshape_request_write()
4670 md_done_sync(mddev, r10_bio->sectors, 0); in reshape_request_write()
4677 atomic_set(&r10_bio->remaining, 1); in reshape_request_write()
4680 int d = r10_bio->devs[s/2].devnum; in reshape_request_write()
4685 b = r10_bio->devs[s/2].repl_bio; in reshape_request_write()
4688 b = r10_bio->devs[s/2].bio; in reshape_request_write()
4696 md_sync_acct_bio(b, r10_bio->sectors); in reshape_request_write()
4697 atomic_inc(&r10_bio->remaining); in reshape_request_write()
4701 end_reshape_request(r10_bio); in reshape_request_write()
4736 struct r10bio *r10_bio) in handle_reshape_read_error() argument
4739 int sectors = r10_bio->sectors; in handle_reshape_read_error()
4753 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in handle_reshape_read_error()
4755 r10b->sector = r10_bio->sector; in handle_reshape_read_error()
4812 struct r10bio *r10_bio = get_resync_r10bio(bio); in end_reshape_write() local
4813 struct mddev *mddev = r10_bio->mddev; in end_reshape_write()
4820 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_reshape_write()
4834 end_reshape_request(r10_bio); in end_reshape_write()
4837 static void end_reshape_request(struct r10bio *r10_bio) in end_reshape_request() argument
4839 if (!atomic_dec_and_test(&r10_bio->remaining)) in end_reshape_request()
4841 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); in end_reshape_request()
4842 bio_put(r10_bio->master_bio); in end_reshape_request()
4843 put_buf(r10_bio); in end_reshape_request()
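
For orientation, every reference above touches a field of struct r10bio. The following is an abridged sketch of that structure as defined in drivers/md/raid10.h; the exact layout differs between kernel versions (older kernels, for example, keep rdev and repl_bio as separate members rather than a union), so treat it as a reading aid for the listing, not an authoritative definition.

/*
 * Abridged sketch of struct r10bio (based on drivers/md/raid10.h).
 * Field layout varies across kernel versions; comments relate the
 * fields to the functions referenced in the listing above.
 */
struct r10bio {
	atomic_t	remaining;	/* outstanding per-copy bios; see one_write_done(), end_sync_request() */
	sector_t	sector;		/* virtual (array) sector of this request */
	int		sectors;	/* length of the request in sectors */
	unsigned long	state;		/* R10BIO_* flag bits (Uptodate, WriteError, MadeGood, ...) */
	struct mddev	*mddev;

	struct bio	*master_bio;	/* original bio submitted to the md device */
	int		read_slot;	/* slot used for reads; -1 for writes */
	struct list_head retry_list;	/* linkage onto conf->retry_list / bio_end_io_list */

	/* one entry per copy; addr and devnum are filled in by raid10_find_phys() */
	struct r10dev {
		struct bio	*bio;
		union {
			struct bio	*repl_bio;	/* writes/resync to a replacement device */
			struct md_rdev	*rdev;		/* reads (when read_slot >= 0) */
		};
		sector_t	addr;	/* sector on the member device */
		int		devnum;	/* index into conf->mirrors[] */
	} devs[];
};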