Lines matching refs: r1_bio (Linux MD RAID1 driver, drivers/md/raid1.c)

Each entry gives the source line number, the matching line, and the enclosing function; "argument" marks lines where r1_bio is a function parameter, "local" marks local-variable declarations.

59 static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,  in check_and_add_serial()  argument
64 sector_t lo = r1_bio->sector; in check_and_add_serial()
65 sector_t hi = lo + r1_bio->sectors; in check_and_add_serial()
82 static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio) in wait_for_serialization() argument
86 int idx = sector_to_idx(r1_bio->sector); in wait_for_serialization()
93 check_and_add_serial(rdev, r1_bio, si, idx) == 0); in wait_for_serialization()
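
The serialization pair above derives the half-open sector range a write covers from two r1bio fields (source lines 64-65) and then waits until check_and_add_serial() returns 0, i.e. until no overlapping write is in flight. A minimal user-space sketch of that range arithmetic; the struct fields mirror struct r1bio, everything else is invented for illustration:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Stand-in for the two struct r1bio fields used by
 * check_and_add_serial(): start sector and length. */
struct r1bio_model {
    sector_t sector;
    int sectors;
};

/* lo/hi as computed at source lines 64-65: the request covers the
 * half-open range [lo, hi); two writes overlap iff
 * lo1 < hi2 && lo2 < hi1. */
static void request_range(const struct r1bio_model *r1_bio,
                          sector_t *lo, sector_t *hi)
{
    *lo = r1_bio->sector;
    *hi = *lo + r1_bio->sectors;
}

int main(void)
{
    struct r1bio_model r = { .sector = 1024, .sectors = 8 };
    sector_t lo, hi;

    request_range(&r, &lo, &hi);
    printf("write covers [%llu, %llu)\n",
           (unsigned long long)lo, (unsigned long long)hi);
    return 0;
}
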
149 struct r1bio *r1_bio; in r1buf_pool_alloc() local
155 r1_bio = r1bio_pool_alloc(gfp_flags, pi); in r1buf_pool_alloc()
156 if (!r1_bio) in r1buf_pool_alloc()
171 r1_bio->bios[j] = bio; in r1buf_pool_alloc()
186 bio = r1_bio->bios[j]; in r1buf_pool_alloc()
196 rp->raid_bio = r1_bio; in r1buf_pool_alloc()
200 r1_bio->master_bio = NULL; in r1buf_pool_alloc()
202 return r1_bio; in r1buf_pool_alloc()
210 bio_put(r1_bio->bios[j]); in r1buf_pool_alloc()
214 rbio_pool_free(r1_bio, data); in r1buf_pool_alloc()
237 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio) in put_all_bios() argument
242 struct bio **bio = r1_bio->bios + i; in put_all_bios()
249 static void free_r1bio(struct r1bio *r1_bio) in free_r1bio() argument
251 struct r1conf *conf = r1_bio->mddev->private; in free_r1bio()
253 put_all_bios(conf, r1_bio); in free_r1bio()
254 mempool_free(r1_bio, &conf->r1bio_pool); in free_r1bio()
257 static void put_buf(struct r1bio *r1_bio) in put_buf() argument
259 struct r1conf *conf = r1_bio->mddev->private; in put_buf()
260 sector_t sect = r1_bio->sector; in put_buf()
264 struct bio *bio = r1_bio->bios[i]; in put_buf()
266 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); in put_buf()
269 mempool_free(r1_bio, &conf->r1buf_pool); in put_buf()
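
put_all_bios()/free_r1bio() release every per-mirror bio slot and return the r1bio to conf->r1bio_pool; put_buf() does the same for resync buffers via conf->r1buf_pool and additionally drops each rdev's pending count. (The real put_all_bios() also skips the IO_BLOCKED/IO_MADE_GOOD sentinel pointers that appear later in this listing.) A simplified user-space model of the release pattern, all names hypothetical:

#include <stdlib.h>

#define RAID_DISKS 4

struct bio_model { char payload[64]; };

struct r1bio_model {
    struct bio_model *bios[RAID_DISKS];  /* one slot per mirror */
};

/* Release and clear every per-mirror slot, as put_all_bios()
 * does with bio_put(). */
static void put_all_bios_model(struct r1bio_model *r1_bio)
{
    for (int i = 0; i < RAID_DISKS; i++) {
        struct bio_model **bio = &r1_bio->bios[i];

        free(*bio);
        *bio = NULL;
    }
}

/* free_r1bio(): drop the sub-bios, then return the container (the
 * kernel hands it back to a mempool rather than the heap). */
static void free_r1bio_model(struct r1bio_model *r1_bio)
{
    put_all_bios_model(r1_bio);
    free(r1_bio);
}
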
274 static void reschedule_retry(struct r1bio *r1_bio) in reschedule_retry() argument
277 struct mddev *mddev = r1_bio->mddev; in reschedule_retry()
281 idx = sector_to_idx(r1_bio->sector); in reschedule_retry()
283 list_add(&r1_bio->retry_list, &conf->retry_list); in reschedule_retry()
296 static void call_bio_endio(struct r1bio *r1_bio) in call_bio_endio() argument
298 struct bio *bio = r1_bio->master_bio; in call_bio_endio()
300 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) in call_bio_endio()
306 static void raid_end_bio_io(struct r1bio *r1_bio) in raid_end_bio_io() argument
308 struct bio *bio = r1_bio->master_bio; in raid_end_bio_io()
309 struct r1conf *conf = r1_bio->mddev->private; in raid_end_bio_io()
312 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { in raid_end_bio_io()
318 call_bio_endio(r1_bio); in raid_end_bio_io()
324 allow_barrier(conf, r1_bio->sector); in raid_end_bio_io()
326 free_r1bio(r1_bio); in raid_end_bio_io()
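
call_bio_endio() translates the R1BIO_Uptodate bit into the master bio's completion status, and raid_end_bio_io() guards it with test_and_set_bit(R1BIO_Returned) so the master bio is completed exactly once before the barrier is released and the r1bio freed. The once-only guard, modelled with C11 atomics (names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag returned = ATOMIC_FLAG_INIT; /* R1BIO_Returned stand-in */

/* Whichever path gets here first (normal completion, or the
 * write-behind fast path at source line 537) completes the master
 * bio; every later caller sees the flag already set and skips. */
static void end_master_bio_once(bool uptodate)
{
    if (!atomic_flag_test_and_set(&returned))
        printf("bio_endio(master), status=%s\n",
               uptodate ? "OK" : "IOERR");
}
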
332 static inline void update_head_pos(int disk, struct r1bio *r1_bio) in update_head_pos() argument
334 struct r1conf *conf = r1_bio->mddev->private; in update_head_pos()
337 r1_bio->sector + (r1_bio->sectors); in update_head_pos()
343 static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio) in find_bio_disk() argument
346 struct r1conf *conf = r1_bio->mddev->private; in find_bio_disk()
350 if (r1_bio->bios[mirror] == bio) in find_bio_disk()
354 update_head_pos(mirror, r1_bio); in find_bio_disk()
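
A completion handler receives only the child bio, so find_bio_disk() recovers the owning mirror by scanning the per-mirror slots for a pointer match, then updates that mirror's head position. A hypothetical model of the scan:

struct bio_model;  /* opaque: only pointer identity matters here */

/* Linear scan of the per-mirror slots, as in find_bio_disk();
 * the kernel BUG()s if no slot matches. */
static int find_bio_disk_model(struct bio_model * const bios[],
                               int raid_disks,
                               const struct bio_model *bio)
{
    for (int mirror = 0; mirror < raid_disks; mirror++)
        if (bios[mirror] == bio)
            return mirror;
    return -1;  /* never reached for a validly issued bio */
}
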
362 struct r1bio *r1_bio = bio->bi_private; in raid1_end_read_request() local
363 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_read_request()
364 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev; in raid1_end_read_request()
369 update_head_pos(r1_bio->read_disk, r1_bio); in raid1_end_read_request()
372 set_bit(R1BIO_Uptodate, &r1_bio->state); in raid1_end_read_request()
374 test_bit(R1BIO_FailFast, &r1_bio->state)) in raid1_end_read_request()
385 if (r1_bio->mddev->degraded == conf->raid_disks || in raid1_end_read_request()
386 (r1_bio->mddev->degraded == conf->raid_disks-1 && in raid1_end_read_request()
393 raid_end_bio_io(r1_bio); in raid1_end_read_request()
403 (unsigned long long)r1_bio->sector); in raid1_end_read_request()
404 set_bit(R1BIO_ReadError, &r1_bio->state); in raid1_end_read_request()
405 reschedule_retry(r1_bio); in raid1_end_read_request()
410 static void close_write(struct r1bio *r1_bio) in close_write() argument
413 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { in close_write()
414 bio_free_pages(r1_bio->behind_master_bio); in close_write()
415 bio_put(r1_bio->behind_master_bio); in close_write()
416 r1_bio->behind_master_bio = NULL; in close_write()
419 md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, in close_write()
420 r1_bio->sectors, in close_write()
421 !test_bit(R1BIO_Degraded, &r1_bio->state), in close_write()
422 test_bit(R1BIO_BehindIO, &r1_bio->state)); in close_write()
423 md_write_end(r1_bio->mddev); in close_write()
426 static void r1_bio_write_done(struct r1bio *r1_bio) in r1_bio_write_done() argument
428 if (!atomic_dec_and_test(&r1_bio->remaining)) in r1_bio_write_done()
431 if (test_bit(R1BIO_WriteError, &r1_bio->state)) in r1_bio_write_done()
432 reschedule_retry(r1_bio); in r1_bio_write_done()
434 close_write(r1_bio); in r1_bio_write_done()
435 if (test_bit(R1BIO_MadeGood, &r1_bio->state)) in r1_bio_write_done()
436 reschedule_retry(r1_bio); in r1_bio_write_done()
438 raid_end_bio_io(r1_bio); in r1_bio_write_done()
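
close_write() tears down any write-behind pages, ends the bitmap write, and calls md_write_end(); r1_bio_write_done() runs it only when the last reference on ->remaining drops, handing the r1bio to raid1d instead when a WriteError/MadeGood bit needs fix-up. The last-reference pattern with C11 atomics (a sketch; see the fan-out model further down for how the count is built up):

#include <stdatomic.h>

/* remaining = 1 (submitter's reference) + one per in-flight
 * sub-write.  Only the caller that drops the last reference
 * finishes the write. */
static int write_done_model(atomic_int *remaining)
{
    if (atomic_fetch_sub(remaining, 1) - 1 != 0)
        return 0;  /* other mirrors still in flight */
    /* last reference: close_write() + raid_end_bio_io(), or
     * reschedule_retry() when an error/fix-up bit is set */
    return 1;
}
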
444 struct r1bio *r1_bio = bio->bi_private; in raid1_end_write_request() local
445 int behind = test_bit(R1BIO_BehindIO, &r1_bio->state); in raid1_end_write_request()
446 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_write_request()
448 int mirror = find_bio_disk(r1_bio, bio); in raid1_end_write_request()
451 sector_t lo = r1_bio->sector; in raid1_end_write_request()
452 sector_t hi = r1_bio->sector + r1_bio->sectors; in raid1_end_write_request()
469 md_error(r1_bio->mddev, rdev); in raid1_end_write_request()
477 set_bit(R1BIO_WriteError, &r1_bio->state); in raid1_end_write_request()
480 set_bit(R1BIO_Degraded, &r1_bio->state); in raid1_end_write_request()
482 r1_bio->bios[mirror] = NULL; in raid1_end_write_request()
499 r1_bio->bios[mirror] = NULL; in raid1_end_write_request()
511 set_bit(R1BIO_Uptodate, &r1_bio->state); in raid1_end_write_request()
514 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, in raid1_end_write_request()
516 r1_bio->bios[mirror] = IO_MADE_GOOD; in raid1_end_write_request()
517 set_bit(R1BIO_MadeGood, &r1_bio->state); in raid1_end_write_request()
525 atomic_dec(&r1_bio->behind_remaining); in raid1_end_write_request()
534 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) && in raid1_end_write_request()
535 test_bit(R1BIO_Uptodate, &r1_bio->state)) { in raid1_end_write_request()
537 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { in raid1_end_write_request()
538 struct bio *mbio = r1_bio->master_bio; in raid1_end_write_request()
543 call_bio_endio(r1_bio); in raid1_end_write_request()
548 if (r1_bio->bios[mirror] == NULL) in raid1_end_write_request()
555 r1_bio_write_done(r1_bio); in raid1_end_write_request()
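
The block around source lines 534-543 is the write-behind fast path: when every sub-write still pending is a behind write and at least one mirror already holds the data (R1BIO_Uptodate), the master bio is completed early, reusing the R1BIO_Returned guard shown above. The condition, modelled:

#include <stdbool.h>

/* Early-return test from raid1_end_write_request(), simplified:
 * behind_remaining counts pending write-behind sub-writes;
 * remaining - 1 discounts the submitter's base reference. */
static bool can_complete_early(int behind_remaining, int remaining,
                               bool uptodate)
{
    return behind_remaining >= remaining - 1 && uptodate;
}
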
594 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors) in read_balance() argument
596 const sector_t this_sector = r1_bio->sector; in read_balance()
615 sectors = r1_bio->sectors; in read_balance()
624 clear_bit(R1BIO_FailFast, &r1_bio->state); in read_balance()
642 if (r1_bio->bios[disk] == IO_BLOCKED in read_balance()
703 set_bit(R1BIO_FailFast, &r1_bio->state); in read_balance()
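
read_balance() chooses which mirror serves the read at r1_bio->sector and may shrink *max_sectors so the read stops short of a known bad block; it also clears the R1BIO_FailFast hint up front (source line 624) and sets it again (703) only when at least two good mirrors exist to fall back on. A hypothetical caller showing the contract; the stub's behaviour is entirely invented:

#include <stdio.h>

/* Stub standing in for read_balance(): returns the chosen mirror
 * (or -1 if none) and may reduce *max_sectors. */
static int read_balance_model(int *max_sectors)
{
    if (*max_sectors > 8)
        *max_sectors = 8;  /* pretend a bad block limits this mirror */
    return 0;              /* pretend mirror 0 is the best choice */
}

int main(void)
{
    int want = 32, max = want;
    int rdisk = read_balance_model(&max);

    if (rdisk < 0)
        puts("no usable mirror: fail the read");
    else if (max < want)
        printf("read %d of %d sectors from mirror %d, split the rest\n",
               max, want, rdisk);
    return 0;
}
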
1099 static void alloc_behind_master_bio(struct r1bio *r1_bio, in alloc_behind_master_bio() argument
1107 behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev); in alloc_behind_master_bio()
1135 r1_bio->behind_master_bio = behind_bio; in alloc_behind_master_bio()
1136 set_bit(R1BIO_BehindIO, &r1_bio->state); in alloc_behind_master_bio()
1178 static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio) in init_r1bio() argument
1180 r1_bio->master_bio = bio; in init_r1bio()
1181 r1_bio->sectors = bio_sectors(bio); in init_r1bio()
1182 r1_bio->state = 0; in init_r1bio()
1183 r1_bio->mddev = mddev; in init_r1bio()
1184 r1_bio->sector = bio->bi_iter.bi_sector; in init_r1bio()
1191 struct r1bio *r1_bio; in alloc_r1bio() local
1193 r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO); in alloc_r1bio()
1195 memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0])); in alloc_r1bio()
1196 init_r1bio(r1_bio, mddev, bio); in alloc_r1bio()
1197 return r1_bio; in alloc_r1bio()
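
alloc_r1bio() draws an r1bio from the mempool, zeroes the per-mirror slot array sized by conf->raid_disks (source line 1195), and init_r1bio() seeds the tracking fields from the incoming bio. The field mapping, modelled (bio_sectors() is the payload size in bytes shifted down by 9):

#include <stdint.h>

typedef uint64_t sector_t;

struct bio_min {            /* the two bio fields consumed here */
    sector_t bi_sector;     /* bi_iter.bi_sector */
    uint32_t bi_size;       /* payload bytes */
};

struct r1bio_min {
    struct bio_min *master_bio;
    sector_t sector;
    int sectors;
    unsigned long state;
};

/* Mirrors init_r1bio() at source lines 1180-1184. */
static void init_r1bio_model(struct r1bio_min *r1_bio,
                             struct bio_min *bio)
{
    r1_bio->master_bio = bio;
    r1_bio->sectors = bio->bi_size >> 9;  /* bio_sectors(bio) */
    r1_bio->state = 0;
    r1_bio->sector = bio->bi_sector;
}
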
1201 int max_read_sectors, struct r1bio *r1_bio) in raid1_read_request() argument
1211 bool print_msg = !!r1_bio; in raid1_read_request()
1219 gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO; in raid1_read_request()
1225 rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev); in raid1_read_request()
1239 if (!r1_bio) in raid1_read_request()
1240 r1_bio = alloc_r1bio(mddev, bio); in raid1_read_request()
1242 init_r1bio(r1_bio, mddev, bio); in raid1_read_request()
1243 r1_bio->sectors = max_read_sectors; in raid1_read_request()
1249 rdisk = read_balance(conf, r1_bio, &max_sectors); in raid1_read_request()
1257 (unsigned long long)r1_bio->sector); in raid1_read_request()
1259 raid_end_bio_io(r1_bio); in raid1_read_request()
1267 (unsigned long long)r1_bio->sector, in raid1_read_request()
1287 r1_bio->master_bio = bio; in raid1_read_request()
1288 r1_bio->sectors = max_sectors; in raid1_read_request()
1291 r1_bio->read_disk = rdisk; in raid1_read_request()
1295 r1_bio->bios[rdisk] = read_bio; in raid1_read_request()
1297 read_bio->bi_iter.bi_sector = r1_bio->sector + in raid1_read_request()
1303 test_bit(R1BIO_FailFast, &r1_bio->state)) in raid1_read_request()
1305 read_bio->bi_private = r1_bio; in raid1_read_request()
1309 disk_devt(mddev->gendisk), r1_bio->sector); in raid1_read_request()
1318 struct r1bio *r1_bio; in raid1_write_request() local
1352 r1_bio = alloc_r1bio(mddev, bio); in raid1_write_request()
1353 r1_bio->sectors = max_write_sectors; in raid1_write_request()
1376 max_sectors = r1_bio->sectors; in raid1_write_request()
1384 r1_bio->bios[i] = NULL; in raid1_write_request()
1387 set_bit(R1BIO_Degraded, &r1_bio->state); in raid1_write_request()
1397 is_bad = is_badblock(rdev, r1_bio->sector, max_sectors, in raid1_write_request()
1406 if (is_bad && first_bad <= r1_bio->sector) { in raid1_write_request()
1408 bad_sectors -= (r1_bio->sector - first_bad); in raid1_write_request()
1428 int good_sectors = first_bad - r1_bio->sector; in raid1_write_request()
1433 r1_bio->bios[i] = bio; in raid1_write_request()
1442 if (r1_bio->bios[j]) in raid1_write_request()
1444 r1_bio->state = 0; in raid1_write_request()
1458 r1_bio->master_bio = bio; in raid1_write_request()
1459 r1_bio->sectors = max_sectors; in raid1_write_request()
1462 atomic_set(&r1_bio->remaining, 1); in raid1_write_request()
1463 atomic_set(&r1_bio->behind_remaining, 0); in raid1_write_request()
1470 if (!r1_bio->bios[i]) in raid1_write_request()
1482 alloc_behind_master_bio(r1_bio, bio); in raid1_write_request()
1485 md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors, in raid1_write_request()
1486 test_bit(R1BIO_BehindIO, &r1_bio->state)); in raid1_write_request()
1490 if (r1_bio->behind_master_bio) in raid1_write_request()
1491 mbio = bio_clone_fast(r1_bio->behind_master_bio, in raid1_write_request()
1496 if (r1_bio->behind_master_bio) { in raid1_write_request()
1498 wait_for_serialization(rdev, r1_bio); in raid1_write_request()
1500 atomic_inc(&r1_bio->behind_remaining); in raid1_write_request()
1502 wait_for_serialization(rdev, r1_bio); in raid1_write_request()
1504 r1_bio->bios[i] = mbio; in raid1_write_request()
1506 mbio->bi_iter.bi_sector = (r1_bio->sector + in raid1_write_request()
1515 mbio->bi_private = r1_bio; in raid1_write_request()
1517 atomic_inc(&r1_bio->remaining); in raid1_write_request()
1522 r1_bio->sector); in raid1_write_request()
1543 r1_bio_write_done(r1_bio); in raid1_write_request()
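
raid1_write_request() first NULLs the slots of unusable mirrors and clips max_sectors around bad blocks, then clones one mbio per usable mirror, taking a ->remaining reference per clone (write-behind clones also bump ->behind_remaining and are serialized via wait_for_serialization()); the trailing r1_bio_write_done() at source line 1543 drops the submitter's initial reference. A simplified, self-contained fan-out skeleton:

#include <stdatomic.h>
#include <stdbool.h>

#define RAID_DISKS 4

/* Hypothetical skeleton modelled on raid1_write_request().  Each
 * issued sub-write holds one reference; the extra initial reference
 * keeps the request alive until the loop finishes. */
static void write_fanout_model(atomic_int *remaining,
                               const bool usable[RAID_DISKS],
                               void (*submit_clone)(int mirror),
                               void (*write_done)(atomic_int *))
{
    atomic_store(remaining, 1);          /* submitter's reference */
    for (int i = 0; i < RAID_DISKS; i++) {
        if (!usable[i])
            continue;                    /* r1_bio->bios[i] stays NULL */
        atomic_fetch_add(remaining, 1);  /* ref owned by this clone */
        submit_clone(i);                 /* its completion drops it */
    }
    write_done(remaining);               /* drop the initial reference */
}
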
1861 struct r1bio *r1_bio = get_resync_r1bio(bio); in end_sync_read() local
1863 update_head_pos(r1_bio->read_disk, r1_bio); in end_sync_read()
1871 set_bit(R1BIO_Uptodate, &r1_bio->state); in end_sync_read()
1873 if (atomic_dec_and_test(&r1_bio->remaining)) in end_sync_read()
1874 reschedule_retry(r1_bio); in end_sync_read()
1877 static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio) in abort_sync_write() argument
1880 sector_t s = r1_bio->sector; in abort_sync_write()
1881 long sectors_to_go = r1_bio->sectors; in abort_sync_write()
1891 static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate) in put_sync_write_buf() argument
1893 if (atomic_dec_and_test(&r1_bio->remaining)) { in put_sync_write_buf()
1894 struct mddev *mddev = r1_bio->mddev; in put_sync_write_buf()
1895 int s = r1_bio->sectors; in put_sync_write_buf()
1897 if (test_bit(R1BIO_MadeGood, &r1_bio->state) || in put_sync_write_buf()
1898 test_bit(R1BIO_WriteError, &r1_bio->state)) in put_sync_write_buf()
1899 reschedule_retry(r1_bio); in put_sync_write_buf()
1901 put_buf(r1_bio); in put_sync_write_buf()
1910 struct r1bio *r1_bio = get_resync_r1bio(bio); in end_sync_write() local
1911 struct mddev *mddev = r1_bio->mddev; in end_sync_write()
1915 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; in end_sync_write()
1918 abort_sync_write(mddev, r1_bio); in end_sync_write()
1923 set_bit(R1BIO_WriteError, &r1_bio->state); in end_sync_write()
1924 } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, in end_sync_write()
1926 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev, in end_sync_write()
1927 r1_bio->sector, in end_sync_write()
1928 r1_bio->sectors, in end_sync_write()
1931 set_bit(R1BIO_MadeGood, &r1_bio->state); in end_sync_write()
1933 put_sync_write_buf(r1_bio, uptodate); in end_sync_write()
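
put_sync_write_buf() is the resync counterpart of r1_bio_write_done(): whoever drops the last ->remaining reference either hands the r1bio to raid1d (when MadeGood/WriteError requires badblock bookkeeping) or credits the synced sectors with md_done_sync() and returns the buffer with put_buf(). Dispatch sketch with hypothetical stubs:

#include <stdatomic.h>
#include <stdbool.h>

static void reschedule_retry_stub(void) { /* queue for raid1d */ }
static void md_done_sync_stub(int sectors) { (void)sectors; }
static void put_buf_stub(void) { /* return resync buffer to pool */ }

/* Last-reference dispatch modelled on put_sync_write_buf(). */
static void put_sync_write_buf_model(atomic_int *remaining,
                                     bool made_good, bool write_error,
                                     int sectors)
{
    if (atomic_fetch_sub(remaining, 1) - 1 != 0)
        return;                     /* writes still outstanding */
    if (made_good || write_error)
        reschedule_retry_stub();    /* badblock fix-up in raid1d */
    else {
        md_done_sync_stub(sectors); /* count resync progress */
        put_buf_stub();
    }
}
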
1955 static int fix_sync_read_error(struct r1bio *r1_bio) in fix_sync_read_error() argument
1968 struct mddev *mddev = r1_bio->mddev; in fix_sync_read_error()
1970 struct bio *bio = r1_bio->bios[r1_bio->read_disk]; in fix_sync_read_error()
1972 sector_t sect = r1_bio->sector; in fix_sync_read_error()
1973 int sectors = r1_bio->sectors; in fix_sync_read_error()
1977 rdev = conf->mirrors[r1_bio->read_disk].rdev; in fix_sync_read_error()
1991 int d = r1_bio->read_disk; in fix_sync_read_error()
1998 if (r1_bio->bios[d]->bi_end_io == end_sync_read) { in fix_sync_read_error()
2014 } while (!success && d != r1_bio->read_disk); in fix_sync_read_error()
2026 (unsigned long long)r1_bio->sector); in fix_sync_read_error()
2038 md_done_sync(mddev, r1_bio->sectors, 0); in fix_sync_read_error()
2039 put_buf(r1_bio); in fix_sync_read_error()
2051 while (d != r1_bio->read_disk) { in fix_sync_read_error()
2055 if (r1_bio->bios[d]->bi_end_io != end_sync_read) in fix_sync_read_error()
2061 r1_bio->bios[d]->bi_end_io = NULL; in fix_sync_read_error()
2066 while (d != r1_bio->read_disk) { in fix_sync_read_error()
2070 if (r1_bio->bios[d]->bi_end_io != end_sync_read) in fix_sync_read_error()
2082 set_bit(R1BIO_Uptodate, &r1_bio->state); in fix_sync_read_error()
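
fix_sync_read_error() retries the failed region in chunks, walking the mirrors circularly starting with a retry of r1_bio->read_disk itself (the do/while ending at source line 2014), then writes the recovered data back to the mirrors that failed. The circular walk, modelled with a hypothetical read callback:

#include <stdbool.h>

/* Try read_disk again, then each other mirror in turn, wrapping at
 * the end of the array, until a read succeeds or we come full
 * circle. */
static int find_good_mirror(int read_disk, int raid_disks,
                            bool (*try_read)(int d))
{
    int d = read_disk;

    do {
        if (try_read(d))
            return d;            /* got good data from mirror d */
        d = (d + 1) % raid_disks;
    } while (d != read_disk);
    return -1;                   /* every mirror failed: abort sync */
}
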
2087 static void process_checks(struct r1bio *r1_bio) in process_checks() argument
2096 struct mddev *mddev = r1_bio->mddev; in process_checks()
2103 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); in process_checks()
2106 struct bio *b = r1_bio->bios[i]; in process_checks()
2114 b->bi_iter.bi_sector = r1_bio->sector + in process_checks()
2118 rp->raid_bio = r1_bio; in process_checks()
2122 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9); in process_checks()
2125 if (r1_bio->bios[primary]->bi_end_io == end_sync_read && in process_checks()
2126 !r1_bio->bios[primary]->bi_status) { in process_checks()
2127 r1_bio->bios[primary]->bi_end_io = NULL; in process_checks()
2131 r1_bio->read_disk = primary; in process_checks()
2134 struct bio *pbio = r1_bio->bios[primary]; in process_checks()
2135 struct bio *sbio = r1_bio->bios[i]; in process_checks()
2161 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); in process_checks()
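
process_checks() re-initializes every sync bio, picks as primary the first mirror whose read completed without error (source lines 2125-2131), and compares each remaining mirror's pages against it, adding r1_bio->sectors to mddev->resync_mismatches when they differ. The page-count math at source line 2103 rounds the request up to whole pages; modelled (4 KiB pages assumed):

#include <string.h>
#include <stdbool.h>

#define PAGE_SIZE 4096           /* assumption: 4 KiB pages */
#define SECTORS_PER_PAGE (PAGE_SIZE / 512)

/* vcnt from source line 2103: sectors rounded up to whole pages. */
static int page_vec_count(int sectors)
{
    return (sectors + SECTORS_PER_PAGE - 1) / SECTORS_PER_PAGE;
}

/* Comparison step, simplified: the primary mirror is the reference. */
static bool pages_differ(const void *primary, const void *secondary,
                         size_t len)
{
    return memcmp(primary, secondary, len) != 0;
}
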
2174 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) in sync_request_write() argument
2181 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) in sync_request_write()
2183 if (!fix_sync_read_error(r1_bio)) in sync_request_write()
2187 process_checks(r1_bio); in sync_request_write()
2192 atomic_set(&r1_bio->remaining, 1); in sync_request_write()
2194 wbio = r1_bio->bios[i]; in sync_request_write()
2197 (i == r1_bio->read_disk || in sync_request_write()
2201 abort_sync_write(mddev, r1_bio); in sync_request_write()
2210 atomic_inc(&r1_bio->remaining); in sync_request_write()
2216 put_sync_write_buf(r1_bio, 1); in sync_request_write()
2323 static int narrow_write_error(struct r1bio *r1_bio, int i) in narrow_write_error() argument
2325 struct mddev *mddev = r1_bio->mddev; in narrow_write_error()
2343 int sect_to_write = r1_bio->sectors; in narrow_write_error()
2351 sector = r1_bio->sector; in narrow_write_error()
2362 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { in narrow_write_error()
2363 wbio = bio_clone_fast(r1_bio->behind_master_bio, in narrow_write_error()
2367 wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO, in narrow_write_error()
2372 wbio->bi_iter.bi_sector = r1_bio->sector; in narrow_write_error()
2373 wbio->bi_iter.bi_size = r1_bio->sectors << 9; in narrow_write_error()
2375 bio_trim(wbio, sector - r1_bio->sector, sectors); in narrow_write_error()
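
narrow_write_error() rewrites the failed range in chunks aligned to the device's badblock granularity: each clone of the master (or behind-master) bio is first restored to the full r1bio extent (source lines 2372-2373), then bio_trim(wbio, sector - r1_bio->sector, sectors) keeps only the chunk currently being retried; chunks that fail again are recorded as bad blocks. The chunk iteration, modelled under the assumption that block_sectors is a power of two:

#include <stdint.h>

typedef uint64_t sector_t;

/* Hypothetical model of the narrow_write_error() loop: the first
 * chunk runs up to the next block_sectors boundary, later chunks
 * are whole blocks, the last is whatever remains. */
static void rewrite_in_chunks(sector_t start, int total, int block_sectors,
                              void (*write_chunk)(sector_t s, int n))
{
    int n = (int)(((start + block_sectors) &
                   ~((sector_t)block_sectors - 1)) - start);

    while (total > 0) {
        if (n > total)
            n = total;
        write_chunk(start, n);  /* failed chunks become bad blocks */
        start += n;
        total -= n;
        n = block_sectors;
    }
}
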
2393 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_sync_write_finished() argument
2396 int s = r1_bio->sectors; in handle_sync_write_finished()
2399 struct bio *bio = r1_bio->bios[m]; in handle_sync_write_finished()
2403 test_bit(R1BIO_MadeGood, &r1_bio->state)) { in handle_sync_write_finished()
2404 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0); in handle_sync_write_finished()
2407 test_bit(R1BIO_WriteError, &r1_bio->state)) { in handle_sync_write_finished()
2408 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0)) in handle_sync_write_finished()
2412 put_buf(r1_bio); in handle_sync_write_finished()
2416 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) in handle_write_finished() argument
2422 if (r1_bio->bios[m] == IO_MADE_GOOD) { in handle_write_finished()
2425 r1_bio->sector, in handle_write_finished()
2426 r1_bio->sectors, 0); in handle_write_finished()
2428 } else if (r1_bio->bios[m] != NULL) { in handle_write_finished()
2434 if (!narrow_write_error(r1_bio, m)) { in handle_write_finished()
2438 set_bit(R1BIO_Degraded, &r1_bio->state); in handle_write_finished()
2445 list_add(&r1_bio->retry_list, &conf->bio_end_io_list); in handle_write_finished()
2446 idx = sector_to_idx(r1_bio->sector); in handle_write_finished()
2456 if (test_bit(R1BIO_WriteError, &r1_bio->state)) in handle_write_finished()
2457 close_write(r1_bio); in handle_write_finished()
2458 raid_end_bio_io(r1_bio); in handle_write_finished()
2462 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) in handle_read_error() argument
2468 clear_bit(R1BIO_ReadError, &r1_bio->state); in handle_read_error()
2478 bio = r1_bio->bios[r1_bio->read_disk]; in handle_read_error()
2480 r1_bio->bios[r1_bio->read_disk] = NULL; in handle_read_error()
2482 rdev = conf->mirrors[r1_bio->read_disk].rdev; in handle_read_error()
2486 fix_read_error(conf, r1_bio->read_disk, in handle_read_error()
2487 r1_bio->sector, r1_bio->sectors); in handle_read_error()
2492 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED; in handle_read_error()
2496 allow_barrier(conf, r1_bio->sector); in handle_read_error()
2497 bio = r1_bio->master_bio; in handle_read_error()
2500 r1_bio->state = 0; in handle_read_error()
2501 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio); in handle_read_error()
2507 struct r1bio *r1_bio; in raid1d() local
2524 r1_bio = list_first_entry(&tmp, struct r1bio, in raid1d()
2526 list_del(&r1_bio->retry_list); in raid1d()
2527 idx = sector_to_idx(r1_bio->sector); in raid1d()
2530 set_bit(R1BIO_Degraded, &r1_bio->state); in raid1d()
2531 if (test_bit(R1BIO_WriteError, &r1_bio->state)) in raid1d()
2532 close_write(r1_bio); in raid1d()
2533 raid_end_bio_io(r1_bio); in raid1d()
2547 r1_bio = list_entry(head->prev, struct r1bio, retry_list); in raid1d()
2549 idx = sector_to_idx(r1_bio->sector); in raid1d()
2553 mddev = r1_bio->mddev; in raid1d()
2555 if (test_bit(R1BIO_IsSync, &r1_bio->state)) { in raid1d()
2556 if (test_bit(R1BIO_MadeGood, &r1_bio->state) || in raid1d()
2557 test_bit(R1BIO_WriteError, &r1_bio->state)) in raid1d()
2558 handle_sync_write_finished(conf, r1_bio); in raid1d()
2560 sync_request_write(mddev, r1_bio); in raid1d()
2561 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) || in raid1d()
2562 test_bit(R1BIO_WriteError, &r1_bio->state)) in raid1d()
2563 handle_write_finished(conf, r1_bio); in raid1d()
2564 else if (test_bit(R1BIO_ReadError, &r1_bio->state)) in raid1d()
2565 handle_read_error(conf, r1_bio); in raid1d()
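
The raid1d() retry loop pops r1bios off conf->retry_list and dispatches purely on state bits: sync requests go to the resync write path, MadeGood/WriteError to the write fix-up path, ReadError to the read retry path. The dispatch order (source lines 2555-2565), modelled with hypothetical stubs:

#include <stdbool.h>

enum { IS_SYNC, MADE_GOOD, WRITE_ERROR, READ_ERROR }; /* R1BIO_* stand-ins */

static bool test_bit_model(unsigned long state, int bit)
{
    return state & (1UL << bit);
}

static void handle_sync_write_finished_stub(void) {}
static void sync_request_write_stub(void) {}
static void handle_write_finished_stub(void) {}
static void handle_read_error_stub(void) {}

static void raid1d_dispatch_model(unsigned long state)
{
    bool fixup = test_bit_model(state, MADE_GOOD) ||
                 test_bit_model(state, WRITE_ERROR);

    if (test_bit_model(state, IS_SYNC)) {
        if (fixup)
            handle_sync_write_finished_stub();
        else
            sync_request_write_stub();
    } else if (fixup) {
        handle_write_finished_stub();
    } else if (test_bit_model(state, READ_ERROR)) {
        handle_read_error_stub();
    }
}
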
2618 struct r1bio *r1_bio; in raid1_sync_request() local
2694 r1_bio = raid1_alloc_init_r1buf(conf); in raid1_sync_request()
2706 r1_bio->mddev = mddev; in raid1_sync_request()
2707 r1_bio->sector = sector_nr; in raid1_sync_request()
2708 r1_bio->state = 0; in raid1_sync_request()
2709 set_bit(R1BIO_IsSync, &r1_bio->state); in raid1_sync_request()
2715 bio = r1_bio->bios[i]; in raid1_sync_request()
2778 r1_bio->read_disk = disk; in raid1_sync_request()
2786 if (r1_bio->bios[i]->bi_end_io == end_sync_write) { in raid1_sync_request()
2794 put_buf(r1_bio); in raid1_sync_request()
2828 put_buf(r1_bio); in raid1_sync_request()
2858 bio = r1_bio->bios[i]; in raid1_sync_request()
2875 r1_bio->sectors = nr_sectors; in raid1_sync_request()
2891 atomic_set(&r1_bio->remaining, read_targets); in raid1_sync_request()
2893 bio = r1_bio->bios[i]; in raid1_sync_request()
2903 atomic_set(&r1_bio->remaining, 1); in raid1_sync_request()
2904 bio = r1_bio->bios[r1_bio->read_disk]; in raid1_sync_request()
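
At the tail of raid1_sync_request() the ->remaining count depends on the kind of pass: a user-requested check/repair pass reads every readable mirror and compares them (remaining = read_targets, source line 2891), while a plain resync reads only r1_bio->read_disk and writes the others (remaining = 1, line 2903). A sketch of that setup, with the pass type reduced to a flag:

#include <stdatomic.h>
#include <stdbool.h>

/* Reference-count setup modelled on the tail of
 * raid1_sync_request(); check_pass stands in for the kernel's
 * recovery-mode test. */
static void setup_sync_reads(atomic_int *remaining, bool check_pass,
                             int read_targets)
{
    if (check_pass)
        atomic_store(remaining, read_targets); /* read all, compare */
    else
        atomic_store(remaining, 1);            /* single source read */
}
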