Lines matching refs: mddev

The listing below shows every reference to the mddev structure in the Linux MD RAID10 personality (drivers/md/raid10.c). The leading number on each line is the source line number, followed by the matching code and the enclosing function; a trailing "argument" or "local" marks lines where mddev is declared as a function parameter or a local variable.
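Most of these references follow one idiom: the md core passes each personality callback a generic struct mddev, and raid10 recovers its per-array state (struct r10conf) from the opaque mddev->private pointer, while conf->mddev points back the other way. The following is a minimal userspace sketch of that pattern only; the struct layouts and the show_status() hook are simplified stand-ins, not the kernel's real definitions.

    /*
     * Simplified illustration of the mddev->private / conf->mddev idiom
     * seen throughout the listing.  NOT the kernel's actual structures.
     */
    #include <stdio.h>

    struct mddev {
            void *private;          /* personality-owned state (r10conf here) */
            int raid_disks;
    };

    struct r10conf {
            struct mddev *mddev;    /* back-pointer, as in the conf->mddev uses above */
            int copies;
    };

    /* stand-in for a personality hook such as raid10_status() */
    static void show_status(struct mddev *mddev)
    {
            struct r10conf *conf = mddev->private;   /* the recurring idiom */

            printf("raid10: %d disks, %d copies\n",
                   mddev->raid_disks, conf->copies);
    }

    int main(void)
    {
            struct mddev md = { .raid_disks = 4 };
            struct r10conf conf = { .mddev = &md, .copies = 2 };

            md.private = &conf;     /* analogous to what setup_conf()/raid10_run() wire up */
            show_status(&md);
            return 0;
    }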
71 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
73 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
129 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in r10buf_pool_alloc()
130 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in r10buf_pool_alloc()
174 &conf->mddev->recovery)) { in r10buf_pool_alloc()
255 struct r10conf *conf = r10_bio->mddev->private; in free_r10bio()
263 struct r10conf *conf = r10_bio->mddev->private; in put_buf()
273 struct mddev *mddev = r10_bio->mddev; in reschedule_retry() local
274 struct r10conf *conf = mddev->private; in reschedule_retry()
284 md_wakeup_thread(mddev->thread); in reschedule_retry()
295 struct r10conf *conf = r10_bio->mddev->private; in raid_end_bio_io()
315 struct r10conf *conf = r10_bio->mddev->private; in update_head_pos()
355 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_read_request()
387 rdev_dec_pending(rdev, conf->mddev); in raid10_end_read_request()
394 mdname(conf->mddev), in raid10_end_read_request()
405 md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, in close_write()
409 md_write_end(r10_bio->mddev); in close_write()
432 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_write_request()
457 md_error(rdev->mddev, rdev); in raid10_end_write_request()
462 &rdev->mddev->recovery); in raid10_end_write_request()
467 md_error(rdev->mddev, rdev); in raid10_end_write_request()
531 rdev_dec_pending(rdev, conf->mddev); in raid10_end_write_request()
630 conf->mddev->reshape_backwards)) { in raid10_find_phys()
735 if ((conf->mddev->recovery_cp < MaxSector in read_balance()
737 (mddev_is_clustered(conf->mddev) && in read_balance()
738 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, in read_balance()
880 md_bitmap_unplug(conf->mddev->bitmap); in flush_pending_writes()
969 raid10_log(conf->mddev, "wait barrier"); in wait_barrier()
979 (conf->mddev->thread->tsk == current && in wait_barrier()
981 &conf->mddev->recovery) && in wait_barrier()
1039 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) || in choose_data_offset()
1056 struct mddev *mddev = plug->cb.data; in raid10_unplug() local
1057 struct r10conf *conf = mddev->private; in raid10_unplug()
1066 md_wakeup_thread(mddev->thread); in raid10_unplug()
1073 md_bitmap_unplug(mddev->bitmap); in raid10_unplug()
1100 static void regular_request_wait(struct mddev *mddev, struct r10conf *conf, in regular_request_wait() argument
1104 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in regular_request_wait()
1107 raid10_log(conf->mddev, "wait reshape"); in regular_request_wait()
1117 static void raid10_read_request(struct mddev *mddev, struct bio *bio, in raid10_read_request() argument
1120 struct r10conf *conf = mddev->private; in raid10_read_request()
1159 regular_request_wait(mddev, conf, bio, r10_bio->sectors); in raid10_read_request()
1164 mdname(mddev), b, in raid10_read_request()
1172 mdname(mddev), in raid10_read_request()
1188 read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); in raid10_read_request()
1203 if (mddev->gendisk) in raid10_read_request()
1205 read_bio, disk_devt(mddev->gendisk), in raid10_read_request()
1211 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, in raid10_write_one_disk() argument
1221 struct r10conf *conf = mddev->private; in raid10_write_one_disk()
1236 mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); in raid10_write_one_disk()
1253 if (conf->mddev->gendisk) in raid10_write_one_disk()
1255 mbio, disk_devt(conf->mddev->gendisk), in raid10_write_one_disk()
1262 cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug)); in raid10_write_one_disk()
1275 md_wakeup_thread(mddev->thread); in raid10_write_one_disk()
1279 static void raid10_write_request(struct mddev *mddev, struct bio *bio, in raid10_write_request() argument
1282 struct r10conf *conf = mddev->private; in raid10_write_request()
1288 if ((mddev_is_clustered(mddev) && in raid10_write_request()
1289 md_cluster_ops->area_resyncing(mddev, WRITE, in raid10_write_request()
1296 if (!md_cluster_ops->area_resyncing(mddev, WRITE, in raid10_write_request()
1305 regular_request_wait(mddev, conf, bio, sectors); in raid10_write_request()
1306 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in raid10_write_request()
1307 (mddev->reshape_backwards in raid10_write_request()
1313 mddev->reshape_position = conf->reshape_progress; in raid10_write_request()
1314 set_mask_bits(&mddev->sb_flags, 0, in raid10_write_request()
1316 md_wakeup_thread(mddev->thread); in raid10_write_request()
1317 raid10_log(conf->mddev, "wait reshape metadata"); in raid10_write_request()
1318 wait_event(mddev->sb_wait, in raid10_write_request()
1319 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); in raid10_write_request()
1321 conf->reshape_safe = mddev->reshape_position; in raid10_write_request()
1325 md_wakeup_thread(mddev->thread); in raid10_write_request()
1326 raid10_log(mddev, "wait queued"); in raid10_write_request()
1436 rdev_dec_pending(conf->mirrors[d].rdev, mddev); in raid10_write_request()
1447 rdev_dec_pending(rdev, mddev); in raid10_write_request()
1451 raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk); in raid10_write_request()
1452 md_wait_for_blocked_rdev(blocked_rdev, mddev); in raid10_write_request()
1472 md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); in raid10_write_request()
1476 raid10_write_one_disk(mddev, r10_bio, bio, false, i); in raid10_write_request()
1478 raid10_write_one_disk(mddev, r10_bio, bio, true, i); in raid10_write_request()
1483 static void __make_request(struct mddev *mddev, struct bio *bio, int sectors) in __make_request() argument
1485 struct r10conf *conf = mddev->private; in __make_request()
1493 r10_bio->mddev = mddev; in __make_request()
1500 raid10_read_request(mddev, bio, r10_bio); in __make_request()
1502 raid10_write_request(mddev, bio, r10_bio); in __make_request()
1505 static bool raid10_make_request(struct mddev *mddev, struct bio *bio) in raid10_make_request() argument
1507 struct r10conf *conf = mddev->private; in raid10_make_request()
1513 && md_flush_request(mddev, bio)) in raid10_make_request()
1516 if (!md_write_start(mddev, bio)) in raid10_make_request()
1531 __make_request(mddev, bio, sectors); in raid10_make_request()
1538 static void raid10_status(struct seq_file *seq, struct mddev *mddev) in raid10_status() argument
1540 struct r10conf *conf = mddev->private; in raid10_status()
1544 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2); in raid10_status()
1556 conf->geo.raid_disks - mddev->degraded); in raid10_status()
1618 static void raid10_error(struct mddev *mddev, struct md_rdev *rdev) in raid10_error() argument
1621 struct r10conf *conf = mddev->private; in raid10_error()
1631 if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev in raid10_error()
1640 mddev->degraded++; in raid10_error()
1644 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in raid10_error()
1647 set_mask_bits(&mddev->sb_flags, 0, in raid10_error()
1652 mdname(mddev), bdevname(rdev->bdev, b), in raid10_error()
1653 mdname(mddev), conf->geo.raid_disks - mddev->degraded); in raid10_error()
1666 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, in print_conf()
1690 static int raid10_spare_active(struct mddev *mddev) in raid10_spare_active() argument
1693 struct r10conf *conf = mddev->private; in raid10_spare_active()
1731 mddev->degraded -= count; in raid10_spare_active()
1738 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) in raid10_add_disk() argument
1740 struct r10conf *conf = mddev->private; in raid10_add_disk()
1746 if (mddev->recovery_cp < MaxSector) in raid10_add_disk()
1754 if (md_integrity_add_rdev(rdev, mddev)) in raid10_add_disk()
1768 if (p->recovery_disabled == mddev->recovery_disabled) in raid10_add_disk()
1778 if (mddev->gendisk) in raid10_add_disk()
1779 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid10_add_disk()
1786 if (mddev->gendisk) in raid10_add_disk()
1787 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid10_add_disk()
1791 p->recovery_disabled = mddev->recovery_disabled - 1; in raid10_add_disk()
1799 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) in raid10_add_disk()
1800 blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); in raid10_add_disk()
1806 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) in raid10_remove_disk() argument
1808 struct r10conf *conf = mddev->private; in raid10_remove_disk()
1815 if (unlikely(number >= mddev->raid_disks)) in raid10_remove_disk()
1834 mddev->recovery_disabled != p->recovery_disabled && in raid10_remove_disk()
1862 err = md_integrity_register(mddev); in raid10_remove_disk()
1872 struct r10conf *conf = r10_bio->mddev->private; in __end_sync_read()
1886 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); in __end_sync_read()
1899 struct r10conf *conf = r10_bio->mddev->private; in end_sync_read()
1915 struct mddev *mddev = r10_bio->mddev; in end_sync_request() local
1926 md_done_sync(mddev, s, 1); in end_sync_request()
1943 struct mddev *mddev = r10_bio->mddev; in end_sync_write() local
1944 struct r10conf *conf = mddev->private; in end_sync_write()
1960 md_error(mddev, rdev); in end_sync_write()
1965 &rdev->mddev->recovery); in end_sync_write()
1974 rdev_dec_pending(rdev, mddev); in end_sync_write()
1995 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) in sync_request_write() argument
1997 struct r10conf *conf = mddev->private; in sync_request_write()
2054 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); in sync_request_write()
2055 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) in sync_request_write()
2060 md_error(rdev->mddev, rdev); in sync_request_write()
2113 md_done_sync(mddev, r10_bio->sectors, 1); in sync_request_write()
2137 struct mddev *mddev = r10_bio->mddev; in fix_recovery_read_error() local
2138 struct r10conf *conf = mddev->private; in fix_recovery_read_error()
2176 &rdev->mddev->recovery); in fix_recovery_read_error()
2194 mdname(mddev)); in fix_recovery_read_error()
2197 = mddev->recovery_disabled; in fix_recovery_read_error()
2199 &mddev->recovery); in fix_recovery_read_error()
2211 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) in recovery_request_write() argument
2213 struct r10conf *conf = mddev->private; in recovery_request_write()
2255 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) in check_decay_read_errors() argument
2301 &rdev->mddev->recovery); in r10_sync_page_io()
2305 md_error(rdev->mddev, rdev); in r10_sync_page_io()
2317 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) in fix_read_error() argument
2322 int max_read_errors = atomic_read(&mddev->max_corr_read_errors); in fix_read_error()
2335 check_decay_read_errors(mddev, rdev); in fix_read_error()
2342 mdname(mddev), b, in fix_read_error()
2345 mdname(mddev), b); in fix_read_error()
2346 md_error(mddev, rdev); in fix_read_error()
2380 rdev_dec_pending(rdev, mddev); in fix_read_error()
2404 md_error(mddev, rdev); in fix_read_error()
2436 mdname(mddev), s, in fix_read_error()
2443 mdname(mddev), in fix_read_error()
2446 rdev_dec_pending(rdev, mddev); in fix_read_error()
2473 mdname(mddev), s, in fix_read_error()
2479 mdname(mddev), in fix_read_error()
2484 mdname(mddev), s, in fix_read_error()
2492 rdev_dec_pending(rdev, mddev); in fix_read_error()
2505 struct mddev *mddev = r10_bio->mddev; in narrow_write_error() local
2506 struct r10conf *conf = mddev->private; in narrow_write_error()
2541 wbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); in narrow_write_error()
2563 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) in handle_read_error() argument
2567 struct r10conf *conf = mddev->private; in handle_read_error()
2582 if (mddev->ro) in handle_read_error()
2586 fix_read_error(conf, mddev, r10_bio); in handle_read_error()
2589 md_error(mddev, rdev); in handle_read_error()
2591 rdev_dec_pending(rdev, mddev); in handle_read_error()
2594 raid10_read_request(mddev, r10_bio->master_bio, r10_bio); in handle_read_error()
2626 md_error(conf->mddev, rdev); in handle_write_completed()
2643 md_error(conf->mddev, rdev); in handle_write_completed()
2658 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2662 md_error(conf->mddev, rdev); in handle_write_completed()
2666 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2675 rdev_dec_pending(rdev, conf->mddev); in handle_write_completed()
2688 md_wakeup_thread(conf->mddev->thread); in handle_write_completed()
2700 struct mddev *mddev = thread->mddev; in raid10d() local
2703 struct r10conf *conf = mddev->private; in raid10d()
2707 md_check_recovery(mddev); in raid10d()
2710 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { in raid10d()
2713 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { in raid10d()
2724 if (mddev->degraded) in raid10d()
2749 mddev = r10_bio->mddev; in raid10d()
2750 conf = mddev->private; in raid10d()
2755 reshape_request_write(mddev, r10_bio); in raid10d()
2757 sync_request_write(mddev, r10_bio); in raid10d()
2759 recovery_request_write(mddev, r10_bio); in raid10d()
2761 handle_read_error(mddev, r10_bio); in raid10d()
2766 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING)) in raid10d()
2767 md_check_recovery(mddev); in raid10d()
2798 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || in raid10_alloc_init_r10buf()
2799 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) in raid10_alloc_init_r10buf()
2845 window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors; in raid10_set_cluster_sync_high()
2888 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, in raid10_sync_request() argument
2891 struct r10conf *conf = mddev->private; in raid10_sync_request()
2911 if (mddev->bitmap == NULL && in raid10_sync_request()
2912 mddev->recovery_cp == MaxSector && in raid10_sync_request()
2913 mddev->reshape_position == MaxSector && in raid10_sync_request()
2914 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && in raid10_sync_request()
2915 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in raid10_sync_request()
2916 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in raid10_sync_request()
2919 return mddev->dev_sectors - sector_nr; in raid10_sync_request()
2923 max_sector = mddev->dev_sectors; in raid10_sync_request()
2924 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in raid10_sync_request()
2925 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in raid10_sync_request()
2926 max_sector = mddev->resync_max_sectors; in raid10_sync_request()
2940 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { in raid10_sync_request()
2946 if (mddev->curr_resync < max_sector) { /* aborted */ in raid10_sync_request()
2947 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) in raid10_sync_request()
2948 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync, in raid10_sync_request()
2952 raid10_find_virt(conf, mddev->curr_resync, i); in raid10_sync_request()
2953 md_bitmap_end_sync(mddev->bitmap, sect, in raid10_sync_request()
2958 if ((!mddev->bitmap || conf->fullsync) in raid10_sync_request()
2960 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in raid10_sync_request()
2975 md_bitmap_close_sync(mddev->bitmap); in raid10_sync_request()
2981 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in raid10_sync_request()
2982 return reshape_request(mddev, sector_nr, skipped); in raid10_sync_request()
2992 if (max_sector > mddev->resync_max) in raid10_sync_request()
2993 max_sector = mddev->resync_max; /* Don't do IO beyond here */ in raid10_sync_request()
3025 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in raid10_sync_request()
3062 if (sect >= mddev->resync_max_sectors) { in raid10_sync_request()
3075 must_sync = md_bitmap_start_sync(mddev->bitmap, sect, in raid10_sync_request()
3102 r10_bio->mddev = mddev; in raid10_sync_request()
3121 must_sync = md_bitmap_start_sync(mddev->bitmap, sect, in raid10_sync_request()
3237 &mddev->recovery)) in raid10_sync_request()
3239 mdname(mddev)); in raid10_sync_request()
3241 = mddev->recovery_disabled; in raid10_sync_request()
3247 rdev_dec_pending(mrdev, mddev); in raid10_sync_request()
3249 rdev_dec_pending(mreplace, mddev); in raid10_sync_request()
3252 rdev_dec_pending(mrdev, mddev); in raid10_sync_request()
3254 rdev_dec_pending(mreplace, mddev); in raid10_sync_request()
3293 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, in raid10_sync_request()
3294 mddev_is_clustered(mddev) && in raid10_sync_request()
3297 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, in raid10_sync_request()
3298 &sync_blocks, mddev->degraded) && in raid10_sync_request()
3300 &mddev->recovery)) { in raid10_sync_request()
3310 r10_bio->mddev = mddev; in raid10_sync_request()
3392 mddev); in raid10_sync_request()
3397 mddev); in raid10_sync_request()
3429 if (mddev_is_clustered(mddev) && in raid10_sync_request()
3430 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in raid10_sync_request()
3433 conf->cluster_sync_low = mddev->curr_resync_completed; in raid10_sync_request()
3436 md_cluster_ops->resync_info_update(mddev, in raid10_sync_request()
3440 } else if (mddev_is_clustered(mddev)) { in raid10_sync_request()
3460 mddev->curr_resync_completed, i); in raid10_sync_request()
3469 md_cluster_ops->resync_info_update(mddev, in raid10_sync_request()
3494 md_done_sync(mddev, sectors_skipped, 1); in raid10_sync_request()
3512 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid10_size() argument
3515 struct r10conf *conf = mddev->private; in raid10_size()
3562 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) in setup_geo() argument
3568 layout = mddev->layout; in setup_geo()
3569 chunk = mddev->chunk_sectors; in setup_geo()
3570 disks = mddev->raid_disks - mddev->delta_disks; in setup_geo()
3573 layout = mddev->new_layout; in setup_geo()
3574 chunk = mddev->new_chunk_sectors; in setup_geo()
3575 disks = mddev->raid_disks; in setup_geo()
3580 layout = mddev->new_layout; in setup_geo()
3581 chunk = mddev->new_chunk_sectors; in setup_geo()
3582 disks = mddev->raid_disks + mddev->delta_disks; in setup_geo()
3618 static struct r10conf *setup_conf(struct mddev *mddev) in setup_conf() argument
3625 copies = setup_geo(&geo, mddev, geo_new); in setup_conf()
3629 mdname(mddev), PAGE_SIZE); in setup_conf()
3633 if (copies < 2 || copies > mddev->raid_disks) { in setup_conf()
3635 mdname(mddev), mddev->new_layout); in setup_conf()
3645 conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks), in setup_conf()
3666 calc_sectors(conf, mddev->dev_sectors); in setup_conf()
3667 if (mddev->reshape_position == MaxSector) { in setup_conf()
3671 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { in setup_conf()
3675 conf->reshape_progress = mddev->reshape_position; in setup_conf()
3692 conf->thread = md_register_thread(raid10d, mddev, "raid10"); in setup_conf()
3696 conf->mddev = mddev; in setup_conf()
3716 blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) * in raid10_set_io_opt()
3720 static int raid10_run(struct mddev *mddev) in raid10_run() argument
3731 if (mddev_init_writes_pending(mddev) < 0) in raid10_run()
3734 if (mddev->private == NULL) { in raid10_run()
3735 conf = setup_conf(mddev); in raid10_run()
3738 mddev->private = conf; in raid10_run()
3740 conf = mddev->private; in raid10_run()
3744 if (mddev_is_clustered(conf->mddev)) { in raid10_run()
3747 fc = (mddev->layout >> 8) & 255; in raid10_run()
3748 fo = mddev->layout & (1<<16); in raid10_run()
3756 mddev->thread = conf->thread; in raid10_run()
3759 if (mddev->queue) { in raid10_run()
3760 blk_queue_max_discard_sectors(mddev->queue, in raid10_run()
3761 mddev->chunk_sectors); in raid10_run()
3762 blk_queue_max_write_same_sectors(mddev->queue, 0); in raid10_run()
3763 blk_queue_max_write_zeroes_sectors(mddev->queue, 0); in raid10_run()
3764 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); in raid10_run()
3768 rdev_for_each(rdev, mddev) { in raid10_run()
3789 if (!mddev->reshape_backwards) in raid10_run()
3796 if (mddev->gendisk) in raid10_run()
3797 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid10_run()
3807 if (mddev->queue) { in raid10_run()
3810 mddev->queue); in raid10_run()
3813 mddev->queue); in raid10_run()
3818 mdname(mddev)); in raid10_run()
3832 mddev->degraded = 0; in raid10_run()
3850 mddev->degraded++; in raid10_run()
3862 disk->recovery_disabled = mddev->recovery_disabled - 1; in raid10_run()
3865 if (mddev->recovery_cp != MaxSector) in raid10_run()
3867 mdname(mddev)); in raid10_run()
3869 mdname(mddev), conf->geo.raid_disks - mddev->degraded, in raid10_run()
3874 mddev->dev_sectors = conf->dev_sectors; in raid10_run()
3875 size = raid10_size(mddev, 0, 0); in raid10_run()
3876 md_set_array_sectors(mddev, size); in raid10_run()
3877 mddev->resync_max_sectors = size; in raid10_run()
3878 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); in raid10_run()
3880 if (md_integrity_register(mddev)) in raid10_run()
3898 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid10_run()
3899 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid10_run()
3900 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid10_run()
3901 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in raid10_run()
3902 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in raid10_run()
3904 if (!mddev->sync_thread) in raid10_run()
3911 md_unregister_thread(&mddev->thread); in raid10_run()
3916 mddev->private = NULL; in raid10_run()
3921 static void raid10_free(struct mddev *mddev, void *priv) in raid10_free() argument
3934 static void raid10_quiesce(struct mddev *mddev, int quiesce) in raid10_quiesce() argument
3936 struct r10conf *conf = mddev->private; in raid10_quiesce()
3944 static int raid10_resize(struct mddev *mddev, sector_t sectors) in raid10_resize() argument
3958 struct r10conf *conf = mddev->private; in raid10_resize()
3961 if (mddev->reshape_position != MaxSector) in raid10_resize()
3967 oldsize = raid10_size(mddev, 0, 0); in raid10_resize()
3968 size = raid10_size(mddev, sectors, 0); in raid10_resize()
3969 if (mddev->external_size && in raid10_resize()
3970 mddev->array_sectors > size) in raid10_resize()
3972 if (mddev->bitmap) { in raid10_resize()
3973 int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0); in raid10_resize()
3977 md_set_array_sectors(mddev, size); in raid10_resize()
3978 if (sectors > mddev->dev_sectors && in raid10_resize()
3979 mddev->recovery_cp > oldsize) { in raid10_resize()
3980 mddev->recovery_cp = oldsize; in raid10_resize()
3981 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid10_resize()
3984 mddev->dev_sectors = conf->dev_sectors; in raid10_resize()
3985 mddev->resync_max_sectors = size; in raid10_resize()
3989 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) in raid10_takeover_raid0() argument
3994 if (mddev->degraded > 0) { in raid10_takeover_raid0()
3996 mdname(mddev)); in raid10_takeover_raid0()
4002 mddev->new_level = 10; in raid10_takeover_raid0()
4004 mddev->new_layout = (1<<8) + 2; in raid10_takeover_raid0()
4005 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid10_takeover_raid0()
4006 mddev->delta_disks = mddev->raid_disks; in raid10_takeover_raid0()
4007 mddev->raid_disks *= 2; in raid10_takeover_raid0()
4009 mddev->recovery_cp = MaxSector; in raid10_takeover_raid0()
4010 mddev->dev_sectors = size; in raid10_takeover_raid0()
4012 conf = setup_conf(mddev); in raid10_takeover_raid0()
4014 rdev_for_each(rdev, mddev) in raid10_takeover_raid0()
4025 static void *raid10_takeover(struct mddev *mddev) in raid10_takeover() argument
4032 if (mddev->level == 0) { in raid10_takeover()
4034 raid0_conf = mddev->private; in raid10_takeover()
4037 mdname(mddev)); in raid10_takeover()
4040 return raid10_takeover_raid0(mddev, in raid10_takeover()
4047 static int raid10_check_reshape(struct mddev *mddev) in raid10_check_reshape() argument
4063 struct r10conf *conf = mddev->private; in raid10_check_reshape()
4069 if (setup_geo(&geo, mddev, geo_start) != conf->copies) in raid10_check_reshape()
4076 if (mddev->array_sectors & geo.chunk_mask) in raid10_check_reshape()
4085 if (mddev->delta_disks > 0) { in raid10_check_reshape()
4088 kcalloc(mddev->raid_disks + mddev->delta_disks, in raid10_check_reshape()
4154 static int raid10_start_reshape(struct mddev *mddev) in raid10_start_reshape() argument
4170 struct r10conf *conf = mddev->private; in raid10_start_reshape()
4175 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in raid10_start_reshape()
4178 if (setup_geo(&new, mddev, geo_start) != conf->copies) in raid10_start_reshape()
4186 rdev_for_each(rdev, mddev) { in raid10_start_reshape()
4193 if (!mddev->reshape_backwards) in raid10_start_reshape()
4206 if (spares < mddev->delta_disks) in raid10_start_reshape()
4220 setup_geo(&conf->geo, mddev, geo_start); in raid10_start_reshape()
4222 if (mddev->reshape_backwards) { in raid10_start_reshape()
4223 sector_t size = raid10_size(mddev, 0, 0); in raid10_start_reshape()
4224 if (size < mddev->array_sectors) { in raid10_start_reshape()
4227 mdname(mddev)); in raid10_start_reshape()
4230 mddev->resync_max_sectors = size; in raid10_start_reshape()
4237 if (mddev->delta_disks && mddev->bitmap) { in raid10_start_reshape()
4241 oldsize = raid10_size(mddev, 0, 0); in raid10_start_reshape()
4242 newsize = raid10_size(mddev, 0, conf->geo.raid_disks); in raid10_start_reshape()
4244 if (!mddev_is_clustered(mddev)) { in raid10_start_reshape()
4245 ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0); in raid10_start_reshape()
4252 rdev_for_each(rdev, mddev) { in raid10_start_reshape()
4267 ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0); in raid10_start_reshape()
4271 ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize); in raid10_start_reshape()
4273 md_bitmap_resize(mddev->bitmap, oldsize, 0, 0); in raid10_start_reshape()
4278 if (mddev->delta_disks > 0) { in raid10_start_reshape()
4279 rdev_for_each(rdev, mddev) in raid10_start_reshape()
4282 if (raid10_add_disk(mddev, rdev) == 0) { in raid10_start_reshape()
4290 sysfs_link_rdev(mddev, rdev); in raid10_start_reshape()
4303 mddev->degraded = calc_degraded(conf); in raid10_start_reshape()
4305 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4306 mddev->reshape_position = conf->reshape_progress; in raid10_start_reshape()
4307 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in raid10_start_reshape()
4309 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in raid10_start_reshape()
4310 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in raid10_start_reshape()
4311 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in raid10_start_reshape()
4312 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in raid10_start_reshape()
4313 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in raid10_start_reshape()
4315 mddev->sync_thread = md_register_thread(md_do_sync, mddev, in raid10_start_reshape()
4317 if (!mddev->sync_thread) { in raid10_start_reshape()
4322 md_wakeup_thread(mddev->sync_thread); in raid10_start_reshape()
4323 md_new_event(mddev); in raid10_start_reshape()
4327 mddev->recovery = 0; in raid10_start_reshape()
4330 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4331 rdev_for_each(rdev, mddev) in raid10_start_reshape()
4336 mddev->reshape_position = MaxSector; in raid10_start_reshape()
4372 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, in reshape_request() argument
4412 struct r10conf *conf = mddev->private; in reshape_request()
4427 if (mddev->reshape_backwards && in reshape_request()
4428 conf->reshape_progress < raid10_size(mddev, 0, 0)) { in reshape_request()
4429 sector_nr = (raid10_size(mddev, 0, 0) in reshape_request()
4431 } else if (!mddev->reshape_backwards && in reshape_request()
4435 mddev->curr_resync_completed = sector_nr; in reshape_request()
4436 sysfs_notify_dirent_safe(mddev->sysfs_completed); in reshape_request()
4446 if (mddev->reshape_backwards) { in reshape_request()
4496 mddev->reshape_position = conf->reshape_progress; in reshape_request()
4497 if (mddev->reshape_backwards) in reshape_request()
4498 mddev->curr_resync_completed = raid10_size(mddev, 0, 0) in reshape_request()
4501 mddev->curr_resync_completed = conf->reshape_progress; in reshape_request()
4503 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in reshape_request()
4504 md_wakeup_thread(mddev->thread); in reshape_request()
4505 wait_event(mddev->sb_wait, mddev->sb_flags == 0 || in reshape_request()
4506 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); in reshape_request()
4507 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in reshape_request()
4511 conf->reshape_safe = mddev->reshape_position; in reshape_request()
4522 r10_bio->mddev = mddev; in reshape_request()
4535 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in reshape_request()
4539 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); in reshape_request()
4558 if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) { in reshape_request()
4576 md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low, in reshape_request()
4646 if (mddev->reshape_backwards) in reshape_request()
4655 static int handle_reshape_read_error(struct mddev *mddev,
4657 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) in reshape_request_write() argument
4664 struct r10conf *conf = mddev->private; in reshape_request_write()
4668 if (handle_reshape_read_error(mddev, r10_bio) < 0) { in reshape_request_write()
4670 md_done_sync(mddev, r10_bio->sectors, 0); in reshape_request_write()
4706 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) in end_reshape()
4711 md_finish_reshape(conf->mddev); in end_reshape()
4717 if (conf->mddev->queue) in end_reshape()
4722 static void raid10_update_reshape_pos(struct mddev *mddev) in raid10_update_reshape_pos() argument
4724 struct r10conf *conf = mddev->private; in raid10_update_reshape_pos()
4727 md_cluster_ops->resync_info_get(mddev, &lo, &hi); in raid10_update_reshape_pos()
4728 if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo)) in raid10_update_reshape_pos()
4729 || mddev->reshape_position == MaxSector) in raid10_update_reshape_pos()
4730 conf->reshape_progress = mddev->reshape_position; in raid10_update_reshape_pos()
4735 static int handle_reshape_read_error(struct mddev *mddev, in handle_reshape_read_error() argument
4740 struct r10conf *conf = mddev->private; in handle_reshape_read_error()
4748 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in handle_reshape_read_error()
4784 rdev_dec_pending(rdev, mddev); in handle_reshape_read_error()
4799 &mddev->recovery); in handle_reshape_read_error()
4813 struct mddev *mddev = r10_bio->mddev; in end_reshape_write() local
4814 struct r10conf *conf = mddev->private; in end_reshape_write()
4830 md_error(mddev, rdev); in end_reshape_write()
4833 rdev_dec_pending(rdev, mddev); in end_reshape_write()
4841 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); in end_reshape_request()
4846 static void raid10_finish_reshape(struct mddev *mddev) in raid10_finish_reshape() argument
4848 struct r10conf *conf = mddev->private; in raid10_finish_reshape()
4850 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in raid10_finish_reshape()
4853 if (mddev->delta_disks > 0) { in raid10_finish_reshape()
4854 if (mddev->recovery_cp > mddev->resync_max_sectors) { in raid10_finish_reshape()
4855 mddev->recovery_cp = mddev->resync_max_sectors; in raid10_finish_reshape()
4856 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid10_finish_reshape()
4858 mddev->resync_max_sectors = mddev->array_sectors; in raid10_finish_reshape()
4863 d < conf->geo.raid_disks - mddev->delta_disks; in raid10_finish_reshape()
4874 mddev->layout = mddev->new_layout; in raid10_finish_reshape()
4875 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; in raid10_finish_reshape()
4876 mddev->reshape_position = MaxSector; in raid10_finish_reshape()
4877 mddev->delta_disks = 0; in raid10_finish_reshape()
4878 mddev->reshape_backwards = 0; in raid10_finish_reshape()