Lines Matching refs:mddev (drivers/md/raid1.c; a sketch of the shared back-pointer pattern follows the listing)

84 struct mddev *mddev = rdev->mddev; in wait_for_serialization() local
89 if (WARN_ON(!mddev->serial_info_pool)) in wait_for_serialization()
91 si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO); in wait_for_serialization()
101 struct mddev *mddev = rdev->mddev; in remove_serial() local
110 mempool_free(si, mddev->serial_info_pool); in remove_serial()
179 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) in r1buf_pool_alloc()
251 struct r1conf *conf = r1_bio->mddev->private; in free_r1bio()
259 struct r1conf *conf = r1_bio->mddev->private; in put_buf()
266 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); in put_buf()
277 struct mddev *mddev = r1_bio->mddev; in reschedule_retry() local
278 struct r1conf *conf = mddev->private; in reschedule_retry()
288 md_wakeup_thread(mddev->thread); in reschedule_retry()
309 struct r1conf *conf = r1_bio->mddev->private; in raid_end_bio_io()
334 struct r1conf *conf = r1_bio->mddev->private; in update_head_pos()
346 struct r1conf *conf = r1_bio->mddev->private; in find_bio_disk()
363 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_read_request()
385 if (r1_bio->mddev->degraded == conf->raid_disks || in raid1_end_read_request()
386 (r1_bio->mddev->degraded == conf->raid_disks-1 && in raid1_end_read_request()
394 rdev_dec_pending(rdev, conf->mddev); in raid1_end_read_request()
401 mdname(conf->mddev), in raid1_end_read_request()
419 md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, in close_write()
423 md_write_end(r1_bio->mddev); in close_write()
446 struct r1conf *conf = r1_bio->mddev->private; in raid1_end_write_request()
463 conf->mddev->recovery); in raid1_end_write_request()
469 md_error(r1_bio->mddev, rdev); in raid1_end_write_request()
546 } else if (rdev->mddev->serialize_policy) in raid1_end_write_request()
549 rdev_dec_pending(rdev, conf->mddev); in raid1_end_write_request()
626 if ((conf->mddev->recovery_cp < this_sector + sectors) || in read_balance()
627 (mddev_is_clustered(conf->mddev) && in read_balance()
628 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, in read_balance()
792 md_bitmap_unplug(conf->mddev->bitmap); in flush_bio_list()
903 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery), in raise_barrier()
906 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in raise_barrier()
1082 raid1_log(conf->mddev, "wait freeze"); in freeze_array()
1107 behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev); in alloc_behind_master_bio()
1157 struct mddev *mddev = plug->cb.data; in raid1_unplug() local
1158 struct r1conf *conf = mddev->private; in raid1_unplug()
1167 md_wakeup_thread(mddev->thread); in raid1_unplug()
1178 static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio) in init_r1bio() argument
1183 r1_bio->mddev = mddev; in init_r1bio()
1188 alloc_r1bio(struct mddev *mddev, struct bio *bio) in alloc_r1bio() argument
1190 struct r1conf *conf = mddev->private; in alloc_r1bio()
1196 init_r1bio(r1_bio, mddev, bio); in alloc_r1bio()
1200 static void raid1_read_request(struct mddev *mddev, struct bio *bio, in raid1_read_request() argument
1203 struct r1conf *conf = mddev->private; in raid1_read_request()
1206 struct bitmap *bitmap = mddev->bitmap; in raid1_read_request()
1240 r1_bio = alloc_r1bio(mddev, bio); in raid1_read_request()
1242 init_r1bio(r1_bio, mddev, bio); in raid1_read_request()
1255 mdname(mddev), in raid1_read_request()
1266 mdname(mddev), in raid1_read_request()
1276 raid1_log(mddev, "wait behind writes"); in raid1_read_request()
1293 read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); in raid1_read_request()
1307 if (mddev->gendisk) in raid1_read_request()
1309 disk_devt(mddev->gendisk), r1_bio->sector); in raid1_read_request()
1314 static void raid1_write_request(struct mddev *mddev, struct bio *bio, in raid1_write_request() argument
1317 struct r1conf *conf = mddev->private; in raid1_write_request()
1320 struct bitmap *bitmap = mddev->bitmap; in raid1_write_request()
1328 if (mddev_is_clustered(mddev) && in raid1_write_request()
1329 md_cluster_ops->area_resyncing(mddev, WRITE, in raid1_write_request()
1336 if (!md_cluster_ops->area_resyncing(mddev, WRITE, in raid1_write_request()
1352 r1_bio = alloc_r1bio(mddev, bio); in raid1_write_request()
1356 md_wakeup_thread(mddev->thread); in raid1_write_request()
1357 raid1_log(mddev, "wait queued"); in raid1_write_request()
1414 rdev_dec_pending(rdev, mddev); in raid1_write_request()
1443 rdev_dec_pending(conf->mirrors[j].rdev, mddev); in raid1_write_request()
1446 raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk); in raid1_write_request()
1447 md_wait_for_blocked_rdev(blocked_rdev, mddev); in raid1_write_request()
1480 < mddev->bitmap_info.max_write_behind) && in raid1_write_request()
1492 GFP_NOIO, &mddev->bio_set); in raid1_write_request()
1494 mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); in raid1_write_request()
1501 } else if (mddev->serialize_policy) in raid1_write_request()
1513 conf->raid_disks - mddev->degraded > 1) in raid1_write_request()
1519 if (mddev->gendisk) in raid1_write_request()
1521 mbio, disk_devt(mddev->gendisk), in raid1_write_request()
1526 cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug)); in raid1_write_request()
1539 md_wakeup_thread(mddev->thread); in raid1_write_request()
1549 static bool raid1_make_request(struct mddev *mddev, struct bio *bio) in raid1_make_request() argument
1554 && md_flush_request(mddev, bio)) in raid1_make_request()
1568 raid1_read_request(mddev, bio, sectors, NULL); in raid1_make_request()
1570 if (!md_write_start(mddev,bio)) in raid1_make_request()
1572 raid1_write_request(mddev, bio, sectors); in raid1_make_request()
1577 static void raid1_status(struct seq_file *seq, struct mddev *mddev) in raid1_status() argument
1579 struct r1conf *conf = mddev->private; in raid1_status()
1583 conf->raid_disks - mddev->degraded); in raid1_status()
1594 static void raid1_error(struct mddev *mddev, struct md_rdev *rdev) in raid1_error() argument
1597 struct r1conf *conf = mddev->private; in raid1_error()
1607 if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev in raid1_error()
1608 && (conf->raid_disks - mddev->degraded) == 1) { in raid1_error()
1615 conf->recovery_disabled = mddev->recovery_disabled; in raid1_error()
1621 mddev->degraded++; in raid1_error()
1627 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in raid1_error()
1628 set_mask_bits(&mddev->sb_flags, 0, in raid1_error()
1632 mdname(mddev), bdevname(rdev->bdev, b), in raid1_error()
1633 mdname(mddev), conf->raid_disks - mddev->degraded); in raid1_error()
1645 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, in print_conf()
1673 static int raid1_spare_active(struct mddev *mddev) in raid1_spare_active() argument
1676 struct r1conf *conf = mddev->private; in raid1_spare_active()
1718 mddev->degraded -= count; in raid1_spare_active()
1725 static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) in raid1_add_disk() argument
1727 struct r1conf *conf = mddev->private; in raid1_add_disk()
1734 if (mddev->recovery_disabled == conf->recovery_disabled) in raid1_add_disk()
1737 if (md_integrity_add_rdev(rdev, mddev)) in raid1_add_disk()
1756 if (mddev->gendisk) in raid1_add_disk()
1757 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid1_add_disk()
1783 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) in raid1_add_disk()
1784 blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); in raid1_add_disk()
1789 static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) in raid1_remove_disk() argument
1791 struct r1conf *conf = mddev->private; in raid1_remove_disk()
1810 mddev->recovery_disabled != conf->recovery_disabled && in raid1_remove_disk()
1811 mddev->degraded < conf->raid_disks) { in raid1_remove_disk()
1851 err = md_integrity_register(mddev); in raid1_remove_disk()
1877 static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio) in abort_sync_write() argument
1885 md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); in abort_sync_write()
1894 struct mddev *mddev = r1_bio->mddev; in put_sync_write_buf() local
1902 md_done_sync(mddev, s, uptodate); in put_sync_write_buf()
1911 struct mddev *mddev = r1_bio->mddev; in end_sync_write() local
1912 struct r1conf *conf = mddev->private; in end_sync_write()
1918 abort_sync_write(mddev, r1_bio); in end_sync_write()
1922 mddev->recovery); in end_sync_write()
1947 rdev->mddev->recovery); in r1_sync_page_io()
1951 md_error(rdev->mddev, rdev); in r1_sync_page_io()
1968 struct mddev *mddev = r1_bio->mddev; in fix_sync_read_error() local
1969 struct r1conf *conf = mddev->private; in fix_sync_read_error()
1981 md_error(mddev, rdev); in fix_sync_read_error()
2025 mdname(mddev), bio_devname(bio, b), in fix_sync_read_error()
2036 mddev->recovery_disabled; in fix_sync_read_error()
2037 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in fix_sync_read_error()
2038 md_done_sync(mddev, r1_bio->sectors, 0); in fix_sync_read_error()
2062 rdev_dec_pending(rdev, mddev); in fix_sync_read_error()
2096 struct mddev *mddev = r1_bio->mddev; in process_checks() local
2097 struct r1conf *conf = mddev->private; in process_checks()
2128 rdev_dec_pending(conf->mirrors[primary].rdev, mddev); in process_checks()
2161 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); in process_checks()
2162 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) in process_checks()
2166 rdev_dec_pending(conf->mirrors[i].rdev, mddev); in process_checks()
2174 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) in sync_request_write() argument
2176 struct r1conf *conf = mddev->private; in sync_request_write()
2186 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in sync_request_write()
2198 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) in sync_request_write()
2201 abort_sync_write(mddev, r1_bio); in sync_request_write()
2230 struct mddev *mddev = conf->mddev; in fix_read_error() local
2258 rdev_dec_pending(rdev, mddev); in fix_read_error()
2272 md_error(mddev, rdev); in fix_read_error()
2289 rdev_dec_pending(rdev, mddev); in fix_read_error()
2309 mdname(mddev), s, in fix_read_error()
2314 rdev_dec_pending(rdev, mddev); in fix_read_error()
2325 struct mddev *mddev = r1_bio->mddev; in narrow_write_error() local
2326 struct r1conf *conf = mddev->private; in narrow_write_error()
2365 &mddev->bio_set); in narrow_write_error()
2368 &mddev->bio_set); in narrow_write_error()
2409 md_error(conf->mddev, rdev); in handle_sync_write_finished()
2413 md_done_sync(conf->mddev, s, 1); in handle_sync_write_finished()
2427 rdev_dec_pending(rdev, conf->mddev); in handle_write_finished()
2435 md_error(conf->mddev, in handle_write_finished()
2441 conf->mddev); in handle_write_finished()
2454 md_wakeup_thread(conf->mddev->thread); in handle_write_finished()
2464 struct mddev *mddev = conf->mddev; in handle_read_error() local
2483 if (mddev->ro == 0 in handle_read_error()
2489 } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) { in handle_read_error()
2490 md_error(mddev, rdev); in handle_read_error()
2495 rdev_dec_pending(rdev, conf->mddev); in handle_read_error()
2501 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio); in handle_read_error()
2506 struct mddev *mddev = thread->mddev; in raid1d() local
2509 struct r1conf *conf = mddev->private; in raid1d()
2514 md_check_recovery(mddev); in raid1d()
2517 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { in raid1d()
2520 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) in raid1d()
2529 if (mddev->degraded) in raid1d()
2553 mddev = r1_bio->mddev; in raid1d()
2554 conf = mddev->private; in raid1d()
2560 sync_request_write(mddev, r1_bio); in raid1d()
2570 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING)) in raid1d()
2571 md_check_recovery(mddev); in raid1d()
2614 static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, in raid1_sync_request() argument
2617 struct r1conf *conf = mddev->private; in raid1_sync_request()
2636 max_sector = mddev->dev_sectors; in raid1_sync_request()
2643 if (mddev->curr_resync < max_sector) /* aborted */ in raid1_sync_request()
2644 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync, in raid1_sync_request()
2649 md_bitmap_close_sync(mddev->bitmap); in raid1_sync_request()
2652 if (mddev_is_clustered(mddev)) { in raid1_sync_request()
2659 if (mddev->bitmap == NULL && in raid1_sync_request()
2660 mddev->recovery_cp == MaxSector && in raid1_sync_request()
2661 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in raid1_sync_request()
2669 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && in raid1_sync_request()
2670 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in raid1_sync_request()
2687 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, in raid1_sync_request()
2688 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); in raid1_sync_request()
2706 r1_bio->mddev = mddev; in raid1_sync_request()
2754 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && in raid1_sync_request()
2755 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { in raid1_sync_request()
2792 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in raid1_sync_request()
2802 conf->recovery_disabled = mddev->recovery_disabled; in raid1_sync_request()
2803 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in raid1_sync_request()
2815 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0) in raid1_sync_request()
2832 if (max_sector > mddev->resync_max) in raid1_sync_request()
2833 max_sector = mddev->resync_max; /* Don't do IO beyond here */ in raid1_sync_request()
2846 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, in raid1_sync_request()
2849 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in raid1_sync_request()
2877 if (mddev_is_clustered(mddev) && in raid1_sync_request()
2879 conf->cluster_sync_low = mddev->curr_resync_completed; in raid1_sync_request()
2882 md_cluster_ops->resync_info_update(mddev, in raid1_sync_request()
2890 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in raid1_sync_request()
2913 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks) in raid1_size() argument
2918 return mddev->dev_sectors; in raid1_size()
2921 static struct r1conf *setup_conf(struct mddev *mddev) in setup_conf() argument
2954 mddev->raid_disks, 2), in setup_conf()
2966 conf->poolinfo->raid_disks = mddev->raid_disks * 2; in setup_conf()
2976 conf->poolinfo->mddev = mddev; in setup_conf()
2980 rdev_for_each(rdev, mddev) { in setup_conf()
2982 if (disk_idx >= mddev->raid_disks in setup_conf()
2986 disk = conf->mirrors + mddev->raid_disks + disk_idx; in setup_conf()
2996 conf->raid_disks = mddev->raid_disks; in setup_conf()
2997 conf->mddev = mddev; in setup_conf()
3006 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
3038 conf->thread = md_register_thread(raid1d, mddev, "raid1"); in setup_conf()
3060 static void raid1_free(struct mddev *mddev, void *priv);
3061 static int raid1_run(struct mddev *mddev) in raid1_run() argument
3069 if (mddev->level != 1) { in raid1_run()
3071 mdname(mddev), mddev->level); in raid1_run()
3074 if (mddev->reshape_position != MaxSector) { in raid1_run()
3076 mdname(mddev)); in raid1_run()
3079 if (mddev_init_writes_pending(mddev) < 0) in raid1_run()
3086 if (mddev->private == NULL) in raid1_run()
3087 conf = setup_conf(mddev); in raid1_run()
3089 conf = mddev->private; in raid1_run()
3094 if (mddev->queue) { in raid1_run()
3095 blk_queue_max_write_same_sectors(mddev->queue, 0); in raid1_run()
3096 blk_queue_max_write_zeroes_sectors(mddev->queue, 0); in raid1_run()
3099 rdev_for_each(rdev, mddev) { in raid1_run()
3100 if (!mddev->gendisk) in raid1_run()
3102 disk_stack_limits(mddev->gendisk, rdev->bdev, in raid1_run()
3108 mddev->degraded = 0; in raid1_run()
3113 mddev->degraded++; in raid1_run()
3117 if (conf->raid_disks - mddev->degraded < 1) { in raid1_run()
3122 if (conf->raid_disks - mddev->degraded == 1) in raid1_run()
3123 mddev->recovery_cp = MaxSector; in raid1_run()
3125 if (mddev->recovery_cp != MaxSector) in raid1_run()
3127 mdname(mddev)); in raid1_run()
3129 mdname(mddev), mddev->raid_disks - mddev->degraded, in raid1_run()
3130 mddev->raid_disks); in raid1_run()
3135 mddev->thread = conf->thread; in raid1_run()
3137 mddev->private = conf; in raid1_run()
3138 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); in raid1_run()
3140 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); in raid1_run()
3142 if (mddev->queue) { in raid1_run()
3145 mddev->queue); in raid1_run()
3148 mddev->queue); in raid1_run()
3151 ret = md_integrity_register(mddev); in raid1_run()
3153 md_unregister_thread(&mddev->thread); in raid1_run()
3159 raid1_free(mddev, conf); in raid1_run()
3163 static void raid1_free(struct mddev *mddev, void *priv) in raid1_free() argument
3179 static int raid1_resize(struct mddev *mddev, sector_t sectors) in raid1_resize() argument
3188 sector_t newsize = raid1_size(mddev, sectors, 0); in raid1_resize()
3189 if (mddev->external_size && in raid1_resize()
3190 mddev->array_sectors > newsize) in raid1_resize()
3192 if (mddev->bitmap) { in raid1_resize()
3193 int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0); in raid1_resize()
3197 md_set_array_sectors(mddev, newsize); in raid1_resize()
3198 if (sectors > mddev->dev_sectors && in raid1_resize()
3199 mddev->recovery_cp > mddev->dev_sectors) { in raid1_resize()
3200 mddev->recovery_cp = mddev->dev_sectors; in raid1_resize()
3201 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid1_resize()
3203 mddev->dev_sectors = sectors; in raid1_resize()
3204 mddev->resync_max_sectors = sectors; in raid1_resize()
3208 static int raid1_reshape(struct mddev *mddev) in raid1_reshape() argument
3224 struct r1conf *conf = mddev->private; in raid1_reshape()
3234 if (mddev->chunk_sectors != mddev->new_chunk_sectors || in raid1_reshape()
3235 mddev->layout != mddev->new_layout || in raid1_reshape()
3236 mddev->level != mddev->new_level) { in raid1_reshape()
3237 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid1_reshape()
3238 mddev->new_layout = mddev->layout; in raid1_reshape()
3239 mddev->new_level = mddev->level; in raid1_reshape()
3243 if (!mddev_is_clustered(mddev)) in raid1_reshape()
3244 md_allow_write(mddev); in raid1_reshape()
3246 raid_disks = mddev->raid_disks + mddev->delta_disks; in raid1_reshape()
3260 newpoolinfo->mddev = mddev; in raid1_reshape()
3287 sysfs_unlink_rdev(mddev, rdev); in raid1_reshape()
3289 sysfs_unlink_rdev(mddev, rdev); in raid1_reshape()
3290 if (sysfs_link_rdev(mddev, rdev)) in raid1_reshape()
3292 mdname(mddev), rdev->raid_disk); in raid1_reshape()
3303 mddev->degraded += (raid_disks - conf->raid_disks); in raid1_reshape()
3305 conf->raid_disks = mddev->raid_disks = raid_disks; in raid1_reshape()
3306 mddev->delta_disks = 0; in raid1_reshape()
3310 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in raid1_reshape()
3311 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in raid1_reshape()
3312 md_wakeup_thread(mddev->thread); in raid1_reshape()
3318 static void raid1_quiesce(struct mddev *mddev, int quiesce) in raid1_quiesce() argument
3320 struct r1conf *conf = mddev->private; in raid1_quiesce()
3328 static void *raid1_takeover(struct mddev *mddev) in raid1_takeover() argument
3333 if (mddev->level == 5 && mddev->raid_disks == 2) { in raid1_takeover()
3335 mddev->new_level = 1; in raid1_takeover()
3336 mddev->new_layout = 0; in raid1_takeover()
3337 mddev->new_chunk_sectors = 0; in raid1_takeover()
3338 conf = setup_conf(mddev); in raid1_takeover()
3342 mddev_clear_unsupported_flags(mddev, in raid1_takeover()
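
Nearly every match above is one of three back-pointer dereferences: rdev->mddev (a member device finding its array), r1_bio->mddev (an in-flight request finding its array), or the mddev->private / conf->mddev pair (the array and its RAID1 configuration pointing at each other, wired up in setup_conf() and raid1_run()). The sketch below restates that pointer web in isolation. It is a minimal illustration, not the kernel's definitions: the structs are trimmed stand-ins for those in drivers/md/md.h and drivers/md/raid1.h, and the helper names conf_of_bio() and conf_of_rdev() are hypothetical.

    /*
     * Trimmed stand-in structs: only the back-pointers that the matched
     * lines dereference are kept; the real definitions carry far more.
     */
    struct mddev;

    struct md_rdev {
            struct mddev *mddev;    /* owning array: rdev->mddev */
    };

    struct r1bio {
            struct mddev *mddev;    /* set in init_r1bio(): r1_bio->mddev = mddev */
    };

    struct r1conf {
            struct mddev *mddev;    /* set in setup_conf(): conf->mddev = mddev */
            int raid_disks;
    };

    struct mddev {
            void *private;          /* the personality's conf, here a struct r1conf */
            int degraded;
    };

    /*
     * Hypothetical helpers naming the idiom behind most matches:
     * completion paths are handed only an r1bio or an md_rdev, and climb
     * back to the r1conf through the mddev.
     */
    static struct r1conf *conf_of_bio(struct r1bio *r1_bio)
    {
            return r1_bio->mddev->private;   /* cf. free_r1bio(), put_buf() */
    }

    static struct r1conf *conf_of_rdev(struct md_rdev *rdev)
    {
            return rdev->mddev->private;     /* cf. wait_for_serialization() */
    }

The two-way link is what makes the completion paths cheap: setup_conf() stores conf->mddev = mddev and raid1_run() stores mddev->private = conf, so any object that carries an mddev pointer reaches both the array-level state (degraded, recovery, sb_flags) and the RAID1-level state (mirrors, barriers) without any extra lookup.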