Lines Matching refs:mddev
90 static int remove_and_add_spares(struct mddev *mddev,
92 static void mddev_detach(struct mddev *mddev);
117 static inline int speed_min(struct mddev *mddev) in speed_min() argument
119 return mddev->sync_speed_min ? in speed_min()
120 mddev->sync_speed_min : sysctl_speed_limit_min; in speed_min()
123 static inline int speed_max(struct mddev *mddev) in speed_max() argument
125 return mddev->sync_speed_max ? in speed_max()
126 mddev->sync_speed_max : sysctl_speed_limit_max; in speed_max()
138 static void rdevs_uninit_serial(struct mddev *mddev) in rdevs_uninit_serial() argument
142 rdev_for_each(rdev, mddev) in rdevs_uninit_serial()
174 static int rdevs_init_serial(struct mddev *mddev) in rdevs_init_serial() argument
179 rdev_for_each(rdev, mddev) { in rdevs_init_serial()
186 if (ret && !mddev->serial_info_pool) in rdevs_init_serial()
187 rdevs_uninit_serial(mddev); in rdevs_init_serial()
199 return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 && in rdev_need_serial()
209 void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, in mddev_create_serial_pool() argument
219 mddev_suspend(mddev); in mddev_create_serial_pool()
222 ret = rdevs_init_serial(mddev); in mddev_create_serial_pool()
228 if (mddev->serial_info_pool == NULL) { in mddev_create_serial_pool()
233 mddev->serial_info_pool = in mddev_create_serial_pool()
236 if (!mddev->serial_info_pool) { in mddev_create_serial_pool()
237 rdevs_uninit_serial(mddev); in mddev_create_serial_pool()
244 mddev_resume(mddev); in mddev_create_serial_pool()
253 void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, in mddev_destroy_serial_pool() argument
259 if (mddev->serial_info_pool) { in mddev_destroy_serial_pool()
264 mddev_suspend(mddev); in mddev_destroy_serial_pool()
265 rdev_for_each(temp, mddev) { in mddev_destroy_serial_pool()
267 if (!mddev->serialize_policy || in mddev_destroy_serial_pool()
283 mempool_destroy(mddev->serial_info_pool); in mddev_destroy_serial_pool()
284 mddev->serial_info_pool = NULL; in mddev_destroy_serial_pool()
287 mddev_resume(mddev); in mddev_destroy_serial_pool()
344 struct mddev *mddev) in bio_alloc_mddev() argument
346 if (!mddev || !bioset_initialized(&mddev->bio_set)) in bio_alloc_mddev()
349 return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set); in bio_alloc_mddev()
353 static struct bio *md_bio_alloc_sync(struct mddev *mddev) in md_bio_alloc_sync() argument
355 if (!mddev || !bioset_initialized(&mddev->sync_set)) in md_bio_alloc_sync()
358 return bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set); in md_bio_alloc_sync()
373 void md_new_event(struct mddev *mddev) in md_new_event() argument
400 mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
403 _mddev = list_entry(_tmp, struct mddev, all_mddevs); \
416 static bool is_suspended(struct mddev *mddev, struct bio *bio) in is_suspended() argument
418 if (mddev->suspended) in is_suspended()
422 if (mddev->suspend_lo >= mddev->suspend_hi) in is_suspended()
424 if (bio->bi_iter.bi_sector >= mddev->suspend_hi) in is_suspended()
426 if (bio_end_sector(bio) < mddev->suspend_lo) in is_suspended()
431 void md_handle_request(struct mddev *mddev, struct bio *bio) in md_handle_request() argument
435 if (is_suspended(mddev, bio)) { in md_handle_request()
438 prepare_to_wait(&mddev->sb_wait, &__wait, in md_handle_request()
440 if (!is_suspended(mddev, bio)) in md_handle_request()
446 finish_wait(&mddev->sb_wait, &__wait); in md_handle_request()
448 atomic_inc(&mddev->active_io); in md_handle_request()
451 if (!mddev->pers->make_request(mddev, bio)) { in md_handle_request()
452 atomic_dec(&mddev->active_io); in md_handle_request()
453 wake_up(&mddev->sb_wait); in md_handle_request()
457 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) in md_handle_request()
458 wake_up(&mddev->sb_wait); in md_handle_request()
466 struct mddev *mddev = bio->bi_disk->private_data; in md_submit_bio() local
469 if (mddev == NULL || mddev->pers == NULL) { in md_submit_bio()
474 if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) { in md_submit_bio()
481 if (mddev->ro == 1 && unlikely(rw == WRITE)) { in md_submit_bio()
496 md_handle_request(mddev, bio); in md_submit_bio()
499 part_stat_inc(&mddev->gendisk->part0, ios[sgrp]); in md_submit_bio()
500 part_stat_add(&mddev->gendisk->part0, sectors[sgrp], sectors); in md_submit_bio()
512 void mddev_suspend(struct mddev *mddev) in mddev_suspend() argument
514 WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk); in mddev_suspend()
515 lockdep_assert_held(&mddev->reconfig_mutex); in mddev_suspend()
516 if (mddev->suspended++) in mddev_suspend()
519 wake_up(&mddev->sb_wait); in mddev_suspend()
520 set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags); in mddev_suspend()
522 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); in mddev_suspend()
523 mddev->pers->quiesce(mddev, 1); in mddev_suspend()
524 clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags); in mddev_suspend()
525 wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags)); in mddev_suspend()
527 del_timer_sync(&mddev->safemode_timer); in mddev_suspend()
529 mddev->noio_flag = memalloc_noio_save(); in mddev_suspend()
533 void mddev_resume(struct mddev *mddev) in mddev_resume() argument
536 memalloc_noio_restore(mddev->noio_flag); in mddev_resume()
537 lockdep_assert_held(&mddev->reconfig_mutex); in mddev_resume()
538 if (--mddev->suspended) in mddev_resume()
540 wake_up(&mddev->sb_wait); in mddev_resume()
541 mddev->pers->quiesce(mddev, 0); in mddev_resume()
543 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in mddev_resume()
544 md_wakeup_thread(mddev->thread); in mddev_resume()
545 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ in mddev_resume()
556 struct mddev *mddev = rdev->mddev; in md_end_flush() local
558 rdev_dec_pending(rdev, mddev); in md_end_flush()
560 if (atomic_dec_and_test(&mddev->flush_pending)) { in md_end_flush()
562 queue_work(md_wq, &mddev->flush_work); in md_end_flush()
571 struct mddev *mddev = container_of(ws, struct mddev, flush_work); in submit_flushes() local
574 mddev->start_flush = ktime_get_boottime(); in submit_flushes()
575 INIT_WORK(&mddev->flush_work, md_submit_flush_data); in submit_flushes()
576 atomic_set(&mddev->flush_pending, 1); in submit_flushes()
578 rdev_for_each_rcu(rdev, mddev) in submit_flushes()
589 bi = bio_alloc_mddev(GFP_NOIO, 0, mddev); in submit_flushes()
594 atomic_inc(&mddev->flush_pending); in submit_flushes()
597 rdev_dec_pending(rdev, mddev); in submit_flushes()
600 if (atomic_dec_and_test(&mddev->flush_pending)) in submit_flushes()
601 queue_work(md_wq, &mddev->flush_work); in submit_flushes()
606 struct mddev *mddev = container_of(ws, struct mddev, flush_work); in md_submit_flush_data() local
607 struct bio *bio = mddev->flush_bio; in md_submit_flush_data()
615 spin_lock_irq(&mddev->lock); in md_submit_flush_data()
616 mddev->last_flush = mddev->start_flush; in md_submit_flush_data()
617 mddev->flush_bio = NULL; in md_submit_flush_data()
618 spin_unlock_irq(&mddev->lock); in md_submit_flush_data()
619 wake_up(&mddev->sb_wait); in md_submit_flush_data()
626 md_handle_request(mddev, bio); in md_submit_flush_data()
636 bool md_flush_request(struct mddev *mddev, struct bio *bio) in md_flush_request() argument
639 spin_lock_irq(&mddev->lock); in md_flush_request()
640 wait_event_lock_irq(mddev->sb_wait, in md_flush_request()
641 !mddev->flush_bio || in md_flush_request()
642 ktime_after(mddev->last_flush, start), in md_flush_request()
643 mddev->lock); in md_flush_request()
644 if (!ktime_after(mddev->last_flush, start)) { in md_flush_request()
645 WARN_ON(mddev->flush_bio); in md_flush_request()
646 mddev->flush_bio = bio; in md_flush_request()
649 spin_unlock_irq(&mddev->lock); in md_flush_request()
652 INIT_WORK(&mddev->flush_work, submit_flushes); in md_flush_request()
653 queue_work(md_wq, &mddev->flush_work); in md_flush_request()
668 static inline struct mddev *mddev_get(struct mddev *mddev) in mddev_get() argument
670 atomic_inc(&mddev->active); in mddev_get()
671 return mddev; in mddev_get()
676 static void mddev_put(struct mddev *mddev) in mddev_put() argument
678 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) in mddev_put()
680 if (!mddev->raid_disks && list_empty(&mddev->disks) && in mddev_put()
681 mddev->ctime == 0 && !mddev->hold_active) { in mddev_put()
684 list_del_init(&mddev->all_mddevs); in mddev_put()
691 INIT_WORK(&mddev->del_work, mddev_delayed_delete); in mddev_put()
692 queue_work(md_misc_wq, &mddev->del_work); in mddev_put()
699 void mddev_init(struct mddev *mddev) in mddev_init() argument
701 kobject_init(&mddev->kobj, &md_ktype); in mddev_init()
702 mutex_init(&mddev->open_mutex); in mddev_init()
703 mutex_init(&mddev->reconfig_mutex); in mddev_init()
704 mutex_init(&mddev->bitmap_info.mutex); in mddev_init()
705 INIT_LIST_HEAD(&mddev->disks); in mddev_init()
706 INIT_LIST_HEAD(&mddev->all_mddevs); in mddev_init()
707 timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0); in mddev_init()
708 atomic_set(&mddev->active, 1); in mddev_init()
709 atomic_set(&mddev->openers, 0); in mddev_init()
710 atomic_set(&mddev->active_io, 0); in mddev_init()
711 spin_lock_init(&mddev->lock); in mddev_init()
712 atomic_set(&mddev->flush_pending, 0); in mddev_init()
713 init_waitqueue_head(&mddev->sb_wait); in mddev_init()
714 init_waitqueue_head(&mddev->recovery_wait); in mddev_init()
715 mddev->reshape_position = MaxSector; in mddev_init()
716 mddev->reshape_backwards = 0; in mddev_init()
717 mddev->last_sync_action = "none"; in mddev_init()
718 mddev->resync_min = 0; in mddev_init()
719 mddev->resync_max = MaxSector; in mddev_init()
720 mddev->level = LEVEL_NONE; in mddev_init()
724 static struct mddev *mddev_find_locked(dev_t unit) in mddev_find_locked()
726 struct mddev *mddev; in mddev_find_locked() local
728 list_for_each_entry(mddev, &all_mddevs, all_mddevs) in mddev_find_locked()
729 if (mddev->unit == unit) in mddev_find_locked()
730 return mddev; in mddev_find_locked()
735 static struct mddev *mddev_find(dev_t unit) in mddev_find()
737 struct mddev *mddev; in mddev_find() local
743 mddev = mddev_find_locked(unit); in mddev_find()
744 if (mddev) in mddev_find()
745 mddev_get(mddev); in mddev_find()
748 return mddev; in mddev_find()
751 static struct mddev *mddev_find_or_alloc(dev_t unit) in mddev_find_or_alloc()
753 struct mddev *mddev, *new = NULL; in mddev_find_or_alloc() local
762 mddev = mddev_find_locked(unit); in mddev_find_or_alloc()
763 if (mddev) { in mddev_find_or_alloc()
764 mddev_get(mddev); in mddev_find_or_alloc()
767 return mddev; in mddev_find_or_alloc()
822 void mddev_unlock(struct mddev *mddev) in mddev_unlock() argument
824 if (mddev->to_remove) { in mddev_unlock()
837 struct attribute_group *to_remove = mddev->to_remove; in mddev_unlock()
838 mddev->to_remove = NULL; in mddev_unlock()
839 mddev->sysfs_active = 1; in mddev_unlock()
840 mutex_unlock(&mddev->reconfig_mutex); in mddev_unlock()
842 if (mddev->kobj.sd) { in mddev_unlock()
844 sysfs_remove_group(&mddev->kobj, to_remove); in mddev_unlock()
845 if (mddev->pers == NULL || in mddev_unlock()
846 mddev->pers->sync_request == NULL) { in mddev_unlock()
847 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); in mddev_unlock()
848 if (mddev->sysfs_action) in mddev_unlock()
849 sysfs_put(mddev->sysfs_action); in mddev_unlock()
850 if (mddev->sysfs_completed) in mddev_unlock()
851 sysfs_put(mddev->sysfs_completed); in mddev_unlock()
852 if (mddev->sysfs_degraded) in mddev_unlock()
853 sysfs_put(mddev->sysfs_degraded); in mddev_unlock()
854 mddev->sysfs_action = NULL; in mddev_unlock()
855 mddev->sysfs_completed = NULL; in mddev_unlock()
856 mddev->sysfs_degraded = NULL; in mddev_unlock()
859 mddev->sysfs_active = 0; in mddev_unlock()
861 mutex_unlock(&mddev->reconfig_mutex); in mddev_unlock()
867 md_wakeup_thread(mddev->thread); in mddev_unlock()
868 wake_up(&mddev->sb_wait); in mddev_unlock()
873 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr) in md_find_rdev_nr_rcu() argument
877 rdev_for_each_rcu(rdev, mddev) in md_find_rdev_nr_rcu()
885 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev) in find_rdev() argument
889 rdev_for_each(rdev, mddev) in find_rdev()
896 struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev) in md_find_rdev_rcu() argument
900 rdev_for_each_rcu(rdev, mddev) in md_find_rdev_rcu()
955 struct mddev *mddev = rdev->mddev; in super_written() local
960 md_error(mddev, rdev); in super_written()
963 set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags); in super_written()
969 if (atomic_dec_and_test(&mddev->pending_writes)) in super_written()
970 wake_up(&mddev->sb_wait); in super_written()
971 rdev_dec_pending(rdev, mddev); in super_written()
975 void md_super_write(struct mddev *mddev, struct md_rdev *rdev, in md_super_write() argument
993 bio = md_bio_alloc_sync(mddev); in md_super_write()
1003 if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) && in md_super_write()
1009 atomic_inc(&mddev->pending_writes); in md_super_write()
1013 int md_super_wait(struct mddev *mddev) in md_super_wait() argument
1016 wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); in md_super_wait()
1017 if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags)) in md_super_wait()
1025 struct bio *bio = md_bio_alloc_sync(rdev->mddev); in sync_page_io()
1035 else if (rdev->mddev->reshape_position != MaxSector && in sync_page_io()
1036 (rdev->mddev->reshape_backwards == in sync_page_io()
1037 (sector >= rdev->mddev->reshape_position))) in sync_page_io()
1178 int (*validate_super)(struct mddev *mddev,
1180 void (*sync_super)(struct mddev *mddev,
1196 int md_check_no_bitmap(struct mddev *mddev) in md_check_no_bitmap() argument
1198 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) in md_check_no_bitmap()
1201 mdname(mddev), mddev->pers->name); in md_check_no_bitmap()
1318 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) in super_90_validate() argument
1330 if (mddev->raid_disks == 0) { in super_90_validate()
1331 mddev->major_version = 0; in super_90_validate()
1332 mddev->minor_version = sb->minor_version; in super_90_validate()
1333 mddev->patch_version = sb->patch_version; in super_90_validate()
1334 mddev->external = 0; in super_90_validate()
1335 mddev->chunk_sectors = sb->chunk_size >> 9; in super_90_validate()
1336 mddev->ctime = sb->ctime; in super_90_validate()
1337 mddev->utime = sb->utime; in super_90_validate()
1338 mddev->level = sb->level; in super_90_validate()
1339 mddev->clevel[0] = 0; in super_90_validate()
1340 mddev->layout = sb->layout; in super_90_validate()
1341 mddev->raid_disks = sb->raid_disks; in super_90_validate()
1342 mddev->dev_sectors = ((sector_t)sb->size) * 2; in super_90_validate()
1343 mddev->events = ev1; in super_90_validate()
1344 mddev->bitmap_info.offset = 0; in super_90_validate()
1345 mddev->bitmap_info.space = 0; in super_90_validate()
1347 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; in super_90_validate()
1348 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); in super_90_validate()
1349 mddev->reshape_backwards = 0; in super_90_validate()
1351 if (mddev->minor_version >= 91) { in super_90_validate()
1352 mddev->reshape_position = sb->reshape_position; in super_90_validate()
1353 mddev->delta_disks = sb->delta_disks; in super_90_validate()
1354 mddev->new_level = sb->new_level; in super_90_validate()
1355 mddev->new_layout = sb->new_layout; in super_90_validate()
1356 mddev->new_chunk_sectors = sb->new_chunk >> 9; in super_90_validate()
1357 if (mddev->delta_disks < 0) in super_90_validate()
1358 mddev->reshape_backwards = 1; in super_90_validate()
1360 mddev->reshape_position = MaxSector; in super_90_validate()
1361 mddev->delta_disks = 0; in super_90_validate()
1362 mddev->new_level = mddev->level; in super_90_validate()
1363 mddev->new_layout = mddev->layout; in super_90_validate()
1364 mddev->new_chunk_sectors = mddev->chunk_sectors; in super_90_validate()
1366 if (mddev->level == 0) in super_90_validate()
1367 mddev->layout = -1; in super_90_validate()
1370 mddev->recovery_cp = MaxSector; in super_90_validate()
1374 mddev->recovery_cp = sb->recovery_cp; in super_90_validate()
1376 mddev->recovery_cp = 0; in super_90_validate()
1379 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); in super_90_validate()
1380 memcpy(mddev->uuid+4, &sb->set_uuid1, 4); in super_90_validate()
1381 memcpy(mddev->uuid+8, &sb->set_uuid2, 4); in super_90_validate()
1382 memcpy(mddev->uuid+12,&sb->set_uuid3, 4); in super_90_validate()
1384 mddev->max_disks = MD_SB_DISKS; in super_90_validate()
1387 mddev->bitmap_info.file == NULL) { in super_90_validate()
1388 mddev->bitmap_info.offset = in super_90_validate()
1389 mddev->bitmap_info.default_offset; in super_90_validate()
1390 mddev->bitmap_info.space = in super_90_validate()
1391 mddev->bitmap_info.default_space; in super_90_validate()
1394 } else if (mddev->pers == NULL) { in super_90_validate()
1400 if (ev1 < mddev->events) in super_90_validate()
1402 } else if (mddev->bitmap) { in super_90_validate()
1406 if (ev1 < mddev->bitmap->events_cleared) in super_90_validate()
1408 if (ev1 < mddev->events) in super_90_validate()
1411 if (ev1 < mddev->events) in super_90_validate()
1416 if (mddev->level != LEVEL_MULTIPATH) { in super_90_validate()
1430 if (mddev->minor_version >= 91) { in super_90_validate()
1447 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) in super_90_sync() argument
1451 int next_spare = mddev->raid_disks; in super_90_sync()
1473 sb->major_version = mddev->major_version; in super_90_sync()
1474 sb->patch_version = mddev->patch_version; in super_90_sync()
1476 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); in super_90_sync()
1477 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); in super_90_sync()
1478 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); in super_90_sync()
1479 memcpy(&sb->set_uuid3, mddev->uuid+12,4); in super_90_sync()
1481 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); in super_90_sync()
1482 sb->level = mddev->level; in super_90_sync()
1483 sb->size = mddev->dev_sectors / 2; in super_90_sync()
1484 sb->raid_disks = mddev->raid_disks; in super_90_sync()
1485 sb->md_minor = mddev->md_minor; in super_90_sync()
1487 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); in super_90_sync()
1489 sb->events_hi = (mddev->events>>32); in super_90_sync()
1490 sb->events_lo = (u32)mddev->events; in super_90_sync()
1492 if (mddev->reshape_position == MaxSector) in super_90_sync()
1496 sb->reshape_position = mddev->reshape_position; in super_90_sync()
1497 sb->new_level = mddev->new_level; in super_90_sync()
1498 sb->delta_disks = mddev->delta_disks; in super_90_sync()
1499 sb->new_layout = mddev->new_layout; in super_90_sync()
1500 sb->new_chunk = mddev->new_chunk_sectors << 9; in super_90_sync()
1502 mddev->minor_version = sb->minor_version; in super_90_sync()
1503 if (mddev->in_sync) in super_90_sync()
1505 sb->recovery_cp = mddev->recovery_cp; in super_90_sync()
1506 sb->cp_events_hi = (mddev->events>>32); in super_90_sync()
1507 sb->cp_events_lo = (u32)mddev->events; in super_90_sync()
1508 if (mddev->recovery_cp == MaxSector) in super_90_sync()
1513 sb->layout = mddev->layout; in super_90_sync()
1514 sb->chunk_size = mddev->chunk_sectors << 9; in super_90_sync()
1516 if (mddev->bitmap && mddev->bitmap_info.file == NULL) in super_90_sync()
1520 rdev_for_each(rdev2, mddev) { in super_90_sync()
1568 for (i=0 ; i < mddev->raid_disks ; i++) { in super_90_sync()
1594 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_90_rdev_size_change()
1596 if (rdev->mddev->bitmap_info.offset) in super_90_rdev_size_change()
1604 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) in super_90_rdev_size_change()
1607 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, in super_90_rdev_size_change()
1609 } while (md_super_wait(rdev->mddev) < 0); in super_90_rdev_size_change()
1833 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) in super_1_validate() argument
1844 if (mddev->raid_disks == 0) { in super_1_validate()
1845 mddev->major_version = 1; in super_1_validate()
1846 mddev->patch_version = 0; in super_1_validate()
1847 mddev->external = 0; in super_1_validate()
1848 mddev->chunk_sectors = le32_to_cpu(sb->chunksize); in super_1_validate()
1849 mddev->ctime = le64_to_cpu(sb->ctime); in super_1_validate()
1850 mddev->utime = le64_to_cpu(sb->utime); in super_1_validate()
1851 mddev->level = le32_to_cpu(sb->level); in super_1_validate()
1852 mddev->clevel[0] = 0; in super_1_validate()
1853 mddev->layout = le32_to_cpu(sb->layout); in super_1_validate()
1854 mddev->raid_disks = le32_to_cpu(sb->raid_disks); in super_1_validate()
1855 mddev->dev_sectors = le64_to_cpu(sb->size); in super_1_validate()
1856 mddev->events = ev1; in super_1_validate()
1857 mddev->bitmap_info.offset = 0; in super_1_validate()
1858 mddev->bitmap_info.space = 0; in super_1_validate()
1862 mddev->bitmap_info.default_offset = 1024 >> 9; in super_1_validate()
1863 mddev->bitmap_info.default_space = (4096-1024) >> 9; in super_1_validate()
1864 mddev->reshape_backwards = 0; in super_1_validate()
1866 mddev->recovery_cp = le64_to_cpu(sb->resync_offset); in super_1_validate()
1867 memcpy(mddev->uuid, sb->set_uuid, 16); in super_1_validate()
1869 mddev->max_disks = (4096-256)/2; in super_1_validate()
1872 mddev->bitmap_info.file == NULL) { in super_1_validate()
1873 mddev->bitmap_info.offset = in super_1_validate()
1880 if (mddev->minor_version > 0) in super_1_validate()
1881 mddev->bitmap_info.space = 0; in super_1_validate()
1882 else if (mddev->bitmap_info.offset > 0) in super_1_validate()
1883 mddev->bitmap_info.space = in super_1_validate()
1884 8 - mddev->bitmap_info.offset; in super_1_validate()
1886 mddev->bitmap_info.space = in super_1_validate()
1887 -mddev->bitmap_info.offset; in super_1_validate()
1891 mddev->reshape_position = le64_to_cpu(sb->reshape_position); in super_1_validate()
1892 mddev->delta_disks = le32_to_cpu(sb->delta_disks); in super_1_validate()
1893 mddev->new_level = le32_to_cpu(sb->new_level); in super_1_validate()
1894 mddev->new_layout = le32_to_cpu(sb->new_layout); in super_1_validate()
1895 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk); in super_1_validate()
1896 if (mddev->delta_disks < 0 || in super_1_validate()
1897 (mddev->delta_disks == 0 && in super_1_validate()
1900 mddev->reshape_backwards = 1; in super_1_validate()
1902 mddev->reshape_position = MaxSector; in super_1_validate()
1903 mddev->delta_disks = 0; in super_1_validate()
1904 mddev->new_level = mddev->level; in super_1_validate()
1905 mddev->new_layout = mddev->layout; in super_1_validate()
1906 mddev->new_chunk_sectors = mddev->chunk_sectors; in super_1_validate()
1909 if (mddev->level == 0 && in super_1_validate()
1911 mddev->layout = -1; in super_1_validate()
1914 set_bit(MD_HAS_JOURNAL, &mddev->flags); in super_1_validate()
1925 set_bit(MD_HAS_PPL, &mddev->flags); in super_1_validate()
1927 } else if (mddev->pers == NULL) { in super_1_validate()
1935 if (ev1 < mddev->events) in super_1_validate()
1937 } else if (mddev->bitmap) { in super_1_validate()
1941 if (ev1 < mddev->bitmap->events_cleared) in super_1_validate()
1943 if (ev1 < mddev->events) in super_1_validate()
1946 if (ev1 < mddev->events) in super_1_validate()
1950 if (mddev->level != LEVEL_MULTIPATH) { in super_1_validate()
1988 &mddev->recovery)) in super_1_validate()
2006 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) in super_1_sync() argument
2020 sb->utime = cpu_to_le64((__u64)mddev->utime); in super_1_sync()
2021 sb->events = cpu_to_le64(mddev->events); in super_1_sync()
2022 if (mddev->in_sync) in super_1_sync()
2023 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); in super_1_sync()
2024 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags)) in super_1_sync()
2031 sb->raid_disks = cpu_to_le32(mddev->raid_disks); in super_1_sync()
2032 sb->size = cpu_to_le64(mddev->dev_sectors); in super_1_sync()
2033 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); in super_1_sync()
2034 sb->level = cpu_to_le32(mddev->level); in super_1_sync()
2035 sb->layout = cpu_to_le32(mddev->layout); in super_1_sync()
2048 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { in super_1_sync()
2049 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); in super_1_sync()
2059 if (rdev->saved_raid_disk >= 0 && mddev->bitmap) in super_1_sync()
2070 if (mddev->reshape_position != MaxSector) { in super_1_sync()
2072 sb->reshape_position = cpu_to_le64(mddev->reshape_position); in super_1_sync()
2073 sb->new_layout = cpu_to_le32(mddev->new_layout); in super_1_sync()
2074 sb->delta_disks = cpu_to_le32(mddev->delta_disks); in super_1_sync()
2075 sb->new_level = cpu_to_le32(mddev->new_level); in super_1_sync()
2076 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); in super_1_sync()
2077 if (mddev->delta_disks == 0 && in super_1_sync()
2078 mddev->reshape_backwards) in super_1_sync()
2089 if (mddev_is_clustered(mddev)) in super_1_sync()
2096 md_error(mddev, rdev); in super_1_sync()
2127 rdev_for_each(rdev2, mddev) in super_1_sync()
2144 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) in super_1_sync()
2147 if (test_bit(MD_HAS_PPL, &mddev->flags)) { in super_1_sync()
2148 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags)) in super_1_sync()
2157 rdev_for_each(rdev2, mddev) { in super_1_sync()
2197 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) in super_1_rdev_size_change()
2207 } else if (rdev->mddev->bitmap_info.offset) { in super_1_rdev_size_change()
2235 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, in super_1_rdev_size_change()
2237 } while (md_super_wait(rdev->mddev) < 0); in super_1_rdev_size_change()
2253 if (rdev->mddev->minor_version == 0) in super_1_allow_new_offset()
2264 bitmap = rdev->mddev->bitmap; in super_1_allow_new_offset()
2265 if (bitmap && !rdev->mddev->bitmap_info.file && in super_1_allow_new_offset()
2266 rdev->sb_start + rdev->mddev->bitmap_info.offset + in super_1_allow_new_offset()
2296 static void sync_super(struct mddev *mddev, struct md_rdev *rdev) in sync_super() argument
2298 if (mddev->sync_super) { in sync_super()
2299 mddev->sync_super(mddev, rdev); in sync_super()
2303 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); in sync_super()
2305 super_types[mddev->major_version].sync_super(mddev, rdev); in sync_super()
2308 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) in match_mddev_units()
2342 int md_integrity_register(struct mddev *mddev) in md_integrity_register() argument
2346 if (list_empty(&mddev->disks)) in md_integrity_register()
2348 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) in md_integrity_register()
2350 rdev_for_each(rdev, mddev) { in md_integrity_register()
2372 blk_integrity_register(mddev->gendisk, in md_integrity_register()
2375 pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); in md_integrity_register()
2376 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE)) { in md_integrity_register()
2378 mdname(mddev)); in md_integrity_register()
2389 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) in md_integrity_add_rdev() argument
2394 if (!mddev->gendisk) in md_integrity_add_rdev()
2397 bi_mddev = blk_get_integrity(mddev->gendisk); in md_integrity_add_rdev()
2402 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) { in md_integrity_add_rdev()
2404 mdname(mddev), bdevname(rdev->bdev, name)); in md_integrity_add_rdev()
2412 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) in bind_rdev_to_array() argument
2419 if (find_rdev(mddev, rdev->bdev->bd_dev)) in bind_rdev_to_array()
2423 mddev->pers) in bind_rdev_to_array()
2429 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) { in bind_rdev_to_array()
2430 if (mddev->pers) { in bind_rdev_to_array()
2435 if (mddev->level > 0) in bind_rdev_to_array()
2438 mddev->dev_sectors = rdev->sectors; in bind_rdev_to_array()
2448 if (mddev->pers) in bind_rdev_to_array()
2449 choice = mddev->raid_disks; in bind_rdev_to_array()
2450 while (md_find_rdev_nr_rcu(mddev, choice)) in bind_rdev_to_array()
2454 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { in bind_rdev_to_array()
2461 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { in bind_rdev_to_array()
2463 mdname(mddev), mddev->max_disks); in bind_rdev_to_array()
2469 rdev->mddev = mddev; in bind_rdev_to_array()
2472 if (mddev->raid_disks) in bind_rdev_to_array()
2473 mddev_create_serial_pool(mddev, rdev, false); in bind_rdev_to_array()
2475 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) in bind_rdev_to_array()
2487 list_add_rcu(&rdev->same_set, &mddev->disks); in bind_rdev_to_array()
2488 bd_link_disk_holder(rdev->bdev, mddev->gendisk); in bind_rdev_to_array()
2491 mddev->recovery_disabled++; in bind_rdev_to_array()
2497 b, mdname(mddev)); in bind_rdev_to_array()
2512 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); in unbind_rdev_from_array()
2515 mddev_destroy_serial_pool(rdev->mddev, rdev, false); in unbind_rdev_from_array()
2516 rdev->mddev = NULL; in unbind_rdev_from_array()
2586 static void export_array(struct mddev *mddev) in export_array() argument
2590 while (!list_empty(&mddev->disks)) { in export_array()
2591 rdev = list_first_entry(&mddev->disks, struct md_rdev, in export_array()
2595 mddev->raid_disks = 0; in export_array()
2596 mddev->major_version = 0; in export_array()
2599 static bool set_in_sync(struct mddev *mddev) in set_in_sync() argument
2601 lockdep_assert_held(&mddev->lock); in set_in_sync()
2602 if (!mddev->in_sync) { in set_in_sync()
2603 mddev->sync_checkers++; in set_in_sync()
2604 spin_unlock(&mddev->lock); in set_in_sync()
2605 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending); in set_in_sync()
2606 spin_lock(&mddev->lock); in set_in_sync()
2607 if (!mddev->in_sync && in set_in_sync()
2608 percpu_ref_is_zero(&mddev->writes_pending)) { in set_in_sync()
2609 mddev->in_sync = 1; in set_in_sync()
2615 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in set_in_sync()
2616 sysfs_notify_dirent_safe(mddev->sysfs_state); in set_in_sync()
2618 if (--mddev->sync_checkers == 0) in set_in_sync()
2619 percpu_ref_switch_to_percpu(&mddev->writes_pending); in set_in_sync()
2621 if (mddev->safemode == 1) in set_in_sync()
2622 mddev->safemode = 0; in set_in_sync()
2623 return mddev->in_sync; in set_in_sync()
2626 static void sync_sbs(struct mddev *mddev, int nospares) in sync_sbs() argument
2635 rdev_for_each(rdev, mddev) { in sync_sbs()
2636 if (rdev->sb_events == mddev->events || in sync_sbs()
2639 rdev->sb_events+1 == mddev->events)) { in sync_sbs()
2643 sync_super(mddev, rdev); in sync_sbs()
2649 static bool does_sb_need_changing(struct mddev *mddev) in does_sb_need_changing() argument
2656 rdev_for_each(iter, mddev) in does_sb_need_changing()
2668 rdev_for_each(rdev, mddev) { in does_sb_need_changing()
2680 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || in does_sb_need_changing()
2681 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || in does_sb_need_changing()
2682 (mddev->layout != le32_to_cpu(sb->layout)) || in does_sb_need_changing()
2683 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || in does_sb_need_changing()
2684 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) in does_sb_need_changing()
2690 void md_update_sb(struct mddev *mddev, int force_change) in md_update_sb() argument
2698 if (mddev->ro) { in md_update_sb()
2700 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_update_sb()
2705 if (mddev_is_clustered(mddev)) { in md_update_sb()
2706 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) in md_update_sb()
2708 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) in md_update_sb()
2710 ret = md_cluster_ops->metadata_update_start(mddev); in md_update_sb()
2712 if (!does_sb_need_changing(mddev)) { in md_update_sb()
2714 md_cluster_ops->metadata_update_cancel(mddev); in md_update_sb()
2715 bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), in md_update_sb()
2728 rdev_for_each(rdev, mddev) { in md_update_sb()
2730 mddev->delta_disks >= 0 && in md_update_sb()
2731 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && in md_update_sb()
2732 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) && in md_update_sb()
2733 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_update_sb()
2736 mddev->curr_resync_completed > rdev->recovery_offset) in md_update_sb()
2737 rdev->recovery_offset = mddev->curr_resync_completed; in md_update_sb()
2740 if (!mddev->persistent) { in md_update_sb()
2741 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_update_sb()
2742 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_update_sb()
2743 if (!mddev->external) { in md_update_sb()
2744 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_update_sb()
2745 rdev_for_each(rdev, mddev) { in md_update_sb()
2749 md_error(mddev, rdev); in md_update_sb()
2756 wake_up(&mddev->sb_wait); in md_update_sb()
2760 spin_lock(&mddev->lock); in md_update_sb()
2762 mddev->utime = ktime_get_real_seconds(); in md_update_sb()
2764 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) in md_update_sb()
2766 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) in md_update_sb()
2774 if (mddev->degraded) in md_update_sb()
2786 sync_req = mddev->in_sync; in md_update_sb()
2791 && (mddev->in_sync && mddev->recovery_cp == MaxSector) in md_update_sb()
2792 && mddev->can_decrease_events in md_update_sb()
2793 && mddev->events != 1) { in md_update_sb()
2794 mddev->events--; in md_update_sb()
2795 mddev->can_decrease_events = 0; in md_update_sb()
2798 mddev->events ++; in md_update_sb()
2799 mddev->can_decrease_events = nospares; in md_update_sb()
2807 WARN_ON(mddev->events == 0); in md_update_sb()
2809 rdev_for_each(rdev, mddev) { in md_update_sb()
2816 sync_sbs(mddev, nospares); in md_update_sb()
2817 spin_unlock(&mddev->lock); in md_update_sb()
2820 mdname(mddev), mddev->in_sync); in md_update_sb()
2822 if (mddev->queue) in md_update_sb()
2823 blk_add_trace_msg(mddev->queue, "md md_update_sb"); in md_update_sb()
2825 md_bitmap_update_sb(mddev->bitmap); in md_update_sb()
2826 rdev_for_each(rdev, mddev) { in md_update_sb()
2833 md_super_write(mddev,rdev, in md_update_sb()
2839 rdev->sb_events = mddev->events; in md_update_sb()
2841 md_super_write(mddev, rdev, in md_update_sb()
2852 if (mddev->level == LEVEL_MULTIPATH) in md_update_sb()
2856 if (md_super_wait(mddev) < 0) in md_update_sb()
2860 if (mddev_is_clustered(mddev) && ret == 0) in md_update_sb()
2861 md_cluster_ops->metadata_update_finish(mddev); in md_update_sb()
2863 if (mddev->in_sync != sync_req || in md_update_sb()
2864 !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), in md_update_sb()
2868 wake_up(&mddev->sb_wait); in md_update_sb()
2869 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in md_update_sb()
2870 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_update_sb()
2872 rdev_for_each(rdev, mddev) { in md_update_sb()
2886 struct mddev *mddev = rdev->mddev; in add_bound_rdev() local
2890 if (!mddev->pers->hot_remove_disk || add_journal) { in add_bound_rdev()
2895 super_types[mddev->major_version]. in add_bound_rdev()
2896 validate_super(mddev, rdev); in add_bound_rdev()
2898 mddev_suspend(mddev); in add_bound_rdev()
2899 err = mddev->pers->hot_add_disk(mddev, rdev); in add_bound_rdev()
2901 mddev_resume(mddev); in add_bound_rdev()
2909 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in add_bound_rdev()
2910 if (mddev->degraded) in add_bound_rdev()
2911 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in add_bound_rdev()
2912 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in add_bound_rdev()
2913 md_new_event(mddev); in add_bound_rdev()
2914 md_wakeup_thread(mddev->thread); in add_bound_rdev()
3004 struct mddev *mddev = rdev->mddev; in state_store() local
3008 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { in state_store()
3009 md_error(rdev->mddev, rdev); in state_store()
3015 if (rdev->mddev->pers) { in state_store()
3017 remove_and_add_spares(rdev->mddev, rdev); in state_store()
3023 if (mddev_is_clustered(mddev)) in state_store()
3024 err = md_cluster_ops->remove_disk(mddev, rdev); in state_store()
3028 if (mddev->pers) { in state_store()
3029 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in state_store()
3030 md_wakeup_thread(mddev->thread); in state_store()
3032 md_new_event(mddev); in state_store()
3037 mddev_create_serial_pool(rdev->mddev, rdev, false); in state_store()
3041 mddev_destroy_serial_pool(rdev->mddev, rdev, false); in state_store()
3055 md_error(rdev->mddev, rdev); in state_store()
3060 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in state_store()
3061 md_wakeup_thread(rdev->mddev->thread); in state_store()
3077 if (rdev->mddev->pers == NULL) { in state_store()
3098 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in state_store()
3099 md_wakeup_thread(rdev->mddev->thread); in state_store()
3112 if (rdev->mddev->pers) in state_store()
3120 if (rdev->mddev->pers) in state_store()
3127 if (!rdev->mddev->pers) in state_store()
3137 if (!mddev_is_clustered(rdev->mddev) || in state_store()
3144 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) { in state_store()
3148 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) { in state_store()
3153 md_update_sb(mddev, 1); in state_store()
3208 if (rdev->mddev->pers && slot == -1) { in slot_store()
3219 if (rdev->mddev->pers->hot_remove_disk == NULL) in slot_store()
3222 remove_and_add_spares(rdev->mddev, rdev); in slot_store()
3225 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); in slot_store()
3226 md_wakeup_thread(rdev->mddev->thread); in slot_store()
3227 } else if (rdev->mddev->pers) { in slot_store()
3236 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) in slot_store()
3239 if (rdev->mddev->pers->hot_add_disk == NULL) in slot_store()
3242 if (slot >= rdev->mddev->raid_disks && in slot_store()
3243 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) in slot_store()
3253 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev); in slot_store()
3260 sysfs_link_rdev(rdev->mddev, rdev); in slot_store()
3263 if (slot >= rdev->mddev->raid_disks && in slot_store()
3264 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) in slot_store()
3291 if (rdev->mddev->pers && rdev->raid_disk >= 0) in offset_store()
3293 if (rdev->sectors && rdev->mddev->external) in offset_store()
3315 struct mddev *mddev = rdev->mddev; in new_offset_store() local
3320 if (mddev->sync_thread || in new_offset_store()
3321 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) in new_offset_store()
3329 + mddev->dev_sectors > rdev->sectors) in new_offset_store()
3338 mddev->reshape_backwards) in new_offset_store()
3345 !mddev->reshape_backwards) in new_offset_store()
3348 if (mddev->pers && mddev->persistent && in new_offset_store()
3349 !super_types[mddev->major_version] in new_offset_store()
3354 mddev->reshape_backwards = 1; in new_offset_store()
3356 mddev->reshape_backwards = 0; in new_offset_store()
3401 struct mddev *my_mddev = rdev->mddev; in rdev_size_store()
3435 struct mddev *mddev; in rdev_size_store() local
3440 for_each_mddev(mddev, tmp) { in rdev_size_store()
3443 rdev_for_each(rdev2, mddev) in rdev_size_store()
3453 mddev_put(mddev); in rdev_size_store()
3495 if (rdev->mddev->pers && in recovery_start_store()
3563 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && in ppl_sector_store()
3567 if (rdev->mddev->persistent) { in ppl_sector_store()
3568 if (rdev->mddev->major_version == 0) in ppl_sector_store()
3576 } else if (!rdev->mddev->external) { in ppl_sector_store()
3600 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && in ppl_size_store()
3604 if (rdev->mddev->persistent) { in ppl_size_store()
3605 if (rdev->mddev->major_version == 0) in ppl_size_store()
3609 } else if (!rdev->mddev->external) { in ppl_size_store()
3641 if (!rdev->mddev) in rdev_attr_show()
3653 struct mddev *mddev = rdev->mddev; in rdev_attr_store() local
3659 rv = mddev ? mddev_lock(mddev) : -ENODEV; in rdev_attr_store()
3661 if (rdev->mddev == NULL) in rdev_attr_store()
3665 mddev_unlock(mddev); in rdev_attr_store()
3783 static int analyze_sbs(struct mddev *mddev) in analyze_sbs() argument
3790 rdev_for_each_safe(rdev, tmp, mddev) in analyze_sbs()
3791 switch (super_types[mddev->major_version]. in analyze_sbs()
3792 load_super(rdev, freshest, mddev->minor_version)) { in analyze_sbs()
3810 super_types[mddev->major_version]. in analyze_sbs()
3811 validate_super(mddev, freshest); in analyze_sbs()
3814 rdev_for_each_safe(rdev, tmp, mddev) { in analyze_sbs()
3815 if (mddev->max_disks && in analyze_sbs()
3816 (rdev->desc_nr >= mddev->max_disks || in analyze_sbs()
3817 i > mddev->max_disks)) { in analyze_sbs()
3819 mdname(mddev), bdevname(rdev->bdev, b), in analyze_sbs()
3820 mddev->max_disks); in analyze_sbs()
3825 if (super_types[mddev->major_version]. in analyze_sbs()
3826 validate_super(mddev, rdev)) { in analyze_sbs()
3833 if (mddev->level == LEVEL_MULTIPATH) { in analyze_sbs()
3838 (mddev->raid_disks - min(0, mddev->delta_disks)) && in analyze_sbs()
3885 safe_delay_show(struct mddev *mddev, char *page) in safe_delay_show() argument
3887 int msec = (mddev->safemode_delay*1000)/HZ; in safe_delay_show()
3891 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) in safe_delay_store() argument
3895 if (mddev_is_clustered(mddev)) { in safe_delay_store()
3903 mddev->safemode_delay = 0; in safe_delay_store()
3905 unsigned long old_delay = mddev->safemode_delay; in safe_delay_store()
3910 mddev->safemode_delay = new_delay; in safe_delay_store()
3912 mod_timer(&mddev->safemode_timer, jiffies+1); in safe_delay_store()
3920 level_show(struct mddev *mddev, char *page) in level_show() argument
3924 spin_lock(&mddev->lock); in level_show()
3925 p = mddev->pers; in level_show()
3928 else if (mddev->clevel[0]) in level_show()
3929 ret = sprintf(page, "%s\n", mddev->clevel); in level_show()
3930 else if (mddev->level != LEVEL_NONE) in level_show()
3931 ret = sprintf(page, "%d\n", mddev->level); in level_show()
3934 spin_unlock(&mddev->lock); in level_show()
3939 level_store(struct mddev *mddev, const char *buf, size_t len) in level_store() argument
3952 rv = mddev_lock(mddev); in level_store()
3956 if (mddev->pers == NULL) { in level_store()
3957 strncpy(mddev->clevel, buf, slen); in level_store()
3958 if (mddev->clevel[slen-1] == '\n') in level_store()
3960 mddev->clevel[slen] = 0; in level_store()
3961 mddev->level = LEVEL_NONE; in level_store()
3966 if (mddev->ro) in level_store()
3976 if (mddev->sync_thread || in level_store()
3977 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in level_store()
3978 mddev->reshape_position != MaxSector || in level_store()
3979 mddev->sysfs_active) in level_store()
3983 if (!mddev->pers->quiesce) { in level_store()
3985 mdname(mddev), mddev->pers->name); in level_store()
4009 if (pers == mddev->pers) { in level_store()
4018 mdname(mddev), clevel); in level_store()
4023 rdev_for_each(rdev, mddev) in level_store()
4029 priv = pers->takeover(mddev); in level_store()
4031 mddev->new_level = mddev->level; in level_store()
4032 mddev->new_layout = mddev->layout; in level_store()
4033 mddev->new_chunk_sectors = mddev->chunk_sectors; in level_store()
4034 mddev->raid_disks -= mddev->delta_disks; in level_store()
4035 mddev->delta_disks = 0; in level_store()
4036 mddev->reshape_backwards = 0; in level_store()
4039 mdname(mddev), clevel); in level_store()
4045 mddev_suspend(mddev); in level_store()
4046 mddev_detach(mddev); in level_store()
4048 spin_lock(&mddev->lock); in level_store()
4049 oldpers = mddev->pers; in level_store()
4050 oldpriv = mddev->private; in level_store()
4051 mddev->pers = pers; in level_store()
4052 mddev->private = priv; in level_store()
4053 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); in level_store()
4054 mddev->level = mddev->new_level; in level_store()
4055 mddev->layout = mddev->new_layout; in level_store()
4056 mddev->chunk_sectors = mddev->new_chunk_sectors; in level_store()
4057 mddev->delta_disks = 0; in level_store()
4058 mddev->reshape_backwards = 0; in level_store()
4059 mddev->degraded = 0; in level_store()
4060 spin_unlock(&mddev->lock); in level_store()
4063 mddev->external) { in level_store()
4071 mddev->in_sync = 0; in level_store()
4072 mddev->safemode_delay = 0; in level_store()
4073 mddev->safemode = 0; in level_store()
4076 oldpers->free(mddev, oldpriv); in level_store()
4081 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) in level_store()
4083 mdname(mddev)); in level_store()
4084 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); in level_store()
4085 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); in level_store()
4086 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); in level_store()
4091 if (mddev->to_remove == NULL) in level_store()
4092 mddev->to_remove = &md_redundancy_group; in level_store()
4097 rdev_for_each(rdev, mddev) { in level_store()
4100 if (rdev->new_raid_disk >= mddev->raid_disks) in level_store()
4104 sysfs_unlink_rdev(mddev, rdev); in level_store()
4106 rdev_for_each(rdev, mddev) { in level_store()
4115 if (sysfs_link_rdev(mddev, rdev)) in level_store()
4117 rdev->raid_disk, mdname(mddev)); in level_store()
4125 mddev->in_sync = 1; in level_store()
4126 del_timer_sync(&mddev->safemode_timer); in level_store()
4128 blk_set_stacking_limits(&mddev->queue->limits); in level_store()
4129 pers->run(mddev); in level_store()
4130 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in level_store()
4131 mddev_resume(mddev); in level_store()
4132 if (!mddev->thread) in level_store()
4133 md_update_sb(mddev, 1); in level_store()
4134 sysfs_notify_dirent_safe(mddev->sysfs_level); in level_store()
4135 md_new_event(mddev); in level_store()
4138 mddev_unlock(mddev); in level_store()
4146 layout_show(struct mddev *mddev, char *page) in layout_show() argument
4149 if (mddev->reshape_position != MaxSector && in layout_show()
4150 mddev->layout != mddev->new_layout) in layout_show()
4152 mddev->new_layout, mddev->layout); in layout_show()
4153 return sprintf(page, "%d\n", mddev->layout); in layout_show()
4157 layout_store(struct mddev *mddev, const char *buf, size_t len) in layout_store() argument
4165 err = mddev_lock(mddev); in layout_store()
4169 if (mddev->pers) { in layout_store()
4170 if (mddev->pers->check_reshape == NULL) in layout_store()
4172 else if (mddev->ro) in layout_store()
4175 mddev->new_layout = n; in layout_store()
4176 err = mddev->pers->check_reshape(mddev); in layout_store()
4178 mddev->new_layout = mddev->layout; in layout_store()
4181 mddev->new_layout = n; in layout_store()
4182 if (mddev->reshape_position == MaxSector) in layout_store()
4183 mddev->layout = n; in layout_store()
4185 mddev_unlock(mddev); in layout_store()
4192 raid_disks_show(struct mddev *mddev, char *page) in raid_disks_show() argument
4194 if (mddev->raid_disks == 0) in raid_disks_show()
4196 if (mddev->reshape_position != MaxSector && in raid_disks_show()
4197 mddev->delta_disks != 0) in raid_disks_show()
4198 return sprintf(page, "%d (%d)\n", mddev->raid_disks, in raid_disks_show()
4199 mddev->raid_disks - mddev->delta_disks); in raid_disks_show()
4200 return sprintf(page, "%d\n", mddev->raid_disks); in raid_disks_show()
4203 static int update_raid_disks(struct mddev *mddev, int raid_disks);
4206 raid_disks_store(struct mddev *mddev, const char *buf, size_t len) in raid_disks_store() argument
4215 err = mddev_lock(mddev); in raid_disks_store()
4218 if (mddev->pers) in raid_disks_store()
4219 err = update_raid_disks(mddev, n); in raid_disks_store()
4220 else if (mddev->reshape_position != MaxSector) { in raid_disks_store()
4222 int olddisks = mddev->raid_disks - mddev->delta_disks; in raid_disks_store()
4225 rdev_for_each(rdev, mddev) { in raid_disks_store()
4234 mddev->delta_disks = n - olddisks; in raid_disks_store()
4235 mddev->raid_disks = n; in raid_disks_store()
4236 mddev->reshape_backwards = (mddev->delta_disks < 0); in raid_disks_store()
4238 mddev->raid_disks = n; in raid_disks_store()
4240 mddev_unlock(mddev); in raid_disks_store()
4247 uuid_show(struct mddev *mddev, char *page) in uuid_show() argument
4249 return sprintf(page, "%pU\n", mddev->uuid); in uuid_show()
4255 chunk_size_show(struct mddev *mddev, char *page) in chunk_size_show() argument
4257 if (mddev->reshape_position != MaxSector && in chunk_size_show()
4258 mddev->chunk_sectors != mddev->new_chunk_sectors) in chunk_size_show()
4260 mddev->new_chunk_sectors << 9, in chunk_size_show()
4261 mddev->chunk_sectors << 9); in chunk_size_show()
4262 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); in chunk_size_show()
4266 chunk_size_store(struct mddev *mddev, const char *buf, size_t len) in chunk_size_store() argument
4275 err = mddev_lock(mddev); in chunk_size_store()
4278 if (mddev->pers) { in chunk_size_store()
4279 if (mddev->pers->check_reshape == NULL) in chunk_size_store()
4281 else if (mddev->ro) in chunk_size_store()
4284 mddev->new_chunk_sectors = n >> 9; in chunk_size_store()
4285 err = mddev->pers->check_reshape(mddev); in chunk_size_store()
4287 mddev->new_chunk_sectors = mddev->chunk_sectors; in chunk_size_store()
4290 mddev->new_chunk_sectors = n >> 9; in chunk_size_store()
4291 if (mddev->reshape_position == MaxSector) in chunk_size_store()
4292 mddev->chunk_sectors = n >> 9; in chunk_size_store()
4294 mddev_unlock(mddev); in chunk_size_store()
4301 resync_start_show(struct mddev *mddev, char *page) in resync_start_show() argument
4303 if (mddev->recovery_cp == MaxSector) in resync_start_show()
4305 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); in resync_start_show()
4309 resync_start_store(struct mddev *mddev, const char *buf, size_t len) in resync_start_store() argument
4324 err = mddev_lock(mddev); in resync_start_store()
4327 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) in resync_start_store()
4331 mddev->recovery_cp = n; in resync_start_store()
4332 if (mddev->pers) in resync_start_store()
4333 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in resync_start_store()
4335 mddev_unlock(mddev); in resync_start_store()
4399 array_state_show(struct mddev *mddev, char *page) in array_state_show() argument
4403 if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) { in array_state_show()
4404 switch(mddev->ro) { in array_state_show()
4412 spin_lock(&mddev->lock); in array_state_show()
4413 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) in array_state_show()
4415 else if (mddev->in_sync) in array_state_show()
4417 else if (mddev->safemode) in array_state_show()
4421 spin_unlock(&mddev->lock); in array_state_show()
4424 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean) in array_state_show()
4427 if (list_empty(&mddev->disks) && in array_state_show()
4428 mddev->raid_disks == 0 && in array_state_show()
4429 mddev->dev_sectors == 0) in array_state_show()
4437 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4438 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
4439 static int restart_array(struct mddev *mddev);
4442 array_state_store(struct mddev *mddev, const char *buf, size_t len) in array_state_store() argument
4447 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) { in array_state_store()
4451 spin_lock(&mddev->lock); in array_state_store()
4453 restart_array(mddev); in array_state_store()
4454 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in array_state_store()
4455 md_wakeup_thread(mddev->thread); in array_state_store()
4456 wake_up(&mddev->sb_wait); in array_state_store()
4458 restart_array(mddev); in array_state_store()
4459 if (!set_in_sync(mddev)) in array_state_store()
4463 sysfs_notify_dirent_safe(mddev->sysfs_state); in array_state_store()
4464 spin_unlock(&mddev->lock); in array_state_store()
4467 err = mddev_lock(mddev); in array_state_store()
4476 err = do_md_stop(mddev, 0, NULL); in array_state_store()
4480 if (mddev->pers) in array_state_store()
4481 err = do_md_stop(mddev, 2, NULL); in array_state_store()
4488 if (mddev->pers) in array_state_store()
4489 err = md_set_readonly(mddev, NULL); in array_state_store()
4491 mddev->ro = 1; in array_state_store()
4492 set_disk_ro(mddev->gendisk, 1); in array_state_store()
4493 err = do_md_run(mddev); in array_state_store()
4497 if (mddev->pers) { in array_state_store()
4498 if (mddev->ro == 0) in array_state_store()
4499 err = md_set_readonly(mddev, NULL); in array_state_store()
4500 else if (mddev->ro == 1) in array_state_store()
4501 err = restart_array(mddev); in array_state_store()
4503 mddev->ro = 2; in array_state_store()
4504 set_disk_ro(mddev->gendisk, 0); in array_state_store()
4507 mddev->ro = 2; in array_state_store()
4508 err = do_md_run(mddev); in array_state_store()
4512 if (mddev->pers) { in array_state_store()
4513 err = restart_array(mddev); in array_state_store()
4516 spin_lock(&mddev->lock); in array_state_store()
4517 if (!set_in_sync(mddev)) in array_state_store()
4519 spin_unlock(&mddev->lock); in array_state_store()
4524 if (mddev->pers) { in array_state_store()
4525 err = restart_array(mddev); in array_state_store()
4528 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in array_state_store()
4529 wake_up(&mddev->sb_wait); in array_state_store()
4532 mddev->ro = 0; in array_state_store()
4533 set_disk_ro(mddev->gendisk, 0); in array_state_store()
4534 err = do_md_run(mddev); in array_state_store()
4545 if (mddev->hold_active == UNTIL_IOCTL) in array_state_store()
4546 mddev->hold_active = 0; in array_state_store()
4547 sysfs_notify_dirent_safe(mddev->sysfs_state); in array_state_store()
4549 mddev_unlock(mddev); in array_state_store()
4556 max_corrected_read_errors_show(struct mddev *mddev, char *page) { in max_corrected_read_errors_show() argument
4558 atomic_read(&mddev->max_corr_read_errors)); in max_corrected_read_errors_show()
4562 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) in max_corrected_read_errors_store() argument
4570 atomic_set(&mddev->max_corr_read_errors, n); in max_corrected_read_errors_store()
4579 null_show(struct mddev *mddev, char *page) in null_show() argument
4585 static void flush_rdev_wq(struct mddev *mddev) in flush_rdev_wq() argument
4590 rdev_for_each_rcu(rdev, mddev) in flush_rdev_wq()
4599 new_dev_store(struct mddev *mddev, const char *buf, size_t len) in new_dev_store() argument
4625 flush_rdev_wq(mddev); in new_dev_store()
4626 err = mddev_lock(mddev); in new_dev_store()
4629 if (mddev->persistent) { in new_dev_store()
4630 rdev = md_import_device(dev, mddev->major_version, in new_dev_store()
4631 mddev->minor_version); in new_dev_store()
4632 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { in new_dev_store()
4634 = list_entry(mddev->disks.next, in new_dev_store()
4636 err = super_types[mddev->major_version] in new_dev_store()
4637 .load_super(rdev, rdev0, mddev->minor_version); in new_dev_store()
4641 } else if (mddev->external) in new_dev_store()
4647 mddev_unlock(mddev); in new_dev_store()
4650 err = bind_rdev_to_array(rdev, mddev); in new_dev_store()
4654 mddev_unlock(mddev); in new_dev_store()
4656 md_new_event(mddev); in new_dev_store()
4664 bitmap_store(struct mddev *mddev, const char *buf, size_t len) in bitmap_store() argument
4670 err = mddev_lock(mddev); in bitmap_store()
4673 if (!mddev->bitmap) in bitmap_store()
4685 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); in bitmap_store()
4688 md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ in bitmap_store()
4690 mddev_unlock(mddev); in bitmap_store()
4698 size_show(struct mddev *mddev, char *page) in size_show() argument
4701 (unsigned long long)mddev->dev_sectors / 2); in size_show()
4704 static int update_size(struct mddev *mddev, sector_t num_sectors);
4707 size_store(struct mddev *mddev, const char *buf, size_t len) in size_store() argument
4718 err = mddev_lock(mddev); in size_store()
4721 if (mddev->pers) { in size_store()
4722 err = update_size(mddev, sectors); in size_store()
4724 md_update_sb(mddev, 1); in size_store()
4726 if (mddev->dev_sectors == 0 || in size_store()
4727 mddev->dev_sectors > sectors) in size_store()
4728 mddev->dev_sectors = sectors; in size_store()
4732 mddev_unlock(mddev); in size_store()
4746 metadata_show(struct mddev *mddev, char *page) in metadata_show() argument
4748 if (mddev->persistent) in metadata_show()
4750 mddev->major_version, mddev->minor_version); in metadata_show()
4751 else if (mddev->external) in metadata_show()
4752 return sprintf(page, "external:%s\n", mddev->metadata_type); in metadata_show()
4758 metadata_store(struct mddev *mddev, const char *buf, size_t len) in metadata_store() argument
4768 err = mddev_lock(mddev); in metadata_store()
4772 if (mddev->external && strncmp(buf, "external:", 9) == 0) in metadata_store()
4774 else if (!list_empty(&mddev->disks)) in metadata_store()
4779 mddev->persistent = 0; in metadata_store()
4780 mddev->external = 0; in metadata_store()
4781 mddev->major_version = 0; in metadata_store()
4782 mddev->minor_version = 90; in metadata_store()
4787 if (namelen >= sizeof(mddev->metadata_type)) in metadata_store()
4788 namelen = sizeof(mddev->metadata_type)-1; in metadata_store()
4789 strncpy(mddev->metadata_type, buf+9, namelen); in metadata_store()
4790 mddev->metadata_type[namelen] = 0; in metadata_store()
4791 if (namelen && mddev->metadata_type[namelen-1] == '\n') in metadata_store()
4792 mddev->metadata_type[--namelen] = 0; in metadata_store()
4793 mddev->persistent = 0; in metadata_store()
4794 mddev->external = 1; in metadata_store()
4795 mddev->major_version = 0; in metadata_store()
4796 mddev->minor_version = 90; in metadata_store()
4810 mddev->major_version = major; in metadata_store()
4811 mddev->minor_version = minor; in metadata_store()
4812 mddev->persistent = 1; in metadata_store()
4813 mddev->external = 0; in metadata_store()
4816 mddev_unlock(mddev); in metadata_store()
4824 action_show(struct mddev *mddev, char *page) in action_show() argument
4827 unsigned long recovery = mddev->recovery; in action_show()
4831 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) { in action_show()
4843 else if (mddev->reshape_position != MaxSector) in action_show()
4850 action_store(struct mddev *mddev, const char *page, size_t len) in action_store() argument
4852 if (!mddev->pers || !mddev->pers->sync_request) in action_store()
4858 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4860 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4861 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && in action_store()
4862 mddev_lock(mddev) == 0) { in action_store()
4863 if (work_pending(&mddev->del_work)) in action_store()
4865 if (mddev->sync_thread) { in action_store()
4866 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in action_store()
4867 md_reap_sync_thread(mddev); in action_store()
4869 mddev_unlock(mddev); in action_store()
4871 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in action_store()
4874 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4876 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4877 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in action_store()
4880 if (mddev->pers->start_reshape == NULL) in action_store()
4882 err = mddev_lock(mddev); in action_store()
4884 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in action_store()
4887 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4888 err = mddev->pers->start_reshape(mddev); in action_store()
4890 mddev_unlock(mddev); in action_store()
4894 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in action_store()
4897 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); in action_store()
4900 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in action_store()
4901 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in action_store()
4902 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); in action_store()
4904 if (mddev->ro == 2) { in action_store()
4908 mddev->ro = 0; in action_store()
4909 md_wakeup_thread(mddev->sync_thread); in action_store()
4911 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in action_store()
4912 md_wakeup_thread(mddev->thread); in action_store()
4913 sysfs_notify_dirent_safe(mddev->sysfs_action); in action_store()
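
action_store() interprets the strings written to the sync_action attribute (e.g. "frozen", "idle", "check", "repair") and wakes the relevant threads. A hedged userspace sketch that starts a consistency check on md0 and reads the action back, assuming the standard sysfs layout:

/* Sketch: start a RAID consistency check and read back sync_action.
 * Assumes an array named md0 under /sys/block/md0/md/. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	char buf[64];
	int fd;

	if (write_str("/sys/block/md0/md/sync_action", "check") < 0) {
		perror("sync_action");
		return 1;
	}
	fd = open("/sys/block/md0/md/sync_action", O_RDONLY);
	if (fd >= 0) {
		ssize_t n = read(fd, buf, sizeof(buf) - 1);

		if (n > 0) {
			buf[n] = '\0';
			printf("sync_action: %s", buf);	/* e.g. "check\n" */
		}
		close(fd);
	}
	return 0;
}
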
4921 last_sync_action_show(struct mddev *mddev, char *page) in last_sync_action_show() argument
4923 return sprintf(page, "%s\n", mddev->last_sync_action); in last_sync_action_show()
4929 mismatch_cnt_show(struct mddev *mddev, char *page) in mismatch_cnt_show() argument
4933 atomic64_read(&mddev->resync_mismatches)); in mismatch_cnt_show()
4939 sync_min_show(struct mddev *mddev, char *page) in sync_min_show() argument
4941 return sprintf(page, "%d (%s)\n", speed_min(mddev), in sync_min_show()
4942 mddev->sync_speed_min ? "local": "system"); in sync_min_show()
4946 sync_min_store(struct mddev *mddev, const char *buf, size_t len) in sync_min_store() argument
4960 mddev->sync_speed_min = min; in sync_min_store()
4968 sync_max_show(struct mddev *mddev, char *page) in sync_max_show() argument
4970 return sprintf(page, "%d (%s)\n", speed_max(mddev), in sync_max_show()
4971 mddev->sync_speed_max ? "local": "system"); in sync_max_show()
4975 sync_max_store(struct mddev *mddev, const char *buf, size_t len) in sync_max_store() argument
4989 mddev->sync_speed_max = max; in sync_max_store()
4997 degraded_show(struct mddev *mddev, char *page) in degraded_show() argument
4999 return sprintf(page, "%d\n", mddev->degraded); in degraded_show()
5004 sync_force_parallel_show(struct mddev *mddev, char *page) in sync_force_parallel_show() argument
5006 return sprintf(page, "%d\n", mddev->parallel_resync); in sync_force_parallel_show()
5010 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) in sync_force_parallel_store() argument
5020 mddev->parallel_resync = n; in sync_force_parallel_store()
5022 if (mddev->sync_thread) in sync_force_parallel_store()
5034 sync_speed_show(struct mddev *mddev, char *page) in sync_speed_show() argument
5037 if (mddev->curr_resync == 0) in sync_speed_show()
5039 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); in sync_speed_show()
5040 dt = (jiffies - mddev->resync_mark) / HZ; in sync_speed_show()
5042 db = resync - mddev->resync_mark_cnt; in sync_speed_show()
5049 sync_completed_show(struct mddev *mddev, char *page) in sync_completed_show() argument
5053 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in sync_completed_show()
5056 if (mddev->curr_resync == 1 || in sync_completed_show()
5057 mddev->curr_resync == 2) in sync_completed_show()
5060 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in sync_completed_show()
5061 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in sync_completed_show()
5062 max_sectors = mddev->resync_max_sectors; in sync_completed_show()
5064 max_sectors = mddev->dev_sectors; in sync_completed_show()
5066 resync = mddev->curr_resync_completed; in sync_completed_show()
5074 min_sync_show(struct mddev *mddev, char *page) in min_sync_show() argument
5077 (unsigned long long)mddev->resync_min); in min_sync_show()
5080 min_sync_store(struct mddev *mddev, const char *buf, size_t len) in min_sync_store() argument
5088 spin_lock(&mddev->lock); in min_sync_store()
5090 if (min > mddev->resync_max) in min_sync_store()
5094 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in min_sync_store()
5098 mddev->resync_min = round_down(min, 8); in min_sync_store()
5102 spin_unlock(&mddev->lock); in min_sync_store()
5110 max_sync_show(struct mddev *mddev, char *page) in max_sync_show() argument
5112 if (mddev->resync_max == MaxSector) in max_sync_show()
5116 (unsigned long long)mddev->resync_max); in max_sync_show()
5119 max_sync_store(struct mddev *mddev, const char *buf, size_t len) in max_sync_store() argument
5122 spin_lock(&mddev->lock); in max_sync_store()
5124 mddev->resync_max = MaxSector; in max_sync_store()
5132 if (max < mddev->resync_min) in max_sync_store()
5136 if (max < mddev->resync_max && in max_sync_store()
5137 mddev->ro == 0 && in max_sync_store()
5138 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in max_sync_store()
5142 chunk = mddev->chunk_sectors; in max_sync_store()
5150 mddev->resync_max = max; in max_sync_store()
5152 wake_up(&mddev->recovery_wait); in max_sync_store()
5155 spin_unlock(&mddev->lock); in max_sync_store()
5163 suspend_lo_show(struct mddev *mddev, char *page) in suspend_lo_show() argument
5165 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); in suspend_lo_show()
5169 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) in suspend_lo_store() argument
5180 err = mddev_lock(mddev); in suspend_lo_store()
5184 if (mddev->pers == NULL || in suspend_lo_store()
5185 mddev->pers->quiesce == NULL) in suspend_lo_store()
5187 mddev_suspend(mddev); in suspend_lo_store()
5188 mddev->suspend_lo = new; in suspend_lo_store()
5189 mddev_resume(mddev); in suspend_lo_store()
5193 mddev_unlock(mddev); in suspend_lo_store()
5200 suspend_hi_show(struct mddev *mddev, char *page) in suspend_hi_show() argument
5202 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); in suspend_hi_show()
5206 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) in suspend_hi_store() argument
5217 err = mddev_lock(mddev); in suspend_hi_store()
5221 if (mddev->pers == NULL) in suspend_hi_store()
5224 mddev_suspend(mddev); in suspend_hi_store()
5225 mddev->suspend_hi = new; in suspend_hi_store()
5226 mddev_resume(mddev); in suspend_hi_store()
5230 mddev_unlock(mddev); in suspend_hi_store()
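
suspend_lo_store()/suspend_hi_store() set the sector range in which incoming I/O is held off while the array is quiesced. A sketch that programs such a range from userspace (attribute names, sector units and the md0 path are assumptions):

/* Sketch: hold off I/O to a sector range on md0 by writing the
 * suspend_lo/suspend_hi attributes (values assumed to be 512-byte
 * sectors, matching mddev->suspend_lo/suspend_hi above). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_attr(const char *attr, unsigned long long sectors)
{
	char path[128], val[32];
	int fd, ret = 0;

	snprintf(path, sizeof(path), "/sys/block/md0/md/%s", attr);
	snprintf(val, sizeof(val), "%llu", sectors);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0)
		ret = -1;
	close(fd);
	return ret;
}

int main(void)
{
	/* Hold I/O to sectors [1024, 2048) until the range is cleared. */
	if (write_attr("suspend_lo", 1024) || write_attr("suspend_hi", 2048)) {
		perror("suspend range");
		return 1;
	}
	return 0;
}
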
5237 reshape_position_show(struct mddev *mddev, char *page) in reshape_position_show() argument
5239 if (mddev->reshape_position != MaxSector) in reshape_position_show()
5241 (unsigned long long)mddev->reshape_position); in reshape_position_show()
5247 reshape_position_store(struct mddev *mddev, const char *buf, size_t len) in reshape_position_store() argument
5258 err = mddev_lock(mddev); in reshape_position_store()
5262 if (mddev->pers) in reshape_position_store()
5264 mddev->reshape_position = new; in reshape_position_store()
5265 mddev->delta_disks = 0; in reshape_position_store()
5266 mddev->reshape_backwards = 0; in reshape_position_store()
5267 mddev->new_level = mddev->level; in reshape_position_store()
5268 mddev->new_layout = mddev->layout; in reshape_position_store()
5269 mddev->new_chunk_sectors = mddev->chunk_sectors; in reshape_position_store()
5270 rdev_for_each(rdev, mddev) in reshape_position_store()
5274 mddev_unlock(mddev); in reshape_position_store()
5283 reshape_direction_show(struct mddev *mddev, char *page) in reshape_direction_show() argument
5286 mddev->reshape_backwards ? "backwards" : "forwards"); in reshape_direction_show()
5290 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len) in reshape_direction_store() argument
5301 if (mddev->reshape_backwards == backwards) in reshape_direction_store()
5304 err = mddev_lock(mddev); in reshape_direction_store()
5308 if (mddev->delta_disks) in reshape_direction_store()
5310 else if (mddev->persistent && in reshape_direction_store()
5311 mddev->major_version == 0) in reshape_direction_store()
5314 mddev->reshape_backwards = backwards; in reshape_direction_store()
5315 mddev_unlock(mddev); in reshape_direction_store()
5324 array_size_show(struct mddev *mddev, char *page) in array_size_show() argument
5326 if (mddev->external_size) in array_size_show()
5328 (unsigned long long)mddev->array_sectors/2); in array_size_show()
5334 array_size_store(struct mddev *mddev, const char *buf, size_t len) in array_size_store() argument
5339 err = mddev_lock(mddev); in array_size_store()
5344 if (mddev_is_clustered(mddev)) { in array_size_store()
5345 mddev_unlock(mddev); in array_size_store()
5350 if (mddev->pers) in array_size_store()
5351 sectors = mddev->pers->size(mddev, 0, 0); in array_size_store()
5353 sectors = mddev->array_sectors; in array_size_store()
5355 mddev->external_size = 0; in array_size_store()
5359 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) in array_size_store()
5362 mddev->external_size = 1; in array_size_store()
5366 mddev->array_sectors = sectors; in array_size_store()
5367 if (mddev->pers) { in array_size_store()
5368 set_capacity(mddev->gendisk, mddev->array_sectors); in array_size_store()
5369 revalidate_disk_size(mddev->gendisk, true); in array_size_store()
5372 mddev_unlock(mddev); in array_size_store()
5381 consistency_policy_show(struct mddev *mddev, char *page) in consistency_policy_show() argument
5385 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { in consistency_policy_show()
5387 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) { in consistency_policy_show()
5389 } else if (mddev->bitmap) { in consistency_policy_show()
5391 } else if (mddev->pers) { in consistency_policy_show()
5392 if (mddev->pers->sync_request) in consistency_policy_show()
5404 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len) in consistency_policy_store() argument
5408 if (mddev->pers) { in consistency_policy_store()
5409 if (mddev->pers->change_consistency_policy) in consistency_policy_store()
5410 err = mddev->pers->change_consistency_policy(mddev, buf); in consistency_policy_store()
5413 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) { in consistency_policy_store()
5414 set_bit(MD_HAS_PPL, &mddev->flags); in consistency_policy_store()
5426 static ssize_t fail_last_dev_show(struct mddev *mddev, char *page) in fail_last_dev_show() argument
5428 return sprintf(page, "%d\n", mddev->fail_last_dev); in fail_last_dev_show()
5436 fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len) in fail_last_dev_store() argument
5445 if (value != mddev->fail_last_dev) in fail_last_dev_store()
5446 mddev->fail_last_dev = value; in fail_last_dev_store()
5454 static ssize_t serialize_policy_show(struct mddev *mddev, char *page) in serialize_policy_show() argument
5456 if (mddev->pers == NULL || (mddev->pers->level != 1)) in serialize_policy_show()
5459 return sprintf(page, "%d\n", mddev->serialize_policy); in serialize_policy_show()
5467 serialize_policy_store(struct mddev *mddev, const char *buf, size_t len) in serialize_policy_store() argument
5476 if (value == mddev->serialize_policy) in serialize_policy_store()
5479 err = mddev_lock(mddev); in serialize_policy_store()
5482 if (mddev->pers == NULL || (mddev->pers->level != 1)) { in serialize_policy_store()
5488 mddev_suspend(mddev); in serialize_policy_store()
5490 mddev_create_serial_pool(mddev, NULL, true); in serialize_policy_store()
5492 mddev_destroy_serial_pool(mddev, NULL, true); in serialize_policy_store()
5493 mddev->serialize_policy = value; in serialize_policy_store()
5494 mddev_resume(mddev); in serialize_policy_store()
5496 mddev_unlock(mddev); in serialize_policy_store()
5553 struct mddev *mddev = container_of(kobj, struct mddev, kobj); in md_attr_show() local
5559 if (list_empty(&mddev->all_mddevs)) { in md_attr_show()
5563 mddev_get(mddev); in md_attr_show()
5566 rv = entry->show(mddev, page); in md_attr_show()
5567 mddev_put(mddev); in md_attr_show()
5576 struct mddev *mddev = container_of(kobj, struct mddev, kobj); in md_attr_store() local
5584 if (list_empty(&mddev->all_mddevs)) { in md_attr_store()
5588 mddev_get(mddev); in md_attr_store()
5590 rv = entry->store(mddev, page, length); in md_attr_store()
5591 mddev_put(mddev); in md_attr_store()
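
md_attr_show()/md_attr_store() recover the mddev from its embedded kobject with container_of() and then dispatch to the attribute's show/store handler. The dispatch idea can be demonstrated standalone; the types below are simplified userspace stand-ins, not the kernel's kobject or sysfs structures:

/* Standalone sketch of the container_of + per-attribute dispatch
 * pattern used by md_attr_show()/md_attr_store(). */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kobj { const char *name; };

struct fake_mddev {
	struct kobj kobj;		/* embedded, like mddev->kobj */
	int sync_speed_min;
};

struct attr_entry {
	const char *name;
	int (*show)(struct fake_mddev *mddev, char *page);
};

static int sync_min_show(struct fake_mddev *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->sync_speed_min);
}

static struct attr_entry sync_min_attr = { "sync_speed_min", sync_min_show };

/* What md_attr_show() does: map the kobject back to its owning mddev,
 * then call the entry's ->show(). */
static int attr_show(struct kobj *kobj, struct attr_entry *entry, char *page)
{
	struct fake_mddev *mddev = container_of(kobj, struct fake_mddev, kobj);

	return entry->show(mddev, page);
}

int main(void)
{
	struct fake_mddev md = { .kobj = { "md0" }, .sync_speed_min = 1000 };
	char page[32];

	attr_show(&md.kobj, &sync_min_attr, page);
	printf("%s: %s", md.kobj.name, page);	/* prints "md0: 1000" */
	return 0;
}
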
5597 struct mddev *mddev = container_of(ko, struct mddev, kobj); in md_free() local
5599 if (mddev->sysfs_state) in md_free()
5600 sysfs_put(mddev->sysfs_state); in md_free()
5601 if (mddev->sysfs_level) in md_free()
5602 sysfs_put(mddev->sysfs_level); in md_free()
5604 if (mddev->gendisk) in md_free()
5605 del_gendisk(mddev->gendisk); in md_free()
5606 if (mddev->queue) in md_free()
5607 blk_cleanup_queue(mddev->queue); in md_free()
5608 if (mddev->gendisk) in md_free()
5609 put_disk(mddev->gendisk); in md_free()
5610 percpu_ref_exit(&mddev->writes_pending); in md_free()
5612 bioset_exit(&mddev->bio_set); in md_free()
5613 bioset_exit(&mddev->sync_set); in md_free()
5614 kfree(mddev); in md_free()
5631 struct mddev *mddev = container_of(ws, struct mddev, del_work); in mddev_delayed_delete() local
5633 sysfs_remove_group(&mddev->kobj, &md_bitmap_group); in mddev_delayed_delete()
5634 kobject_del(&mddev->kobj); in mddev_delayed_delete()
5635 kobject_put(&mddev->kobj); in mddev_delayed_delete()
5640 int mddev_init_writes_pending(struct mddev *mddev) in mddev_init_writes_pending() argument
5642 if (mddev->writes_pending.percpu_count_ptr) in mddev_init_writes_pending()
5644 if (percpu_ref_init(&mddev->writes_pending, no_op, in mddev_init_writes_pending()
5648 percpu_ref_put(&mddev->writes_pending); in mddev_init_writes_pending()
5665 struct mddev *mddev = mddev_find_or_alloc(dev); in md_alloc() local
5672 if (!mddev) in md_alloc()
5675 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); in md_alloc()
5677 unit = MINOR(mddev->unit) >> shift; in md_alloc()
5686 if (mddev->gendisk) in md_alloc()
5692 struct mddev *mddev2; in md_alloc()
5707 mddev->hold_active = UNTIL_STOP; in md_alloc()
5710 mddev->queue = blk_alloc_queue(NUMA_NO_NODE); in md_alloc()
5711 if (!mddev->queue) in md_alloc()
5714 blk_set_stacking_limits(&mddev->queue->limits); in md_alloc()
5718 blk_cleanup_queue(mddev->queue); in md_alloc()
5719 mddev->queue = NULL; in md_alloc()
5722 disk->major = MAJOR(mddev->unit); in md_alloc()
5731 disk->private_data = mddev; in md_alloc()
5732 disk->queue = mddev->queue; in md_alloc()
5733 blk_queue_write_cache(mddev->queue, true, true); in md_alloc()
5740 mddev->gendisk = disk; in md_alloc()
5743 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md"); in md_alloc()
5752 if (mddev->kobj.sd && in md_alloc()
5753 sysfs_create_group(&mddev->kobj, &md_bitmap_group)) in md_alloc()
5757 if (!error && mddev->kobj.sd) { in md_alloc()
5758 kobject_uevent(&mddev->kobj, KOBJ_ADD); in md_alloc()
5759 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); in md_alloc()
5760 mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level"); in md_alloc()
5762 mddev_put(mddev); in md_alloc()
5804 struct mddev *mddev = from_timer(mddev, t, safemode_timer); in md_safemode_timeout() local
5806 mddev->safemode = 1; in md_safemode_timeout()
5807 if (mddev->external) in md_safemode_timeout()
5808 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_safemode_timeout()
5810 md_wakeup_thread(mddev->thread); in md_safemode_timeout()
5815 int md_run(struct mddev *mddev) in md_run() argument
5821 if (list_empty(&mddev->disks)) in md_run()
5825 if (mddev->pers) in md_run()
5828 if (mddev->sysfs_active) in md_run()
5834 if (!mddev->raid_disks) { in md_run()
5835 if (!mddev->persistent) in md_run()
5837 err = analyze_sbs(mddev); in md_run()
5842 if (mddev->level != LEVEL_NONE) in md_run()
5843 request_module("md-level-%d", mddev->level); in md_run()
5844 else if (mddev->clevel[0]) in md_run()
5845 request_module("md-%s", mddev->clevel); in md_run()
5852 mddev->has_superblocks = false; in md_run()
5853 rdev_for_each(rdev, mddev) { in md_run()
5858 if (mddev->ro != 1 && in md_run()
5861 mddev->ro = 1; in md_run()
5862 if (mddev->gendisk) in md_run()
5863 set_disk_ro(mddev->gendisk, 1); in md_run()
5867 mddev->has_superblocks = true; in md_run()
5876 if (mddev->dev_sectors && in md_run()
5877 rdev->data_offset + mddev->dev_sectors in md_run()
5880 mdname(mddev)); in md_run()
5887 mdname(mddev)); in md_run()
5894 if (!bioset_initialized(&mddev->bio_set)) { in md_run()
5895 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); in md_run()
5899 if (!bioset_initialized(&mddev->sync_set)) { in md_run()
5900 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); in md_run()
5906 pers = find_pers(mddev->level, mddev->clevel); in md_run()
5909 if (mddev->level != LEVEL_NONE) in md_run()
5911 mddev->level); in md_run()
5914 mddev->clevel); in md_run()
5919 if (mddev->level != pers->level) { in md_run()
5920 mddev->level = pers->level; in md_run()
5921 mddev->new_level = pers->level; in md_run()
5923 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); in md_run()
5925 if (mddev->reshape_position != MaxSector && in md_run()
5941 rdev_for_each(rdev, mddev) in md_run()
5942 rdev_for_each(rdev2, mddev) { in md_run()
5947 mdname(mddev), in md_run()
5958 mddev->recovery = 0; in md_run()
5960 mddev->resync_max_sectors = mddev->dev_sectors; in md_run()
5962 mddev->ok_start_degraded = start_dirty_degraded; in md_run()
5964 if (start_readonly && mddev->ro == 0) in md_run()
5965 mddev->ro = 2; /* read-only, but switch on first write */ in md_run()
5967 err = pers->run(mddev); in md_run()
5970 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) { in md_run()
5971 WARN_ONCE(!mddev->external_size, in md_run()
5975 (unsigned long long)mddev->array_sectors / 2, in md_run()
5976 (unsigned long long)pers->size(mddev, 0, 0) / 2); in md_run()
5980 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { in md_run()
5983 bitmap = md_bitmap_create(mddev, -1); in md_run()
5987 mdname(mddev), err); in md_run()
5989 mddev->bitmap = bitmap; in md_run()
5995 if (mddev->bitmap_info.max_write_behind > 0) { in md_run()
5998 rdev_for_each(rdev, mddev) { in md_run()
6003 if (create_pool && mddev->serial_info_pool == NULL) { in md_run()
6004 mddev->serial_info_pool = in md_run()
6007 if (!mddev->serial_info_pool) { in md_run()
6014 if (mddev->queue) { in md_run()
6017 rdev_for_each(rdev, mddev) { in md_run()
6024 if (mddev->degraded) in md_run()
6027 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue); in md_run()
6029 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue); in md_run()
6032 if (mddev->kobj.sd && in md_run()
6033 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) in md_run()
6035 mdname(mddev)); in md_run()
6036 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); in md_run()
6037 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); in md_run()
6038 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); in md_run()
6039 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ in md_run()
6040 mddev->ro = 0; in md_run()
6042 atomic_set(&mddev->max_corr_read_errors, in md_run()
6044 mddev->safemode = 0; in md_run()
6045 if (mddev_is_clustered(mddev)) in md_run()
6046 mddev->safemode_delay = 0; in md_run()
6048 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; in md_run()
6049 mddev->in_sync = 1; in md_run()
6051 spin_lock(&mddev->lock); in md_run()
6052 mddev->pers = pers; in md_run()
6053 spin_unlock(&mddev->lock); in md_run()
6054 rdev_for_each(rdev, mddev) in md_run()
6056 sysfs_link_rdev(mddev, rdev); /* failure here is OK */ in md_run()
6058 if (mddev->degraded && !mddev->ro) in md_run()
6062 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_run()
6063 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_run()
6065 if (mddev->sb_flags) in md_run()
6066 md_update_sb(mddev, 0); in md_run()
6068 md_new_event(mddev); in md_run()
6072 mddev_detach(mddev); in md_run()
6073 if (mddev->private) in md_run()
6074 pers->free(mddev, mddev->private); in md_run()
6075 mddev->private = NULL; in md_run()
6077 md_bitmap_destroy(mddev); in md_run()
6079 bioset_exit(&mddev->bio_set); in md_run()
6080 bioset_exit(&mddev->sync_set); in md_run()
6085 int do_md_run(struct mddev *mddev) in do_md_run() argument
6089 set_bit(MD_NOT_READY, &mddev->flags); in do_md_run()
6090 err = md_run(mddev); in do_md_run()
6093 err = md_bitmap_load(mddev); in do_md_run()
6095 md_bitmap_destroy(mddev); in do_md_run()
6099 if (mddev_is_clustered(mddev)) in do_md_run()
6100 md_allow_write(mddev); in do_md_run()
6103 md_start(mddev); in do_md_run()
6105 md_wakeup_thread(mddev->thread); in do_md_run()
6106 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ in do_md_run()
6108 set_capacity(mddev->gendisk, mddev->array_sectors); in do_md_run()
6109 revalidate_disk_size(mddev->gendisk, true); in do_md_run()
6110 clear_bit(MD_NOT_READY, &mddev->flags); in do_md_run()
6111 mddev->changed = 1; in do_md_run()
6112 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); in do_md_run()
6113 sysfs_notify_dirent_safe(mddev->sysfs_state); in do_md_run()
6114 sysfs_notify_dirent_safe(mddev->sysfs_action); in do_md_run()
6115 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in do_md_run()
6117 clear_bit(MD_NOT_READY, &mddev->flags); in do_md_run()
6121 int md_start(struct mddev *mddev) in md_start() argument
6125 if (mddev->pers->start) { in md_start()
6126 set_bit(MD_RECOVERY_WAIT, &mddev->recovery); in md_start()
6127 md_wakeup_thread(mddev->thread); in md_start()
6128 ret = mddev->pers->start(mddev); in md_start()
6129 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery); in md_start()
6130 md_wakeup_thread(mddev->sync_thread); in md_start()
6136 static int restart_array(struct mddev *mddev) in restart_array() argument
6138 struct gendisk *disk = mddev->gendisk; in restart_array()
6144 if (list_empty(&mddev->disks)) in restart_array()
6146 if (!mddev->pers) in restart_array()
6148 if (!mddev->ro) in restart_array()
6152 rdev_for_each_rcu(rdev, mddev) { in restart_array()
6160 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal) in restart_array()
6166 mddev->safemode = 0; in restart_array()
6167 mddev->ro = 0; in restart_array()
6169 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev)); in restart_array()
6171 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in restart_array()
6172 md_wakeup_thread(mddev->thread); in restart_array()
6173 md_wakeup_thread(mddev->sync_thread); in restart_array()
6174 sysfs_notify_dirent_safe(mddev->sysfs_state); in restart_array()
6178 static void md_clean(struct mddev *mddev) in md_clean() argument
6180 mddev->array_sectors = 0; in md_clean()
6181 mddev->external_size = 0; in md_clean()
6182 mddev->dev_sectors = 0; in md_clean()
6183 mddev->raid_disks = 0; in md_clean()
6184 mddev->recovery_cp = 0; in md_clean()
6185 mddev->resync_min = 0; in md_clean()
6186 mddev->resync_max = MaxSector; in md_clean()
6187 mddev->reshape_position = MaxSector; in md_clean()
6188 mddev->external = 0; in md_clean()
6189 mddev->persistent = 0; in md_clean()
6190 mddev->level = LEVEL_NONE; in md_clean()
6191 mddev->clevel[0] = 0; in md_clean()
6192 mddev->flags = 0; in md_clean()
6193 mddev->sb_flags = 0; in md_clean()
6194 mddev->ro = 0; in md_clean()
6195 mddev->metadata_type[0] = 0; in md_clean()
6196 mddev->chunk_sectors = 0; in md_clean()
6197 mddev->ctime = mddev->utime = 0; in md_clean()
6198 mddev->layout = 0; in md_clean()
6199 mddev->max_disks = 0; in md_clean()
6200 mddev->events = 0; in md_clean()
6201 mddev->can_decrease_events = 0; in md_clean()
6202 mddev->delta_disks = 0; in md_clean()
6203 mddev->reshape_backwards = 0; in md_clean()
6204 mddev->new_level = LEVEL_NONE; in md_clean()
6205 mddev->new_layout = 0; in md_clean()
6206 mddev->new_chunk_sectors = 0; in md_clean()
6207 mddev->curr_resync = 0; in md_clean()
6208 atomic64_set(&mddev->resync_mismatches, 0); in md_clean()
6209 mddev->suspend_lo = mddev->suspend_hi = 0; in md_clean()
6210 mddev->sync_speed_min = mddev->sync_speed_max = 0; in md_clean()
6211 mddev->recovery = 0; in md_clean()
6212 mddev->in_sync = 0; in md_clean()
6213 mddev->changed = 0; in md_clean()
6214 mddev->degraded = 0; in md_clean()
6215 mddev->safemode = 0; in md_clean()
6216 mddev->private = NULL; in md_clean()
6217 mddev->cluster_info = NULL; in md_clean()
6218 mddev->bitmap_info.offset = 0; in md_clean()
6219 mddev->bitmap_info.default_offset = 0; in md_clean()
6220 mddev->bitmap_info.default_space = 0; in md_clean()
6221 mddev->bitmap_info.chunksize = 0; in md_clean()
6222 mddev->bitmap_info.daemon_sleep = 0; in md_clean()
6223 mddev->bitmap_info.max_write_behind = 0; in md_clean()
6224 mddev->bitmap_info.nodes = 0; in md_clean()
6227 static void __md_stop_writes(struct mddev *mddev) in __md_stop_writes() argument
6229 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in __md_stop_writes()
6230 if (work_pending(&mddev->del_work)) in __md_stop_writes()
6232 if (mddev->sync_thread) { in __md_stop_writes()
6233 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in __md_stop_writes()
6234 md_reap_sync_thread(mddev); in __md_stop_writes()
6237 del_timer_sync(&mddev->safemode_timer); in __md_stop_writes()
6239 if (mddev->pers && mddev->pers->quiesce) { in __md_stop_writes()
6240 mddev->pers->quiesce(mddev, 1); in __md_stop_writes()
6241 mddev->pers->quiesce(mddev, 0); in __md_stop_writes()
6243 md_bitmap_flush(mddev); in __md_stop_writes()
6245 if (mddev->ro == 0 && in __md_stop_writes()
6246 ((!mddev->in_sync && !mddev_is_clustered(mddev)) || in __md_stop_writes()
6247 mddev->sb_flags)) { in __md_stop_writes()
6249 if (!mddev_is_clustered(mddev)) in __md_stop_writes()
6250 mddev->in_sync = 1; in __md_stop_writes()
6251 md_update_sb(mddev, 1); in __md_stop_writes()
6254 mddev->serialize_policy = 0; in __md_stop_writes()
6255 mddev_destroy_serial_pool(mddev, NULL, true); in __md_stop_writes()
6258 void md_stop_writes(struct mddev *mddev) in md_stop_writes() argument
6260 mddev_lock_nointr(mddev); in md_stop_writes()
6261 __md_stop_writes(mddev); in md_stop_writes()
6262 mddev_unlock(mddev); in md_stop_writes()
6266 static void mddev_detach(struct mddev *mddev) in mddev_detach() argument
6268 md_bitmap_wait_behind_writes(mddev); in mddev_detach()
6269 if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) { in mddev_detach()
6270 mddev->pers->quiesce(mddev, 1); in mddev_detach()
6271 mddev->pers->quiesce(mddev, 0); in mddev_detach()
6273 md_unregister_thread(&mddev->thread); in mddev_detach()
6274 if (mddev->queue) in mddev_detach()
6275 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ in mddev_detach()
6278 static void __md_stop(struct mddev *mddev) in __md_stop() argument
6280 struct md_personality *pers = mddev->pers; in __md_stop()
6281 md_bitmap_destroy(mddev); in __md_stop()
6282 mddev_detach(mddev); in __md_stop()
6284 if (mddev->event_work.func) in __md_stop()
6286 spin_lock(&mddev->lock); in __md_stop()
6287 mddev->pers = NULL; in __md_stop()
6288 spin_unlock(&mddev->lock); in __md_stop()
6289 pers->free(mddev, mddev->private); in __md_stop()
6290 mddev->private = NULL; in __md_stop()
6291 if (pers->sync_request && mddev->to_remove == NULL) in __md_stop()
6292 mddev->to_remove = &md_redundancy_group; in __md_stop()
6294 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in __md_stop()
6297 void md_stop(struct mddev *mddev) in md_stop() argument
6302 __md_stop_writes(mddev); in md_stop()
6303 __md_stop(mddev); in md_stop()
6304 bioset_exit(&mddev->bio_set); in md_stop()
6305 bioset_exit(&mddev->sync_set); in md_stop()
6310 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) in md_set_readonly() argument
6315 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { in md_set_readonly()
6317 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
6318 md_wakeup_thread(mddev->thread); in md_set_readonly()
6320 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in md_set_readonly()
6321 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_set_readonly()
6322 if (mddev->sync_thread) in md_set_readonly()
6325 wake_up_process(mddev->sync_thread->tsk); in md_set_readonly()
6327 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) in md_set_readonly()
6329 mddev_unlock(mddev); in md_set_readonly()
6331 &mddev->recovery)); in md_set_readonly()
6332 wait_event(mddev->sb_wait, in md_set_readonly()
6333 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); in md_set_readonly()
6334 mddev_lock_nointr(mddev); in md_set_readonly()
6336 mutex_lock(&mddev->open_mutex); in md_set_readonly()
6337 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || in md_set_readonly()
6338 mddev->sync_thread || in md_set_readonly()
6339 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { in md_set_readonly()
6340 pr_warn("md: %s still in use.\n",mdname(mddev)); in md_set_readonly()
6342 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
6343 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_set_readonly()
6344 md_wakeup_thread(mddev->thread); in md_set_readonly()
6349 if (mddev->pers) { in md_set_readonly()
6350 __md_stop_writes(mddev); in md_set_readonly()
6353 if (mddev->ro==1) in md_set_readonly()
6355 mddev->ro = 1; in md_set_readonly()
6356 set_disk_ro(mddev->gendisk, 1); in md_set_readonly()
6357 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in md_set_readonly()
6358 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_set_readonly()
6359 md_wakeup_thread(mddev->thread); in md_set_readonly()
6360 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_set_readonly()
6364 mutex_unlock(&mddev->open_mutex); in md_set_readonly()
6372 static int do_md_stop(struct mddev *mddev, int mode, in do_md_stop() argument
6375 struct gendisk *disk = mddev->gendisk; in do_md_stop()
6379 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { in do_md_stop()
6381 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in do_md_stop()
6382 md_wakeup_thread(mddev->thread); in do_md_stop()
6384 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in do_md_stop()
6385 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in do_md_stop()
6386 if (mddev->sync_thread) in do_md_stop()
6389 wake_up_process(mddev->sync_thread->tsk); in do_md_stop()
6391 mddev_unlock(mddev); in do_md_stop()
6392 wait_event(resync_wait, (mddev->sync_thread == NULL && in do_md_stop()
6394 &mddev->recovery))); in do_md_stop()
6395 mddev_lock_nointr(mddev); in do_md_stop()
6397 mutex_lock(&mddev->open_mutex); in do_md_stop()
6398 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || in do_md_stop()
6399 mddev->sysfs_active || in do_md_stop()
6400 mddev->sync_thread || in do_md_stop()
6401 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { in do_md_stop()
6402 pr_warn("md: %s still in use.\n",mdname(mddev)); in do_md_stop()
6403 mutex_unlock(&mddev->open_mutex); in do_md_stop()
6405 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); in do_md_stop()
6406 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in do_md_stop()
6407 md_wakeup_thread(mddev->thread); in do_md_stop()
6411 if (mddev->pers) { in do_md_stop()
6412 if (mddev->ro) in do_md_stop()
6415 __md_stop_writes(mddev); in do_md_stop()
6416 __md_stop(mddev); in do_md_stop()
6419 sysfs_notify_dirent_safe(mddev->sysfs_state); in do_md_stop()
6421 rdev_for_each(rdev, mddev) in do_md_stop()
6423 sysfs_unlink_rdev(mddev, rdev); in do_md_stop()
6426 mutex_unlock(&mddev->open_mutex); in do_md_stop()
6427 mddev->changed = 1; in do_md_stop()
6430 if (mddev->ro) in do_md_stop()
6431 mddev->ro = 0; in do_md_stop()
6433 mutex_unlock(&mddev->open_mutex); in do_md_stop()
6438 pr_info("md: %s stopped.\n", mdname(mddev)); in do_md_stop()
6440 if (mddev->bitmap_info.file) { in do_md_stop()
6441 struct file *f = mddev->bitmap_info.file; in do_md_stop()
6442 spin_lock(&mddev->lock); in do_md_stop()
6443 mddev->bitmap_info.file = NULL; in do_md_stop()
6444 spin_unlock(&mddev->lock); in do_md_stop()
6447 mddev->bitmap_info.offset = 0; in do_md_stop()
6449 export_array(mddev); in do_md_stop()
6451 md_clean(mddev); in do_md_stop()
6452 if (mddev->hold_active == UNTIL_STOP) in do_md_stop()
6453 mddev->hold_active = 0; in do_md_stop()
6455 md_new_event(mddev); in do_md_stop()
6456 sysfs_notify_dirent_safe(mddev->sysfs_state); in do_md_stop()
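
do_md_stop() is the teardown path reached when userspace asks for the array to be stopped. With the classic ioctl interface that request looks roughly like the sketch below (assuming /dev/md0 and the STOP_ARRAY ioctl from the uapi headers):

/* Sketch: ask the md driver to stop an array via the classic ioctl
 * interface (assumes /dev/md0 and the uapi headers shown). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/major.h>	/* MD_MAJOR, used by the md ioctl numbers */
#include <linux/raid/md_u.h>	/* STOP_ARRAY */

int main(void)
{
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0) {
		perror("/dev/md0");
		return 1;
	}
	if (ioctl(fd, STOP_ARRAY, NULL) < 0)
		perror("STOP_ARRAY");
	close(fd);
	return 0;
}
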
6461 static void autorun_array(struct mddev *mddev) in autorun_array() argument
6466 if (list_empty(&mddev->disks)) in autorun_array()
6471 rdev_for_each(rdev, mddev) { in autorun_array()
6477 err = do_md_run(mddev); in autorun_array()
6480 do_md_stop(mddev, 0, NULL); in autorun_array()
6499 struct mddev *mddev; in autorun_devices() local
6538 mddev = mddev_find(dev); in autorun_devices()
6539 if (!mddev) in autorun_devices()
6542 if (mddev_lock(mddev)) in autorun_devices()
6543 pr_warn("md: %s locked, cannot run\n", mdname(mddev)); in autorun_devices()
6544 else if (mddev->raid_disks || mddev->major_version in autorun_devices()
6545 || !list_empty(&mddev->disks)) { in autorun_devices()
6547 mdname(mddev), bdevname(rdev0->bdev,b)); in autorun_devices()
6548 mddev_unlock(mddev); in autorun_devices()
6550 pr_debug("md: created %s\n", mdname(mddev)); in autorun_devices()
6551 mddev->persistent = 1; in autorun_devices()
6554 if (bind_rdev_to_array(rdev, mddev)) in autorun_devices()
6557 autorun_array(mddev); in autorun_devices()
6558 mddev_unlock(mddev); in autorun_devices()
6567 mddev_put(mddev); in autorun_devices()
6587 static int get_array_info(struct mddev *mddev, void __user *arg) in get_array_info() argument
6595 rdev_for_each_rcu(rdev, mddev) { in get_array_info()
6612 info.major_version = mddev->major_version; in get_array_info()
6613 info.minor_version = mddev->minor_version; in get_array_info()
6615 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); in get_array_info()
6616 info.level = mddev->level; in get_array_info()
6617 info.size = mddev->dev_sectors / 2; in get_array_info()
6618 if (info.size != mddev->dev_sectors / 2) /* overflow */ in get_array_info()
6621 info.raid_disks = mddev->raid_disks; in get_array_info()
6622 info.md_minor = mddev->md_minor; in get_array_info()
6623 info.not_persistent= !mddev->persistent; in get_array_info()
6625 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); in get_array_info()
6627 if (mddev->in_sync) in get_array_info()
6629 if (mddev->bitmap && mddev->bitmap_info.offset) in get_array_info()
6631 if (mddev_is_clustered(mddev)) in get_array_info()
6638 info.layout = mddev->layout; in get_array_info()
6639 info.chunk_size = mddev->chunk_sectors << 9; in get_array_info()
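
get_array_info() fills the structure returned by the GET_ARRAY_INFO ioctl. A userspace sketch of the caller's side (assuming /dev/md0):

/* Sketch: query basic array geometry with GET_ARRAY_INFO, the ioctl
 * served by get_array_info() above (assumes /dev/md0). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/major.h>
#include <linux/raid/md_u.h>	/* GET_ARRAY_INFO, mdu_array_info_t */

int main(void)
{
	mdu_array_info_t info;
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0) {
		perror("/dev/md0");
		return 1;
	}
	if (ioctl(fd, GET_ARRAY_INFO, &info) < 0) {
		perror("GET_ARRAY_INFO");
		close(fd);
		return 1;
	}
	printf("level %d, %d raid disks, chunk %d bytes\n",
	       info.level, info.raid_disks, info.chunk_size);
	close(fd);
	return 0;
}
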
6647 static int get_bitmap_file(struct mddev *mddev, void __user * arg) in get_bitmap_file() argument
6658 spin_lock(&mddev->lock); in get_bitmap_file()
6660 if (mddev->bitmap_info.file) { in get_bitmap_file()
6661 ptr = file_path(mddev->bitmap_info.file, file->pathname, in get_bitmap_file()
6669 spin_unlock(&mddev->lock); in get_bitmap_file()
6679 static int get_disk_info(struct mddev *mddev, void __user * arg) in get_disk_info() argument
6688 rdev = md_find_rdev_nr_rcu(mddev, info.number); in get_disk_info()
6719 int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) in md_add_new_disk() argument
6725 if (mddev_is_clustered(mddev) && in md_add_new_disk()
6728 mdname(mddev)); in md_add_new_disk()
6735 if (!mddev->raid_disks) { in md_add_new_disk()
6738 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); in md_add_new_disk()
6744 if (!list_empty(&mddev->disks)) { in md_add_new_disk()
6746 = list_entry(mddev->disks.next, in md_add_new_disk()
6748 err = super_types[mddev->major_version] in md_add_new_disk()
6749 .load_super(rdev, rdev0, mddev->minor_version); in md_add_new_disk()
6758 err = bind_rdev_to_array(rdev, mddev); in md_add_new_disk()
6769 if (mddev->pers) { in md_add_new_disk()
6771 if (!mddev->pers->hot_add_disk) { in md_add_new_disk()
6773 mdname(mddev)); in md_add_new_disk()
6776 if (mddev->persistent) in md_add_new_disk()
6777 rdev = md_import_device(dev, mddev->major_version, in md_add_new_disk()
6778 mddev->minor_version); in md_add_new_disk()
6787 if (!mddev->persistent) { in md_add_new_disk()
6789 info->raid_disk < mddev->raid_disks) { in md_add_new_disk()
6797 super_types[mddev->major_version]. in md_add_new_disk()
6798 validate_super(mddev, rdev); in md_add_new_disk()
6823 rdev_for_each(rdev2, mddev) { in md_add_new_disk()
6829 if (has_journal || mddev->bitmap) { in md_add_new_disk()
6838 if (mddev_is_clustered(mddev)) { in md_add_new_disk()
6843 err = md_cluster_ops->add_new_disk(mddev, rdev); in md_add_new_disk()
6852 err = bind_rdev_to_array(rdev, mddev); in md_add_new_disk()
6857 if (mddev_is_clustered(mddev)) { in md_add_new_disk()
6860 err = md_cluster_ops->new_disk_ack(mddev, in md_add_new_disk()
6867 md_cluster_ops->add_new_disk_cancel(mddev); in md_add_new_disk()
6881 if (mddev->major_version != 0) { in md_add_new_disk()
6882 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev)); in md_add_new_disk()
6895 if (info->raid_disk < mddev->raid_disks) in md_add_new_disk()
6900 if (rdev->raid_disk < mddev->raid_disks) in md_add_new_disk()
6909 if (!mddev->persistent) { in md_add_new_disk()
6916 err = bind_rdev_to_array(rdev, mddev); in md_add_new_disk()
6926 static int hot_remove_disk(struct mddev *mddev, dev_t dev) in hot_remove_disk() argument
6931 if (!mddev->pers) in hot_remove_disk()
6934 rdev = find_rdev(mddev, dev); in hot_remove_disk()
6942 remove_and_add_spares(mddev, rdev); in hot_remove_disk()
6948 if (mddev_is_clustered(mddev)) { in hot_remove_disk()
6949 if (md_cluster_ops->remove_disk(mddev, rdev)) in hot_remove_disk()
6954 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in hot_remove_disk()
6955 if (mddev->thread) in hot_remove_disk()
6956 md_wakeup_thread(mddev->thread); in hot_remove_disk()
6958 md_update_sb(mddev, 1); in hot_remove_disk()
6959 md_new_event(mddev); in hot_remove_disk()
6964 bdevname(rdev->bdev,b), mdname(mddev)); in hot_remove_disk()
6968 static int hot_add_disk(struct mddev *mddev, dev_t dev) in hot_add_disk() argument
6974 if (!mddev->pers) in hot_add_disk()
6977 if (mddev->major_version != 0) { in hot_add_disk()
6979 mdname(mddev)); in hot_add_disk()
6982 if (!mddev->pers->hot_add_disk) { in hot_add_disk()
6984 mdname(mddev)); in hot_add_disk()
6995 if (mddev->persistent) in hot_add_disk()
7004 bdevname(rdev->bdev,b), mdname(mddev)); in hot_add_disk()
7012 err = bind_rdev_to_array(rdev, mddev); in hot_add_disk()
7023 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in hot_add_disk()
7024 if (!mddev->thread) in hot_add_disk()
7025 md_update_sb(mddev, 1); in hot_add_disk()
7030 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in hot_add_disk()
7031 md_wakeup_thread(mddev->thread); in hot_add_disk()
7032 md_new_event(mddev); in hot_add_disk()
7040 static int set_bitmap_file(struct mddev *mddev, int fd) in set_bitmap_file() argument
7044 if (mddev->pers) { in set_bitmap_file()
7045 if (!mddev->pers->quiesce || !mddev->thread) in set_bitmap_file()
7047 if (mddev->recovery || mddev->sync_thread) in set_bitmap_file()
7056 if (mddev->bitmap || mddev->bitmap_info.file) in set_bitmap_file()
7062 mdname(mddev)); in set_bitmap_file()
7069 mdname(mddev)); in set_bitmap_file()
7073 mdname(mddev)); in set_bitmap_file()
7077 mdname(mddev)); in set_bitmap_file()
7084 mddev->bitmap_info.file = f; in set_bitmap_file()
7085 mddev->bitmap_info.offset = 0; /* file overrides offset */ in set_bitmap_file()
7086 } else if (mddev->bitmap == NULL) in set_bitmap_file()
7089 if (mddev->pers) { in set_bitmap_file()
7093 bitmap = md_bitmap_create(mddev, -1); in set_bitmap_file()
7094 mddev_suspend(mddev); in set_bitmap_file()
7096 mddev->bitmap = bitmap; in set_bitmap_file()
7097 err = md_bitmap_load(mddev); in set_bitmap_file()
7101 md_bitmap_destroy(mddev); in set_bitmap_file()
7104 mddev_resume(mddev); in set_bitmap_file()
7106 mddev_suspend(mddev); in set_bitmap_file()
7107 md_bitmap_destroy(mddev); in set_bitmap_file()
7108 mddev_resume(mddev); in set_bitmap_file()
7112 struct file *f = mddev->bitmap_info.file; in set_bitmap_file()
7114 spin_lock(&mddev->lock); in set_bitmap_file()
7115 mddev->bitmap_info.file = NULL; in set_bitmap_file()
7116 spin_unlock(&mddev->lock); in set_bitmap_file()
7137 int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info) in md_set_array_info() argument
7149 mddev->major_version = info->major_version; in md_set_array_info()
7150 mddev->minor_version = info->minor_version; in md_set_array_info()
7151 mddev->patch_version = info->patch_version; in md_set_array_info()
7152 mddev->persistent = !info->not_persistent; in md_set_array_info()
7156 mddev->ctime = ktime_get_real_seconds(); in md_set_array_info()
7159 mddev->major_version = MD_MAJOR_VERSION; in md_set_array_info()
7160 mddev->minor_version = MD_MINOR_VERSION; in md_set_array_info()
7161 mddev->patch_version = MD_PATCHLEVEL_VERSION; in md_set_array_info()
7162 mddev->ctime = ktime_get_real_seconds(); in md_set_array_info()
7164 mddev->level = info->level; in md_set_array_info()
7165 mddev->clevel[0] = 0; in md_set_array_info()
7166 mddev->dev_sectors = 2 * (sector_t)info->size; in md_set_array_info()
7167 mddev->raid_disks = info->raid_disks; in md_set_array_info()
7172 mddev->recovery_cp = MaxSector; in md_set_array_info()
7174 mddev->recovery_cp = 0; in md_set_array_info()
7175 mddev->persistent = ! info->not_persistent; in md_set_array_info()
7176 mddev->external = 0; in md_set_array_info()
7178 mddev->layout = info->layout; in md_set_array_info()
7179 if (mddev->level == 0) in md_set_array_info()
7181 mddev->layout = -1; in md_set_array_info()
7182 mddev->chunk_sectors = info->chunk_size >> 9; in md_set_array_info()
7184 if (mddev->persistent) { in md_set_array_info()
7185 mddev->max_disks = MD_SB_DISKS; in md_set_array_info()
7186 mddev->flags = 0; in md_set_array_info()
7187 mddev->sb_flags = 0; in md_set_array_info()
7189 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_set_array_info()
7191 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; in md_set_array_info()
7192 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); in md_set_array_info()
7193 mddev->bitmap_info.offset = 0; in md_set_array_info()
7195 mddev->reshape_position = MaxSector; in md_set_array_info()
7200 get_random_bytes(mddev->uuid, 16); in md_set_array_info()
7202 mddev->new_level = mddev->level; in md_set_array_info()
7203 mddev->new_chunk_sectors = mddev->chunk_sectors; in md_set_array_info()
7204 mddev->new_layout = mddev->layout; in md_set_array_info()
7205 mddev->delta_disks = 0; in md_set_array_info()
7206 mddev->reshape_backwards = 0; in md_set_array_info()
7211 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) in md_set_array_sectors() argument
7213 lockdep_assert_held(&mddev->reconfig_mutex); in md_set_array_sectors()
7215 if (mddev->external_size) in md_set_array_sectors()
7218 mddev->array_sectors = array_sectors; in md_set_array_sectors()
7222 static int update_size(struct mddev *mddev, sector_t num_sectors) in update_size() argument
7227 sector_t old_dev_sectors = mddev->dev_sectors; in update_size()
7229 if (mddev->pers->resize == NULL) in update_size()
7240 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in update_size()
7241 mddev->sync_thread) in update_size()
7243 if (mddev->ro) in update_size()
7246 rdev_for_each(rdev, mddev) { in update_size()
7254 rv = mddev->pers->resize(mddev, num_sectors); in update_size()
7256 if (mddev_is_clustered(mddev)) in update_size()
7257 md_cluster_ops->update_size(mddev, old_dev_sectors); in update_size()
7258 else if (mddev->queue) { in update_size()
7259 set_capacity(mddev->gendisk, mddev->array_sectors); in update_size()
7260 revalidate_disk_size(mddev->gendisk, true); in update_size()
7266 static int update_raid_disks(struct mddev *mddev, int raid_disks) in update_raid_disks() argument
7271 if (mddev->pers->check_reshape == NULL) in update_raid_disks()
7273 if (mddev->ro) in update_raid_disks()
7276 (mddev->max_disks && raid_disks >= mddev->max_disks)) in update_raid_disks()
7278 if (mddev->sync_thread || in update_raid_disks()
7279 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || in update_raid_disks()
7280 test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) || in update_raid_disks()
7281 mddev->reshape_position != MaxSector) in update_raid_disks()
7284 rdev_for_each(rdev, mddev) { in update_raid_disks()
7285 if (mddev->raid_disks < raid_disks && in update_raid_disks()
7288 if (mddev->raid_disks > raid_disks && in update_raid_disks()
7293 mddev->delta_disks = raid_disks - mddev->raid_disks; in update_raid_disks()
7294 if (mddev->delta_disks < 0) in update_raid_disks()
7295 mddev->reshape_backwards = 1; in update_raid_disks()
7296 else if (mddev->delta_disks > 0) in update_raid_disks()
7297 mddev->reshape_backwards = 0; in update_raid_disks()
7299 rv = mddev->pers->check_reshape(mddev); in update_raid_disks()
7301 mddev->delta_disks = 0; in update_raid_disks()
7302 mddev->reshape_backwards = 0; in update_raid_disks()
7315 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) in update_array_info() argument
7322 if (mddev->bitmap && mddev->bitmap_info.offset) in update_array_info()
7325 if (mddev->major_version != info->major_version || in update_array_info()
7326 mddev->minor_version != info->minor_version || in update_array_info()
7328 mddev->ctime != info->ctime || in update_array_info()
7329 mddev->level != info->level || in update_array_info()
7331 mddev->persistent != !info->not_persistent || in update_array_info()
7332 mddev->chunk_sectors != info->chunk_size >> 9 || in update_array_info()
7338 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) in update_array_info()
7340 if (mddev->raid_disks != info->raid_disks) in update_array_info()
7342 if (mddev->layout != info->layout) in update_array_info()
7351 if (mddev->layout != info->layout) { in update_array_info()
7356 if (mddev->pers->check_reshape == NULL) in update_array_info()
7359 mddev->new_layout = info->layout; in update_array_info()
7360 rv = mddev->pers->check_reshape(mddev); in update_array_info()
7362 mddev->new_layout = mddev->layout; in update_array_info()
7366 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) in update_array_info()
7367 rv = update_size(mddev, (sector_t)info->size * 2); in update_array_info()
7369 if (mddev->raid_disks != info->raid_disks) in update_array_info()
7370 rv = update_raid_disks(mddev, info->raid_disks); in update_array_info()
7373 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) { in update_array_info()
7377 if (mddev->recovery || mddev->sync_thread) { in update_array_info()
7384 if (mddev->bitmap) { in update_array_info()
7388 if (mddev->bitmap_info.default_offset == 0) { in update_array_info()
7392 mddev->bitmap_info.offset = in update_array_info()
7393 mddev->bitmap_info.default_offset; in update_array_info()
7394 mddev->bitmap_info.space = in update_array_info()
7395 mddev->bitmap_info.default_space; in update_array_info()
7396 bitmap = md_bitmap_create(mddev, -1); in update_array_info()
7397 mddev_suspend(mddev); in update_array_info()
7399 mddev->bitmap = bitmap; in update_array_info()
7400 rv = md_bitmap_load(mddev); in update_array_info()
7404 md_bitmap_destroy(mddev); in update_array_info()
7405 mddev_resume(mddev); in update_array_info()
7408 if (!mddev->bitmap) { in update_array_info()
7412 if (mddev->bitmap->storage.file) { in update_array_info()
7416 if (mddev->bitmap_info.nodes) { in update_array_info()
7418 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) { in update_array_info()
7421 md_cluster_ops->unlock_all_bitmaps(mddev); in update_array_info()
7425 mddev->bitmap_info.nodes = 0; in update_array_info()
7426 md_cluster_ops->leave(mddev); in update_array_info()
7428 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; in update_array_info()
7430 mddev_suspend(mddev); in update_array_info()
7431 md_bitmap_destroy(mddev); in update_array_info()
7432 mddev_resume(mddev); in update_array_info()
7433 mddev->bitmap_info.offset = 0; in update_array_info()
7436 md_update_sb(mddev, 1); in update_array_info()
7442 static int set_disk_faulty(struct mddev *mddev, dev_t dev) in set_disk_faulty() argument
7447 if (mddev->pers == NULL) in set_disk_faulty()
7451 rdev = md_find_rdev_rcu(mddev, dev); in set_disk_faulty()
7455 md_error(mddev, rdev); in set_disk_faulty()
7471 struct mddev *mddev = bdev->bd_disk->private_data; in md_getgeo() local
7475 geo->cylinders = mddev->array_sectors / 8; in md_getgeo()
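
md_getgeo() supplies the synthetic geometry returned for HDIO_GETGEO. The matching userspace call (assuming /dev/md0):

/* Sketch: the HDIO_GETGEO ioctl that md_getgeo() answers, issued from
 * userspace (assumes /dev/md0). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO, struct hd_geometry */

int main(void)
{
	struct hd_geometry geo;
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0) {
		perror("/dev/md0");
		return 1;
	}
	if (ioctl(fd, HDIO_GETGEO, &geo) < 0) {
		perror("HDIO_GETGEO");
		close(fd);
		return 1;
	}
	printf("heads=%u sectors=%u cylinders=%u\n",
	       geo.heads, geo.sectors, geo.cylinders);
	close(fd);
	return 0;
}
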
7509 struct mddev *mddev = NULL; in md_ioctl() local
7541 mddev = bdev->bd_disk->private_data; in md_ioctl()
7543 if (!mddev) { in md_ioctl()
7551 if (!mddev->raid_disks && !mddev->external) in md_ioctl()
7554 err = get_array_info(mddev, argp); in md_ioctl()
7558 if (!mddev->raid_disks && !mddev->external) in md_ioctl()
7561 err = get_disk_info(mddev, argp); in md_ioctl()
7565 err = set_disk_faulty(mddev, new_decode_dev(arg)); in md_ioctl()
7569 err = get_bitmap_file(mddev, argp); in md_ioctl()
7575 flush_rdev_wq(mddev); in md_ioctl()
7579 wait_event_interruptible_timeout(mddev->sb_wait, in md_ioctl()
7581 &mddev->recovery), in md_ioctl()
7587 mutex_lock(&mddev->open_mutex); in md_ioctl()
7588 if (mddev->pers && atomic_read(&mddev->openers) > 1) { in md_ioctl()
7589 mutex_unlock(&mddev->open_mutex); in md_ioctl()
7593 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) { in md_ioctl()
7594 mutex_unlock(&mddev->open_mutex); in md_ioctl()
7599 mutex_unlock(&mddev->open_mutex); in md_ioctl()
7602 err = mddev_lock(mddev); in md_ioctl()
7617 if (mddev->pers) { in md_ioctl()
7618 err = update_array_info(mddev, &info); in md_ioctl()
7625 if (!list_empty(&mddev->disks)) { in md_ioctl()
7626 pr_warn("md: array %s already has disks!\n", mdname(mddev)); in md_ioctl()
7630 if (mddev->raid_disks) { in md_ioctl()
7631 pr_warn("md: array %s already initialised!\n", mdname(mddev)); in md_ioctl()
7635 err = md_set_array_info(mddev, &info); in md_ioctl()
7648 if ((!mddev->raid_disks && !mddev->external) in md_ioctl()
7661 err = restart_array(mddev); in md_ioctl()
7665 err = do_md_stop(mddev, 0, bdev); in md_ioctl()
7669 err = md_set_readonly(mddev, bdev); in md_ioctl()
7673 err = hot_remove_disk(mddev, new_decode_dev(arg)); in md_ioctl()
7681 if (mddev->pers) { in md_ioctl()
7689 err = md_add_new_disk(mddev, &info); in md_ioctl()
7708 if (mddev->ro != 1) in md_ioctl()
7714 if (mddev->pers) { in md_ioctl()
7715 err = restart_array(mddev); in md_ioctl()
7717 mddev->ro = 2; in md_ioctl()
7718 set_disk_ro(mddev->gendisk, 0); in md_ioctl()
7728 if (mddev->ro && mddev->pers) { in md_ioctl()
7729 if (mddev->ro == 2) { in md_ioctl()
7730 mddev->ro = 0; in md_ioctl()
7731 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_ioctl()
7732 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_ioctl()
7737 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) { in md_ioctl()
7738 mddev_unlock(mddev); in md_ioctl()
7739 wait_event(mddev->sb_wait, in md_ioctl()
7740 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) && in md_ioctl()
7741 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); in md_ioctl()
7742 mddev_lock_nointr(mddev); in md_ioctl()
7757 err = md_add_new_disk(mddev, &info); in md_ioctl()
7762 if (mddev_is_clustered(mddev)) in md_ioctl()
7763 md_cluster_ops->new_disk_ack(mddev, false); in md_ioctl()
7769 err = hot_add_disk(mddev, new_decode_dev(arg)); in md_ioctl()
7773 err = do_md_run(mddev); in md_ioctl()
7777 err = set_bitmap_file(mddev, (int)arg); in md_ioctl()
7786 if (mddev->hold_active == UNTIL_IOCTL && in md_ioctl()
7788 mddev->hold_active = 0; in md_ioctl()
7789 mddev_unlock(mddev); in md_ioctl()
7792 clear_bit(MD_CLOSING, &mddev->flags); in md_ioctl()
7821 struct mddev *mddev = mddev_find(bdev->bd_dev); in md_open() local
7824 if (!mddev) in md_open()
7827 if (mddev->gendisk != bdev->bd_disk) { in md_open()
7831 mddev_put(mddev); in md_open()
7833 if (work_pending(&mddev->del_work)) in md_open()
7837 BUG_ON(mddev != bdev->bd_disk->private_data); in md_open()
7839 if ((err = mutex_lock_interruptible(&mddev->open_mutex))) in md_open()
7842 if (test_bit(MD_CLOSING, &mddev->flags)) { in md_open()
7843 mutex_unlock(&mddev->open_mutex); in md_open()
7849 atomic_inc(&mddev->openers); in md_open()
7850 mutex_unlock(&mddev->open_mutex); in md_open()
7855 mddev_put(mddev); in md_open()
7861 struct mddev *mddev = disk->private_data; in md_release() local
7863 BUG_ON(!mddev); in md_release()
7864 atomic_dec(&mddev->openers); in md_release()
7865 mddev_put(mddev); in md_release()
7870 struct mddev *mddev = disk->private_data; in md_check_events() local
7873 if (mddev->changed) in md_check_events()
7875 mddev->changed = 0; in md_check_events()
7947 struct mddev *mddev, const char *name) in md_register_thread() argument
7958 thread->mddev = mddev; in md_register_thread()
7962 mdname(thread->mddev), in md_register_thread()
7995 void md_error(struct mddev *mddev, struct md_rdev *rdev) in md_error() argument
8000 if (!mddev->pers || !mddev->pers->error_handler) in md_error()
8002 mddev->pers->error_handler(mddev,rdev); in md_error()
8003 if (mddev->degraded) in md_error()
8004 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_error()
8006 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_error()
8007 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_error()
8008 md_wakeup_thread(mddev->thread); in md_error()
8009 if (mddev->event_work.func) in md_error()
8010 queue_work(md_misc_wq, &mddev->event_work); in md_error()
8011 md_new_event(mddev); in md_error()
8036 static int status_resync(struct seq_file *seq, struct mddev *mddev) in status_resync() argument
8044 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in status_resync()
8045 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in status_resync()
8046 max_sectors = mddev->resync_max_sectors; in status_resync()
8048 max_sectors = mddev->dev_sectors; in status_resync()
8050 resync = mddev->curr_resync; in status_resync()
8052 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) in status_resync()
8058 resync -= atomic_read(&mddev->recovery_active); in status_resync()
8061 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) { in status_resync()
8064 rdev_for_each(rdev, mddev) in status_resync()
8072 if (mddev->reshape_position != MaxSector) in status_resync()
8078 if (mddev->recovery_cp < MaxSector) { in status_resync()
8115 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? in status_resync()
8117 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? in status_resync()
8119 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? in status_resync()
8142 dt = ((jiffies - mddev->resync_mark) / HZ); in status_resync()
8145 curr_mark_cnt = mddev->curr_mark_cnt; in status_resync()
8146 recovery_active = atomic_read(&mddev->recovery_active); in status_resync()
8147 resync_mark_cnt = mddev->resync_mark_cnt; in status_resync()
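The tail of status_resync() (lines 8142-8147) samples the seconds elapsed since the last rate mark together with the sector counters needed for the speed figure; the rate shown in /proc/mdstat is the sector delta over that window, halved to KiB and divided by the elapsed time. As a purely illustrative calculation (numbers invented): 2,000,000 sectors completed over a 10-second window would be reported as 2000000/2/10 = 100000K/sec, roughly 100 MB/s.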
8168 struct mddev *mddev; in md_seq_start() local
8183 mddev = list_entry(tmp, struct mddev, all_mddevs); in md_seq_start()
8184 mddev_get(mddev); in md_seq_start()
8186 return mddev; in md_seq_start()
8197 struct mddev *next_mddev, *mddev = v; in md_seq_next() local
8207 tmp = mddev->all_mddevs.next; in md_seq_next()
8209 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs)); in md_seq_next()
8217 mddev_put(mddev); in md_seq_next()
8224 struct mddev *mddev = v; in md_seq_stop() local
8226 if (mddev && v != (void*)1 && v != (void*)2) in md_seq_stop()
8227 mddev_put(mddev); in md_seq_stop()
8232 struct mddev *mddev = v; in md_seq_show() local
8253 spin_lock(&mddev->lock); in md_seq_show()
8254 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { in md_seq_show()
8255 seq_printf(seq, "%s : %sactive", mdname(mddev), in md_seq_show()
8256 mddev->pers ? "" : "in"); in md_seq_show()
8257 if (mddev->pers) { in md_seq_show()
8258 if (mddev->ro==1) in md_seq_show()
8260 if (mddev->ro==2) in md_seq_show()
8262 seq_printf(seq, " %s", mddev->pers->name); in md_seq_show()
8267 rdev_for_each_rcu(rdev, mddev) { in md_seq_show()
8287 if (!list_empty(&mddev->disks)) { in md_seq_show()
8288 if (mddev->pers) in md_seq_show()
8291 mddev->array_sectors / 2); in md_seq_show()
8296 if (mddev->persistent) { in md_seq_show()
8297 if (mddev->major_version != 0 || in md_seq_show()
8298 mddev->minor_version != 90) { in md_seq_show()
8300 mddev->major_version, in md_seq_show()
8301 mddev->minor_version); in md_seq_show()
8303 } else if (mddev->external) in md_seq_show()
8305 mddev->metadata_type); in md_seq_show()
8309 if (mddev->pers) { in md_seq_show()
8310 mddev->pers->status(seq, mddev); in md_seq_show()
8312 if (mddev->pers->sync_request) { in md_seq_show()
8313 if (status_resync(seq, mddev)) in md_seq_show()
8319 md_bitmap_status(seq, mddev->bitmap); in md_seq_show()
8323 spin_unlock(&mddev->lock); in md_seq_show()
8421 int md_setup_cluster(struct mddev *mddev, int nodes) in md_setup_cluster() argument
8435 ret = md_cluster_ops->join(mddev, nodes); in md_setup_cluster()
8437 mddev->safemode_delay = 0; in md_setup_cluster()
8441 void md_cluster_stop(struct mddev *mddev) in md_cluster_stop() argument
8445 md_cluster_ops->leave(mddev); in md_cluster_stop()
8449 static int is_mddev_idle(struct mddev *mddev, int init) in is_mddev_idle() argument
8457 rdev_for_each_rcu(rdev, mddev) { in is_mddev_idle()
8492 void md_done_sync(struct mddev *mddev, int blocks, int ok) in md_done_sync() argument
8495 atomic_sub(blocks, &mddev->recovery_active); in md_done_sync()
8496 wake_up(&mddev->recovery_wait); in md_done_sync()
8498 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_done_sync()
8499 set_bit(MD_RECOVERY_ERROR, &mddev->recovery); in md_done_sync()
8500 md_wakeup_thread(mddev->thread); in md_done_sync()
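md_done_sync() (line 8492) is the callback a personality uses to hand completed sectors back to the recovery_active budget; passing ok == 0 interrupts the resync, as the INTR/ERROR bits above show. A sketch of a completion handler calling it (the context structure and names are assumptions, kept only to make the fragment self-contained):

        struct my_sync_ctx {            /* illustrative context, not real md code */
                struct mddev *mddev;
                int sectors;
        };

        static void my_end_sync_write(struct bio *bio)
        {
                struct my_sync_ctx *ctx = bio->bi_private;

                /* return the completed sectors; ok == 0 flags the chunk as failed */
                md_done_sync(ctx->mddev, ctx->sectors, bio->bi_status == BLK_STS_OK);
                kfree(ctx);
                bio_put(bio);
        }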
8513 bool md_write_start(struct mddev *mddev, struct bio *bi) in md_write_start() argument
8520 BUG_ON(mddev->ro == 1); in md_write_start()
8521 if (mddev->ro == 2) { in md_write_start()
8523 mddev->ro = 0; in md_write_start()
8524 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_write_start()
8525 md_wakeup_thread(mddev->thread); in md_write_start()
8526 md_wakeup_thread(mddev->sync_thread); in md_write_start()
8530 percpu_ref_get(&mddev->writes_pending); in md_write_start()
8532 if (mddev->safemode == 1) in md_write_start()
8533 mddev->safemode = 0; in md_write_start()
8535 if (mddev->in_sync || mddev->sync_checkers) { in md_write_start()
8536 spin_lock(&mddev->lock); in md_write_start()
8537 if (mddev->in_sync) { in md_write_start()
8538 mddev->in_sync = 0; in md_write_start()
8539 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_write_start()
8540 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_write_start()
8541 md_wakeup_thread(mddev->thread); in md_write_start()
8544 spin_unlock(&mddev->lock); in md_write_start()
8548 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_write_start()
8549 if (!mddev->has_superblocks) in md_write_start()
8551 wait_event(mddev->sb_wait, in md_write_start()
8552 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || in md_write_start()
8553 mddev->suspended); in md_write_start()
8554 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { in md_write_start()
8555 percpu_ref_put(&mddev->writes_pending); in md_write_start()
8570 void md_write_inc(struct mddev *mddev, struct bio *bi) in md_write_inc() argument
8574 WARN_ON_ONCE(mddev->in_sync || mddev->ro); in md_write_inc()
8575 percpu_ref_get(&mddev->writes_pending); in md_write_inc()
8579 void md_write_end(struct mddev *mddev) in md_write_end() argument
8581 percpu_ref_put(&mddev->writes_pending); in md_write_end()
8583 if (mddev->safemode == 2) in md_write_end()
8584 md_wakeup_thread(mddev->thread); in md_write_end()
8585 else if (mddev->safemode_delay) in md_write_end()
8589 mod_timer(&mddev->safemode_timer, in md_write_end()
8590 roundup(jiffies, mddev->safemode_delay) + in md_write_end()
8591 mddev->safemode_delay); in md_write_end()
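md_write_start() and md_write_end() (lines 8513 and 8579) bracket every array write: the start side may block while the superblock transitions to dirty and returns false only if the array got suspended meanwhile (lines 8552-8555), and each successful start must be paired with an end. A sketch of the contract from an assumed personality request path (my_make_request is illustrative):

        static bool my_make_request(struct mddev *mddev, struct bio *bio)
        {
                if (bio_data_dir(bio) != WRITE)
                        return true;            /* reads need no accounting */

                if (!md_write_start(mddev, bio))
                        return false;           /* array suspended; caller retries */
                /*
                 * ... map and submit the write here; the matching md_write_end()
                 * really belongs in the bio completion path and is inlined only
                 * to keep the sketch short ...
                 */
                md_write_end(mddev);
                return true;
        }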
8602 void md_allow_write(struct mddev *mddev) in md_allow_write() argument
8604 if (!mddev->pers) in md_allow_write()
8606 if (mddev->ro) in md_allow_write()
8608 if (!mddev->pers->sync_request) in md_allow_write()
8611 spin_lock(&mddev->lock); in md_allow_write()
8612 if (mddev->in_sync) { in md_allow_write()
8613 mddev->in_sync = 0; in md_allow_write()
8614 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_allow_write()
8615 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_allow_write()
8616 if (mddev->safemode_delay && in md_allow_write()
8617 mddev->safemode == 0) in md_allow_write()
8618 mddev->safemode = 1; in md_allow_write()
8619 spin_unlock(&mddev->lock); in md_allow_write()
8620 md_update_sb(mddev, 0); in md_allow_write()
8621 sysfs_notify_dirent_safe(mddev->sysfs_state); in md_allow_write()
8623 wait_event(mddev->sb_wait, in md_allow_write()
8624 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); in md_allow_write()
8626 spin_unlock(&mddev->lock); in md_allow_write()
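md_allow_write() (line 8602) pre-dirties the array metadata, roughly so that a GFP_KERNEL allocation made while holding the array lock in a write path cannot stall behind the superblock write the array would otherwise still owe. A sketch of an assumed caller (my_prepare_write_buffers is illustrative):

        static void *my_prepare_write_buffers(struct mddev *mddev, size_t sz)
        {
                /* mark the array active/dirty first, then allocate */
                md_allow_write(mddev);
                return kzalloc(sz, GFP_KERNEL);
        }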
8635 struct mddev *mddev = thread->mddev; in md_do_sync() local
8636 struct mddev *mddev2; in md_do_sync()
8652 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) || in md_do_sync()
8653 test_bit(MD_RECOVERY_WAIT, &mddev->recovery)) in md_do_sync()
8655 if (mddev->ro) {/* never try to sync a read-only array */ in md_do_sync()
8656 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_do_sync()
8660 if (mddev_is_clustered(mddev)) { in md_do_sync()
8661 ret = md_cluster_ops->resync_start(mddev); in md_do_sync()
8665 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags); in md_do_sync()
8666 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || in md_do_sync()
8667 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) || in md_do_sync()
8668 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) in md_do_sync()
8669 && ((unsigned long long)mddev->curr_resync_completed in md_do_sync()
8670 < (unsigned long long)mddev->resync_max_sectors)) in md_do_sync()
8674 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
8675 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { in md_do_sync()
8678 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { in md_do_sync()
8683 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) in md_do_sync()
8688 mddev->last_sync_action = action ?: desc; in md_do_sync()
8708 mddev->curr_resync = 2; in md_do_sync()
8711 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
8714 if (mddev2 == mddev) in md_do_sync()
8716 if (!mddev->parallel_resync in md_do_sync()
8718 && match_mddev_units(mddev, mddev2)) { in md_do_sync()
8720 if (mddev < mddev2 && mddev->curr_resync == 2) { in md_do_sync()
8722 mddev->curr_resync = 1; in md_do_sync()
8725 if (mddev > mddev2 && mddev->curr_resync == 1) in md_do_sync()
8735 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_do_sync()
8736 mddev2->curr_resync >= mddev->curr_resync) { in md_do_sync()
8740 desc, mdname(mddev), in md_do_sync()
8753 } while (mddev->curr_resync < 2); in md_do_sync()
8756 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
8760 max_sectors = mddev->resync_max_sectors; in md_do_sync()
8761 atomic64_set(&mddev->resync_mismatches, 0); in md_do_sync()
8763 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
8764 j = mddev->resync_min; in md_do_sync()
8765 else if (!mddev->bitmap) in md_do_sync()
8766 j = mddev->recovery_cp; in md_do_sync()
8768 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { in md_do_sync()
8769 max_sectors = mddev->resync_max_sectors; in md_do_sync()
8775 if (mddev_is_clustered(mddev) && in md_do_sync()
8776 mddev->reshape_position != MaxSector) in md_do_sync()
8777 j = mddev->reshape_position; in md_do_sync()
8780 max_sectors = mddev->dev_sectors; in md_do_sync()
8783 rdev_for_each_rcu(rdev, mddev) in md_do_sync()
8800 if (mddev->bitmap) { in md_do_sync()
8801 mddev->pers->quiesce(mddev, 1); in md_do_sync()
8802 mddev->pers->quiesce(mddev, 0); in md_do_sync()
8806 pr_info("md: %s of RAID array %s\n", desc, mdname(mddev)); in md_do_sync()
8807 pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev)); in md_do_sync()
8809 speed_max(mddev), desc); in md_do_sync()
8811 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ in md_do_sync()
8819 mddev->resync_mark = mark[last_mark]; in md_do_sync()
8820 mddev->resync_mark_cnt = mark_cnt[last_mark]; in md_do_sync()
8829 atomic_set(&mddev->recovery_active, 0); in md_do_sync()
8834 desc, mdname(mddev)); in md_do_sync()
8835 mddev->curr_resync = j; in md_do_sync()
8837 mddev->curr_resync = 3; /* no longer delayed */ in md_do_sync()
8838 mddev->curr_resync_completed = j; in md_do_sync()
8839 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_do_sync()
8840 md_new_event(mddev); in md_do_sync()
8849 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
8850 ((mddev->curr_resync > mddev->curr_resync_completed && in md_do_sync()
8851 (mddev->curr_resync - mddev->curr_resync_completed) in md_do_sync()
8854 (j - mddev->curr_resync_completed)*2 in md_do_sync()
8855 >= mddev->resync_max - mddev->curr_resync_completed || in md_do_sync()
8856 mddev->curr_resync_completed > mddev->resync_max in md_do_sync()
8859 wait_event(mddev->recovery_wait, in md_do_sync()
8860 atomic_read(&mddev->recovery_active) == 0); in md_do_sync()
8861 mddev->curr_resync_completed = j; in md_do_sync()
8862 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && in md_do_sync()
8863 j > mddev->recovery_cp) in md_do_sync()
8864 mddev->recovery_cp = j; in md_do_sync()
8866 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); in md_do_sync()
8867 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_do_sync()
8870 while (j >= mddev->resync_max && in md_do_sync()
8871 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
8877 wait_event_interruptible(mddev->recovery_wait, in md_do_sync()
8878 mddev->resync_max > j in md_do_sync()
8880 &mddev->recovery)); in md_do_sync()
8883 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
8886 sectors = mddev->pers->sync_request(mddev, j, &skipped); in md_do_sync()
8888 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_do_sync()
8894 atomic_add(sectors, &mddev->recovery_active); in md_do_sync()
8897 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
8905 mddev->curr_resync = j; in md_do_sync()
8906 mddev->curr_mark_cnt = io_sectors; in md_do_sync()
8911 md_new_event(mddev); in md_do_sync()
8922 mddev->resync_mark = mark[next]; in md_do_sync()
8923 mddev->resync_mark_cnt = mark_cnt[next]; in md_do_sync()
8925 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); in md_do_sync()
8929 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
8942 recovery_done = io_sectors - atomic_read(&mddev->recovery_active); in md_do_sync()
8943 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2 in md_do_sync()
8944 /((jiffies-mddev->resync_mark)/HZ +1) +1; in md_do_sync()
8946 if (currspeed > speed_min(mddev)) { in md_do_sync()
8947 if (currspeed > speed_max(mddev)) { in md_do_sync()
8951 if (!is_mddev_idle(mddev, 0)) { in md_do_sync()
8956 wait_event(mddev->recovery_wait, in md_do_sync()
8957 !atomic_read(&mddev->recovery_active)); in md_do_sync()
8961 pr_info("md: %s: %s %s.\n",mdname(mddev), desc, in md_do_sync()
8962 test_bit(MD_RECOVERY_INTR, &mddev->recovery) in md_do_sync()
8968 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); in md_do_sync()
8970 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
8971 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_do_sync()
8972 mddev->curr_resync > 3) { in md_do_sync()
8973 mddev->curr_resync_completed = mddev->curr_resync; in md_do_sync()
8974 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_do_sync()
8976 mddev->pers->sync_request(mddev, max_sectors, &skipped); in md_do_sync()
8978 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && in md_do_sync()
8979 mddev->curr_resync > 3) { in md_do_sync()
8980 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { in md_do_sync()
8981 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
8982 if (mddev->curr_resync >= mddev->recovery_cp) { in md_do_sync()
8984 desc, mdname(mddev)); in md_do_sync()
8986 &mddev->recovery)) in md_do_sync()
8987 mddev->recovery_cp = in md_do_sync()
8988 mddev->curr_resync_completed; in md_do_sync()
8990 mddev->recovery_cp = in md_do_sync()
8991 mddev->curr_resync; in md_do_sync()
8994 mddev->recovery_cp = MaxSector; in md_do_sync()
8996 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) in md_do_sync()
8997 mddev->curr_resync = MaxSector; in md_do_sync()
8998 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
8999 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) { in md_do_sync()
9001 rdev_for_each_rcu(rdev, mddev) in md_do_sync()
9003 mddev->delta_disks >= 0 && in md_do_sync()
9007 rdev->recovery_offset < mddev->curr_resync) in md_do_sync()
9008 rdev->recovery_offset = mddev->curr_resync; in md_do_sync()
9017 set_mask_bits(&mddev->sb_flags, 0, in md_do_sync()
9020 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_do_sync()
9021 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_do_sync()
9022 mddev->delta_disks > 0 && in md_do_sync()
9023 mddev->pers->finish_reshape && in md_do_sync()
9024 mddev->pers->size && in md_do_sync()
9025 mddev->queue) { in md_do_sync()
9026 mddev_lock_nointr(mddev); in md_do_sync()
9027 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); in md_do_sync()
9028 mddev_unlock(mddev); in md_do_sync()
9029 if (!mddev_is_clustered(mddev)) { in md_do_sync()
9030 set_capacity(mddev->gendisk, mddev->array_sectors); in md_do_sync()
9031 revalidate_disk_size(mddev->gendisk, true); in md_do_sync()
9035 spin_lock(&mddev->lock); in md_do_sync()
9036 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { in md_do_sync()
9038 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
9039 mddev->resync_min = 0; in md_do_sync()
9040 mddev->resync_max = MaxSector; in md_do_sync()
9041 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) in md_do_sync()
9042 mddev->resync_min = mddev->curr_resync_completed; in md_do_sync()
9043 set_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_do_sync()
9044 mddev->curr_resync = 0; in md_do_sync()
9045 spin_unlock(&mddev->lock); in md_do_sync()
9048 md_wakeup_thread(mddev->thread); in md_do_sync()
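md_do_sync() drives the whole pass by repeatedly calling the personality's sync_request hook (line 8886) and then issues a final call at max_sectors for cleanup (line 8976). A sketch of the contract that loop relies on, as a trivial illustrative personality hook (not code from this file): it returns the number of sectors it handled starting at sector_nr, or sets *skipped when it did no I/O for that range.

        static sector_t my_sync_request(struct mddev *mddev, sector_t sector_nr,
                                        int *skipped)
        {
                sector_t max_sector = mddev->dev_sectors;

                if (sector_nr >= max_sector)
                        return 0;               /* end-of-array: nothing left to do */

                *skipped = 1;                   /* pretend the region was already clean */
                return max_sector - sector_nr;
        }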
9053 static int remove_and_add_spares(struct mddev *mddev, in remove_and_add_spares() argument
9061 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) in remove_and_add_spares()
9065 rdev_for_each(rdev, mddev) { in remove_and_add_spares()
9083 rdev_for_each(rdev, mddev) { in remove_and_add_spares()
9091 if (mddev->pers->hot_remove_disk( in remove_and_add_spares()
9092 mddev, rdev) == 0) { in remove_and_add_spares()
9093 sysfs_unlink_rdev(mddev, rdev); in remove_and_add_spares()
9103 if (removed && mddev->kobj.sd) in remove_and_add_spares()
9104 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in remove_and_add_spares()
9109 rdev_for_each(rdev, mddev) { in remove_and_add_spares()
9124 if (mddev->ro && in remove_and_add_spares()
9131 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) { in remove_and_add_spares()
9133 sysfs_link_rdev(mddev, rdev); in remove_and_add_spares()
9136 md_new_event(mddev); in remove_and_add_spares()
9137 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in remove_and_add_spares()
9142 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in remove_and_add_spares()
9148 struct mddev *mddev = container_of(ws, struct mddev, del_work); in md_start_sync() local
9150 mddev->sync_thread = md_register_thread(md_do_sync, in md_start_sync()
9151 mddev, in md_start_sync()
9153 if (!mddev->sync_thread) { in md_start_sync()
9155 mdname(mddev)); in md_start_sync()
9157 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_start_sync()
9158 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_start_sync()
9159 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_start_sync()
9160 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_start_sync()
9161 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_start_sync()
9164 &mddev->recovery)) in md_start_sync()
9165 if (mddev->sysfs_action) in md_start_sync()
9166 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_start_sync()
9168 md_wakeup_thread(mddev->sync_thread); in md_start_sync()
9169 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_start_sync()
9170 md_new_event(mddev); in md_start_sync()
9195 void md_check_recovery(struct mddev *mddev) in md_check_recovery() argument
9197 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) { in md_check_recovery()
9201 set_bit(MD_UPDATING_SB, &mddev->flags); in md_check_recovery()
9203 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags)) in md_check_recovery()
9204 md_update_sb(mddev, 0); in md_check_recovery()
9205 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags); in md_check_recovery()
9206 wake_up(&mddev->sb_wait); in md_check_recovery()
9209 if (mddev->suspended) in md_check_recovery()
9212 if (mddev->bitmap) in md_check_recovery()
9213 md_bitmap_daemon_work(mddev); in md_check_recovery()
9216 if (mddev->pers->sync_request && !mddev->external) { in md_check_recovery()
9218 mdname(mddev)); in md_check_recovery()
9219 mddev->safemode = 2; in md_check_recovery()
9224 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) in md_check_recovery()
9227 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) || in md_check_recovery()
9228 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || in md_check_recovery()
9229 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || in md_check_recovery()
9230 (mddev->external == 0 && mddev->safemode == 1) || in md_check_recovery()
9231 (mddev->safemode == 2 in md_check_recovery()
9232 && !mddev->in_sync && mddev->recovery_cp == MaxSector) in md_check_recovery()
9236 if (mddev_trylock(mddev)) { in md_check_recovery()
9238 bool try_set_sync = mddev->safemode != 0; in md_check_recovery()
9240 if (!mddev->external && mddev->safemode == 1) in md_check_recovery()
9241 mddev->safemode = 0; in md_check_recovery()
9243 if (mddev->ro) { in md_check_recovery()
9245 if (!mddev->external && mddev->in_sync) in md_check_recovery()
9251 rdev_for_each(rdev, mddev) in md_check_recovery()
9260 remove_and_add_spares(mddev, NULL); in md_check_recovery()
9264 set_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_check_recovery()
9265 md_reap_sync_thread(mddev); in md_check_recovery()
9266 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9267 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_check_recovery()
9268 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); in md_check_recovery()
9272 if (mddev_is_clustered(mddev)) { in md_check_recovery()
9277 rdev_for_each_safe(rdev, tmp, mddev) { in md_check_recovery()
9284 if (try_set_sync && !mddev->external && !mddev->in_sync) { in md_check_recovery()
9285 spin_lock(&mddev->lock); in md_check_recovery()
9286 set_in_sync(mddev); in md_check_recovery()
9287 spin_unlock(&mddev->lock); in md_check_recovery()
9290 if (mddev->sb_flags) in md_check_recovery()
9291 md_update_sb(mddev, 0); in md_check_recovery()
9293 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && in md_check_recovery()
9294 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { in md_check_recovery()
9296 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_check_recovery()
9299 if (mddev->sync_thread) { in md_check_recovery()
9300 md_reap_sync_thread(mddev); in md_check_recovery()
9306 mddev->curr_resync_completed = 0; in md_check_recovery()
9307 spin_lock(&mddev->lock); in md_check_recovery()
9308 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_check_recovery()
9309 spin_unlock(&mddev->lock); in md_check_recovery()
9313 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); in md_check_recovery()
9314 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_check_recovery()
9316 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || in md_check_recovery()
9317 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) in md_check_recovery()
9326 if (mddev->reshape_position != MaxSector) { in md_check_recovery()
9327 if (mddev->pers->check_reshape == NULL || in md_check_recovery()
9328 mddev->pers->check_reshape(mddev) != 0) in md_check_recovery()
9331 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_check_recovery()
9332 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9333 } else if ((spares = remove_and_add_spares(mddev, NULL))) { in md_check_recovery()
9334 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_check_recovery()
9335 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_check_recovery()
9336 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_check_recovery()
9337 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9338 } else if (mddev->recovery_cp < MaxSector) { in md_check_recovery()
9339 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_check_recovery()
9340 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); in md_check_recovery()
9341 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) in md_check_recovery()
9345 if (mddev->pers->sync_request) { in md_check_recovery()
9351 md_bitmap_write_all(mddev->bitmap); in md_check_recovery()
9353 INIT_WORK(&mddev->del_work, md_start_sync); in md_check_recovery()
9354 queue_work(md_misc_wq, &mddev->del_work); in md_check_recovery()
9358 if (!mddev->sync_thread) { in md_check_recovery()
9359 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_check_recovery()
9362 &mddev->recovery)) in md_check_recovery()
9363 if (mddev->sysfs_action) in md_check_recovery()
9364 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_check_recovery()
9367 wake_up(&mddev->sb_wait); in md_check_recovery()
9368 mddev_unlock(mddev); in md_check_recovery()
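md_check_recovery() is fed by a two-step pattern that recurs throughout this file (for example at lines 7731-7732 and 9642-9643): raise MD_RECOVERY_NEEDED, then wake the array thread. A condensed sketch of that pattern, wrapped in a made-up helper name:

        static void kick_recovery(struct mddev *mddev)  /* illustrative helper */
        {
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
        }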
9373 void md_reap_sync_thread(struct mddev *mddev) in md_reap_sync_thread() argument
9376 sector_t old_dev_sectors = mddev->dev_sectors; in md_reap_sync_thread()
9380 md_unregister_thread(&mddev->sync_thread); in md_reap_sync_thread()
9381 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && in md_reap_sync_thread()
9382 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && in md_reap_sync_thread()
9383 mddev->degraded != mddev->raid_disks) { in md_reap_sync_thread()
9386 if (mddev->pers->spare_active(mddev)) { in md_reap_sync_thread()
9387 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in md_reap_sync_thread()
9388 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); in md_reap_sync_thread()
9391 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && in md_reap_sync_thread()
9392 mddev->pers->finish_reshape) { in md_reap_sync_thread()
9393 mddev->pers->finish_reshape(mddev); in md_reap_sync_thread()
9394 if (mddev_is_clustered(mddev)) in md_reap_sync_thread()
9401 if (!mddev->degraded) in md_reap_sync_thread()
9402 rdev_for_each(rdev, mddev) in md_reap_sync_thread()
9405 md_update_sb(mddev, 1); in md_reap_sync_thread()
9409 if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags)) in md_reap_sync_thread()
9410 md_cluster_ops->resync_finish(mddev); in md_reap_sync_thread()
9411 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); in md_reap_sync_thread()
9412 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); in md_reap_sync_thread()
9413 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); in md_reap_sync_thread()
9414 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); in md_reap_sync_thread()
9415 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); in md_reap_sync_thread()
9416 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); in md_reap_sync_thread()
9422 if (mddev_is_clustered(mddev) && is_reshaped in md_reap_sync_thread()
9423 && !test_bit(MD_CLOSING, &mddev->flags)) in md_reap_sync_thread()
9424 md_cluster_ops->update_size(mddev, old_dev_sectors); in md_reap_sync_thread()
9427 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in md_reap_sync_thread()
9428 sysfs_notify_dirent_safe(mddev->sysfs_completed); in md_reap_sync_thread()
9429 sysfs_notify_dirent_safe(mddev->sysfs_action); in md_reap_sync_thread()
9430 md_new_event(mddev); in md_reap_sync_thread()
9431 if (mddev->event_work.func) in md_reap_sync_thread()
9432 queue_work(md_misc_wq, &mddev->event_work); in md_reap_sync_thread()
9436 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) in md_wait_for_blocked_rdev() argument
9443 rdev_dec_pending(rdev, mddev); in md_wait_for_blocked_rdev()
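md_wait_for_blocked_rdev() (line 9436) expects the caller to hold a pending reference on the member, which it drops itself via rdev_dec_pending() (line 9443) once the Blocked state clears. A sketch of the caller shape, loosely modelled on the raid personalities (my_wait_unblocked is an illustrative name):

        static void my_wait_unblocked(struct mddev *mddev,
                                      struct md_rdev *blocked_rdev)
        {
                /* reference taken here is released inside the wait (line 9443) */
                atomic_inc(&blocked_rdev->nr_pending);
                md_wait_for_blocked_rdev(blocked_rdev, mddev);
                /* on return the caller rescans the members and retries the write */
        }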
9447 void md_finish_reshape(struct mddev *mddev) in md_finish_reshape() argument
9452 rdev_for_each(rdev, mddev) { in md_finish_reshape()
9468 struct mddev *mddev = rdev->mddev; in rdev_set_badblocks() local
9480 set_mask_bits(&mddev->sb_flags, 0, in rdev_set_badblocks()
9482 md_wakeup_thread(rdev->mddev->thread); in rdev_set_badblocks()
9508 struct mddev *mddev; in md_notify_reboot() local
9511 for_each_mddev(mddev, tmp) { in md_notify_reboot()
9512 if (mddev_trylock(mddev)) { in md_notify_reboot()
9513 if (mddev->pers) in md_notify_reboot()
9514 __md_stop_writes(mddev); in md_notify_reboot()
9515 if (mddev->persistent) in md_notify_reboot()
9516 mddev->safemode = 2; in md_notify_reboot()
9517 mddev_unlock(mddev); in md_notify_reboot()
9592 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) in check_sb_changes() argument
9603 if (mddev->dev_sectors != le64_to_cpu(sb->size)) { in check_sb_changes()
9604 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size)); in check_sb_changes()
9608 md_bitmap_update_sb(mddev->bitmap); in check_sb_changes()
9612 rdev_for_each_safe(rdev2, tmp, mddev) { in check_sb_changes()
9637 ret = remove_and_add_spares(mddev, rdev2); in check_sb_changes()
9642 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); in check_sb_changes()
9643 md_wakeup_thread(mddev->thread); in check_sb_changes()
9651 md_error(mddev, rdev2); in check_sb_changes()
9657 if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) { in check_sb_changes()
9658 ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks)); in check_sb_changes()
9667 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && in check_sb_changes()
9673 mddev->reshape_position = le64_to_cpu(sb->reshape_position); in check_sb_changes()
9674 if (mddev->pers->update_reshape_pos) in check_sb_changes()
9675 mddev->pers->update_reshape_pos(mddev); in check_sb_changes()
9676 if (mddev->pers->start_reshape) in check_sb_changes()
9677 mddev->pers->start_reshape(mddev); in check_sb_changes()
9678 } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && in check_sb_changes()
9679 mddev->reshape_position != MaxSector && in check_sb_changes()
9682 mddev->reshape_position = MaxSector; in check_sb_changes()
9683 if (mddev->pers->update_reshape_pos) in check_sb_changes()
9684 mddev->pers->update_reshape_pos(mddev); in check_sb_changes()
9688 mddev->events = le64_to_cpu(sb->events); in check_sb_changes()
9691 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev) in read_rdev() argument
9705 err = super_types[mddev->major_version]. in read_rdev()
9706 load_super(rdev, NULL, mddev->minor_version); in read_rdev()
9731 mddev->pers->spare_active(mddev)) in read_rdev()
9732 sysfs_notify_dirent_safe(mddev->sysfs_degraded); in read_rdev()
9738 void md_reload_sb(struct mddev *mddev, int nr) in md_reload_sb() argument
9744 rdev_for_each_rcu(iter, mddev) { in md_reload_sb()
9756 err = read_rdev(mddev, rdev); in md_reload_sb()
9760 check_sb_changes(mddev, rdev); in md_reload_sb()
9763 rdev_for_each_rcu(rdev, mddev) { in md_reload_sb()
9765 read_rdev(mddev, rdev); in md_reload_sb()
9841 struct mddev *mddev; in md_exit() local
9865 for_each_mddev(mddev, tmp) { in md_exit()
9866 export_array(mddev); in md_exit()
9867 mddev->ctime = 0; in md_exit()
9868 mddev->hold_active = 0; in md_exit()