Lines matching full:conf — identifier-search hits in the MD RAID4/5/6 driver (drivers/md/raid5.c). Each entry gives the source line number, the matched line, and the enclosing function.

22  * conf->seq_write is the number of the last batch successfully written.
23 * conf->seq_flush is the number of the last batch that was closed to
71 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect) in stripe_hash() argument
73 int hash = (sect >> RAID5_STRIPE_SHIFT(conf)) & HASH_MASK; in stripe_hash()
74 return &conf->stripe_hashtbl[hash]; in stripe_hash()
77 static inline int stripe_hash_locks_hash(struct r5conf *conf, sector_t sect) in stripe_hash_locks_hash() argument
79 return (sect >> RAID5_STRIPE_SHIFT(conf)) & STRIPE_HASH_LOCKS_MASK; in stripe_hash_locks_hash()
82 static inline void lock_device_hash_lock(struct r5conf *conf, int hash) in lock_device_hash_lock() argument
84 spin_lock_irq(conf->hash_locks + hash); in lock_device_hash_lock()
85 spin_lock(&conf->device_lock); in lock_device_hash_lock()
88 static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) in unlock_device_hash_lock() argument
90 spin_unlock(&conf->device_lock); in unlock_device_hash_lock()
91 spin_unlock_irq(conf->hash_locks + hash); in unlock_device_hash_lock()
94 static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) in lock_all_device_hash_locks_irq() argument
97 spin_lock_irq(conf->hash_locks); in lock_all_device_hash_locks_irq()
99 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); in lock_all_device_hash_locks_irq()
100 spin_lock(&conf->device_lock); in lock_all_device_hash_locks_irq()
103 static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) in unlock_all_device_hash_locks_irq() argument
106 spin_unlock(&conf->device_lock); in unlock_all_device_hash_locks_irq()
108 spin_unlock(conf->hash_locks + i); in unlock_all_device_hash_locks_irq()
109 spin_unlock_irq(conf->hash_locks); in unlock_all_device_hash_locks_irq()
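
The cluster above (source lines 71-109) fixes the locking discipline used throughout the file: a stripe's sector is hashed to one of NR_STRIPE_HASH_LOCKS bucket locks, the bucket lock is always taken (with interrupts disabled) before conf->device_lock, and the two are released in the reverse order; taking every bucket lock plus device_lock freezes the whole cache. Below is a minimal standalone sketch of that ordering, using userspace pthread mutexes rather than the kernel spinlock/irq primitives; the struct name fake_conf and the stripe_shift parameter are illustrative, while NR_STRIPE_HASH_LOCKS (8) is taken from raid5.h.

/*
 * Userspace sketch of the hashing and lock-nesting shown above:
 * a sector selects one of NR_STRIPE_HASH_LOCKS bucket locks, the
 * bucket lock is always taken before device_lock, and both are
 * released in reverse order.  Illustration only; the driver uses
 * spin_lock_irq()/spin_lock() on conf->hash_locks and device_lock.
 */
#include <pthread.h>

#define NR_STRIPE_HASH_LOCKS	8
#define STRIPE_HASH_LOCKS_MASK	(NR_STRIPE_HASH_LOCKS - 1)

struct fake_conf {
	pthread_mutex_t hash_locks[NR_STRIPE_HASH_LOCKS];
	pthread_mutex_t device_lock;
};

/* mirrors stripe_hash_locks_hash(): sector -> bucket-lock index */
static int stripe_hash_locks_hash(unsigned long long sect, int stripe_shift)
{
	return (int)((sect >> stripe_shift) & STRIPE_HASH_LOCKS_MASK);
}

static void lock_device_hash_lock(struct fake_conf *conf, int hash)
{
	pthread_mutex_lock(&conf->hash_locks[hash]);	/* outer: bucket lock */
	pthread_mutex_lock(&conf->device_lock);		/* inner: device lock */
}

static void unlock_device_hash_lock(struct fake_conf *conf, int hash)
{
	pthread_mutex_unlock(&conf->device_lock);	/* release inner first */
	pthread_mutex_unlock(&conf->hash_locks[hash]);
}

static void lock_all_device_hash_locks(struct fake_conf *conf)
{
	for (int i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
		pthread_mutex_lock(&conf->hash_locks[i]);
	pthread_mutex_lock(&conf->device_lock);
}

static void unlock_all_device_hash_locks(struct fake_conf *conf)
{
	pthread_mutex_unlock(&conf->device_lock);
	for (int i = NR_STRIPE_HASH_LOCKS - 1; i >= 0; i--)
		pthread_mutex_unlock(&conf->hash_locks[i]);
}

int main(void)
{
	static struct fake_conf conf = {
		.hash_locks = {
			[0 ... NR_STRIPE_HASH_LOCKS - 1] = PTHREAD_MUTEX_INITIALIZER
		},
		.device_lock = PTHREAD_MUTEX_INITIALIZER,
	};
	/* e.g. sector 4096 with a 4 KiB stripe size (shift 3) maps to bucket 0 */
	int hash = stripe_hash_locks_hash(4096, 3);

	lock_device_hash_lock(&conf, hash);
	unlock_device_hash_lock(&conf, hash);
	lock_all_device_hash_locks(&conf);
	unlock_all_device_hash_locks(&conf);
	return 0;
}

Keeping device_lock innermost lets per-bucket operations proceed concurrently while still providing one lock that serializes state shared across all buckets.
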
151 static void print_raid5_conf (struct r5conf *conf);
169 struct r5conf *conf = sh->raid_conf; in raid5_wakeup_stripe_thread() local
181 group = conf->worker_groups + cpu_to_group(cpu); in raid5_wakeup_stripe_thread()
190 if (conf->worker_cnt_per_group == 0) { in raid5_wakeup_stripe_thread()
191 md_wakeup_thread(conf->mddev->thread); in raid5_wakeup_stripe_thread()
195 group = conf->worker_groups + cpu_to_group(sh->cpu); in raid5_wakeup_stripe_thread()
203 for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) { in raid5_wakeup_stripe_thread()
213 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, in do_release_stripe() argument
220 BUG_ON(atomic_read(&conf->active_stripes)==0); in do_release_stripe()
222 if (r5c_is_writeback(conf->log)) in do_release_stripe()
234 (conf->quiesce && r5c_is_writeback(conf->log) && in do_release_stripe()
244 list_add_tail(&sh->lru, &conf->delayed_list); in do_release_stripe()
246 sh->bm_seq - conf->seq_write > 0) in do_release_stripe()
247 list_add_tail(&sh->lru, &conf->bitmap_list); in do_release_stripe()
251 if (conf->worker_cnt_per_group == 0) { in do_release_stripe()
254 &conf->loprio_list); in do_release_stripe()
257 &conf->handle_list); in do_release_stripe()
263 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
267 if (atomic_dec_return(&conf->preread_active_stripes) in do_release_stripe()
269 md_wakeup_thread(conf->mddev->thread); in do_release_stripe()
270 atomic_dec(&conf->active_stripes); in do_release_stripe()
272 if (!r5c_is_writeback(conf->log)) in do_release_stripe()
278 else if (injournal == conf->raid_disks - conf->max_degraded) { in do_release_stripe()
281 atomic_inc(&conf->r5c_cached_full_stripes); in do_release_stripe()
283 atomic_dec(&conf->r5c_cached_partial_stripes); in do_release_stripe()
284 list_add_tail(&sh->lru, &conf->r5c_full_stripe_list); in do_release_stripe()
285 r5c_check_cached_full_stripe(conf); in do_release_stripe()
292 list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list); in do_release_stripe()
298 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, in __release_stripe() argument
302 do_release_stripe(conf, sh, temp_inactive_list); in __release_stripe()
312 static void release_inactive_stripe_list(struct r5conf *conf, in release_inactive_stripe_list() argument
333 spin_lock_irqsave(conf->hash_locks + hash, flags); in release_inactive_stripe_list()
334 if (list_empty(conf->inactive_list + hash) && in release_inactive_stripe_list()
336 atomic_dec(&conf->empty_inactive_list_nr); in release_inactive_stripe_list()
337 list_splice_tail_init(list, conf->inactive_list + hash); in release_inactive_stripe_list()
339 spin_unlock_irqrestore(conf->hash_locks + hash, flags); in release_inactive_stripe_list()
346 wake_up(&conf->wait_for_stripe); in release_inactive_stripe_list()
347 if (atomic_read(&conf->active_stripes) == 0) in release_inactive_stripe_list()
348 wake_up(&conf->wait_for_quiescent); in release_inactive_stripe_list()
349 if (conf->retry_read_aligned) in release_inactive_stripe_list()
350 md_wakeup_thread(conf->mddev->thread); in release_inactive_stripe_list()
354 /* should hold conf->device_lock already */
355 static int release_stripe_list(struct r5conf *conf, in release_stripe_list() argument
362 head = llist_del_all(&conf->released_stripes); in release_stripe_list()
376 __release_stripe(conf, sh, &temp_inactive_list[hash]); in release_stripe_list()
385 struct r5conf *conf = sh->raid_conf; in raid5_release_stripe() local
396 if (unlikely(!conf->mddev->thread) || in raid5_release_stripe()
399 wakeup = llist_add(&sh->release_list, &conf->released_stripes); in raid5_release_stripe()
401 md_wakeup_thread(conf->mddev->thread); in raid5_release_stripe()
405 if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) { in raid5_release_stripe()
408 do_release_stripe(conf, sh, &list); in raid5_release_stripe()
409 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_release_stripe()
410 release_inactive_stripe_list(conf, &list, hash); in raid5_release_stripe()
422 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) in insert_hash() argument
424 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
433 static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) in get_free_stripe() argument
438 if (list_empty(conf->inactive_list + hash)) in get_free_stripe()
440 first = (conf->inactive_list + hash)->next; in get_free_stripe()
444 atomic_inc(&conf->active_stripes); in get_free_stripe()
446 if (list_empty(conf->inactive_list + hash)) in get_free_stripe()
447 atomic_inc(&conf->empty_inactive_list_nr); in get_free_stripe()
491 init_stripe_shared_pages(struct stripe_head *sh, struct r5conf *conf, int disks) in init_stripe_shared_pages() argument
498 /* Each of the sh->dev[i] need one conf->stripe_size */ in init_stripe_shared_pages()
499 cnt = PAGE_SIZE / conf->stripe_size; in init_stripe_shared_pages()
563 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
568 struct r5conf *conf = sh->raid_conf; in init_stripe() local
579 seq = read_seqcount_begin(&conf->gen_lock); in init_stripe()
580 sh->generation = conf->generation - previous; in init_stripe()
581 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; in init_stripe()
583 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
600 if (read_seqcount_retry(&conf->gen_lock, seq)) in init_stripe()
603 insert_hash(conf, sh); in init_stripe()
608 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, in __find_stripe() argument
614 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) in __find_stripe()
634 int raid5_calc_degraded(struct r5conf *conf) in raid5_calc_degraded() argument
641 for (i = 0; i < conf->previous_raid_disks; i++) { in raid5_calc_degraded()
642 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in raid5_calc_degraded()
644 rdev = rcu_dereference(conf->disks[i].replacement); in raid5_calc_degraded()
659 if (conf->raid_disks >= conf->previous_raid_disks) in raid5_calc_degraded()
663 if (conf->raid_disks == conf->previous_raid_disks) in raid5_calc_degraded()
667 for (i = 0; i < conf->raid_disks; i++) { in raid5_calc_degraded()
668 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in raid5_calc_degraded()
670 rdev = rcu_dereference(conf->disks[i].replacement); in raid5_calc_degraded()
681 if (conf->raid_disks <= conf->previous_raid_disks) in raid5_calc_degraded()
690 static bool has_failed(struct r5conf *conf) in has_failed() argument
692 int degraded = conf->mddev->degraded; in has_failed()
694 if (test_bit(MD_BROKEN, &conf->mddev->flags)) in has_failed()
697 if (conf->mddev->reshape_position != MaxSector) in has_failed()
698 degraded = raid5_calc_degraded(conf); in has_failed()
700 return degraded > conf->max_degraded; in has_failed()
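
has_failed() (source lines 690-700 above) reduces to one comparison: the array is treated as failed once the count of degraded members exceeds conf->max_degraded, which is 1 for RAID4/5 and 2 for RAID6; during a reshape the count is recomputed by raid5_calc_degraded() over both the old and new geometry. A tiny standalone illustration of that threshold follows; array_has_failed() and the printed scenarios are illustrative, not driver code.

#include <stdbool.h>
#include <stdio.h>

/* Illustration of the "degraded > conf->max_degraded" test in has_failed(). */
static bool array_has_failed(int degraded, int max_degraded)
{
	return degraded > max_degraded;
}

int main(void)
{
	printf("raid5, 1 device down: %s\n",
	       array_has_failed(1, 1) ? "failed" : "still usable");
	printf("raid5, 2 devices down: %s\n",
	       array_has_failed(2, 1) ? "failed" : "still usable");
	printf("raid6, 2 devices down: %s\n",
	       array_has_failed(2, 2) ? "failed" : "still usable");
	printf("raid6, 3 devices down: %s\n",
	       array_has_failed(3, 2) ? "failed" : "still usable");
	return 0;
}
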
704 raid5_get_active_stripe(struct r5conf *conf, sector_t sector, in raid5_get_active_stripe() argument
708 int hash = stripe_hash_locks_hash(conf, sector); in raid5_get_active_stripe()
713 spin_lock_irq(conf->hash_locks + hash); in raid5_get_active_stripe()
716 wait_event_lock_irq(conf->wait_for_quiescent, in raid5_get_active_stripe()
717 conf->quiesce == 0 || noquiesce, in raid5_get_active_stripe()
718 *(conf->hash_locks + hash)); in raid5_get_active_stripe()
719 sh = __find_stripe(conf, sector, conf->generation - previous); in raid5_get_active_stripe()
721 if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) { in raid5_get_active_stripe()
722 sh = get_free_stripe(conf, hash); in raid5_get_active_stripe()
724 &conf->cache_state)) in raid5_get_active_stripe()
726 &conf->cache_state); in raid5_get_active_stripe()
731 r5c_check_stripe_cache_usage(conf); in raid5_get_active_stripe()
734 &conf->cache_state); in raid5_get_active_stripe()
735 r5l_wake_reclaim(conf->log, 0); in raid5_get_active_stripe()
737 conf->wait_for_stripe, in raid5_get_active_stripe()
738 !list_empty(conf->inactive_list + hash) && in raid5_get_active_stripe()
739 (atomic_read(&conf->active_stripes) in raid5_get_active_stripe()
740 < (conf->max_nr_stripes * 3 / 4) in raid5_get_active_stripe()
742 &conf->cache_state)), in raid5_get_active_stripe()
743 *(conf->hash_locks + hash)); in raid5_get_active_stripe()
745 &conf->cache_state); in raid5_get_active_stripe()
751 spin_lock(&conf->device_lock); in raid5_get_active_stripe()
754 atomic_inc(&conf->active_stripes); in raid5_get_active_stripe()
758 if (!list_empty(conf->inactive_list + hash)) in raid5_get_active_stripe()
761 if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag) in raid5_get_active_stripe()
762 atomic_inc(&conf->empty_inactive_list_nr); in raid5_get_active_stripe()
769 spin_unlock(&conf->device_lock); in raid5_get_active_stripe()
773 spin_unlock_irq(conf->hash_locks + hash); in raid5_get_active_stripe()
807 struct r5conf *conf = sh->raid_conf; in stripe_can_batch() local
809 if (raid5_has_log(conf) || raid5_has_ppl(conf)) in stripe_can_batch()
817 static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh) in stripe_add_to_batch_list() argument
827 if (!sector_div(tmp_sec, conf->chunk_sectors)) in stripe_add_to_batch_list()
829 head_sector = sh->sector - RAID5_STRIPE_SECTORS(conf); in stripe_add_to_batch_list()
831 hash = stripe_hash_locks_hash(conf, head_sector); in stripe_add_to_batch_list()
832 spin_lock_irq(conf->hash_locks + hash); in stripe_add_to_batch_list()
833 head = __find_stripe(conf, head_sector, conf->generation); in stripe_add_to_batch_list()
835 spin_lock(&conf->device_lock); in stripe_add_to_batch_list()
838 atomic_inc(&conf->active_stripes); in stripe_add_to_batch_list()
842 if (!list_empty(conf->inactive_list + hash)) in stripe_add_to_batch_list()
845 if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag) in stripe_add_to_batch_list()
846 atomic_inc(&conf->empty_inactive_list_nr); in stripe_add_to_batch_list()
853 spin_unlock(&conf->device_lock); in stripe_add_to_batch_list()
855 spin_unlock_irq(conf->hash_locks + hash); in stripe_add_to_batch_list()
908 if (atomic_dec_return(&conf->preread_active_stripes) in stripe_add_to_batch_list()
910 md_wakeup_thread(conf->mddev->thread); in stripe_add_to_batch_list()
931 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) in use_new_offset() argument
933 sector_t progress = conf->reshape_progress; in use_new_offset()
935 * of conf->generation, or ->data_offset that was set before in use_new_offset()
941 if (sh->generation == conf->generation - 1) in use_new_offset()
970 static void dispatch_defer_bios(struct r5conf *conf, int target, in dispatch_defer_bios() argument
977 if (conf->pending_data_cnt == 0) in dispatch_defer_bios()
980 list_sort(NULL, &conf->pending_list, cmp_stripe); in dispatch_defer_bios()
982 first = conf->pending_list.next; in dispatch_defer_bios()
985 if (conf->next_pending_data) in dispatch_defer_bios()
986 list_move_tail(&conf->pending_list, in dispatch_defer_bios()
987 &conf->next_pending_data->sibling); in dispatch_defer_bios()
989 while (!list_empty(&conf->pending_list)) { in dispatch_defer_bios()
990 data = list_first_entry(&conf->pending_list, in dispatch_defer_bios()
997 list_move(&data->sibling, &conf->free_list); in dispatch_defer_bios()
1002 conf->pending_data_cnt -= cnt; in dispatch_defer_bios()
1003 BUG_ON(conf->pending_data_cnt < 0 || cnt < target); in dispatch_defer_bios()
1005 if (next != &conf->pending_list) in dispatch_defer_bios()
1006 conf->next_pending_data = list_entry(next, in dispatch_defer_bios()
1009 conf->next_pending_data = NULL; in dispatch_defer_bios()
1011 if (first != &conf->pending_list) in dispatch_defer_bios()
1012 list_move_tail(&conf->pending_list, first); in dispatch_defer_bios()
1015 static void flush_deferred_bios(struct r5conf *conf) in flush_deferred_bios() argument
1019 if (conf->pending_data_cnt == 0) in flush_deferred_bios()
1022 spin_lock(&conf->pending_bios_lock); in flush_deferred_bios()
1023 dispatch_defer_bios(conf, conf->pending_data_cnt, &tmp); in flush_deferred_bios()
1024 BUG_ON(conf->pending_data_cnt != 0); in flush_deferred_bios()
1025 spin_unlock(&conf->pending_bios_lock); in flush_deferred_bios()
1030 static void defer_issue_bios(struct r5conf *conf, sector_t sector, in defer_issue_bios() argument
1036 spin_lock(&conf->pending_bios_lock); in defer_issue_bios()
1037 ent = list_first_entry(&conf->free_list, struct r5pending_data, in defer_issue_bios()
1039 list_move_tail(&ent->sibling, &conf->pending_list); in defer_issue_bios()
1043 conf->pending_data_cnt++; in defer_issue_bios()
1044 if (conf->pending_data_cnt >= PENDING_IO_MAX) in defer_issue_bios()
1045 dispatch_defer_bios(conf, PENDING_IO_ONE_FLUSH, &tmp); in defer_issue_bios()
1047 spin_unlock(&conf->pending_bios_lock); in defer_issue_bios()
1059 struct r5conf *conf = sh->raid_conf; in ops_run_io() local
1070 should_defer = conf->batch_bio_dispatch && conf->group_cnt; in ops_run_io()
1101 rrdev = rcu_dereference(conf->disks[i].replacement); in ops_run_io()
1103 rdev = rcu_dereference(conf->disks[i].rdev); in ops_run_io()
1138 int bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), in ops_run_io()
1145 if (!conf->mddev->external && in ops_run_io()
1146 conf->mddev->sb_flags) { in ops_run_io()
1151 md_check_recovery(conf->mddev); in ops_run_io()
1159 md_wait_for_blocked_rdev(rdev, conf->mddev); in ops_run_io()
1162 rdev_dec_pending(rdev, conf->mddev); in ops_run_io()
1170 md_sync_acct(rdev->bdev, RAID5_STRIPE_SECTORS(conf)); in ops_run_io()
1187 if (use_new_offset(conf, sh)) in ops_run_io()
1210 bi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf); in ops_run_io()
1212 bi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf); in ops_run_io()
1225 if (conf->mddev->gendisk) in ops_run_io()
1227 bi, disk_devt(conf->mddev->gendisk), in ops_run_io()
1237 md_sync_acct(rrdev->bdev, RAID5_STRIPE_SECTORS(conf)); in ops_run_io()
1254 if (use_new_offset(conf, sh)) in ops_run_io()
1264 rbi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf); in ops_run_io()
1266 rbi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf); in ops_run_io()
1275 if (conf->mddev->gendisk) in ops_run_io()
1277 rbi, disk_devt(conf->mddev->gendisk), in ops_run_io()
1302 defer_issue_bios(conf, head_sh->sector, &pending_bios); in ops_run_io()
1316 struct r5conf *conf = sh->raid_conf; in async_copy_data() local
1338 if (len > 0 && page_offset + len > RAID5_STRIPE_SIZE(conf)) in async_copy_data()
1339 clen = RAID5_STRIPE_SIZE(conf) - page_offset; in async_copy_data()
1347 if (conf->skip_copy && in async_copy_data()
1349 clen == RAID5_STRIPE_SIZE(conf) && in async_copy_data()
1374 struct r5conf *conf = sh->raid_conf; in ops_complete_biofill() local
1395 dev->sector + RAID5_STRIPE_SECTORS(conf)) { in ops_complete_biofill()
1396 rbi2 = r5_next_bio(conf, rbi, dev->sector); in ops_complete_biofill()
1413 struct r5conf *conf = sh->raid_conf; in ops_run_biofill() local
1428 dev->sector + RAID5_STRIPE_SECTORS(conf)) { in ops_run_biofill()
1432 rbi = r5_next_bio(conf, rbi, dev->sector); in ops_run_biofill()
1858 struct r5conf *conf = sh->raid_conf; in ops_run_biodrain() local
1891 dev->sector + RAID5_STRIPE_SECTORS(conf)) { in ops_run_biodrain()
1902 r5c_is_writeback(conf->log)); in ops_run_biodrain()
1904 !r5c_is_writeback(conf->log)) { in ops_run_biodrain()
1910 wbi = r5_next_bio(conf, wbi, dev->sector); in ops_run_biodrain()
2215 struct r5conf *conf = sh->raid_conf; in raid_run_ops() local
2216 int level = conf->level; in raid_run_ops()
2221 percpu = per_cpu_ptr(conf->percpu, cpu); in raid_run_ops()
2294 int disks, struct r5conf *conf) in alloc_stripe() argument
2308 sh->raid_conf = conf; in alloc_stripe()
2317 if (raid5_has_ppl(conf)) { in alloc_stripe()
2325 if (init_stripe_shared_pages(sh, conf, disks)) { in alloc_stripe()
2333 static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) in grow_one_stripe() argument
2337 sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf); in grow_one_stripe()
2343 free_stripe(conf->slab_cache, sh); in grow_one_stripe()
2347 conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; in grow_one_stripe()
2349 atomic_inc(&conf->active_stripes); in grow_one_stripe()
2352 conf->max_nr_stripes++; in grow_one_stripe()
2356 static int grow_stripes(struct r5conf *conf, int num) in grow_stripes() argument
2359 size_t namelen = sizeof(conf->cache_name[0]); in grow_stripes()
2360 int devs = max(conf->raid_disks, conf->previous_raid_disks); in grow_stripes()
2362 if (conf->mddev->gendisk) in grow_stripes()
2363 snprintf(conf->cache_name[0], namelen, in grow_stripes()
2364 "raid%d-%s", conf->level, mdname(conf->mddev)); in grow_stripes()
2366 snprintf(conf->cache_name[0], namelen, in grow_stripes()
2367 "raid%d-%p", conf->level, conf->mddev); in grow_stripes()
2368 snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]); in grow_stripes()
2370 conf->active_name = 0; in grow_stripes()
2371 sc = kmem_cache_create(conf->cache_name[conf->active_name], in grow_stripes()
2376 conf->slab_cache = sc; in grow_stripes()
2377 conf->pool_size = devs; in grow_stripes()
2379 if (!grow_one_stripe(conf, GFP_KERNEL)) in grow_stripes()
2426 static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) in resize_chunks() argument
2436 if (conf->scribble_disks >= new_disks && in resize_chunks()
2437 conf->scribble_sectors >= new_sectors) in resize_chunks()
2439 mddev_suspend(conf->mddev); in resize_chunks()
2445 percpu = per_cpu_ptr(conf->percpu, cpu); in resize_chunks()
2447 new_sectors / RAID5_STRIPE_SECTORS(conf)); in resize_chunks()
2453 mddev_resume(conf->mddev); in resize_chunks()
2455 conf->scribble_disks = new_disks; in resize_chunks()
2456 conf->scribble_sectors = new_sectors; in resize_chunks()
2461 static int resize_stripes(struct r5conf *conf, int newsize) in resize_stripes() argument
2475 * 3/ reallocate conf->disks to be suitable bigger. If this fails, in resize_stripes()
2494 md_allow_write(conf->mddev); in resize_stripes()
2497 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], in resize_stripes()
2504 mutex_lock(&conf->cache_size_mutex); in resize_stripes()
2506 for (i = conf->max_nr_stripes; i; i--) { in resize_stripes()
2507 nsh = alloc_stripe(sc, GFP_KERNEL, newsize, conf); in resize_stripes()
2521 mutex_unlock(&conf->cache_size_mutex); in resize_stripes()
2531 lock_device_hash_lock(conf, hash); in resize_stripes()
2532 wait_event_cmd(conf->wait_for_stripe, in resize_stripes()
2533 !list_empty(conf->inactive_list + hash), in resize_stripes()
2534 unlock_device_hash_lock(conf, hash), in resize_stripes()
2535 lock_device_hash_lock(conf, hash)); in resize_stripes()
2536 osh = get_free_stripe(conf, hash); in resize_stripes()
2537 unlock_device_hash_lock(conf, hash); in resize_stripes()
2545 for(i=0; i<conf->pool_size; i++) { in resize_stripes()
2551 free_stripe(conf->slab_cache, osh); in resize_stripes()
2553 if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + in resize_stripes()
2554 !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) { in resize_stripes()
2559 kmem_cache_destroy(conf->slab_cache); in resize_stripes()
2564 * conf->disks and the scribble region in resize_stripes()
2568 for (i = 0; i < conf->pool_size; i++) in resize_stripes()
2569 ndisks[i] = conf->disks[i]; in resize_stripes()
2571 for (i = conf->pool_size; i < newsize; i++) { in resize_stripes()
2578 for (i = conf->pool_size; i < newsize; i++) in resize_stripes()
2583 kfree(conf->disks); in resize_stripes()
2584 conf->disks = ndisks; in resize_stripes()
2589 conf->slab_cache = sc; in resize_stripes()
2590 conf->active_name = 1-conf->active_name; in resize_stripes()
2606 for (i = conf->raid_disks; i < newsize; i++) { in resize_stripes()
2614 for (i=conf->raid_disks; i < newsize; i++) in resize_stripes()
2629 conf->pool_size = newsize; in resize_stripes()
2630 mutex_unlock(&conf->cache_size_mutex); in resize_stripes()
2635 static int drop_one_stripe(struct r5conf *conf) in drop_one_stripe() argument
2638 int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK; in drop_one_stripe()
2640 spin_lock_irq(conf->hash_locks + hash); in drop_one_stripe()
2641 sh = get_free_stripe(conf, hash); in drop_one_stripe()
2642 spin_unlock_irq(conf->hash_locks + hash); in drop_one_stripe()
2647 free_stripe(conf->slab_cache, sh); in drop_one_stripe()
2648 atomic_dec(&conf->active_stripes); in drop_one_stripe()
2649 conf->max_nr_stripes--; in drop_one_stripe()
2653 static void shrink_stripes(struct r5conf *conf) in shrink_stripes() argument
2655 while (conf->max_nr_stripes && in shrink_stripes()
2656 drop_one_stripe(conf)) in shrink_stripes()
2659 kmem_cache_destroy(conf->slab_cache); in shrink_stripes()
2660 conf->slab_cache = NULL; in shrink_stripes()
2666 struct r5conf *conf = sh->raid_conf; in raid5_end_read_request() local
2690 rdev = conf->disks[i].replacement; in raid5_end_read_request()
2692 rdev = conf->disks[i].rdev; in raid5_end_read_request()
2694 if (use_new_offset(conf, sh)) in raid5_end_read_request()
2707 mdname(conf->mddev), RAID5_STRIPE_SECTORS(conf), in raid5_end_read_request()
2710 atomic_add(RAID5_STRIPE_SECTORS(conf), &rdev->corrected_errors); in raid5_end_read_request()
2736 mdname(conf->mddev), in raid5_end_read_request()
2739 else if (conf->mddev->degraded >= conf->max_degraded) { in raid5_end_read_request()
2743 mdname(conf->mddev), in raid5_end_read_request()
2751 mdname(conf->mddev), in raid5_end_read_request()
2755 > conf->max_nr_stripes) { in raid5_end_read_request()
2758 mdname(conf->mddev), in raid5_end_read_request()
2760 conf->max_nr_stripes); in raid5_end_read_request()
2762 mdname(conf->mddev), bdn); in raid5_end_read_request()
2783 rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), 0))) in raid5_end_read_request()
2784 md_error(conf->mddev, rdev); in raid5_end_read_request()
2787 rdev_dec_pending(rdev, conf->mddev); in raid5_end_read_request()
2797 struct r5conf *conf = sh->raid_conf; in raid5_end_write_request() local
2806 rdev = conf->disks[i].rdev; in raid5_end_write_request()
2810 rdev = conf->disks[i].replacement; in raid5_end_write_request()
2818 rdev = conf->disks[i].rdev; in raid5_end_write_request()
2833 md_error(conf->mddev, rdev); in raid5_end_write_request()
2835 RAID5_STRIPE_SECTORS(conf), in raid5_end_write_request()
2847 RAID5_STRIPE_SECTORS(conf), in raid5_end_write_request()
2858 rdev_dec_pending(rdev, conf->mddev); in raid5_end_write_request()
2876 struct r5conf *conf = mddev->private; in raid5_error() local
2883 spin_lock_irqsave(&conf->device_lock, flags); in raid5_error()
2886 mddev->degraded = raid5_calc_degraded(conf); in raid5_error()
2888 if (has_failed(conf)) { in raid5_error()
2889 set_bit(MD_BROKEN, &conf->mddev->flags); in raid5_error()
2890 conf->recovery_disabled = mddev->recovery_disabled; in raid5_error()
2893 mdname(mddev), mddev->degraded, conf->raid_disks); in raid5_error()
2896 mdname(mddev), conf->raid_disks - mddev->degraded); in raid5_error()
2899 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_error()
2912 sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, in raid5_compute_sector() argument
2922 int algorithm = previous ? conf->prev_algo in raid5_compute_sector()
2923 : conf->algorithm; in raid5_compute_sector()
2924 int sectors_per_chunk = previous ? conf->prev_chunk_sectors in raid5_compute_sector()
2925 : conf->chunk_sectors; in raid5_compute_sector()
2926 int raid_disks = previous ? conf->previous_raid_disks in raid5_compute_sector()
2927 : conf->raid_disks; in raid5_compute_sector()
2928 int data_disks = raid_disks - conf->max_degraded; in raid5_compute_sector()
2948 switch(conf->level) { in raid5_compute_sector()
3116 struct r5conf *conf = sh->raid_conf; in raid5_compute_blocknr() local
3118 int data_disks = raid_disks - conf->max_degraded; in raid5_compute_blocknr()
3120 int sectors_per_chunk = previous ? conf->prev_chunk_sectors in raid5_compute_blocknr()
3121 : conf->chunk_sectors; in raid5_compute_blocknr()
3122 int algorithm = previous ? conf->prev_algo in raid5_compute_blocknr()
3123 : conf->algorithm; in raid5_compute_blocknr()
3136 switch(conf->level) { in raid5_compute_blocknr()
3223 check = raid5_compute_sector(conf, r_sector, in raid5_compute_blocknr()
3228 mdname(conf->mddev)); in raid5_compute_blocknr()
3255 * stripe, we need to reserve (conf->raid_disk + 1) pages per stripe
3257 * operation, we only need (conf->max_degraded + 1) pages per stripe.
3272 static inline bool delay_towrite(struct r5conf *conf, in delay_towrite() argument
3281 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && in delay_towrite()
3295 struct r5conf *conf = sh->raid_conf; in schedule_reconstruction() local
3296 int level = conf->level; in schedule_reconstruction()
3310 if (dev->towrite && !delay_towrite(conf, dev, s)) { in schedule_reconstruction()
3336 if (s->locked + conf->max_degraded == disks) in schedule_reconstruction()
3338 atomic_inc(&conf->pending_full_writes); in schedule_reconstruction()
3408 struct r5conf *conf = sh->raid_conf; in add_stripe_bio() local
3434 if (forwrite && raid5_has_ppl(conf)) { in add_stripe_bio()
3460 if (first + conf->chunk_sectors * (count - 1) != last) in add_stripe_bio()
3472 md_write_inc(conf->mddev, bi); in add_stripe_bio()
3478 sector < sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf) && in add_stripe_bio()
3480 bi = r5_next_bio(conf, bi, sh->dev[dd_idx].sector)) { in add_stripe_bio()
3484 if (sector >= sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf)) in add_stripe_bio()
3493 if (conf->mddev->bitmap && firstwrite) { in add_stripe_bio()
3508 md_bitmap_startwrite(conf->mddev->bitmap, sh->sector, in add_stripe_bio()
3509 RAID5_STRIPE_SECTORS(conf), 0); in add_stripe_bio()
3513 sh->bm_seq = conf->seq_flush+1; in add_stripe_bio()
3520 stripe_add_to_batch_list(conf, sh); in add_stripe_bio()
3529 static void end_reshape(struct r5conf *conf);
3531 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, in stripe_set_idx() argument
3535 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; in stripe_set_idx()
3538 int disks = previous ? conf->previous_raid_disks : conf->raid_disks; in stripe_set_idx()
3540 raid5_compute_sector(conf, in stripe_set_idx()
3541 stripe * (disks - conf->max_degraded) in stripe_set_idx()
3548 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, in handle_failed_stripe() argument
3560 rdev = rcu_dereference(conf->disks[i].rdev); in handle_failed_stripe()
3571 RAID5_STRIPE_SECTORS(conf), 0)) in handle_failed_stripe()
3572 md_error(conf->mddev, rdev); in handle_failed_stripe()
3573 rdev_dec_pending(rdev, conf->mddev); in handle_failed_stripe()
3588 wake_up(&conf->wait_for_overlap); in handle_failed_stripe()
3591 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3592 struct bio *nextbi = r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3594 md_write_end(conf->mddev); in handle_failed_stripe()
3599 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3600 RAID5_STRIPE_SECTORS(conf), 0, 0); in handle_failed_stripe()
3612 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3613 struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3615 md_write_end(conf->mddev); in handle_failed_stripe()
3624 s->failed > conf->max_degraded && in handle_failed_stripe()
3632 wake_up(&conf->wait_for_overlap); in handle_failed_stripe()
3636 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3638 r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3645 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3646 RAID5_STRIPE_SECTORS(conf), 0, 0); in handle_failed_stripe()
3656 if (atomic_dec_and_test(&conf->pending_full_writes)) in handle_failed_stripe()
3657 md_wakeup_thread(conf->mddev->thread); in handle_failed_stripe()
3661 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, in handle_failed_sync() argument
3670 wake_up(&conf->wait_for_overlap); in handle_failed_sync()
3680 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { in handle_failed_sync()
3685 for (i = 0; i < conf->raid_disks; i++) { in handle_failed_sync()
3686 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in handle_failed_sync()
3691 RAID5_STRIPE_SECTORS(conf), 0)) in handle_failed_sync()
3693 rdev = rcu_dereference(conf->disks[i].replacement); in handle_failed_sync()
3698 RAID5_STRIPE_SECTORS(conf), 0)) in handle_failed_sync()
3703 conf->recovery_disabled = in handle_failed_sync()
3704 conf->mddev->recovery_disabled; in handle_failed_sync()
3706 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), !abort); in handle_failed_sync()
3960 static void handle_stripe_clean_event(struct r5conf *conf, in handle_stripe_clean_event() argument
3991 dev->sector + RAID5_STRIPE_SECTORS(conf)) { in handle_stripe_clean_event()
3992 wbi2 = r5_next_bio(conf, wbi, dev->sector); in handle_stripe_clean_event()
3993 md_write_end(conf->mddev); in handle_stripe_clean_event()
3997 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_stripe_clean_event()
3998 RAID5_STRIPE_SECTORS(conf), in handle_stripe_clean_event()
4036 spin_lock_irq(conf->hash_locks + hash); in handle_stripe_clean_event()
4038 spin_unlock_irq(conf->hash_locks + hash); in handle_stripe_clean_event()
4053 if (atomic_dec_and_test(&conf->pending_full_writes)) in handle_stripe_clean_event()
4054 md_wakeup_thread(conf->mddev->thread); in handle_stripe_clean_event()
4075 static int handle_stripe_dirtying(struct r5conf *conf, in handle_stripe_dirtying() argument
4081 sector_t recovery_cp = conf->mddev->recovery_cp; in handle_stripe_dirtying()
4090 if (conf->rmw_level == PARITY_DISABLE_RMW || in handle_stripe_dirtying()
4098 conf->rmw_level, (unsigned long long)recovery_cp, in handle_stripe_dirtying()
4103 if (((dev->towrite && !delay_towrite(conf, dev, s)) || in handle_stripe_dirtying()
4130 if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) { in handle_stripe_dirtying()
4132 if (conf->mddev->queue) in handle_stripe_dirtying()
4133 blk_add_trace_msg(conf->mddev->queue, in handle_stripe_dirtying()
4154 &conf->cache_state)) { in handle_stripe_dirtying()
4168 if (((dev->towrite && !delay_towrite(conf, dev, s)) || in handle_stripe_dirtying()
4187 if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_PREFER_RMW)) && rcw > 0) { in handle_stripe_dirtying()
4212 if (rcw && conf->mddev->queue) in handle_stripe_dirtying()
4213 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", in handle_stripe_dirtying()
4239 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks5() argument
4301 atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches); in handle_parity_checks5()
4302 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { in handle_parity_checks5()
4306 "%llu-%llu\n", mdname(conf->mddev), in handle_parity_checks5()
4309 RAID5_STRIPE_SECTORS(conf)); in handle_parity_checks5()
4332 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks6() argument
4428 mdname(conf->mddev), in handle_parity_checks6()
4466 atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches); in handle_parity_checks6()
4467 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { in handle_parity_checks6()
4471 "%llu-%llu\n", mdname(conf->mddev), in handle_parity_checks6()
4474 RAID5_STRIPE_SECTORS(conf)); in handle_parity_checks6()
4509 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) in handle_stripe_expansion() argument
4526 sector_t s = raid5_compute_sector(conf, bn, 0, in handle_stripe_expansion()
4528 sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1); in handle_stripe_expansion()
4546 sh->dev[i].offset, RAID5_STRIPE_SIZE(conf), in handle_stripe_expansion()
4551 for (j = 0; j < conf->raid_disks; j++) in handle_stripe_expansion()
4556 if (j == conf->raid_disks) { in handle_stripe_expansion()
4583 struct r5conf *conf = sh->raid_conf; in analyse_stripe() local
4595 s->log_failed = r5l_log_disk_error(conf); in analyse_stripe()
4643 rdev = rcu_dereference(conf->disks[i].replacement); in analyse_stripe()
4645 rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) && in analyse_stripe()
4646 !is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), in analyse_stripe()
4654 rdev = rcu_dereference(conf->disks[i].rdev); in analyse_stripe()
4660 is_bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), in analyse_stripe()
4687 else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <= rdev->recovery_offset) in analyse_stripe()
4702 conf->disks[i].rdev); in analyse_stripe()
4715 conf->disks[i].rdev); in analyse_stripe()
4724 conf->disks[i].replacement); in analyse_stripe()
4746 conf->disks[i].replacement); in analyse_stripe()
4767 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
4768 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) in analyse_stripe()
4878 struct r5conf *conf = sh->raid_conf; in handle_stripe() local
4935 test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) { in handle_stripe()
4947 rdev_dec_pending(s.blocked_rdev, conf->mddev); in handle_stripe()
4967 if (s.failed > conf->max_degraded || in handle_stripe()
4973 handle_failed_stripe(conf, sh, &s, disks); in handle_stripe()
4975 handle_failed_sync(conf, sh, &s); in handle_stripe()
5028 || conf->level < 6; in handle_stripe()
5039 handle_stripe_clean_event(conf, sh, disks); in handle_stripe()
5042 r5c_handle_cached_data_endio(conf, sh, disks); in handle_stripe()
5061 r5c_finish_stripe_write_out(conf, sh, &s); in handle_stripe()
5073 if (!r5c_is_writeback(conf->log)) { in handle_stripe()
5075 handle_stripe_dirtying(conf, sh, &s, disks); in handle_stripe()
5081 ret = r5c_try_caching_write(conf, sh, &s, in handle_stripe()
5094 ret = handle_stripe_dirtying(conf, sh, &s, in handle_stripe()
5111 if (conf->level == 6) in handle_stripe()
5112 handle_parity_checks6(conf, sh, &s, disks); in handle_stripe()
5114 handle_parity_checks5(conf, sh, &s, disks); in handle_stripe()
5121 for (i = 0; i < conf->raid_disks; i++) in handle_stripe()
5135 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1); in handle_stripe()
5138 wake_up(&conf->wait_for_overlap); in handle_stripe()
5144 if (s.failed <= conf->max_degraded && !conf->mddev->ro) in handle_stripe()
5165 = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); in handle_stripe()
5174 atomic_inc(&conf->preread_active_stripes); in handle_stripe()
5183 for (i = conf->raid_disks; i--; ) { in handle_stripe()
5193 sh->disks = conf->raid_disks; in handle_stripe()
5194 stripe_set_idx(sh->sector, conf, 0, sh); in handle_stripe()
5198 atomic_dec(&conf->reshape_stripes); in handle_stripe()
5199 wake_up(&conf->wait_for_overlap); in handle_stripe()
5200 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1); in handle_stripe()
5205 handle_stripe_expansion(conf, sh); in handle_stripe()
5210 if (conf->mddev->external) in handle_stripe()
5212 conf->mddev); in handle_stripe()
5219 conf->mddev); in handle_stripe()
5228 rdev = conf->disks[i].rdev; in handle_stripe()
5230 RAID5_STRIPE_SECTORS(conf), 0)) in handle_stripe()
5231 md_error(conf->mddev, rdev); in handle_stripe()
5232 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5235 rdev = conf->disks[i].rdev; in handle_stripe()
5237 RAID5_STRIPE_SECTORS(conf), 0); in handle_stripe()
5238 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5241 rdev = conf->disks[i].replacement; in handle_stripe()
5244 rdev = conf->disks[i].rdev; in handle_stripe()
5246 RAID5_STRIPE_SECTORS(conf), 0); in handle_stripe()
5247 rdev_dec_pending(rdev, conf->mddev); in handle_stripe()
5261 atomic_dec(&conf->preread_active_stripes); in handle_stripe()
5262 if (atomic_read(&conf->preread_active_stripes) < in handle_stripe()
5264 md_wakeup_thread(conf->mddev->thread); in handle_stripe()
5270 static void raid5_activate_delayed(struct r5conf *conf) in raid5_activate_delayed() argument
5272 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { in raid5_activate_delayed()
5273 while (!list_empty(&conf->delayed_list)) { in raid5_activate_delayed()
5274 struct list_head *l = conf->delayed_list.next; in raid5_activate_delayed()
5280 atomic_inc(&conf->preread_active_stripes); in raid5_activate_delayed()
5281 list_add_tail(&sh->lru, &conf->hold_list); in raid5_activate_delayed()
5287 static void activate_bit_delay(struct r5conf *conf, in activate_bit_delay() argument
5292 list_add(&head, &conf->bitmap_list); in activate_bit_delay()
5293 list_del_init(&conf->bitmap_list); in activate_bit_delay()
5300 __release_stripe(conf, sh, &temp_inactive_list[hash]); in activate_bit_delay()
5306 struct r5conf *conf = mddev->private; in in_chunk_boundary() local
5313 chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); in in_chunk_boundary()
5322 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) in add_bio_to_retry() argument
5326 spin_lock_irqsave(&conf->device_lock, flags); in add_bio_to_retry()
5328 bi->bi_next = conf->retry_read_aligned_list; in add_bio_to_retry()
5329 conf->retry_read_aligned_list = bi; in add_bio_to_retry()
5331 spin_unlock_irqrestore(&conf->device_lock, flags); in add_bio_to_retry()
5332 md_wakeup_thread(conf->mddev->thread); in add_bio_to_retry()
5335 static struct bio *remove_bio_from_retry(struct r5conf *conf, in remove_bio_from_retry() argument
5340 bi = conf->retry_read_aligned; in remove_bio_from_retry()
5342 *offset = conf->retry_read_offset; in remove_bio_from_retry()
5343 conf->retry_read_aligned = NULL; in remove_bio_from_retry()
5346 bi = conf->retry_read_aligned_list; in remove_bio_from_retry()
5348 conf->retry_read_aligned_list = bi->bi_next; in remove_bio_from_retry()
5366 struct r5conf *conf; in raid5_align_endio() local
5375 conf = mddev->private; in raid5_align_endio()
5377 rdev_dec_pending(rdev, conf->mddev); in raid5_align_endio()
5381 if (atomic_dec_and_test(&conf->active_aligned_reads)) in raid5_align_endio()
5382 wake_up(&conf->wait_for_quiescent); in raid5_align_endio()
5388 add_bio_to_retry(raid_bi, conf); in raid5_align_endio()
5393 struct r5conf *conf = mddev->private; in raid5_read_one_chunk() local
5419 raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, in raid5_read_one_chunk()
5424 rdev = rcu_dereference(conf->disks[dd_idx].replacement); in raid5_read_one_chunk()
5427 rdev = rcu_dereference(conf->disks[dd_idx].rdev); in raid5_read_one_chunk()
5435 if (r5c_big_stripe_cached(conf, align_bi->bi_iter.bi_sector)) { in raid5_read_one_chunk()
5461 spin_lock_irq(&conf->device_lock); in raid5_read_one_chunk()
5462 wait_event_lock_irq(conf->wait_for_quiescent, in raid5_read_one_chunk()
5463 conf->quiesce == 0, in raid5_read_one_chunk()
5464 conf->device_lock); in raid5_read_one_chunk()
5465 atomic_inc(&conf->active_aligned_reads); in raid5_read_one_chunk()
5466 spin_unlock_irq(&conf->device_lock); in raid5_read_one_chunk()
5489 struct r5conf *conf = mddev->private; in chunk_aligned_read() local
5490 split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split); in chunk_aligned_read()
5512 static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) in __get_priority_stripe() argument
5517 bool second_try = !r5c_is_writeback(conf->log) && in __get_priority_stripe()
5518 !r5l_log_disk_error(conf); in __get_priority_stripe()
5519 bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state) || in __get_priority_stripe()
5520 r5l_log_disk_error(conf); in __get_priority_stripe()
5525 if (conf->worker_cnt_per_group == 0) { in __get_priority_stripe()
5526 handle_list = try_loprio ? &conf->loprio_list : in __get_priority_stripe()
5527 &conf->handle_list; in __get_priority_stripe()
5529 handle_list = try_loprio ? &conf->worker_groups[group].loprio_list : in __get_priority_stripe()
5530 &conf->worker_groups[group].handle_list; in __get_priority_stripe()
5531 wg = &conf->worker_groups[group]; in __get_priority_stripe()
5534 for (i = 0; i < conf->group_cnt; i++) { in __get_priority_stripe()
5535 handle_list = try_loprio ? &conf->worker_groups[i].loprio_list : in __get_priority_stripe()
5536 &conf->worker_groups[i].handle_list; in __get_priority_stripe()
5537 wg = &conf->worker_groups[i]; in __get_priority_stripe()
5546 list_empty(&conf->hold_list) ? "empty" : "busy", in __get_priority_stripe()
5547 atomic_read(&conf->pending_full_writes), conf->bypass_count); in __get_priority_stripe()
5552 if (list_empty(&conf->hold_list)) in __get_priority_stripe()
5553 conf->bypass_count = 0; in __get_priority_stripe()
5555 if (conf->hold_list.next == conf->last_hold) in __get_priority_stripe()
5556 conf->bypass_count++; in __get_priority_stripe()
5558 conf->last_hold = conf->hold_list.next; in __get_priority_stripe()
5559 conf->bypass_count -= conf->bypass_threshold; in __get_priority_stripe()
5560 if (conf->bypass_count < 0) in __get_priority_stripe()
5561 conf->bypass_count = 0; in __get_priority_stripe()
5564 } else if (!list_empty(&conf->hold_list) && in __get_priority_stripe()
5565 ((conf->bypass_threshold && in __get_priority_stripe()
5566 conf->bypass_count > conf->bypass_threshold) || in __get_priority_stripe()
5567 atomic_read(&conf->pending_full_writes) == 0)) { in __get_priority_stripe()
5569 list_for_each_entry(tmp, &conf->hold_list, lru) { in __get_priority_stripe()
5570 if (conf->worker_cnt_per_group == 0 || in __get_priority_stripe()
5580 conf->bypass_count -= conf->bypass_threshold; in __get_priority_stripe()
5581 if (conf->bypass_count < 0) in __get_priority_stripe()
5582 conf->bypass_count = 0; in __get_priority_stripe()
5616 struct r5conf *conf = mddev->private; in raid5_unplug() local
5621 spin_lock_irq(&conf->device_lock); in raid5_unplug()
5637 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); in raid5_unplug()
5640 spin_unlock_irq(&conf->device_lock); in raid5_unplug()
5642 release_inactive_stripe_list(conf, cb->temp_inactive_list, in raid5_unplug()
5679 struct r5conf *conf = mddev->private; in make_discard_request() local
5688 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1); in make_discard_request()
5693 stripe_sectors = conf->chunk_sectors * in make_discard_request()
5694 (conf->raid_disks - conf->max_degraded); in make_discard_request()
5699 logical_sector *= conf->chunk_sectors; in make_discard_request()
5700 last_sector *= conf->chunk_sectors; in make_discard_request()
5703 logical_sector += RAID5_STRIPE_SECTORS(conf)) { in make_discard_request()
5707 sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0); in make_discard_request()
5708 prepare_to_wait(&conf->wait_for_overlap, &w, in make_discard_request()
5718 for (d = 0; d < conf->raid_disks; d++) { in make_discard_request()
5730 finish_wait(&conf->wait_for_overlap, &w); in make_discard_request()
5732 for (d = 0; d < conf->raid_disks; d++) { in make_discard_request()
5742 if (conf->mddev->bitmap) { in make_discard_request()
5744 d < conf->raid_disks - conf->max_degraded; in make_discard_request()
5748 RAID5_STRIPE_SECTORS(conf), in make_discard_request()
5750 sh->bm_seq = conf->seq_flush + 1; in make_discard_request()
5757 atomic_inc(&conf->preread_active_stripes); in make_discard_request()
5766 struct r5conf *conf = mddev->private; in raid5_make_request() local
5777 int ret = log_handle_flush_request(conf, bi); in raid5_make_request()
5813 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1); in raid5_make_request()
5817 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); in raid5_make_request()
5818 for (; logical_sector < last_sector; logical_sector += RAID5_STRIPE_SECTORS(conf)) { in raid5_make_request()
5824 seq = read_seqcount_begin(&conf->gen_lock); in raid5_make_request()
5827 prepare_to_wait(&conf->wait_for_overlap, &w, in raid5_make_request()
5829 if (unlikely(conf->reshape_progress != MaxSector)) { in raid5_make_request()
5838 spin_lock_irq(&conf->device_lock); in raid5_make_request()
5840 ? logical_sector < conf->reshape_progress in raid5_make_request()
5841 : logical_sector >= conf->reshape_progress) { in raid5_make_request()
5845 ? logical_sector < conf->reshape_safe in raid5_make_request()
5846 : logical_sector >= conf->reshape_safe) { in raid5_make_request()
5847 spin_unlock_irq(&conf->device_lock); in raid5_make_request()
5853 spin_unlock_irq(&conf->device_lock); in raid5_make_request()
5856 new_sector = raid5_compute_sector(conf, logical_sector, in raid5_make_request()
5863 sh = raid5_get_active_stripe(conf, new_sector, previous, in raid5_make_request()
5876 spin_lock_irq(&conf->device_lock); in raid5_make_request()
5878 ? logical_sector >= conf->reshape_progress in raid5_make_request()
5879 : logical_sector < conf->reshape_progress) in raid5_make_request()
5882 spin_unlock_irq(&conf->device_lock); in raid5_make_request()
5890 if (read_seqcount_retry(&conf->gen_lock, seq)) { in raid5_make_request()
5921 atomic_inc(&conf->preread_active_stripes); in raid5_make_request()
5929 finish_wait(&conf->wait_for_overlap, &w); in raid5_make_request()
5950 struct r5conf *conf = mddev->private; in reshape_request() local
5954 int raid_disks = conf->previous_raid_disks; in reshape_request()
5955 int data_disks = raid_disks - conf->max_degraded; in reshape_request()
5956 int new_data_disks = conf->raid_disks - conf->max_degraded; in reshape_request()
5968 conf->reshape_progress < raid5_size(mddev, 0, 0)) { in reshape_request()
5970 - conf->reshape_progress; in reshape_request()
5972 conf->reshape_progress == MaxSector) { in reshape_request()
5976 conf->reshape_progress > 0) in reshape_request()
5977 sector_nr = conf->reshape_progress; in reshape_request()
5993 reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors); in reshape_request()
6001 writepos = conf->reshape_progress; in reshape_request()
6003 readpos = conf->reshape_progress; in reshape_request()
6005 safepos = conf->reshape_safe; in reshape_request()
6026 BUG_ON(conf->reshape_progress == 0); in reshape_request()
6057 if (conf->min_offset_diff < 0) { in reshape_request()
6058 safepos += -conf->min_offset_diff; in reshape_request()
6059 readpos += -conf->min_offset_diff; in reshape_request()
6061 writepos += conf->min_offset_diff; in reshape_request()
6066 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { in reshape_request()
6068 wait_event(conf->wait_for_overlap, in reshape_request()
6069 atomic_read(&conf->reshape_stripes)==0 in reshape_request()
6071 if (atomic_read(&conf->reshape_stripes) != 0) in reshape_request()
6073 mddev->reshape_position = conf->reshape_progress; in reshape_request()
6084 conf->reshape_checkpoint = jiffies; in reshape_request()
6091 spin_lock_irq(&conf->device_lock); in reshape_request()
6092 conf->reshape_safe = mddev->reshape_position; in reshape_request()
6093 spin_unlock_irq(&conf->device_lock); in reshape_request()
6094 wake_up(&conf->wait_for_overlap); in reshape_request()
6099 for (i = 0; i < reshape_sectors; i += RAID5_STRIPE_SECTORS(conf)) { in reshape_request()
6102 sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1); in reshape_request()
6104 atomic_inc(&conf->reshape_stripes); in reshape_request()
6112 if (conf->level == 6 && in reshape_request()
6120 memset(page_address(sh->dev[j].page), 0, RAID5_STRIPE_SIZE(conf)); in reshape_request()
6130 spin_lock_irq(&conf->device_lock); in reshape_request()
6132 conf->reshape_progress -= reshape_sectors * new_data_disks; in reshape_request()
6134 conf->reshape_progress += reshape_sectors * new_data_disks; in reshape_request()
6135 spin_unlock_irq(&conf->device_lock); in reshape_request()
6142 raid5_compute_sector(conf, stripe_addr*(new_data_disks), in reshape_request()
6145 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) in reshape_request()
6151 sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1); in reshape_request()
6155 first_sector += RAID5_STRIPE_SECTORS(conf); in reshape_request()
6175 wait_event(conf->wait_for_overlap, in reshape_request()
6176 atomic_read(&conf->reshape_stripes) == 0 in reshape_request()
6178 if (atomic_read(&conf->reshape_stripes) != 0) in reshape_request()
6180 mddev->reshape_position = conf->reshape_progress; in reshape_request()
6190 conf->reshape_checkpoint = jiffies; in reshape_request()
6198 spin_lock_irq(&conf->device_lock); in reshape_request()
6199 conf->reshape_safe = mddev->reshape_position; in reshape_request()
6200 spin_unlock_irq(&conf->device_lock); in reshape_request()
6201 wake_up(&conf->wait_for_overlap); in reshape_request()
6211 struct r5conf *conf = mddev->private; in raid5_sync_request() local
6222 end_reshape(conf); in raid5_sync_request()
6230 conf->fullsync = 0; in raid5_sync_request()
6237 wait_event(conf->wait_for_overlap, conf->quiesce != 2); in raid5_sync_request()
6252 if (mddev->degraded >= conf->max_degraded && in raid5_sync_request()
6259 !conf->fullsync && in raid5_sync_request()
6261 sync_blocks >= RAID5_STRIPE_SECTORS(conf)) { in raid5_sync_request()
6263 do_div(sync_blocks, RAID5_STRIPE_SECTORS(conf)); in raid5_sync_request()
6266 return sync_blocks * RAID5_STRIPE_SECTORS(conf); in raid5_sync_request()
6271 sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0); in raid5_sync_request()
6273 sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0); in raid5_sync_request()
6284 for (i = 0; i < conf->raid_disks; i++) { in raid5_sync_request()
6285 struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev); in raid5_sync_request()
6299 return RAID5_STRIPE_SECTORS(conf); in raid5_sync_request()
6302 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio, in retry_aligned_read() argument
6322 ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1); in retry_aligned_read()
6323 sector = raid5_compute_sector(conf, logical_sector, in retry_aligned_read()
6328 logical_sector += RAID5_STRIPE_SECTORS(conf), in retry_aligned_read()
6329 sector += RAID5_STRIPE_SECTORS(conf), in retry_aligned_read()
6336 sh = raid5_get_active_stripe(conf, sector, 0, 1, 1); in retry_aligned_read()
6340 conf->retry_read_aligned = raid_bio; in retry_aligned_read()
6341 conf->retry_read_offset = scnt; in retry_aligned_read()
6347 conf->retry_read_aligned = raid_bio; in retry_aligned_read()
6348 conf->retry_read_offset = scnt; in retry_aligned_read()
6360 if (atomic_dec_and_test(&conf->active_aligned_reads)) in retry_aligned_read()
6361 wake_up(&conf->wait_for_quiescent); in retry_aligned_read()
6365 static int handle_active_stripes(struct r5conf *conf, int group, in handle_active_stripes() argument
6368 __releases(&conf->device_lock) in handle_active_stripes()
6369 __acquires(&conf->device_lock) in handle_active_stripes()
6376 (sh = __get_priority_stripe(conf, group)) != NULL) in handle_active_stripes()
6384 spin_unlock_irq(&conf->device_lock); in handle_active_stripes()
6385 log_flush_stripe_to_raid(conf); in handle_active_stripes()
6386 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
6391 spin_unlock_irq(&conf->device_lock); in handle_active_stripes()
6393 release_inactive_stripe_list(conf, temp_inactive_list, in handle_active_stripes()
6396 r5l_flush_stripe_to_raid(conf->log); in handle_active_stripes()
6398 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
6404 log_write_stripe_run(conf); in handle_active_stripes()
6408 spin_lock_irq(&conf->device_lock); in handle_active_stripes()
6411 __release_stripe(conf, batch[i], &temp_inactive_list[hash]); in handle_active_stripes()
6420 struct r5conf *conf = group->conf; in raid5_do_work() local
6421 struct mddev *mddev = conf->mddev; in raid5_do_work()
6422 int group_id = group - conf->worker_groups; in raid5_do_work()
6430 spin_lock_irq(&conf->device_lock); in raid5_do_work()
6434 released = release_stripe_list(conf, worker->temp_inactive_list); in raid5_do_work()
6436 batch_size = handle_active_stripes(conf, group_id, worker, in raid5_do_work()
6444 conf->device_lock); in raid5_do_work()
6448 spin_unlock_irq(&conf->device_lock); in raid5_do_work()
6450 flush_deferred_bios(conf); in raid5_do_work()
6452 r5l_flush_stripe_to_raid(conf->log); in raid5_do_work()
6470 struct r5conf *conf = mddev->private; in raid5d() local
6480 spin_lock_irq(&conf->device_lock); in raid5d()
6486 released = release_stripe_list(conf, conf->temp_inactive_list); in raid5d()
6488 clear_bit(R5_DID_ALLOC, &conf->cache_state); in raid5d()
6491 !list_empty(&conf->bitmap_list)) { in raid5d()
6493 conf->seq_flush++; in raid5d()
6494 spin_unlock_irq(&conf->device_lock); in raid5d()
6496 spin_lock_irq(&conf->device_lock); in raid5d()
6497 conf->seq_write = conf->seq_flush; in raid5d()
6498 activate_bit_delay(conf, conf->temp_inactive_list); in raid5d()
6500 raid5_activate_delayed(conf); in raid5d()
6502 while ((bio = remove_bio_from_retry(conf, &offset))) { in raid5d()
6504 spin_unlock_irq(&conf->device_lock); in raid5d()
6505 ok = retry_aligned_read(conf, bio, offset); in raid5d()
6506 spin_lock_irq(&conf->device_lock); in raid5d()
6512 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, in raid5d()
6513 conf->temp_inactive_list); in raid5d()
6519 spin_unlock_irq(&conf->device_lock); in raid5d()
6521 spin_lock_irq(&conf->device_lock); in raid5d()
6533 conf->device_lock); in raid5d()
6537 spin_unlock_irq(&conf->device_lock); in raid5d()
6538 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) && in raid5d()
6539 mutex_trylock(&conf->cache_size_mutex)) { in raid5d()
6540 grow_one_stripe(conf, __GFP_NOWARN); in raid5d()
6544 set_bit(R5_DID_ALLOC, &conf->cache_state); in raid5d()
6545 mutex_unlock(&conf->cache_size_mutex); in raid5d()
6548 flush_deferred_bios(conf); in raid5d()
6550 r5l_flush_stripe_to_raid(conf->log); in raid5d()
6561 struct r5conf *conf; in raid5_show_stripe_cache_size() local
6564 conf = mddev->private; in raid5_show_stripe_cache_size()
6565 if (conf) in raid5_show_stripe_cache_size()
6566 ret = sprintf(page, "%d\n", conf->min_nr_stripes); in raid5_show_stripe_cache_size()
6575 struct r5conf *conf = mddev->private; in raid5_set_cache_size() local
6580 conf->min_nr_stripes = size; in raid5_set_cache_size()
6581 mutex_lock(&conf->cache_size_mutex); in raid5_set_cache_size()
6582 while (size < conf->max_nr_stripes && in raid5_set_cache_size()
6583 drop_one_stripe(conf)) in raid5_set_cache_size()
6585 mutex_unlock(&conf->cache_size_mutex); in raid5_set_cache_size()
6589 mutex_lock(&conf->cache_size_mutex); in raid5_set_cache_size()
6590 while (size > conf->max_nr_stripes) in raid5_set_cache_size()
6591 if (!grow_one_stripe(conf, GFP_KERNEL)) { in raid5_set_cache_size()
6592 conf->min_nr_stripes = conf->max_nr_stripes; in raid5_set_cache_size()
6596 mutex_unlock(&conf->cache_size_mutex); in raid5_set_cache_size()
6605 struct r5conf *conf; in raid5_store_stripe_cache_size() local
6616 conf = mddev->private; in raid5_store_stripe_cache_size()
6617 if (!conf) in raid5_store_stripe_cache_size()
6634 struct r5conf *conf = mddev->private; in raid5_show_rmw_level() local
6635 if (conf) in raid5_show_rmw_level()
6636 return sprintf(page, "%d\n", conf->rmw_level); in raid5_show_rmw_level()
6644 struct r5conf *conf = mddev->private; in raid5_store_rmw_level() local
6647 if (!conf) in raid5_store_rmw_level()
6664 conf->rmw_level = new; in raid5_store_rmw_level()
6676 struct r5conf *conf; in raid5_show_stripe_size() local
6680 conf = mddev->private; in raid5_show_stripe_size()
6681 if (conf) in raid5_show_stripe_size()
6682 ret = sprintf(page, "%lu\n", RAID5_STRIPE_SIZE(conf)); in raid5_show_stripe_size()
6691 struct r5conf *conf; in raid5_store_stripe_size() local
6715 conf = mddev->private; in raid5_store_stripe_size()
6716 if (!conf) { in raid5_store_stripe_size()
6721 if (new == conf->stripe_size) in raid5_store_stripe_size()
6725 conf->stripe_size, new); in raid5_store_stripe_size()
6736 mutex_lock(&conf->cache_size_mutex); in raid5_store_stripe_size()
6737 size = conf->max_nr_stripes; in raid5_store_stripe_size()
6739 shrink_stripes(conf); in raid5_store_stripe_size()
6741 conf->stripe_size = new; in raid5_store_stripe_size()
6742 conf->stripe_shift = ilog2(new) - 9; in raid5_store_stripe_size()
6743 conf->stripe_sectors = new >> 9; in raid5_store_stripe_size()
6744 if (grow_stripes(conf, size)) { in raid5_store_stripe_size()
6749 mutex_unlock(&conf->cache_size_mutex); in raid5_store_stripe_size()
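Editor's note: when a new stripe_size is accepted in the raid5_store_stripe_size entries above, the cache is torn down, the three derived geometry fields are recomputed, and the cache is regrown under cache_size_mutex. The derivation of the derived fields is simple enough to show directly; the demo below uses made-up values and an ilog2 stand-in, and is not kernel code.

/* How the derived stripe geometry fields relate to stripe_size. */
#include <stdio.h>

/* ilog2() equivalent for a power-of-two value. */
static unsigned int ilog2_u(unsigned long v)
{
	unsigned int r = 0;
	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long stripe_size = 16384;                /* bytes */
	unsigned int  stripe_shift = ilog2_u(stripe_size) - 9;
	unsigned long stripe_sectors = stripe_size >> 9;  /* 512-byte sectors */

	printf("size=%lu shift=%u sectors=%lu\n",
	       stripe_size, stripe_shift, stripe_sectors);
	/* e.g. 16384 -> shift 5, sectors 32 */
	return 0;
}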
6771 struct r5conf *conf; in raid5_show_preread_threshold() local
6774 conf = mddev->private; in raid5_show_preread_threshold()
6775 if (conf) in raid5_show_preread_threshold()
6776 ret = sprintf(page, "%d\n", conf->bypass_threshold); in raid5_show_preread_threshold()
6784 struct r5conf *conf; in raid5_store_preread_threshold() local
6796 conf = mddev->private; in raid5_store_preread_threshold()
6797 if (!conf) in raid5_store_preread_threshold()
6799 else if (new > conf->min_nr_stripes) in raid5_store_preread_threshold()
6802 conf->bypass_threshold = new; in raid5_store_preread_threshold()
6816 struct r5conf *conf; in raid5_show_skip_copy() local
6819 conf = mddev->private; in raid5_show_skip_copy()
6820 if (conf) in raid5_show_skip_copy()
6821 ret = sprintf(page, "%d\n", conf->skip_copy); in raid5_show_skip_copy()
6829 struct r5conf *conf; in raid5_store_skip_copy() local
6842 conf = mddev->private; in raid5_store_skip_copy()
6843 if (!conf) in raid5_store_skip_copy()
6845 else if (new != conf->skip_copy) { in raid5_store_skip_copy()
6849 conf->skip_copy = new; in raid5_store_skip_copy()
6868 struct r5conf *conf = mddev->private; in stripe_cache_active_show() local
6869 if (conf) in stripe_cache_active_show()
6870 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); in stripe_cache_active_show()
6881 struct r5conf *conf; in raid5_show_group_thread_cnt() local
6884 conf = mddev->private; in raid5_show_group_thread_cnt()
6885 if (conf) in raid5_show_group_thread_cnt()
6886 ret = sprintf(page, "%d\n", conf->worker_cnt_per_group); in raid5_show_group_thread_cnt()
6891 static int alloc_thread_groups(struct r5conf *conf, int cnt,
6897 struct r5conf *conf; in raid5_store_group_thread_cnt() local
6914 conf = mddev->private; in raid5_store_group_thread_cnt()
6915 if (!conf) in raid5_store_group_thread_cnt()
6917 else if (new != conf->worker_cnt_per_group) { in raid5_store_group_thread_cnt()
6920 old_groups = conf->worker_groups; in raid5_store_group_thread_cnt()
6924 err = alloc_thread_groups(conf, new, &group_cnt, &new_groups); in raid5_store_group_thread_cnt()
6926 spin_lock_irq(&conf->device_lock); in raid5_store_group_thread_cnt()
6927 conf->group_cnt = group_cnt; in raid5_store_group_thread_cnt()
6928 conf->worker_cnt_per_group = new; in raid5_store_group_thread_cnt()
6929 conf->worker_groups = new_groups; in raid5_store_group_thread_cnt()
6930 spin_unlock_irq(&conf->device_lock); in raid5_store_group_thread_cnt()
6965 static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt, in alloc_thread_groups() argument
6994 group->conf = conf; in alloc_thread_groups()
7010 static void free_thread_groups(struct r5conf *conf) in free_thread_groups() argument
7012 if (conf->worker_groups) in free_thread_groups()
7013 kfree(conf->worker_groups[0].workers); in free_thread_groups()
7014 kfree(conf->worker_groups); in free_thread_groups()
7015 conf->worker_groups = NULL; in free_thread_groups()
7021 struct r5conf *conf = mddev->private; in raid5_size() local
7027 raid_disks = min(conf->raid_disks, conf->previous_raid_disks); in raid5_size()
7029 sectors &= ~((sector_t)conf->chunk_sectors - 1); in raid5_size()
7030 sectors &= ~((sector_t)conf->prev_chunk_sectors - 1); in raid5_size()
7031 return sectors * (raid_disks - conf->max_degraded); in raid5_size()
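Editor's note: per the raid5_size entries above, the exported array size is the per-device sector count rounded down to a multiple of both the current and the previous chunk size, multiplied by the number of data disks (raid_disks - max_degraded). A small worked example with made-up values (illustrative only):

/* Worked example of the capacity arithmetic shown above. */
#include <stdio.h>

typedef unsigned long long sector_t;

int main(void)
{
	sector_t sectors = 1953525168ULL;     /* per-device size, 512B sectors */
	sector_t chunk_sectors = 1024;        /* 512 KiB chunks */
	sector_t prev_chunk_sectors = 1024;
	int raid_disks = 4, max_degraded = 1; /* RAID5: one disk's worth of parity */

	sectors &= ~(chunk_sectors - 1);      /* round down to a chunk multiple */
	sectors &= ~(prev_chunk_sectors - 1);
	printf("array size: %llu sectors\n",
	       sectors * (raid_disks - max_degraded));
	return 0;
}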
7034 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in free_scratch_buffer() argument
7042 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in alloc_scratch_buffer() argument
7044 if (conf->level == 6 && !percpu->spare_page) { in alloc_scratch_buffer()
7051 max(conf->raid_disks, in alloc_scratch_buffer()
7052 conf->previous_raid_disks), in alloc_scratch_buffer()
7053 max(conf->chunk_sectors, in alloc_scratch_buffer()
7054 conf->prev_chunk_sectors) in alloc_scratch_buffer()
7055 / RAID5_STRIPE_SECTORS(conf))) { in alloc_scratch_buffer()
7056 free_scratch_buffer(conf, percpu); in alloc_scratch_buffer()
7065 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); in raid456_cpu_dead() local
7067 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid456_cpu_dead()
7071 static void raid5_free_percpu(struct r5conf *conf) in raid5_free_percpu() argument
7073 if (!conf->percpu) in raid5_free_percpu()
7076 cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); in raid5_free_percpu()
7077 free_percpu(conf->percpu); in raid5_free_percpu()
7080 static void free_conf(struct r5conf *conf) in free_conf() argument
7084 log_exit(conf); in free_conf()
7086 unregister_shrinker(&conf->shrinker); in free_conf()
7087 free_thread_groups(conf); in free_conf()
7088 shrink_stripes(conf); in free_conf()
7089 raid5_free_percpu(conf); in free_conf()
7090 for (i = 0; i < conf->pool_size; i++) in free_conf()
7091 if (conf->disks[i].extra_page) in free_conf()
7092 put_page(conf->disks[i].extra_page); in free_conf()
7093 kfree(conf->disks); in free_conf()
7094 bioset_exit(&conf->bio_split); in free_conf()
7095 kfree(conf->stripe_hashtbl); in free_conf()
7096 kfree(conf->pending_data); in free_conf()
7097 kfree(conf); in free_conf()
7102 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node); in raid456_cpu_up_prepare() local
7103 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); in raid456_cpu_up_prepare()
7105 if (alloc_scratch_buffer(conf, percpu)) { in raid456_cpu_up_prepare()
7113 static int raid5_alloc_percpu(struct r5conf *conf) in raid5_alloc_percpu() argument
7117 conf->percpu = alloc_percpu(struct raid5_percpu); in raid5_alloc_percpu()
7118 if (!conf->percpu) in raid5_alloc_percpu()
7121 err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); in raid5_alloc_percpu()
7123 conf->scribble_disks = max(conf->raid_disks, in raid5_alloc_percpu()
7124 conf->previous_raid_disks); in raid5_alloc_percpu()
7125 conf->scribble_sectors = max(conf->chunk_sectors, in raid5_alloc_percpu()
7126 conf->prev_chunk_sectors); in raid5_alloc_percpu()
7134 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); in raid5_cache_scan() local
7137 if (mutex_trylock(&conf->cache_size_mutex)) { in raid5_cache_scan()
7140 conf->max_nr_stripes > conf->min_nr_stripes) { in raid5_cache_scan()
7141 if (drop_one_stripe(conf) == 0) { in raid5_cache_scan()
7147 mutex_unlock(&conf->cache_size_mutex); in raid5_cache_scan()
7155 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); in raid5_cache_count() local
7157 if (conf->max_nr_stripes < conf->min_nr_stripes) in raid5_cache_count()
7160 return conf->max_nr_stripes - conf->min_nr_stripes; in raid5_cache_count()
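Editor's note: the two shrinker callbacks listed above form a simple contract: the count side reports how far the cache sits above its floor (max_nr_stripes - min_nr_stripes, or nothing if it is already at or below it), and the scan side drops stripes one at a time, but only if it can take cache_size_mutex without sleeping. A hedged userspace sketch of that contract (illustrative names, not the kernel shrinker API):

/* Userspace sketch of the count/scan reclaim contract. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cache_size_mutex = PTHREAD_MUTEX_INITIALIZER;
static int max_nr_stripes = 300;
static int min_nr_stripes = 256;

static int drop_one_stripe(void) { max_nr_stripes--; return 1; }

static long cache_count(void)
{
	if (max_nr_stripes < min_nr_stripes)
		return 0;              /* nothing reclaimable */
	return max_nr_stripes - min_nr_stripes;
}

static long cache_scan(long nr_to_scan)
{
	long freed = 0;

	if (pthread_mutex_trylock(&cache_size_mutex) == 0) {
		while (nr_to_scan-- > 0 && max_nr_stripes > min_nr_stripes) {
			if (!drop_one_stripe())
				break;
			freed++;
		}
		pthread_mutex_unlock(&cache_size_mutex);
	}
	return freed;
}

int main(void)
{
	printf("reclaimable: %ld\n", cache_count());
	printf("freed: %ld\n", cache_scan(128));
	printf("reclaimable now: %ld\n", cache_count());
	return 0;
}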
7165 struct r5conf *conf; in setup_conf() local
7204 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); in setup_conf()
7205 if (conf == NULL) in setup_conf()
7209 conf->stripe_size = DEFAULT_STRIPE_SIZE; in setup_conf()
7210 conf->stripe_shift = ilog2(DEFAULT_STRIPE_SIZE) - 9; in setup_conf()
7211 conf->stripe_sectors = DEFAULT_STRIPE_SIZE >> 9; in setup_conf()
7213 INIT_LIST_HEAD(&conf->free_list); in setup_conf()
7214 INIT_LIST_HEAD(&conf->pending_list); in setup_conf()
7215 conf->pending_data = kcalloc(PENDING_IO_MAX, in setup_conf()
7218 if (!conf->pending_data) in setup_conf()
7221 list_add(&conf->pending_data[i].sibling, &conf->free_list); in setup_conf()
7223 if (!alloc_thread_groups(conf, 0, &group_cnt, &new_group)) { in setup_conf()
7224 conf->group_cnt = group_cnt; in setup_conf()
7225 conf->worker_cnt_per_group = 0; in setup_conf()
7226 conf->worker_groups = new_group; in setup_conf()
7229 spin_lock_init(&conf->device_lock); in setup_conf()
7230 seqcount_spinlock_init(&conf->gen_lock, &conf->device_lock); in setup_conf()
7231 mutex_init(&conf->cache_size_mutex); in setup_conf()
7232 init_waitqueue_head(&conf->wait_for_quiescent); in setup_conf()
7233 init_waitqueue_head(&conf->wait_for_stripe); in setup_conf()
7234 init_waitqueue_head(&conf->wait_for_overlap); in setup_conf()
7235 INIT_LIST_HEAD(&conf->handle_list); in setup_conf()
7236 INIT_LIST_HEAD(&conf->loprio_list); in setup_conf()
7237 INIT_LIST_HEAD(&conf->hold_list); in setup_conf()
7238 INIT_LIST_HEAD(&conf->delayed_list); in setup_conf()
7239 INIT_LIST_HEAD(&conf->bitmap_list); in setup_conf()
7240 init_llist_head(&conf->released_stripes); in setup_conf()
7241 atomic_set(&conf->active_stripes, 0); in setup_conf()
7242 atomic_set(&conf->preread_active_stripes, 0); in setup_conf()
7243 atomic_set(&conf->active_aligned_reads, 0); in setup_conf()
7244 spin_lock_init(&conf->pending_bios_lock); in setup_conf()
7245 conf->batch_bio_dispatch = true; in setup_conf()
7250 conf->batch_bio_dispatch = false; in setup_conf()
7255 conf->bypass_threshold = BYPASS_THRESHOLD; in setup_conf()
7256 conf->recovery_disabled = mddev->recovery_disabled - 1; in setup_conf()
7258 conf->raid_disks = mddev->raid_disks; in setup_conf()
7260 conf->previous_raid_disks = mddev->raid_disks; in setup_conf()
7262 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; in setup_conf()
7263 max_disks = max(conf->raid_disks, conf->previous_raid_disks); in setup_conf()
7265 conf->disks = kcalloc(max_disks, sizeof(struct disk_info), in setup_conf()
7268 if (!conf->disks) in setup_conf()
7272 conf->disks[i].extra_page = alloc_page(GFP_KERNEL); in setup_conf()
7273 if (!conf->disks[i].extra_page) in setup_conf()
7277 ret = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); in setup_conf()
7280 conf->mddev = mddev; in setup_conf()
7282 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) in setup_conf()
7290 spin_lock_init(conf->hash_locks); in setup_conf()
7292 spin_lock_init(conf->hash_locks + i); in setup_conf()
7295 INIT_LIST_HEAD(conf->inactive_list + i); in setup_conf()
7298 INIT_LIST_HEAD(conf->temp_inactive_list + i); in setup_conf()
7300 atomic_set(&conf->r5c_cached_full_stripes, 0); in setup_conf()
7301 INIT_LIST_HEAD(&conf->r5c_full_stripe_list); in setup_conf()
7302 atomic_set(&conf->r5c_cached_partial_stripes, 0); in setup_conf()
7303 INIT_LIST_HEAD(&conf->r5c_partial_stripe_list); in setup_conf()
7304 atomic_set(&conf->r5c_flushing_full_stripes, 0); in setup_conf()
7305 atomic_set(&conf->r5c_flushing_partial_stripes, 0); in setup_conf()
7307 conf->level = mddev->new_level; in setup_conf()
7308 conf->chunk_sectors = mddev->new_chunk_sectors; in setup_conf()
7309 if (raid5_alloc_percpu(conf) != 0) in setup_conf()
7319 disk = conf->disks + raid_disk; in setup_conf()
7337 conf->fullsync = 1; in setup_conf()
7340 conf->level = mddev->new_level; in setup_conf()
7341 if (conf->level == 6) { in setup_conf()
7342 conf->max_degraded = 2; in setup_conf()
7344 conf->rmw_level = PARITY_ENABLE_RMW; in setup_conf()
7346 conf->rmw_level = PARITY_DISABLE_RMW; in setup_conf()
7348 conf->max_degraded = 1; in setup_conf()
7349 conf->rmw_level = PARITY_ENABLE_RMW; in setup_conf()
7351 conf->algorithm = mddev->new_layout; in setup_conf()
7352 conf->reshape_progress = mddev->reshape_position; in setup_conf()
7353 if (conf->reshape_progress != MaxSector) { in setup_conf()
7354 conf->prev_chunk_sectors = mddev->chunk_sectors; in setup_conf()
7355 conf->prev_algo = mddev->layout; in setup_conf()
7357 conf->prev_chunk_sectors = conf->chunk_sectors; in setup_conf()
7358 conf->prev_algo = conf->algorithm; in setup_conf()
7361 conf->min_nr_stripes = NR_STRIPES; in setup_conf()
7364 ((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4, in setup_conf()
7365 ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4); in setup_conf()
7366 conf->min_nr_stripes = max(NR_STRIPES, stripes); in setup_conf()
7367 if (conf->min_nr_stripes != NR_STRIPES) in setup_conf()
7369 mdname(mddev), conf->min_nr_stripes); in setup_conf()
7371 memory = conf->min_nr_stripes * (sizeof(struct stripe_head) + in setup_conf()
7373 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); in setup_conf()
7374 if (grow_stripes(conf, conf->min_nr_stripes)) { in setup_conf()
7385 conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4; in setup_conf()
7386 conf->shrinker.scan_objects = raid5_cache_scan; in setup_conf()
7387 conf->shrinker.count_objects = raid5_cache_count; in setup_conf()
7388 conf->shrinker.batch = 128; in setup_conf()
7389 conf->shrinker.flags = 0; in setup_conf()
7390 if (register_shrinker(&conf->shrinker)) { in setup_conf()
7397 conf->thread = md_register_thread(raid5d, mddev, pers_name); in setup_conf()
7398 if (!conf->thread) { in setup_conf()
7404 return conf; in setup_conf()
7407 if (conf) { in setup_conf()
7408 free_conf(conf); in setup_conf()
7440 static void raid5_set_io_opt(struct r5conf *conf) in raid5_set_io_opt() argument
7442 blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) * in raid5_set_io_opt()
7443 (conf->raid_disks - conf->max_degraded)); in raid5_set_io_opt()
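Editor's note: raid5_set_io_opt, shown above, advertises the optimal I/O size as one full stripe of data: the chunk size in bytes times the number of data disks. A tiny arithmetic example with made-up values:

/* The optimal-I/O-size arithmetic used above, with illustrative numbers. */
#include <stdio.h>

int main(void)
{
	unsigned int chunk_sectors = 1024;       /* 512 KiB chunk */
	int raid_disks = 6, max_degraded = 2;    /* a RAID6-style example */
	unsigned int io_opt = (chunk_sectors << 9) * (raid_disks - max_degraded);

	printf("io_opt = %u bytes\n", io_opt);   /* 4 data disks * 512 KiB = 2 MiB */
	return 0;
}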
7448 struct r5conf *conf; in raid5_run() local
7589 conf = setup_conf(mddev); in raid5_run()
7591 conf = mddev->private; in raid5_run()
7593 if (IS_ERR(conf)) in raid5_run()
7594 return PTR_ERR(conf); in raid5_run()
7606 conf->min_offset_diff = min_offset_diff; in raid5_run()
7607 mddev->thread = conf->thread; in raid5_run()
7608 conf->thread = NULL; in raid5_run()
7609 mddev->private = conf; in raid5_run()
7611 for (i = 0; i < conf->raid_disks && conf->previous_raid_disks; in raid5_run()
7613 rdev = conf->disks[i].rdev; in raid5_run()
7614 if (!rdev && conf->disks[i].replacement) { in raid5_run()
7616 rdev = conf->disks[i].replacement; in raid5_run()
7617 conf->disks[i].replacement = NULL; in raid5_run()
7619 conf->disks[i].rdev = rdev; in raid5_run()
7623 if (conf->disks[i].replacement && in raid5_run()
7624 conf->reshape_progress != MaxSector) { in raid5_run()
7649 conf->algorithm, in raid5_run()
7650 conf->raid_disks, in raid5_run()
7651 conf->max_degraded)) in raid5_run()
7655 conf->prev_algo, in raid5_run()
7656 conf->previous_raid_disks, in raid5_run()
7657 conf->max_degraded)) in raid5_run()
7665 mddev->degraded = raid5_calc_degraded(conf); in raid5_run()
7667 if (has_failed(conf)) { in raid5_run()
7669 mdname(mddev), mddev->degraded, conf->raid_disks); in raid5_run()
7693 mdname(mddev), conf->level, in raid5_run()
7697 print_raid5_conf(conf); in raid5_run()
7699 if (conf->reshape_progress != MaxSector) { in raid5_run()
7700 conf->reshape_safe = conf->reshape_progress; in raid5_run()
7701 atomic_set(&conf->reshape_stripes, 0); in raid5_run()
7727 int data_disks = conf->previous_raid_disks - conf->max_degraded; in raid5_run()
7733 raid5_set_io_opt(conf); in raid5_run()
7784 if (log_init(conf, journal_dev, raid5_has_ppl(conf))) in raid5_run()
7790 print_raid5_conf(conf); in raid5_run()
7791 free_conf(conf); in raid5_run()
7799 struct r5conf *conf = priv; in raid5_free() local
7801 free_conf(conf); in raid5_free()
7807 struct r5conf *conf = mddev->private; in raid5_status() local
7811 conf->chunk_sectors / 2, mddev->layout); in raid5_status()
7812 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); in raid5_status()
7814 for (i = 0; i < conf->raid_disks; i++) { in raid5_status()
7815 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); in raid5_status()
7822 static void print_raid5_conf (struct r5conf *conf) in print_raid5_conf() argument
7827 pr_debug("RAID conf printout:\n"); in print_raid5_conf()
7828 if (!conf) { in print_raid5_conf()
7829 pr_debug("(conf==NULL)\n"); in print_raid5_conf()
7832 pr_debug(" --- level:%d rd:%d wd:%d\n", conf->level, in print_raid5_conf()
7833 conf->raid_disks, in print_raid5_conf()
7834 conf->raid_disks - conf->mddev->degraded); in print_raid5_conf()
7836 for (i = 0; i < conf->raid_disks; i++) { in print_raid5_conf()
7838 tmp = conf->disks + i; in print_raid5_conf()
7849 struct r5conf *conf = mddev->private; in raid5_spare_active() local
7854 for (i = 0; i < conf->raid_disks; i++) { in raid5_spare_active()
7855 tmp = conf->disks + i; in raid5_spare_active()
7882 spin_lock_irqsave(&conf->device_lock, flags); in raid5_spare_active()
7883 mddev->degraded = raid5_calc_degraded(conf); in raid5_spare_active()
7884 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_spare_active()
7885 print_raid5_conf(conf); in raid5_spare_active()
7891 struct r5conf *conf = mddev->private; in raid5_remove_disk() local
7895 struct disk_info *p = conf->disks + number; in raid5_remove_disk()
7897 print_raid5_conf(conf); in raid5_remove_disk()
7898 if (test_bit(Journal, &rdev->flags) && conf->log) { in raid5_remove_disk()
7905 if (atomic_read(&conf->active_stripes) || in raid5_remove_disk()
7906 atomic_read(&conf->r5c_cached_full_stripes) || in raid5_remove_disk()
7907 atomic_read(&conf->r5c_cached_partial_stripes)) { in raid5_remove_disk()
7910 log_exit(conf); in raid5_remove_disk()
7920 if (number >= conf->raid_disks && in raid5_remove_disk()
7921 conf->reshape_progress == MaxSector) in raid5_remove_disk()
7933 mddev->recovery_disabled != conf->recovery_disabled && in raid5_remove_disk()
7934 !has_failed(conf) && in raid5_remove_disk()
7936 number < conf->raid_disks) { in raid5_remove_disk()
7950 err = log_modify(conf, rdev, false); in raid5_remove_disk()
7964 err = log_modify(conf, p->rdev, true); in raid5_remove_disk()
7970 print_raid5_conf(conf); in raid5_remove_disk()
7976 struct r5conf *conf = mddev->private; in raid5_add_disk() local
7981 int last = conf->raid_disks - 1; in raid5_add_disk()
7984 if (conf->log) in raid5_add_disk()
7992 ret = log_init(conf, rdev, false); in raid5_add_disk()
7996 ret = r5l_start(conf->log); in raid5_add_disk()
8002 if (mddev->recovery_disabled == conf->recovery_disabled) in raid5_add_disk()
8005 if (rdev->saved_raid_disk < 0 && has_failed(conf)) in raid5_add_disk()
8019 conf->disks[rdev->saved_raid_disk].rdev == NULL) in raid5_add_disk()
8023 p = conf->disks + disk; in raid5_add_disk()
8028 conf->fullsync = 1; in raid5_add_disk()
8031 err = log_modify(conf, rdev, true); in raid5_add_disk()
8037 p = conf->disks + disk; in raid5_add_disk()
8044 conf->fullsync = 1; in raid5_add_disk()
8050 print_raid5_conf(conf); in raid5_add_disk()
8064 struct r5conf *conf = mddev->private; in raid5_resize() local
8066 if (raid5_has_log(conf) || raid5_has_ppl(conf)) in raid5_resize()
8068 sectors &= ~((sector_t)conf->chunk_sectors - 1); in raid5_resize()
8099 struct r5conf *conf = mddev->private; in check_stripe_cache() local
8100 if (((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4 in check_stripe_cache()
8101 > conf->min_nr_stripes || in check_stripe_cache()
8102 ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4 in check_stripe_cache()
8103 > conf->min_nr_stripes) { in check_stripe_cache()
8107 / RAID5_STRIPE_SIZE(conf))*4); in check_stripe_cache()
8115 struct r5conf *conf = mddev->private; in check_reshape() local
8117 if (raid5_has_log(conf) || raid5_has_ppl(conf)) in check_reshape()
8123 if (has_failed(conf)) in check_reshape()
8143 if (resize_chunks(conf, in check_reshape()
8144 conf->previous_raid_disks in check_reshape()
8151 if (conf->previous_raid_disks + mddev->delta_disks <= conf->pool_size) in check_reshape()
8153 return resize_stripes(conf, (conf->previous_raid_disks in check_reshape()
8159 struct r5conf *conf = mddev->private; in raid5_start_reshape() local
8170 if (has_failed(conf)) in raid5_start_reshape()
8179 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) in raid5_start_reshape()
8189 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) in raid5_start_reshape()
8196 atomic_set(&conf->reshape_stripes, 0); in raid5_start_reshape()
8197 spin_lock_irq(&conf->device_lock); in raid5_start_reshape()
8198 write_seqcount_begin(&conf->gen_lock); in raid5_start_reshape()
8199 conf->previous_raid_disks = conf->raid_disks; in raid5_start_reshape()
8200 conf->raid_disks += mddev->delta_disks; in raid5_start_reshape()
8201 conf->prev_chunk_sectors = conf->chunk_sectors; in raid5_start_reshape()
8202 conf->chunk_sectors = mddev->new_chunk_sectors; in raid5_start_reshape()
8203 conf->prev_algo = conf->algorithm; in raid5_start_reshape()
8204 conf->algorithm = mddev->new_layout; in raid5_start_reshape()
8205 conf->generation++; in raid5_start_reshape()
8211 conf->reshape_progress = raid5_size(mddev, 0, 0); in raid5_start_reshape()
8213 conf->reshape_progress = 0; in raid5_start_reshape()
8214 conf->reshape_safe = conf->reshape_progress; in raid5_start_reshape()
8215 write_seqcount_end(&conf->gen_lock); in raid5_start_reshape()
8216 spin_unlock_irq(&conf->device_lock); in raid5_start_reshape()
8238 >= conf->previous_raid_disks) in raid5_start_reshape()
8246 } else if (rdev->raid_disk >= conf->previous_raid_disks in raid5_start_reshape()
8256 spin_lock_irqsave(&conf->device_lock, flags); in raid5_start_reshape()
8257 mddev->degraded = raid5_calc_degraded(conf); in raid5_start_reshape()
8258 spin_unlock_irqrestore(&conf->device_lock, flags); in raid5_start_reshape()
8260 mddev->raid_disks = conf->raid_disks; in raid5_start_reshape()
8261 mddev->reshape_position = conf->reshape_progress; in raid5_start_reshape()
8273 spin_lock_irq(&conf->device_lock); in raid5_start_reshape()
8274 write_seqcount_begin(&conf->gen_lock); in raid5_start_reshape()
8275 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; in raid5_start_reshape()
8277 conf->chunk_sectors = conf->prev_chunk_sectors; in raid5_start_reshape()
8278 mddev->new_layout = conf->algorithm = conf->prev_algo; in raid5_start_reshape()
8282 conf->generation --; in raid5_start_reshape()
8283 conf->reshape_progress = MaxSector; in raid5_start_reshape()
8285 write_seqcount_end(&conf->gen_lock); in raid5_start_reshape()
8286 spin_unlock_irq(&conf->device_lock); in raid5_start_reshape()
8289 conf->reshape_checkpoint = jiffies; in raid5_start_reshape()
8296 * changes needed in 'conf'
8298 static void end_reshape(struct r5conf *conf) in end_reshape() argument
8301 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { in end_reshape()
8304 spin_lock_irq(&conf->device_lock); in end_reshape()
8305 conf->previous_raid_disks = conf->raid_disks; in end_reshape()
8306 md_finish_reshape(conf->mddev); in end_reshape()
8308 conf->reshape_progress = MaxSector; in end_reshape()
8309 conf->mddev->reshape_position = MaxSector; in end_reshape()
8310 rdev_for_each(rdev, conf->mddev) in end_reshape()
8315 spin_unlock_irq(&conf->device_lock); in end_reshape()
8316 wake_up(&conf->wait_for_overlap); in end_reshape()
8318 if (conf->mddev->queue) in end_reshape()
8319 raid5_set_io_opt(conf); in end_reshape()
8328 struct r5conf *conf = mddev->private; in raid5_finish_reshape() local
8334 spin_lock_irq(&conf->device_lock); in raid5_finish_reshape()
8335 mddev->degraded = raid5_calc_degraded(conf); in raid5_finish_reshape()
8336 spin_unlock_irq(&conf->device_lock); in raid5_finish_reshape()
8337 for (d = conf->raid_disks ; in raid5_finish_reshape()
8338 d < conf->raid_disks - mddev->delta_disks; in raid5_finish_reshape()
8340 struct md_rdev *rdev = conf->disks[d].rdev; in raid5_finish_reshape()
8343 rdev = conf->disks[d].replacement; in raid5_finish_reshape()
8348 mddev->layout = conf->algorithm; in raid5_finish_reshape()
8349 mddev->chunk_sectors = conf->chunk_sectors; in raid5_finish_reshape()
8358 struct r5conf *conf = mddev->private; in raid5_quiesce() local
8362 lock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8366 r5c_flush_cache(conf, INT_MAX); in raid5_quiesce()
8367 conf->quiesce = 2; in raid5_quiesce()
8368 wait_event_cmd(conf->wait_for_quiescent, in raid5_quiesce()
8369 atomic_read(&conf->active_stripes) == 0 && in raid5_quiesce()
8370 atomic_read(&conf->active_aligned_reads) == 0, in raid5_quiesce()
8371 unlock_all_device_hash_locks_irq(conf), in raid5_quiesce()
8372 lock_all_device_hash_locks_irq(conf)); in raid5_quiesce()
8373 conf->quiesce = 1; in raid5_quiesce()
8374 unlock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8376 wake_up(&conf->wait_for_overlap); in raid5_quiesce()
8379 lock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8380 conf->quiesce = 0; in raid5_quiesce()
8381 wake_up(&conf->wait_for_quiescent); in raid5_quiesce()
8382 wake_up(&conf->wait_for_overlap); in raid5_quiesce()
8383 unlock_all_device_hash_locks_irq(conf); in raid5_quiesce()
8385 log_quiesce(conf, quiesce); in raid5_quiesce()
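Editor's note: the raid5_quiesce entries above show the core of quiescing: set conf->quiesce, then sleep (releasing and retaking all the device hash locks around each wait) until both active_stripes and active_aligned_reads reach zero; un-quiescing clears the flag and wakes the waiters. The condition-variable analogue below captures only the "wait until no activity" step, in userspace with illustrative names.

/* Userspace analogue of the quiesce wait; not kernel code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  quiescent = PTHREAD_COND_INITIALIZER;
static int active_stripes = 2;
static int active_aligned_reads = 0;
static int quiesce;

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (active_stripes > 0) {
		active_stripes--;       /* pretend to retire outstanding work */
		pthread_cond_broadcast(&quiescent);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void do_quiesce(void)
{
	pthread_mutex_lock(&lock);
	quiesce = 1;                    /* block new activity from starting */
	while (active_stripes != 0 || active_aligned_reads != 0)
		pthread_cond_wait(&quiescent, &lock);  /* drops lock while asleep */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	do_quiesce();
	printf("quiesced (quiesce=%d)\n", quiesce);
	pthread_join(t, NULL);
	return 0;
}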
8486 struct r5conf *conf = mddev->private; in raid5_check_reshape() local
8506 conf->algorithm = mddev->new_layout; in raid5_check_reshape()
8510 conf->chunk_sectors = new_chunk ; in raid5_check_reshape()
8629 struct r5conf *conf; in raid5_change_consistency_policy() local
8635 conf = mddev->private; in raid5_change_consistency_policy()
8636 if (!conf) { in raid5_change_consistency_policy()
8643 if (!raid5_has_ppl(conf) && conf->level == 5) { in raid5_change_consistency_policy()
8644 err = log_init(conf, NULL, true); in raid5_change_consistency_policy()
8646 err = resize_stripes(conf, conf->pool_size); in raid5_change_consistency_policy()
8648 log_exit(conf); in raid5_change_consistency_policy()
8653 if (raid5_has_ppl(conf)) { in raid5_change_consistency_policy()
8655 log_exit(conf); in raid5_change_consistency_policy()
8657 err = resize_stripes(conf, conf->pool_size); in raid5_change_consistency_policy()
8658 } else if (test_bit(MD_HAS_JOURNAL, &conf->mddev->flags) && in raid5_change_consistency_policy()
8659 r5l_log_disk_error(conf)) { in raid5_change_consistency_policy()
8691 struct r5conf *conf = mddev->private; in raid5_start() local
8693 return r5l_start(conf->log); in raid5_start()