// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * raid5.c : Multiple Devices driver for Linux
 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 * Copyright (C) 1999, 2000 Ingo Molnar
 * Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->seq_write is the number of the last batch successfully written.
 * conf->seq_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is seq_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment seq_flush, thus closing the current
 *   batch.
 * When we notice that seq_flush > seq_write, we write out all pending updates
 * to the bitmap, and advance seq_write to where seq_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */
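
/*
 * A small worked example of the batching scheme above (illustrative values
 * only, not taken from a real run): suppose seq_write == 5 and seq_flush == 5.
 * A new write arrives, so add_stripe_bio() sets the in-memory bitmap bits and
 * records sh->bm_seq = 6 (seq_flush+1).  The write is held back because batch
 * 6 has not been written yet.  An unplug then bumps seq_flush to 6; since
 * seq_flush (6) > seq_write (5), the pending bitmap updates are written out
 * and seq_write advances to 6, after which the queued stripe may proceed.
 */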

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/nodemask.h>

#include <trace/events/block.h>
#include <linux/list_sort.h>

#include "md.h"
#include "raid5.h"
#include "raid0.h"
#include "md-bitmap.h"
#include "raid5-log.h"

#define UNSUPPORTED_MDDEV_FLAGS	(1L << MD_FAILFAST_SUPPORTED)

#define cpu_to_group(cpu) cpu_to_node(cpu)
#define ANY_GROUP NUMA_NO_NODE

static bool devices_handle_discard_safely = false;
module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
static struct workqueue_struct *raid5_wq;

static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
	int hash = (sect >> RAID5_STRIPE_SHIFT(conf)) & HASH_MASK;
	return &conf->stripe_hashtbl[hash];
}

static inline int stripe_hash_locks_hash(struct r5conf *conf, sector_t sect)
{
	return (sect >> RAID5_STRIPE_SHIFT(conf)) & STRIPE_HASH_LOCKS_MASK;
}

static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_lock_irq(conf->hash_locks + hash);
	spin_lock(&conf->device_lock);
}

static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
{
	spin_unlock(&conf->device_lock);
	spin_unlock_irq(conf->hash_locks + hash);
}

static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;
	spin_lock_irq(conf->hash_locks);
	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
		spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
	spin_lock(&conf->device_lock);
}

static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
{
	int i;
	spin_unlock(&conf->device_lock);
	for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--)
		spin_unlock(conf->hash_locks + i);
	spin_unlock_irq(conf->hash_locks);
}

/* Find the first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always starts from the first device */
		return 0;
	/* md starts just after the Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
 */
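/*
 * Illustrative example (a hypothetical 6-device RAID-6 with the md layout,
 * pd_idx == 4, qd_idx == 5, so raid6_d0() == 0): walking idx 0,1,2,3,4,5
 * through raid6_idx_to_slot() yields slots 0,1,2,3 for the data disks,
 * slot 4 (syndrome_disks) for P and slot 5 (syndrome_disks + 1) for Q.
 */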
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}

static void print_raid5_conf (struct r5conf *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}

static bool stripe_is_lowprio(struct stripe_head *sh)
{
	return (test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) ||
		test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) &&
	       !test_bit(STRIPE_R5C_CACHING, &sh->state);
}

static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5worker_group *group;
	int thread_cnt;
	int i, cpu = sh->cpu;

	if (!cpu_online(cpu)) {
		cpu = cpumask_any(cpu_online_mask);
		sh->cpu = cpu;
	}

	if (list_empty(&sh->lru)) {
		struct r5worker_group *group;
		group = conf->worker_groups + cpu_to_group(cpu);
		if (stripe_is_lowprio(sh))
			list_add_tail(&sh->lru, &group->loprio_list);
		else
			list_add_tail(&sh->lru, &group->handle_list);
		group->stripes_cnt++;
		sh->group = group;
	}

	if (conf->worker_cnt_per_group == 0) {
		md_wakeup_thread(conf->mddev->thread);
		return;
	}

	group = conf->worker_groups + cpu_to_group(sh->cpu);

	group->workers[0].working = true;
	/* at least one worker should run to avoid race */
	queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);

	thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1;
	/* wake up more workers */
	for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
		if (group->workers[i].working == false) {
			group->workers[i].working = true;
			queue_work_on(sh->cpu, raid5_wq,
				      &group->workers[i].work);
			thread_cnt--;
		}
	}
}

static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
			      struct list_head *temp_inactive_list)
{
	int i;
	int injournal = 0;	/* number of data pages with R5_InJournal */

	BUG_ON(!list_empty(&sh->lru));
	BUG_ON(atomic_read(&conf->active_stripes) == 0);

	if (r5c_is_writeback(conf->log))
		for (i = sh->disks; i--; )
			if (test_bit(R5_InJournal, &sh->dev[i].flags))
				injournal++;
	/*
	 * In the following cases, the stripe cannot be released to cached
	 * lists.  Therefore, we make the stripe write out and set
	 * STRIPE_HANDLE:
	 *   1. when quiesce in r5c write back;
	 *   2. when resync is requested for the stripe.
	 */
	if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) ||
	    (conf->quiesce && r5c_is_writeback(conf->log) &&
	     !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0)) {
		if (test_bit(STRIPE_R5C_CACHING, &sh->state))
			r5c_make_stripe_write_out(sh);
		set_bit(STRIPE_HANDLE, &sh->state);
	}

	if (test_bit(STRIPE_HANDLE, &sh->state)) {
		if (test_bit(STRIPE_DELAYED, &sh->state) &&
		    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			list_add_tail(&sh->lru, &conf->delayed_list);
		else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
			 sh->bm_seq - conf->seq_write > 0)
			list_add_tail(&sh->lru, &conf->bitmap_list);
		else {
			clear_bit(STRIPE_DELAYED, &sh->state);
			clear_bit(STRIPE_BIT_DELAY, &sh->state);
			if (conf->worker_cnt_per_group == 0) {
				if (stripe_is_lowprio(sh))
					list_add_tail(&sh->lru,
						      &conf->loprio_list);
				else
					list_add_tail(&sh->lru,
						      &conf->handle_list);
			} else {
				raid5_wakeup_stripe_thread(sh);
				return;
			}
		}
		md_wakeup_thread(conf->mddev->thread);
	} else {
		BUG_ON(stripe_operations_active(sh));
		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			if (atomic_dec_return(&conf->preread_active_stripes)
			    < IO_THRESHOLD)
				md_wakeup_thread(conf->mddev->thread);
		atomic_dec(&conf->active_stripes);
		if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
			if (!r5c_is_writeback(conf->log))
				list_add_tail(&sh->lru, temp_inactive_list);
			else {
				WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags));
				if (injournal == 0)
					list_add_tail(&sh->lru, temp_inactive_list);
				else if (injournal == conf->raid_disks - conf->max_degraded) {
					/* full stripe */
					if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state))
						atomic_inc(&conf->r5c_cached_full_stripes);
					if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
						atomic_dec(&conf->r5c_cached_partial_stripes);
					list_add_tail(&sh->lru, &conf->r5c_full_stripe_list);
					r5c_check_cached_full_stripe(conf);
				} else
					/*
					 * STRIPE_R5C_PARTIAL_STRIPE is set in
					 * r5c_try_caching_write(). No need to
					 * set it again.
					 */
					list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list);
			}
		}
	}
}

static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
			     struct list_head *temp_inactive_list)
{
	if (atomic_dec_and_test(&sh->count))
		do_release_stripe(conf, sh, temp_inactive_list);
}

/*
 * @hash could be NR_STRIPE_HASH_LOCKS, in which case temp_inactive_list
 * holds one list per hash lock.
 *
 * Be careful: only one task can add/delete stripes from temp_inactive_list at
 * a given time.  Adding stripes only takes the device lock, while deleting
 * stripes only takes the hash lock.
 */
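/*
 * Illustrative sketch of the calling convention described above (assumed
 * caller shapes, not code from this file): a caller that batched stripes per
 * hash would declare "struct list_head temp[NR_STRIPE_HASH_LOCKS]" and pass
 * hash == NR_STRIPE_HASH_LOCKS so every per-hash list gets drained, while a
 * caller holding a single list for one hash passes that hash value and only
 * temp_inactive_list[0] is touched.
 */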
static void release_inactive_stripe_list(struct r5conf *conf,
					 struct list_head *temp_inactive_list,
					 int hash)
{
	int size;
	bool do_wakeup = false;
	unsigned long flags;

	if (hash == NR_STRIPE_HASH_LOCKS) {
		size = NR_STRIPE_HASH_LOCKS;
		hash = NR_STRIPE_HASH_LOCKS - 1;
	} else
		size = 1;
	while (size) {
		struct list_head *list = &temp_inactive_list[size - 1];

		/*
		 * We don't hold any lock here yet, raid5_get_active_stripe() might
		 * remove stripes from the list
		 */
		if (!list_empty_careful(list)) {
			spin_lock_irqsave(conf->hash_locks + hash, flags);
			if (list_empty(conf->inactive_list + hash) &&
			    !list_empty(list))
				atomic_dec(&conf->empty_inactive_list_nr);
			list_splice_tail_init(list, conf->inactive_list + hash);
			do_wakeup = true;
			spin_unlock_irqrestore(conf->hash_locks + hash, flags);
		}
		size--;
		hash--;
	}

	if (do_wakeup) {
		wake_up(&conf->wait_for_stripe);
		if (atomic_read(&conf->active_stripes) == 0)
			wake_up(&conf->wait_for_quiescent);
		if (conf->retry_read_aligned)
			md_wakeup_thread(conf->mddev->thread);
	}
}

/* should hold conf->device_lock already */
static int release_stripe_list(struct r5conf *conf,
			       struct list_head *temp_inactive_list)
{
	struct stripe_head *sh, *t;
	int count = 0;
	struct llist_node *head;

	head = llist_del_all(&conf->released_stripes);
	head = llist_reverse_order(head);
	llist_for_each_entry_safe(sh, t, head, release_list) {
		int hash;

		/* sh could be readded after STRIPE_ON_RELEASE_LIST is cleared */
		smp_mb();
		clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
		/*
		 * Don't worry if the bit is set here, because if it is set
		 * again, the count is always > 1.  This is true for the
		 * STRIPE_ON_UNPLUG_LIST bit too.
		 */
		hash = sh->hash_lock_index;
		__release_stripe(conf, sh, &temp_inactive_list[hash]);
		count++;
	}

	return count;
}

void raid5_release_stripe(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	unsigned long flags;
	struct list_head list;
	int hash;
	bool wakeup;

	/* Avoid release_list until the last reference.
	 */
	if (atomic_add_unless(&sh->count, -1, 1))
		return;

	if (unlikely(!conf->mddev->thread) ||
	    test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
		goto slow_path;
	wakeup = llist_add(&sh->release_list, &conf->released_stripes);
	if (wakeup)
		md_wakeup_thread(conf->mddev->thread);
	return;
slow_path:
	/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
	if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) {
		INIT_LIST_HEAD(&list);
		hash = sh->hash_lock_index;
		do_release_stripe(conf, sh, &list);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		release_inactive_stripe_list(conf, &list, hash);
	}
}

static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_add_head(&sh->hash, hp);
}

/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	if (list_empty(conf->inactive_list + hash))
		goto out;
	first = (conf->inactive_list + hash)->next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
	BUG_ON(hash != sh->hash_lock_index);
	if (list_empty(conf->inactive_list + hash))
		atomic_inc(&conf->empty_inactive_list_nr);
out:
	return sh;
}

#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
static void free_stripe_pages(struct stripe_head *sh)
{
	int i;
	struct page *p;

	/* The page pool has not been allocated yet */
	if (!sh->pages)
		return;

	for (i = 0; i < sh->nr_pages; i++) {
		p = sh->pages[i];
		if (p)
			put_page(p);
		sh->pages[i] = NULL;
	}
}

static int alloc_stripe_pages(struct stripe_head *sh, gfp_t gfp)
{
	int i;
	struct page *p;

	for (i = 0; i < sh->nr_pages; i++) {
		/* The page has already been allocated. */
		if (sh->pages[i])
			continue;

		p = alloc_page(gfp);
		if (!p) {
			free_stripe_pages(sh);
			return -ENOMEM;
		}
		sh->pages[i] = p;
	}
	return 0;
}

static int
init_stripe_shared_pages(struct stripe_head *sh, struct r5conf *conf, int disks)
{
	int nr_pages, cnt;

	if (sh->pages)
		return 0;

	/* Each of the sh->dev[i] needs one conf->stripe_size */
	cnt = PAGE_SIZE / conf->stripe_size;
	nr_pages = (disks + cnt - 1) / cnt;

	sh->pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!sh->pages)
		return -ENOMEM;
	sh->nr_pages = nr_pages;
	sh->stripes_per_page = cnt;
	return 0;
}
#endif

static void shrink_buffers(struct stripe_head *sh)
{
	int i;
	int num = sh->raid_conf->pool_size;

#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
	for (i = 0; i < num ; i++) {
		struct page *p;

		WARN_ON(sh->dev[i].page != sh->dev[i].orig_page);
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
#else
	for (i = 0; i < num; i++)
		sh->dev[i].page = NULL;
	free_stripe_pages(sh); /* Free pages */
#endif
}

static int grow_buffers(struct stripe_head *sh, gfp_t gfp)
{
	int i;
	int num = sh->raid_conf->pool_size;

#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
	for (i = 0; i < num; i++) {
		struct page *page;

		if (!(page = alloc_page(gfp))) {
			return 1;
		}
		sh->dev[i].page = page;
		sh->dev[i].orig_page = page;
		sh->dev[i].offset = 0;
	}
#else
	if (alloc_stripe_pages(sh, gfp))
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		sh->dev[i].page = raid5_get_dev_page(sh, i);
		sh->dev[i].orig_page = sh->dev[i].page;
		sh->dev[i].offset = raid5_get_page_offset(sh, i);
	}
#endif
	return 0;
}

static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
			   struct stripe_head *sh);

static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	struct r5conf *conf = sh->raid_conf;
	int i, seq;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));
	BUG_ON(sh->batch_head);

	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sector);
retry:
	seq = read_seqcount_begin(&conf->gen_lock);
	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			pr_err("sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			WARN_ON(1);
		}
		dev->flags = 0;
		dev->sector = raid5_compute_blocknr(sh, i, previous);
	}
	if (read_seqcount_retry(&conf->gen_lock, seq))
		goto retry;
	sh->overwrite_disks = 0;
	insert_hash(conf, sh);
	sh->cpu = smp_processor_id();
	set_bit(STRIPE_BATCH_READY, &sh->state);
}

static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;

	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}

/*
 * Need to check if the array has failed when deciding whether to:
 *  - start an array
 *  - remove non-faulty devices
 *  - add a spare
 *  - allow a reshape
 * This determination is simple when no reshape is happening.
 * However if there is a reshape, we need to carefully check
 * both the before and after sections.
 * This is because some failed devices may only affect one
 * of the two sections, and some non-in_sync devices may
 * be in_sync in the section most affected by failed devices.
 */
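/*
 * A hypothetical example of the two-section check below: a 4-disk RAID-5
 * being reshaped to 5 disks, with one device missing.  The pass over
 * previous_raid_disks may count the missing device as degraded while the
 * pass over raid_disks does not (or vice versa), so raid5_calc_degraded()
 * returns the larger of the two counts to stay on the safe side.
 */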
int raid5_calc_degraded(struct r5conf *conf)
{
	int degraded, degraded2;
	int i;

	rcu_read_lock();
	degraded = 0;
	for (i = 0; i < conf->previous_raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = rcu_dereference(conf->disks[i].replacement);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If the reshape increases the number of devices,
			 * this is being recovered by the reshape, so
			 * this 'previous' section is not in_sync.
			 * If the number of devices is being reduced however,
			 * the device can only be part of the array if
			 * we are reverting a reshape, so this section will
			 * be in-sync.
			 */
			if (conf->raid_disks >= conf->previous_raid_disks)
				degraded++;
	}
	rcu_read_unlock();
	if (conf->raid_disks == conf->previous_raid_disks)
		return degraded;
	rcu_read_lock();
	degraded2 = 0;
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = rcu_dereference(conf->disks[i].replacement);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded2++;
		else if (test_bit(In_sync, &rdev->flags))
			;
		else
			/* not in-sync or faulty.
			 * If reshape increases the number of devices, this
			 * section has already been recovered, else it
			 * almost certainly hasn't.
			 */
			if (conf->raid_disks <= conf->previous_raid_disks)
				degraded2++;
	}
	rcu_read_unlock();
	if (degraded2 > degraded)
		return degraded2;
	return degraded;
}

static bool has_failed(struct r5conf *conf)
{
	int degraded = conf->mddev->degraded;

	if (test_bit(MD_BROKEN, &conf->mddev->flags))
		return true;

	if (conf->mddev->reshape_position != MaxSector)
		degraded = raid5_calc_degraded(conf);

	return degraded > conf->max_degraded;
}

struct stripe_head *
raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
			int previous, int noblock, int noquiesce)
{
	struct stripe_head *sh;
	int hash = stripe_hash_locks_hash(conf, sector);
	int inc_empty_inactive_list_flag;

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(conf->hash_locks + hash);

	do {
		wait_event_lock_irq(conf->wait_for_quiescent,
				    conf->quiesce == 0 || noquiesce,
				    *(conf->hash_locks + hash));
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
				sh = get_free_stripe(conf, hash);
				if (!sh && !test_bit(R5_DID_ALLOC,
						     &conf->cache_state))
					set_bit(R5_ALLOC_MORE,
						&conf->cache_state);
			}
			if (noblock && sh == NULL)
				break;

			r5c_check_stripe_cache_usage(conf);
			if (!sh) {
				set_bit(R5_INACTIVE_BLOCKED,
					&conf->cache_state);
				r5l_wake_reclaim(conf->log, 0);
				wait_event_lock_irq(
					conf->wait_for_stripe,
					!list_empty(conf->inactive_list + hash) &&
					(atomic_read(&conf->active_stripes)
					 < (conf->max_nr_stripes * 3 / 4)
					 || !test_bit(R5_INACTIVE_BLOCKED,
						      &conf->cache_state)),
					*(conf->hash_locks + hash));
				clear_bit(R5_INACTIVE_BLOCKED,
					  &conf->cache_state);
			} else {
				init_stripe(sh, sector, previous);
				atomic_inc(&sh->count);
			}
		} else if (!atomic_inc_not_zero(&sh->count)) {
			spin_lock(&conf->device_lock);
			if (!atomic_read(&sh->count)) {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				BUG_ON(list_empty(&sh->lru) &&
				       !test_bit(STRIPE_EXPANDING, &sh->state));
				inc_empty_inactive_list_flag = 0;
				if (!list_empty(conf->inactive_list + hash))
					inc_empty_inactive_list_flag = 1;
				list_del_init(&sh->lru);
				if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
					atomic_inc(&conf->empty_inactive_list_nr);
				if (sh->group) {
					sh->group->stripes_cnt--;
					sh->group = NULL;
				}
			}
			atomic_inc(&sh->count);
			spin_unlock(&conf->device_lock);
		}
	} while (sh == NULL);

	spin_unlock_irq(conf->hash_locks + hash);
	return sh;
}

static bool is_full_stripe_write(struct stripe_head *sh)
{
	BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));
	return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded);
}

static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
		__acquires(&sh1->stripe_lock)
		__acquires(&sh2->stripe_lock)
{
	if (sh1 > sh2) {
		spin_lock_irq(&sh2->stripe_lock);
		spin_lock_nested(&sh1->stripe_lock, 1);
	} else {
		spin_lock_irq(&sh1->stripe_lock);
		spin_lock_nested(&sh2->stripe_lock, 1);
	}
}

static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
		__releases(&sh1->stripe_lock)
		__releases(&sh2->stripe_lock)
{
	spin_unlock(&sh1->stripe_lock);
	spin_unlock_irq(&sh2->stripe_lock);
}

/* Only a freshly created, full-stripe, normal-write stripe can be added to a batch list */
static bool stripe_can_batch(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;

	if (raid5_has_log(conf) || raid5_has_ppl(conf))
		return false;
	return test_bit(STRIPE_BATCH_READY, &sh->state) &&
		!test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
		is_full_stripe_write(sh);
}

/* we only search backwards for a batch head */
static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh)
{
	struct stripe_head *head;
	sector_t head_sector, tmp_sec;
	int hash;
	int dd_idx;
	int inc_empty_inactive_list_flag;

	/* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
	tmp_sec = sh->sector;
	if (!sector_div(tmp_sec, conf->chunk_sectors))
		return;
	head_sector = sh->sector - RAID5_STRIPE_SECTORS(conf);

	hash = stripe_hash_locks_hash(conf, head_sector);
	spin_lock_irq(conf->hash_locks + hash);
	head = __find_stripe(conf, head_sector, conf->generation);
	if (head && !atomic_inc_not_zero(&head->count)) {
		spin_lock(&conf->device_lock);
		if (!atomic_read(&head->count)) {
			if (!test_bit(STRIPE_HANDLE, &head->state))
				atomic_inc(&conf->active_stripes);
			BUG_ON(list_empty(&head->lru) &&
			       !test_bit(STRIPE_EXPANDING, &head->state));
			inc_empty_inactive_list_flag = 0;
			if (!list_empty(conf->inactive_list + hash))
				inc_empty_inactive_list_flag = 1;
			list_del_init(&head->lru);
			if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
				atomic_inc(&conf->empty_inactive_list_nr);
			if (head->group) {
				head->group->stripes_cnt--;
				head->group = NULL;
			}
		}
		atomic_inc(&head->count);
		spin_unlock(&conf->device_lock);
	}
	spin_unlock_irq(conf->hash_locks + hash);

	if (!head)
		return;
	if (!stripe_can_batch(head))
		goto out;

	lock_two_stripes(head, sh);
	/* clear_batch_ready clears the flag */
	if (!stripe_can_batch(head) || !stripe_can_batch(sh))
		goto unlock_out;

	if (sh->batch_head)
		goto unlock_out;

	dd_idx = 0;
	while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
		dd_idx++;
	if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf ||
	    bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
		goto unlock_out;

	if (head->batch_head) {
		spin_lock(&head->batch_head->batch_lock);
		/* This batch list is already running */
		if (!stripe_can_batch(head)) {
			spin_unlock(&head->batch_head->batch_lock);
			goto unlock_out;
		}
		/*
		 * We must assign batch_head of this stripe within the
		 * batch_lock, otherwise clear_batch_ready of batch head
		 * stripe could clear BATCH_READY bit of this stripe and
		 * this stripe->batch_head doesn't get assigned, which
		 * could confuse clear_batch_ready for this stripe
		 */
		sh->batch_head = head->batch_head;

		/*
		 * at this point, head's BATCH_READY could be cleared, but we
		 * can still add the stripe to batch list
		 */
		list_add(&sh->batch_list, &head->batch_list);
		spin_unlock(&head->batch_head->batch_lock);
	} else {
		head->batch_head = head;
		sh->batch_head = head->batch_head;
		spin_lock(&head->batch_lock);
		list_add_tail(&sh->batch_list, &head->batch_list);
		spin_unlock(&head->batch_lock);
	}

	if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
		if (atomic_dec_return(&conf->preread_active_stripes)
		    < IO_THRESHOLD)
			md_wakeup_thread(conf->mddev->thread);

	if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) {
		int seq = sh->bm_seq;
		if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) &&
		    sh->batch_head->bm_seq > seq)
			seq = sh->batch_head->bm_seq;
		set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state);
		sh->batch_head->bm_seq = seq;
	}

	atomic_inc(&sh->count);
unlock_out:
	unlock_two_stripes(head, sh);
out:
	raid5_release_stripe(head);
}

/* Determine if 'data_offset' or 'new_data_offset' should be used
 * in this stripe_head.
 */
static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
{
	sector_t progress = conf->reshape_progress;
	/* Need a memory barrier to make sure we see the value
	 * of conf->generation, or ->data_offset that was set before
	 * reshape_progress was updated.
	 */
	smp_rmb();
	if (progress == MaxSector)
		return 0;
	if (sh->generation == conf->generation - 1)
		return 0;
	/* We are in a reshape, and this is a new-generation stripe,
	 * so use new_data_offset.
	 */
	return 1;
}

static void dispatch_bio_list(struct bio_list *tmp)
{
	struct bio *bio;

	while ((bio = bio_list_pop(tmp)))
		submit_bio_noacct(bio);
}

static int cmp_stripe(void *priv, struct list_head *a, struct list_head *b)
{
	const struct r5pending_data *da = list_entry(a,
				struct r5pending_data, sibling);
	const struct r5pending_data *db = list_entry(b,
				struct r5pending_data, sibling);
	if (da->sector > db->sector)
		return 1;
	if (da->sector < db->sector)
		return -1;
	return 0;
}

static void dispatch_defer_bios(struct r5conf *conf, int target,
				struct bio_list *list)
{
	struct r5pending_data *data;
	struct list_head *first, *next = NULL;
	int cnt = 0;

	if (conf->pending_data_cnt == 0)
		return;

	list_sort(NULL, &conf->pending_list, cmp_stripe);

	first = conf->pending_list.next;

	/* temporarily move the head */
	if (conf->next_pending_data)
		list_move_tail(&conf->pending_list,
			       &conf->next_pending_data->sibling);

	while (!list_empty(&conf->pending_list)) {
		data = list_first_entry(&conf->pending_list,
					struct r5pending_data, sibling);
		if (&data->sibling == first)
			first = data->sibling.next;
		next = data->sibling.next;

		bio_list_merge(list, &data->bios);
		list_move(&data->sibling, &conf->free_list);
		cnt++;
		if (cnt >= target)
			break;
	}
	conf->pending_data_cnt -= cnt;
	BUG_ON(conf->pending_data_cnt < 0 || cnt < target);

	if (next != &conf->pending_list)
		conf->next_pending_data = list_entry(next,
				struct r5pending_data, sibling);
	else
		conf->next_pending_data = NULL;
	/* list isn't empty */
	if (first != &conf->pending_list)
		list_move_tail(&conf->pending_list, first);
}

static void flush_deferred_bios(struct r5conf *conf)
{
	struct bio_list tmp = BIO_EMPTY_LIST;

	if (conf->pending_data_cnt == 0)
		return;

	spin_lock(&conf->pending_bios_lock);
	dispatch_defer_bios(conf, conf->pending_data_cnt, &tmp);
	BUG_ON(conf->pending_data_cnt != 0);
	spin_unlock(&conf->pending_bios_lock);

	dispatch_bio_list(&tmp);
}

static void defer_issue_bios(struct r5conf *conf, sector_t sector,
			     struct bio_list *bios)
{
	struct bio_list tmp = BIO_EMPTY_LIST;
	struct r5pending_data *ent;

	spin_lock(&conf->pending_bios_lock);
	ent = list_first_entry(&conf->free_list, struct r5pending_data,
			       sibling);
	list_move_tail(&ent->sibling, &conf->pending_list);
	ent->sector = sector;
	bio_list_init(&ent->bios);
	bio_list_merge(&ent->bios, bios);
	conf->pending_data_cnt++;
	if (conf->pending_data_cnt >= PENDING_IO_MAX)
		dispatch_defer_bios(conf, PENDING_IO_ONE_FLUSH, &tmp);

	spin_unlock(&conf->pending_bios_lock);

	dispatch_bio_list(&tmp);
}

static void
raid5_end_read_request(struct bio *bi);
static void
raid5_end_write_request(struct bio *bi);

ops_run_io(struct stripe_head * sh,struct stripe_head_state * s)1057*4882a593Smuzhiyun static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
1058*4882a593Smuzhiyun {
1059*4882a593Smuzhiyun struct r5conf *conf = sh->raid_conf;
1060*4882a593Smuzhiyun int i, disks = sh->disks;
1061*4882a593Smuzhiyun struct stripe_head *head_sh = sh;
1062*4882a593Smuzhiyun struct bio_list pending_bios = BIO_EMPTY_LIST;
1063*4882a593Smuzhiyun bool should_defer;
1064*4882a593Smuzhiyun
1065*4882a593Smuzhiyun might_sleep();
1066*4882a593Smuzhiyun
1067*4882a593Smuzhiyun if (log_stripe(sh, s) == 0)
1068*4882a593Smuzhiyun return;
1069*4882a593Smuzhiyun
1070*4882a593Smuzhiyun should_defer = conf->batch_bio_dispatch && conf->group_cnt;
1071*4882a593Smuzhiyun
1072*4882a593Smuzhiyun for (i = disks; i--; ) {
1073*4882a593Smuzhiyun int op, op_flags = 0;
1074*4882a593Smuzhiyun int replace_only = 0;
1075*4882a593Smuzhiyun struct bio *bi, *rbi;
1076*4882a593Smuzhiyun struct md_rdev *rdev, *rrdev = NULL;
1077*4882a593Smuzhiyun
1078*4882a593Smuzhiyun sh = head_sh;
1079*4882a593Smuzhiyun if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
1080*4882a593Smuzhiyun op = REQ_OP_WRITE;
1081*4882a593Smuzhiyun if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
1082*4882a593Smuzhiyun op_flags = REQ_FUA;
1083*4882a593Smuzhiyun if (test_bit(R5_Discard, &sh->dev[i].flags))
1084*4882a593Smuzhiyun op = REQ_OP_DISCARD;
1085*4882a593Smuzhiyun } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
1086*4882a593Smuzhiyun op = REQ_OP_READ;
1087*4882a593Smuzhiyun else if (test_and_clear_bit(R5_WantReplace,
1088*4882a593Smuzhiyun &sh->dev[i].flags)) {
1089*4882a593Smuzhiyun op = REQ_OP_WRITE;
1090*4882a593Smuzhiyun replace_only = 1;
1091*4882a593Smuzhiyun } else
1092*4882a593Smuzhiyun continue;
1093*4882a593Smuzhiyun if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
1094*4882a593Smuzhiyun op_flags |= REQ_SYNC;
1095*4882a593Smuzhiyun
1096*4882a593Smuzhiyun again:
1097*4882a593Smuzhiyun bi = &sh->dev[i].req;
1098*4882a593Smuzhiyun rbi = &sh->dev[i].rreq; /* For writing to replacement */
1099*4882a593Smuzhiyun
1100*4882a593Smuzhiyun rcu_read_lock();
1101*4882a593Smuzhiyun rrdev = rcu_dereference(conf->disks[i].replacement);
1102*4882a593Smuzhiyun smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
1103*4882a593Smuzhiyun rdev = rcu_dereference(conf->disks[i].rdev);
1104*4882a593Smuzhiyun if (!rdev) {
1105*4882a593Smuzhiyun rdev = rrdev;
1106*4882a593Smuzhiyun rrdev = NULL;
1107*4882a593Smuzhiyun }
1108*4882a593Smuzhiyun if (op_is_write(op)) {
1109*4882a593Smuzhiyun if (replace_only)
1110*4882a593Smuzhiyun rdev = NULL;
1111*4882a593Smuzhiyun if (rdev == rrdev)
1112*4882a593Smuzhiyun /* We raced and saw duplicates */
1113*4882a593Smuzhiyun rrdev = NULL;
1114*4882a593Smuzhiyun } else {
1115*4882a593Smuzhiyun if (test_bit(R5_ReadRepl, &head_sh->dev[i].flags) && rrdev)
1116*4882a593Smuzhiyun rdev = rrdev;
1117*4882a593Smuzhiyun rrdev = NULL;
1118*4882a593Smuzhiyun }
1119*4882a593Smuzhiyun
1120*4882a593Smuzhiyun if (rdev && test_bit(Faulty, &rdev->flags))
1121*4882a593Smuzhiyun rdev = NULL;
1122*4882a593Smuzhiyun if (rdev)
1123*4882a593Smuzhiyun atomic_inc(&rdev->nr_pending);
1124*4882a593Smuzhiyun if (rrdev && test_bit(Faulty, &rrdev->flags))
1125*4882a593Smuzhiyun rrdev = NULL;
1126*4882a593Smuzhiyun if (rrdev)
1127*4882a593Smuzhiyun atomic_inc(&rrdev->nr_pending);
1128*4882a593Smuzhiyun rcu_read_unlock();
1129*4882a593Smuzhiyun
1130*4882a593Smuzhiyun /* We have already checked bad blocks for reads. Now we
1131*4882a593Smuzhiyun * need to check for writes. We never accept write errors
1132*4882a593Smuzhiyun * on the replacement, so we don't need to check rrdev.
1133*4882a593Smuzhiyun */
1134*4882a593Smuzhiyun while (op_is_write(op) && rdev &&
1135*4882a593Smuzhiyun test_bit(WriteErrorSeen, &rdev->flags)) {
1136*4882a593Smuzhiyun sector_t first_bad;
1137*4882a593Smuzhiyun int bad_sectors;
1138*4882a593Smuzhiyun int bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
1139*4882a593Smuzhiyun &first_bad, &bad_sectors);
1140*4882a593Smuzhiyun if (!bad)
1141*4882a593Smuzhiyun break;
1142*4882a593Smuzhiyun
1143*4882a593Smuzhiyun if (bad < 0) {
1144*4882a593Smuzhiyun set_bit(BlockedBadBlocks, &rdev->flags);
1145*4882a593Smuzhiyun if (!conf->mddev->external &&
1146*4882a593Smuzhiyun conf->mddev->sb_flags) {
1147*4882a593Smuzhiyun /* It is very unlikely, but we might
1148*4882a593Smuzhiyun * still need to write out the
1149*4882a593Smuzhiyun * bad block log - better give it
1150*4882a593Smuzhiyun * a chance */
1151*4882a593Smuzhiyun md_check_recovery(conf->mddev);
1152*4882a593Smuzhiyun }
1153*4882a593Smuzhiyun /*
1154*4882a593Smuzhiyun * Because md_wait_for_blocked_rdev
1155*4882a593Smuzhiyun * will dec nr_pending, we must
1156*4882a593Smuzhiyun * increment it first.
1157*4882a593Smuzhiyun */
1158*4882a593Smuzhiyun atomic_inc(&rdev->nr_pending);
1159*4882a593Smuzhiyun md_wait_for_blocked_rdev(rdev, conf->mddev);
1160*4882a593Smuzhiyun } else {
1161*4882a593Smuzhiyun /* Acknowledged bad block - skip the write */
1162*4882a593Smuzhiyun rdev_dec_pending(rdev, conf->mddev);
1163*4882a593Smuzhiyun rdev = NULL;
1164*4882a593Smuzhiyun }
1165*4882a593Smuzhiyun }
1166*4882a593Smuzhiyun
1167*4882a593Smuzhiyun if (rdev) {
1168*4882a593Smuzhiyun if (s->syncing || s->expanding || s->expanded
1169*4882a593Smuzhiyun || s->replacing)
1170*4882a593Smuzhiyun md_sync_acct(rdev->bdev, RAID5_STRIPE_SECTORS(conf));
1171*4882a593Smuzhiyun
1172*4882a593Smuzhiyun set_bit(STRIPE_IO_STARTED, &sh->state);
1173*4882a593Smuzhiyun
1174*4882a593Smuzhiyun bio_set_dev(bi, rdev->bdev);
1175*4882a593Smuzhiyun bio_set_op_attrs(bi, op, op_flags);
1176*4882a593Smuzhiyun bi->bi_end_io = op_is_write(op)
1177*4882a593Smuzhiyun ? raid5_end_write_request
1178*4882a593Smuzhiyun : raid5_end_read_request;
1179*4882a593Smuzhiyun bi->bi_private = sh;
1180*4882a593Smuzhiyun
1181*4882a593Smuzhiyun pr_debug("%s: for %llu schedule op %d on disc %d\n",
1182*4882a593Smuzhiyun __func__, (unsigned long long)sh->sector,
1183*4882a593Smuzhiyun bi->bi_opf, i);
1184*4882a593Smuzhiyun atomic_inc(&sh->count);
1185*4882a593Smuzhiyun if (sh != head_sh)
1186*4882a593Smuzhiyun atomic_inc(&head_sh->count);
1187*4882a593Smuzhiyun if (use_new_offset(conf, sh))
1188*4882a593Smuzhiyun bi->bi_iter.bi_sector = (sh->sector
1189*4882a593Smuzhiyun + rdev->new_data_offset);
1190*4882a593Smuzhiyun else
1191*4882a593Smuzhiyun bi->bi_iter.bi_sector = (sh->sector
1192*4882a593Smuzhiyun + rdev->data_offset);
1193*4882a593Smuzhiyun if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags))
1194*4882a593Smuzhiyun bi->bi_opf |= REQ_NOMERGE;
1195*4882a593Smuzhiyun
1196*4882a593Smuzhiyun if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
1197*4882a593Smuzhiyun WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
1198*4882a593Smuzhiyun
1199*4882a593Smuzhiyun if (!op_is_write(op) &&
1200*4882a593Smuzhiyun test_bit(R5_InJournal, &sh->dev[i].flags))
1201*4882a593Smuzhiyun /*
1202*4882a593Smuzhiyun * issuing a read for a page in the journal; this
1203*4882a593Smuzhiyun * must be preparation for prexor in rmw, so read
1204*4882a593Smuzhiyun * the data into orig_page
1205*4882a593Smuzhiyun */
1206*4882a593Smuzhiyun sh->dev[i].vec.bv_page = sh->dev[i].orig_page;
1207*4882a593Smuzhiyun else
1208*4882a593Smuzhiyun sh->dev[i].vec.bv_page = sh->dev[i].page;
1209*4882a593Smuzhiyun bi->bi_vcnt = 1;
1210*4882a593Smuzhiyun bi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf);
1211*4882a593Smuzhiyun bi->bi_io_vec[0].bv_offset = sh->dev[i].offset;
1212*4882a593Smuzhiyun bi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf);
1213*4882a593Smuzhiyun bi->bi_write_hint = sh->dev[i].write_hint;
1214*4882a593Smuzhiyun if (!rrdev)
1215*4882a593Smuzhiyun sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET;
1216*4882a593Smuzhiyun /*
1217*4882a593Smuzhiyun * If this is a discard request, set bi_vcnt to 0. We don't
1218*4882a593Smuzhiyun * want to confuse SCSI because SCSI will replace the payload
1219*4882a593Smuzhiyun */
1220*4882a593Smuzhiyun if (op == REQ_OP_DISCARD)
1221*4882a593Smuzhiyun bi->bi_vcnt = 0;
1222*4882a593Smuzhiyun if (rrdev)
1223*4882a593Smuzhiyun set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
1224*4882a593Smuzhiyun
1225*4882a593Smuzhiyun if (conf->mddev->gendisk)
1226*4882a593Smuzhiyun trace_block_bio_remap(bi->bi_disk->queue,
1227*4882a593Smuzhiyun bi, disk_devt(conf->mddev->gendisk),
1228*4882a593Smuzhiyun sh->dev[i].sector);
1229*4882a593Smuzhiyun if (should_defer && op_is_write(op))
1230*4882a593Smuzhiyun bio_list_add(&pending_bios, bi);
1231*4882a593Smuzhiyun else
1232*4882a593Smuzhiyun submit_bio_noacct(bi);
1233*4882a593Smuzhiyun }
1234*4882a593Smuzhiyun if (rrdev) {
1235*4882a593Smuzhiyun if (s->syncing || s->expanding || s->expanded
1236*4882a593Smuzhiyun || s->replacing)
1237*4882a593Smuzhiyun md_sync_acct(rrdev->bdev, RAID5_STRIPE_SECTORS(conf));
1238*4882a593Smuzhiyun
1239*4882a593Smuzhiyun set_bit(STRIPE_IO_STARTED, &sh->state);
1240*4882a593Smuzhiyun
1241*4882a593Smuzhiyun bio_set_dev(rbi, rrdev->bdev);
1242*4882a593Smuzhiyun bio_set_op_attrs(rbi, op, op_flags);
1243*4882a593Smuzhiyun BUG_ON(!op_is_write(op));
1244*4882a593Smuzhiyun rbi->bi_end_io = raid5_end_write_request;
1245*4882a593Smuzhiyun rbi->bi_private = sh;
1246*4882a593Smuzhiyun
1247*4882a593Smuzhiyun pr_debug("%s: for %llu schedule op %d on "
1248*4882a593Smuzhiyun "replacement disc %d\n",
1249*4882a593Smuzhiyun __func__, (unsigned long long)sh->sector,
1250*4882a593Smuzhiyun rbi->bi_opf, i);
1251*4882a593Smuzhiyun atomic_inc(&sh->count);
1252*4882a593Smuzhiyun if (sh != head_sh)
1253*4882a593Smuzhiyun atomic_inc(&head_sh->count);
1254*4882a593Smuzhiyun if (use_new_offset(conf, sh))
1255*4882a593Smuzhiyun rbi->bi_iter.bi_sector = (sh->sector
1256*4882a593Smuzhiyun + rrdev->new_data_offset);
1257*4882a593Smuzhiyun else
1258*4882a593Smuzhiyun rbi->bi_iter.bi_sector = (sh->sector
1259*4882a593Smuzhiyun + rrdev->data_offset);
1260*4882a593Smuzhiyun if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
1261*4882a593Smuzhiyun WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
1262*4882a593Smuzhiyun sh->dev[i].rvec.bv_page = sh->dev[i].page;
1263*4882a593Smuzhiyun rbi->bi_vcnt = 1;
1264*4882a593Smuzhiyun rbi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf);
1265*4882a593Smuzhiyun rbi->bi_io_vec[0].bv_offset = sh->dev[i].offset;
1266*4882a593Smuzhiyun rbi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf);
1267*4882a593Smuzhiyun rbi->bi_write_hint = sh->dev[i].write_hint;
1268*4882a593Smuzhiyun sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET;
1269*4882a593Smuzhiyun /*
1270*4882a593Smuzhiyun * If this is a discard request, set bi_vcnt to 0. We don't
1271*4882a593Smuzhiyun * want to confuse SCSI because SCSI will replace the payload
1272*4882a593Smuzhiyun */
1273*4882a593Smuzhiyun if (op == REQ_OP_DISCARD)
1274*4882a593Smuzhiyun rbi->bi_vcnt = 0;
1275*4882a593Smuzhiyun if (conf->mddev->gendisk)
1276*4882a593Smuzhiyun trace_block_bio_remap(rbi->bi_disk->queue,
1277*4882a593Smuzhiyun rbi, disk_devt(conf->mddev->gendisk),
1278*4882a593Smuzhiyun sh->dev[i].sector);
1279*4882a593Smuzhiyun if (should_defer && op_is_write(op))
1280*4882a593Smuzhiyun bio_list_add(&pending_bios, rbi);
1281*4882a593Smuzhiyun else
1282*4882a593Smuzhiyun submit_bio_noacct(rbi);
1283*4882a593Smuzhiyun }
1284*4882a593Smuzhiyun if (!rdev && !rrdev) {
1285*4882a593Smuzhiyun if (op_is_write(op))
1286*4882a593Smuzhiyun set_bit(STRIPE_DEGRADED, &sh->state);
1287*4882a593Smuzhiyun pr_debug("skip op %d on disc %d for sector %llu\n",
1288*4882a593Smuzhiyun bi->bi_opf, i, (unsigned long long)sh->sector);
1289*4882a593Smuzhiyun clear_bit(R5_LOCKED, &sh->dev[i].flags);
1290*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
1291*4882a593Smuzhiyun }
1292*4882a593Smuzhiyun
1293*4882a593Smuzhiyun if (!head_sh->batch_head)
1294*4882a593Smuzhiyun continue;
1295*4882a593Smuzhiyun sh = list_first_entry(&sh->batch_list, struct stripe_head,
1296*4882a593Smuzhiyun batch_list);
1297*4882a593Smuzhiyun if (sh != head_sh)
1298*4882a593Smuzhiyun goto again;
1299*4882a593Smuzhiyun }
1300*4882a593Smuzhiyun
1301*4882a593Smuzhiyun if (should_defer && !bio_list_empty(&pending_bios))
1302*4882a593Smuzhiyun defer_issue_bios(conf, head_sh->sector, &pending_bios);
1303*4882a593Smuzhiyun }
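
/*
 * Illustrative sketch only (not used by the driver): the per-device bio
 * sector chosen above is just the stripe sector shifted by the member
 * device's data offset, using the new data offset when use_new_offset()
 * selects it (typically while a reshape is in progress).  The helper name
 * and signature below are hypothetical.
 */
#if 0
static sector_t raid5_member_sector_sketch(sector_t stripe_sector,
					   sector_t data_offset,
					   sector_t new_data_offset,
					   bool use_new)
{
	/* Mirrors the bi_iter.bi_sector assignments in ops_run_io(). */
	return stripe_sector + (use_new ? new_data_offset : data_offset);
}
#endif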
1304*4882a593Smuzhiyun
1305*4882a593Smuzhiyun static struct dma_async_tx_descriptor *
1306*4882a593Smuzhiyun async_copy_data(int frombio, struct bio *bio, struct page **page,
1307*4882a593Smuzhiyun unsigned int poff, sector_t sector, struct dma_async_tx_descriptor *tx,
1308*4882a593Smuzhiyun struct stripe_head *sh, int no_skipcopy)
1309*4882a593Smuzhiyun {
1310*4882a593Smuzhiyun struct bio_vec bvl;
1311*4882a593Smuzhiyun struct bvec_iter iter;
1312*4882a593Smuzhiyun struct page *bio_page;
1313*4882a593Smuzhiyun int page_offset;
1314*4882a593Smuzhiyun struct async_submit_ctl submit;
1315*4882a593Smuzhiyun enum async_tx_flags flags = 0;
1316*4882a593Smuzhiyun struct r5conf *conf = sh->raid_conf;
1317*4882a593Smuzhiyun
1318*4882a593Smuzhiyun if (bio->bi_iter.bi_sector >= sector)
1319*4882a593Smuzhiyun page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
1320*4882a593Smuzhiyun else
1321*4882a593Smuzhiyun page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
1322*4882a593Smuzhiyun
1323*4882a593Smuzhiyun if (frombio)
1324*4882a593Smuzhiyun flags |= ASYNC_TX_FENCE;
1325*4882a593Smuzhiyun init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
1326*4882a593Smuzhiyun
1327*4882a593Smuzhiyun bio_for_each_segment(bvl, bio, iter) {
1328*4882a593Smuzhiyun int len = bvl.bv_len;
1329*4882a593Smuzhiyun int clen;
1330*4882a593Smuzhiyun int b_offset = 0;
1331*4882a593Smuzhiyun
1332*4882a593Smuzhiyun if (page_offset < 0) {
1333*4882a593Smuzhiyun b_offset = -page_offset;
1334*4882a593Smuzhiyun page_offset += b_offset;
1335*4882a593Smuzhiyun len -= b_offset;
1336*4882a593Smuzhiyun }
1337*4882a593Smuzhiyun
1338*4882a593Smuzhiyun if (len > 0 && page_offset + len > RAID5_STRIPE_SIZE(conf))
1339*4882a593Smuzhiyun clen = RAID5_STRIPE_SIZE(conf) - page_offset;
1340*4882a593Smuzhiyun else
1341*4882a593Smuzhiyun clen = len;
1342*4882a593Smuzhiyun
1343*4882a593Smuzhiyun if (clen > 0) {
1344*4882a593Smuzhiyun b_offset += bvl.bv_offset;
1345*4882a593Smuzhiyun bio_page = bvl.bv_page;
1346*4882a593Smuzhiyun if (frombio) {
1347*4882a593Smuzhiyun if (conf->skip_copy &&
1348*4882a593Smuzhiyun b_offset == 0 && page_offset == 0 &&
1349*4882a593Smuzhiyun clen == RAID5_STRIPE_SIZE(conf) &&
1350*4882a593Smuzhiyun !no_skipcopy)
1351*4882a593Smuzhiyun *page = bio_page;
1352*4882a593Smuzhiyun else
1353*4882a593Smuzhiyun tx = async_memcpy(*page, bio_page, page_offset + poff,
1354*4882a593Smuzhiyun b_offset, clen, &submit);
1355*4882a593Smuzhiyun } else
1356*4882a593Smuzhiyun tx = async_memcpy(bio_page, *page, b_offset,
1357*4882a593Smuzhiyun page_offset + poff, clen, &submit);
1358*4882a593Smuzhiyun }
1359*4882a593Smuzhiyun /* chain the operations */
1360*4882a593Smuzhiyun submit.depend_tx = tx;
1361*4882a593Smuzhiyun
1362*4882a593Smuzhiyun if (clen < len) /* hit end of page */
1363*4882a593Smuzhiyun break;
1364*4882a593Smuzhiyun page_offset += len;
1365*4882a593Smuzhiyun }
1366*4882a593Smuzhiyun
1367*4882a593Smuzhiyun return tx;
1368*4882a593Smuzhiyun }
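
/*
 * Illustrative sketch only: async_copy_data() turns the distance between the
 * bio's start sector and the stripe device sector into a signed byte offset
 * (512 bytes per sector), then clips each segment so the copy never runs
 * past RAID5_STRIPE_SIZE().  A minimal model of the offset arithmetic, with
 * a hypothetical helper name:
 */
#if 0
static int page_offset_sketch(sector_t bio_sector, sector_t dev_sector)
{
	if (bio_sector >= dev_sector)
		return (signed)(bio_sector - dev_sector) * 512;
	return (signed)(dev_sector - bio_sector) * -512;
}
#endif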
1369*4882a593Smuzhiyun
1370*4882a593Smuzhiyun static void ops_complete_biofill(void *stripe_head_ref)
1371*4882a593Smuzhiyun {
1372*4882a593Smuzhiyun struct stripe_head *sh = stripe_head_ref;
1373*4882a593Smuzhiyun int i;
1374*4882a593Smuzhiyun struct r5conf *conf = sh->raid_conf;
1375*4882a593Smuzhiyun
1376*4882a593Smuzhiyun pr_debug("%s: stripe %llu\n", __func__,
1377*4882a593Smuzhiyun (unsigned long long)sh->sector);
1378*4882a593Smuzhiyun
1379*4882a593Smuzhiyun /* clear completed biofills */
1380*4882a593Smuzhiyun for (i = sh->disks; i--; ) {
1381*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[i];
1382*4882a593Smuzhiyun
1383*4882a593Smuzhiyun /* acknowledge completion of a biofill operation */
1384*4882a593Smuzhiyun /* and check if we need to reply to a read request;
1385*4882a593Smuzhiyun * new R5_Wantfill requests are held off until
1386*4882a593Smuzhiyun * !STRIPE_BIOFILL_RUN
1387*4882a593Smuzhiyun */
1388*4882a593Smuzhiyun if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
1389*4882a593Smuzhiyun struct bio *rbi, *rbi2;
1390*4882a593Smuzhiyun
1391*4882a593Smuzhiyun BUG_ON(!dev->read);
1392*4882a593Smuzhiyun rbi = dev->read;
1393*4882a593Smuzhiyun dev->read = NULL;
1394*4882a593Smuzhiyun while (rbi && rbi->bi_iter.bi_sector <
1395*4882a593Smuzhiyun dev->sector + RAID5_STRIPE_SECTORS(conf)) {
1396*4882a593Smuzhiyun rbi2 = r5_next_bio(conf, rbi, dev->sector);
1397*4882a593Smuzhiyun bio_endio(rbi);
1398*4882a593Smuzhiyun rbi = rbi2;
1399*4882a593Smuzhiyun }
1400*4882a593Smuzhiyun }
1401*4882a593Smuzhiyun }
1402*4882a593Smuzhiyun clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
1403*4882a593Smuzhiyun
1404*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
1405*4882a593Smuzhiyun raid5_release_stripe(sh);
1406*4882a593Smuzhiyun }
1407*4882a593Smuzhiyun
1408*4882a593Smuzhiyun static void ops_run_biofill(struct stripe_head *sh)
1409*4882a593Smuzhiyun {
1410*4882a593Smuzhiyun struct dma_async_tx_descriptor *tx = NULL;
1411*4882a593Smuzhiyun struct async_submit_ctl submit;
1412*4882a593Smuzhiyun int i;
1413*4882a593Smuzhiyun struct r5conf *conf = sh->raid_conf;
1414*4882a593Smuzhiyun
1415*4882a593Smuzhiyun BUG_ON(sh->batch_head);
1416*4882a593Smuzhiyun pr_debug("%s: stripe %llu\n", __func__,
1417*4882a593Smuzhiyun (unsigned long long)sh->sector);
1418*4882a593Smuzhiyun
1419*4882a593Smuzhiyun for (i = sh->disks; i--; ) {
1420*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[i];
1421*4882a593Smuzhiyun if (test_bit(R5_Wantfill, &dev->flags)) {
1422*4882a593Smuzhiyun struct bio *rbi;
1423*4882a593Smuzhiyun spin_lock_irq(&sh->stripe_lock);
1424*4882a593Smuzhiyun dev->read = rbi = dev->toread;
1425*4882a593Smuzhiyun dev->toread = NULL;
1426*4882a593Smuzhiyun spin_unlock_irq(&sh->stripe_lock);
1427*4882a593Smuzhiyun while (rbi && rbi->bi_iter.bi_sector <
1428*4882a593Smuzhiyun dev->sector + RAID5_STRIPE_SECTORS(conf)) {
1429*4882a593Smuzhiyun tx = async_copy_data(0, rbi, &dev->page,
1430*4882a593Smuzhiyun dev->offset,
1431*4882a593Smuzhiyun dev->sector, tx, sh, 0);
1432*4882a593Smuzhiyun rbi = r5_next_bio(conf, rbi, dev->sector);
1433*4882a593Smuzhiyun }
1434*4882a593Smuzhiyun }
1435*4882a593Smuzhiyun }
1436*4882a593Smuzhiyun
1437*4882a593Smuzhiyun atomic_inc(&sh->count);
1438*4882a593Smuzhiyun init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
1439*4882a593Smuzhiyun async_trigger_callback(&submit);
1440*4882a593Smuzhiyun }
1441*4882a593Smuzhiyun
1442*4882a593Smuzhiyun static void mark_target_uptodate(struct stripe_head *sh, int target)
1443*4882a593Smuzhiyun {
1444*4882a593Smuzhiyun struct r5dev *tgt;
1445*4882a593Smuzhiyun
1446*4882a593Smuzhiyun if (target < 0)
1447*4882a593Smuzhiyun return;
1448*4882a593Smuzhiyun
1449*4882a593Smuzhiyun tgt = &sh->dev[target];
1450*4882a593Smuzhiyun set_bit(R5_UPTODATE, &tgt->flags);
1451*4882a593Smuzhiyun BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1452*4882a593Smuzhiyun clear_bit(R5_Wantcompute, &tgt->flags);
1453*4882a593Smuzhiyun }
1454*4882a593Smuzhiyun
1455*4882a593Smuzhiyun static void ops_complete_compute(void *stripe_head_ref)
1456*4882a593Smuzhiyun {
1457*4882a593Smuzhiyun struct stripe_head *sh = stripe_head_ref;
1458*4882a593Smuzhiyun
1459*4882a593Smuzhiyun pr_debug("%s: stripe %llu\n", __func__,
1460*4882a593Smuzhiyun (unsigned long long)sh->sector);
1461*4882a593Smuzhiyun
1462*4882a593Smuzhiyun /* mark the computed target(s) as uptodate */
1463*4882a593Smuzhiyun mark_target_uptodate(sh, sh->ops.target);
1464*4882a593Smuzhiyun mark_target_uptodate(sh, sh->ops.target2);
1465*4882a593Smuzhiyun
1466*4882a593Smuzhiyun clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
1467*4882a593Smuzhiyun if (sh->check_state == check_state_compute_run)
1468*4882a593Smuzhiyun sh->check_state = check_state_compute_result;
1469*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
1470*4882a593Smuzhiyun raid5_release_stripe(sh);
1471*4882a593Smuzhiyun }
1472*4882a593Smuzhiyun
1473*4882a593Smuzhiyun /* return a pointer to the address conversion region of the scribble buffer */
1474*4882a593Smuzhiyun static struct page **to_addr_page(struct raid5_percpu *percpu, int i)
1475*4882a593Smuzhiyun {
1476*4882a593Smuzhiyun return percpu->scribble + i * percpu->scribble_obj_size;
1477*4882a593Smuzhiyun }
1478*4882a593Smuzhiyun
1479*4882a593Smuzhiyun /* return a pointer to the address conversion region of the scribble buffer */
1480*4882a593Smuzhiyun static addr_conv_t *to_addr_conv(struct stripe_head *sh,
1481*4882a593Smuzhiyun struct raid5_percpu *percpu, int i)
1482*4882a593Smuzhiyun {
1483*4882a593Smuzhiyun return (void *) (to_addr_page(percpu, i) + sh->disks + 2);
1484*4882a593Smuzhiyun }
1485*4882a593Smuzhiyun
1486*4882a593Smuzhiyun /*
1487*4882a593Smuzhiyun * Return a pointer to record offset address.
1488*4882a593Smuzhiyun */
1489*4882a593Smuzhiyun static unsigned int *
1490*4882a593Smuzhiyun to_addr_offs(struct stripe_head *sh, struct raid5_percpu *percpu)
1491*4882a593Smuzhiyun {
1492*4882a593Smuzhiyun return (unsigned int *) (to_addr_conv(sh, percpu, 0) + sh->disks + 2);
1493*4882a593Smuzhiyun }
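
/*
 * Layout sketch of one per-cpu scribble slot, as implied by the pointer
 * arithmetic in the three helpers above.  This struct is hypothetical (the
 * driver only uses raw pointer math); the array lengths are assumptions
 * drawn from the "+ sh->disks + 2" offsets, with NR_DISKS standing in for
 * sh->disks.
 */
#if 0
struct scribble_slot_sketch {
	struct page *pages[NR_DISKS + 2];	/* to_addr_page() */
	addr_conv_t addr_conv[NR_DISKS + 2];	/* to_addr_conv() */
	unsigned int offsets[NR_DISKS + 2];	/* to_addr_offs() */
};
#endif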
1494*4882a593Smuzhiyun
1495*4882a593Smuzhiyun static struct dma_async_tx_descriptor *
1496*4882a593Smuzhiyun ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
1497*4882a593Smuzhiyun {
1498*4882a593Smuzhiyun int disks = sh->disks;
1499*4882a593Smuzhiyun struct page **xor_srcs = to_addr_page(percpu, 0);
1500*4882a593Smuzhiyun unsigned int *off_srcs = to_addr_offs(sh, percpu);
1501*4882a593Smuzhiyun int target = sh->ops.target;
1502*4882a593Smuzhiyun struct r5dev *tgt = &sh->dev[target];
1503*4882a593Smuzhiyun struct page *xor_dest = tgt->page;
1504*4882a593Smuzhiyun unsigned int off_dest = tgt->offset;
1505*4882a593Smuzhiyun int count = 0;
1506*4882a593Smuzhiyun struct dma_async_tx_descriptor *tx;
1507*4882a593Smuzhiyun struct async_submit_ctl submit;
1508*4882a593Smuzhiyun int i;
1509*4882a593Smuzhiyun
1510*4882a593Smuzhiyun BUG_ON(sh->batch_head);
1511*4882a593Smuzhiyun
1512*4882a593Smuzhiyun pr_debug("%s: stripe %llu block: %d\n",
1513*4882a593Smuzhiyun __func__, (unsigned long long)sh->sector, target);
1514*4882a593Smuzhiyun BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1515*4882a593Smuzhiyun
1516*4882a593Smuzhiyun for (i = disks; i--; ) {
1517*4882a593Smuzhiyun if (i != target) {
1518*4882a593Smuzhiyun off_srcs[count] = sh->dev[i].offset;
1519*4882a593Smuzhiyun xor_srcs[count++] = sh->dev[i].page;
1520*4882a593Smuzhiyun }
1521*4882a593Smuzhiyun }
1522*4882a593Smuzhiyun
1523*4882a593Smuzhiyun atomic_inc(&sh->count);
1524*4882a593Smuzhiyun
1525*4882a593Smuzhiyun init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
1526*4882a593Smuzhiyun ops_complete_compute, sh, to_addr_conv(sh, percpu, 0));
1527*4882a593Smuzhiyun if (unlikely(count == 1))
1528*4882a593Smuzhiyun tx = async_memcpy(xor_dest, xor_srcs[0], off_dest, off_srcs[0],
1529*4882a593Smuzhiyun RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
1530*4882a593Smuzhiyun else
1531*4882a593Smuzhiyun tx = async_xor_offs(xor_dest, off_dest, xor_srcs, off_srcs, count,
1532*4882a593Smuzhiyun RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
1533*4882a593Smuzhiyun
1534*4882a593Smuzhiyun return tx;
1535*4882a593Smuzhiyun }
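
/*
 * Illustrative sketch only: ops_run_compute5() rebuilds the one missing
 * block as the XOR of every other block in the stripe; the async_xor_offs()
 * call above does the same thing asynchronously (possibly offloaded).  A
 * plain synchronous model with hypothetical names:
 */
#if 0
static void xor_recover_sketch(u8 *dest, u8 * const *srcs, int nsrcs, size_t len)
{
	size_t off;
	int i;

	memset(dest, 0, len);
	for (i = 0; i < nsrcs; i++)
		for (off = 0; off < len; off++)
			dest[off] ^= srcs[i][off];
}
#endif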
1536*4882a593Smuzhiyun
1537*4882a593Smuzhiyun /* set_syndrome_sources - populate source buffers for gen_syndrome
1538*4882a593Smuzhiyun * @srcs - (struct page *) array of size sh->disks
1539*4882a593Smuzhiyun * @offs - (unsigned int) array of offset for each page
1540*4882a593Smuzhiyun * @sh - stripe_head to parse
1541*4882a593Smuzhiyun *
1542*4882a593Smuzhiyun * Populates srcs in proper layout order for the stripe and returns the
1543*4882a593Smuzhiyun * 'count' of sources to be used in a call to async_gen_syndrome. The P
1544*4882a593Smuzhiyun * destination buffer is recorded in srcs[count] and the Q destination
1545*4882a593Smuzhiyun * is recorded in srcs[count+1].
1546*4882a593Smuzhiyun */
1547*4882a593Smuzhiyun static int set_syndrome_sources(struct page **srcs,
1548*4882a593Smuzhiyun unsigned int *offs,
1549*4882a593Smuzhiyun struct stripe_head *sh,
1550*4882a593Smuzhiyun int srctype)
1551*4882a593Smuzhiyun {
1552*4882a593Smuzhiyun int disks = sh->disks;
1553*4882a593Smuzhiyun int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
1554*4882a593Smuzhiyun int d0_idx = raid6_d0(sh);
1555*4882a593Smuzhiyun int count;
1556*4882a593Smuzhiyun int i;
1557*4882a593Smuzhiyun
1558*4882a593Smuzhiyun for (i = 0; i < disks; i++)
1559*4882a593Smuzhiyun srcs[i] = NULL;
1560*4882a593Smuzhiyun
1561*4882a593Smuzhiyun count = 0;
1562*4882a593Smuzhiyun i = d0_idx;
1563*4882a593Smuzhiyun do {
1564*4882a593Smuzhiyun int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
1565*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[i];
1566*4882a593Smuzhiyun
1567*4882a593Smuzhiyun if (i == sh->qd_idx || i == sh->pd_idx ||
1568*4882a593Smuzhiyun (srctype == SYNDROME_SRC_ALL) ||
1569*4882a593Smuzhiyun (srctype == SYNDROME_SRC_WANT_DRAIN &&
1570*4882a593Smuzhiyun (test_bit(R5_Wantdrain, &dev->flags) ||
1571*4882a593Smuzhiyun test_bit(R5_InJournal, &dev->flags))) ||
1572*4882a593Smuzhiyun (srctype == SYNDROME_SRC_WRITTEN &&
1573*4882a593Smuzhiyun (dev->written ||
1574*4882a593Smuzhiyun test_bit(R5_InJournal, &dev->flags)))) {
1575*4882a593Smuzhiyun if (test_bit(R5_InJournal, &dev->flags))
1576*4882a593Smuzhiyun srcs[slot] = sh->dev[i].orig_page;
1577*4882a593Smuzhiyun else
1578*4882a593Smuzhiyun srcs[slot] = sh->dev[i].page;
1579*4882a593Smuzhiyun /*
1580*4882a593Smuzhiyun * For R5_InJournal, PAGE_SIZE must be 4KB and the
1581*4882a593Smuzhiyun * page is not shared. In that case, dev[i].offset
1582*4882a593Smuzhiyun * is 0.
1583*4882a593Smuzhiyun */
1584*4882a593Smuzhiyun offs[slot] = sh->dev[i].offset;
1585*4882a593Smuzhiyun }
1586*4882a593Smuzhiyun i = raid6_next_disk(i, disks);
1587*4882a593Smuzhiyun } while (i != d0_idx);
1588*4882a593Smuzhiyun
1589*4882a593Smuzhiyun return syndrome_disks;
1590*4882a593Smuzhiyun }
1591*4882a593Smuzhiyun
1592*4882a593Smuzhiyun static struct dma_async_tx_descriptor *
1593*4882a593Smuzhiyun ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
1594*4882a593Smuzhiyun {
1595*4882a593Smuzhiyun int disks = sh->disks;
1596*4882a593Smuzhiyun struct page **blocks = to_addr_page(percpu, 0);
1597*4882a593Smuzhiyun unsigned int *offs = to_addr_offs(sh, percpu);
1598*4882a593Smuzhiyun int target;
1599*4882a593Smuzhiyun int qd_idx = sh->qd_idx;
1600*4882a593Smuzhiyun struct dma_async_tx_descriptor *tx;
1601*4882a593Smuzhiyun struct async_submit_ctl submit;
1602*4882a593Smuzhiyun struct r5dev *tgt;
1603*4882a593Smuzhiyun struct page *dest;
1604*4882a593Smuzhiyun unsigned int dest_off;
1605*4882a593Smuzhiyun int i;
1606*4882a593Smuzhiyun int count;
1607*4882a593Smuzhiyun
1608*4882a593Smuzhiyun BUG_ON(sh->batch_head);
1609*4882a593Smuzhiyun if (sh->ops.target < 0)
1610*4882a593Smuzhiyun target = sh->ops.target2;
1611*4882a593Smuzhiyun else if (sh->ops.target2 < 0)
1612*4882a593Smuzhiyun target = sh->ops.target;
1613*4882a593Smuzhiyun else
1614*4882a593Smuzhiyun /* we should only have one valid target */
1615*4882a593Smuzhiyun BUG();
1616*4882a593Smuzhiyun BUG_ON(target < 0);
1617*4882a593Smuzhiyun pr_debug("%s: stripe %llu block: %d\n",
1618*4882a593Smuzhiyun __func__, (unsigned long long)sh->sector, target);
1619*4882a593Smuzhiyun
1620*4882a593Smuzhiyun tgt = &sh->dev[target];
1621*4882a593Smuzhiyun BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1622*4882a593Smuzhiyun dest = tgt->page;
1623*4882a593Smuzhiyun dest_off = tgt->offset;
1624*4882a593Smuzhiyun
1625*4882a593Smuzhiyun atomic_inc(&sh->count);
1626*4882a593Smuzhiyun
1627*4882a593Smuzhiyun if (target == qd_idx) {
1628*4882a593Smuzhiyun count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_ALL);
1629*4882a593Smuzhiyun blocks[count] = NULL; /* regenerating p is not necessary */
1630*4882a593Smuzhiyun BUG_ON(blocks[count+1] != dest); /* q should already be set */
1631*4882a593Smuzhiyun init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1632*4882a593Smuzhiyun ops_complete_compute, sh,
1633*4882a593Smuzhiyun to_addr_conv(sh, percpu, 0));
1634*4882a593Smuzhiyun tx = async_gen_syndrome(blocks, offs, count+2,
1635*4882a593Smuzhiyun RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
1636*4882a593Smuzhiyun } else {
1637*4882a593Smuzhiyun /* Compute any data- or p-drive using XOR */
1638*4882a593Smuzhiyun count = 0;
1639*4882a593Smuzhiyun for (i = disks; i-- ; ) {
1640*4882a593Smuzhiyun if (i == target || i == qd_idx)
1641*4882a593Smuzhiyun continue;
1642*4882a593Smuzhiyun offs[count] = sh->dev[i].offset;
1643*4882a593Smuzhiyun blocks[count++] = sh->dev[i].page;
1644*4882a593Smuzhiyun }
1645*4882a593Smuzhiyun
1646*4882a593Smuzhiyun init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
1647*4882a593Smuzhiyun NULL, ops_complete_compute, sh,
1648*4882a593Smuzhiyun to_addr_conv(sh, percpu, 0));
1649*4882a593Smuzhiyun tx = async_xor_offs(dest, dest_off, blocks, offs, count,
1650*4882a593Smuzhiyun RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
1651*4882a593Smuzhiyun }
1652*4882a593Smuzhiyun
1653*4882a593Smuzhiyun return tx;
1654*4882a593Smuzhiyun }
1655*4882a593Smuzhiyun
1656*4882a593Smuzhiyun static struct dma_async_tx_descriptor *
1657*4882a593Smuzhiyun ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
1658*4882a593Smuzhiyun {
1659*4882a593Smuzhiyun int i, count, disks = sh->disks;
1660*4882a593Smuzhiyun int syndrome_disks = sh->ddf_layout ? disks : disks-2;
1661*4882a593Smuzhiyun int d0_idx = raid6_d0(sh);
1662*4882a593Smuzhiyun int faila = -1, failb = -1;
1663*4882a593Smuzhiyun int target = sh->ops.target;
1664*4882a593Smuzhiyun int target2 = sh->ops.target2;
1665*4882a593Smuzhiyun struct r5dev *tgt = &sh->dev[target];
1666*4882a593Smuzhiyun struct r5dev *tgt2 = &sh->dev[target2];
1667*4882a593Smuzhiyun struct dma_async_tx_descriptor *tx;
1668*4882a593Smuzhiyun struct page **blocks = to_addr_page(percpu, 0);
1669*4882a593Smuzhiyun unsigned int *offs = to_addr_offs(sh, percpu);
1670*4882a593Smuzhiyun struct async_submit_ctl submit;
1671*4882a593Smuzhiyun
1672*4882a593Smuzhiyun BUG_ON(sh->batch_head);
1673*4882a593Smuzhiyun pr_debug("%s: stripe %llu block1: %d block2: %d\n",
1674*4882a593Smuzhiyun __func__, (unsigned long long)sh->sector, target, target2);
1675*4882a593Smuzhiyun BUG_ON(target < 0 || target2 < 0);
1676*4882a593Smuzhiyun BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
1677*4882a593Smuzhiyun BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));
1678*4882a593Smuzhiyun
1679*4882a593Smuzhiyun /* we need to open-code set_syndrome_sources to handle the
1680*4882a593Smuzhiyun * slot number conversion for 'faila' and 'failb'
1681*4882a593Smuzhiyun */
1682*4882a593Smuzhiyun for (i = 0; i < disks ; i++) {
1683*4882a593Smuzhiyun offs[i] = 0;
1684*4882a593Smuzhiyun blocks[i] = NULL;
1685*4882a593Smuzhiyun }
1686*4882a593Smuzhiyun count = 0;
1687*4882a593Smuzhiyun i = d0_idx;
1688*4882a593Smuzhiyun do {
1689*4882a593Smuzhiyun int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
1690*4882a593Smuzhiyun
1691*4882a593Smuzhiyun offs[slot] = sh->dev[i].offset;
1692*4882a593Smuzhiyun blocks[slot] = sh->dev[i].page;
1693*4882a593Smuzhiyun
1694*4882a593Smuzhiyun if (i == target)
1695*4882a593Smuzhiyun faila = slot;
1696*4882a593Smuzhiyun if (i == target2)
1697*4882a593Smuzhiyun failb = slot;
1698*4882a593Smuzhiyun i = raid6_next_disk(i, disks);
1699*4882a593Smuzhiyun } while (i != d0_idx);
1700*4882a593Smuzhiyun
1701*4882a593Smuzhiyun BUG_ON(faila == failb);
1702*4882a593Smuzhiyun if (failb < faila)
1703*4882a593Smuzhiyun swap(faila, failb);
1704*4882a593Smuzhiyun pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
1705*4882a593Smuzhiyun __func__, (unsigned long long)sh->sector, faila, failb);
1706*4882a593Smuzhiyun
1707*4882a593Smuzhiyun atomic_inc(&sh->count);
1708*4882a593Smuzhiyun
1709*4882a593Smuzhiyun if (failb == syndrome_disks+1) {
1710*4882a593Smuzhiyun /* Q disk is one of the missing disks */
1711*4882a593Smuzhiyun if (faila == syndrome_disks) {
1712*4882a593Smuzhiyun /* Missing P+Q, just recompute */
1713*4882a593Smuzhiyun init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1714*4882a593Smuzhiyun ops_complete_compute, sh,
1715*4882a593Smuzhiyun to_addr_conv(sh, percpu, 0));
1716*4882a593Smuzhiyun return async_gen_syndrome(blocks, offs, syndrome_disks+2,
1717*4882a593Smuzhiyun RAID5_STRIPE_SIZE(sh->raid_conf),
1718*4882a593Smuzhiyun &submit);
1719*4882a593Smuzhiyun } else {
1720*4882a593Smuzhiyun struct page *dest;
1721*4882a593Smuzhiyun unsigned int dest_off;
1722*4882a593Smuzhiyun int data_target;
1723*4882a593Smuzhiyun int qd_idx = sh->qd_idx;
1724*4882a593Smuzhiyun
1725*4882a593Smuzhiyun /* Missing D+Q: recompute D from P, then recompute Q */
1726*4882a593Smuzhiyun if (target == qd_idx)
1727*4882a593Smuzhiyun data_target = target2;
1728*4882a593Smuzhiyun else
1729*4882a593Smuzhiyun data_target = target;
1730*4882a593Smuzhiyun
1731*4882a593Smuzhiyun count = 0;
1732*4882a593Smuzhiyun for (i = disks; i-- ; ) {
1733*4882a593Smuzhiyun if (i == data_target || i == qd_idx)
1734*4882a593Smuzhiyun continue;
1735*4882a593Smuzhiyun offs[count] = sh->dev[i].offset;
1736*4882a593Smuzhiyun blocks[count++] = sh->dev[i].page;
1737*4882a593Smuzhiyun }
1738*4882a593Smuzhiyun dest = sh->dev[data_target].page;
1739*4882a593Smuzhiyun dest_off = sh->dev[data_target].offset;
1740*4882a593Smuzhiyun init_async_submit(&submit,
1741*4882a593Smuzhiyun ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
1742*4882a593Smuzhiyun NULL, NULL, NULL,
1743*4882a593Smuzhiyun to_addr_conv(sh, percpu, 0));
1744*4882a593Smuzhiyun tx = async_xor_offs(dest, dest_off, blocks, offs, count,
1745*4882a593Smuzhiyun RAID5_STRIPE_SIZE(sh->raid_conf),
1746*4882a593Smuzhiyun &submit);
1747*4882a593Smuzhiyun
1748*4882a593Smuzhiyun count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_ALL);
1749*4882a593Smuzhiyun init_async_submit(&submit, ASYNC_TX_FENCE, tx,
1750*4882a593Smuzhiyun ops_complete_compute, sh,
1751*4882a593Smuzhiyun to_addr_conv(sh, percpu, 0));
1752*4882a593Smuzhiyun return async_gen_syndrome(blocks, offs, count+2,
1753*4882a593Smuzhiyun RAID5_STRIPE_SIZE(sh->raid_conf),
1754*4882a593Smuzhiyun &submit);
1755*4882a593Smuzhiyun }
1756*4882a593Smuzhiyun } else {
1757*4882a593Smuzhiyun init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
1758*4882a593Smuzhiyun ops_complete_compute, sh,
1759*4882a593Smuzhiyun to_addr_conv(sh, percpu, 0));
1760*4882a593Smuzhiyun if (failb == syndrome_disks) {
1761*4882a593Smuzhiyun /* We're missing D+P. */
1762*4882a593Smuzhiyun return async_raid6_datap_recov(syndrome_disks+2,
1763*4882a593Smuzhiyun RAID5_STRIPE_SIZE(sh->raid_conf),
1764*4882a593Smuzhiyun faila,
1765*4882a593Smuzhiyun blocks, offs, &submit);
1766*4882a593Smuzhiyun } else {
1767*4882a593Smuzhiyun /* We're missing D+D. */
1768*4882a593Smuzhiyun return async_raid6_2data_recov(syndrome_disks+2,
1769*4882a593Smuzhiyun RAID5_STRIPE_SIZE(sh->raid_conf),
1770*4882a593Smuzhiyun faila, failb,
1771*4882a593Smuzhiyun blocks, offs, &submit);
1772*4882a593Smuzhiyun }
1773*4882a593Smuzhiyun }
1774*4882a593Smuzhiyun }
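
/*
 * Summary of the recovery cases handled above (slot numbering as produced
 * by raid6_idx_to_slot): failb == syndrome_disks+1 means Q is missing, so
 * the stripe is either missing P+Q (plain regenerate) or D+Q (rebuild D by
 * XOR, then regenerate Q); otherwise the generic RAID-6 recovery routines
 * handle the missing D+P and D+D combinations.
 */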
1775*4882a593Smuzhiyun
1776*4882a593Smuzhiyun static void ops_complete_prexor(void *stripe_head_ref)
1777*4882a593Smuzhiyun {
1778*4882a593Smuzhiyun struct stripe_head *sh = stripe_head_ref;
1779*4882a593Smuzhiyun
1780*4882a593Smuzhiyun pr_debug("%s: stripe %llu\n", __func__,
1781*4882a593Smuzhiyun (unsigned long long)sh->sector);
1782*4882a593Smuzhiyun
1783*4882a593Smuzhiyun if (r5c_is_writeback(sh->raid_conf->log))
1784*4882a593Smuzhiyun /*
1785*4882a593Smuzhiyun * raid5-cache write back uses orig_page during prexor.
1786*4882a593Smuzhiyun * After prexor, it is time to free orig_page
1787*4882a593Smuzhiyun */
1788*4882a593Smuzhiyun r5c_release_extra_page(sh);
1789*4882a593Smuzhiyun }
1790*4882a593Smuzhiyun
1791*4882a593Smuzhiyun static struct dma_async_tx_descriptor *
1792*4882a593Smuzhiyun ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu,
1793*4882a593Smuzhiyun struct dma_async_tx_descriptor *tx)
1794*4882a593Smuzhiyun {
1795*4882a593Smuzhiyun int disks = sh->disks;
1796*4882a593Smuzhiyun struct page **xor_srcs = to_addr_page(percpu, 0);
1797*4882a593Smuzhiyun unsigned int *off_srcs = to_addr_offs(sh, percpu);
1798*4882a593Smuzhiyun int count = 0, pd_idx = sh->pd_idx, i;
1799*4882a593Smuzhiyun struct async_submit_ctl submit;
1800*4882a593Smuzhiyun
1801*4882a593Smuzhiyun /* existing parity data subtracted */
1802*4882a593Smuzhiyun unsigned int off_dest = off_srcs[count] = sh->dev[pd_idx].offset;
1803*4882a593Smuzhiyun struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1804*4882a593Smuzhiyun
1805*4882a593Smuzhiyun BUG_ON(sh->batch_head);
1806*4882a593Smuzhiyun pr_debug("%s: stripe %llu\n", __func__,
1807*4882a593Smuzhiyun (unsigned long long)sh->sector);
1808*4882a593Smuzhiyun
1809*4882a593Smuzhiyun for (i = disks; i--; ) {
1810*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[i];
1811*4882a593Smuzhiyun /* Only process blocks that are known to be uptodate */
1812*4882a593Smuzhiyun if (test_bit(R5_InJournal, &dev->flags)) {
1813*4882a593Smuzhiyun /*
1814*4882a593Smuzhiyun * For this case, PAGE_SIZE must be equal to 4KB and
1815*4882a593Smuzhiyun * the page offset is zero.
1816*4882a593Smuzhiyun */
1817*4882a593Smuzhiyun off_srcs[count] = dev->offset;
1818*4882a593Smuzhiyun xor_srcs[count++] = dev->orig_page;
1819*4882a593Smuzhiyun } else if (test_bit(R5_Wantdrain, &dev->flags)) {
1820*4882a593Smuzhiyun off_srcs[count] = dev->offset;
1821*4882a593Smuzhiyun xor_srcs[count++] = dev->page;
1822*4882a593Smuzhiyun }
1823*4882a593Smuzhiyun }
1824*4882a593Smuzhiyun
1825*4882a593Smuzhiyun init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
1826*4882a593Smuzhiyun ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
1827*4882a593Smuzhiyun tx = async_xor_offs(xor_dest, off_dest, xor_srcs, off_srcs, count,
1828*4882a593Smuzhiyun RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
1829*4882a593Smuzhiyun
1830*4882a593Smuzhiyun return tx;
1831*4882a593Smuzhiyun }
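
/*
 * Illustrative sketch only: a RAID5 read-modify-write updates parity as
 * P_new = P_old ^ D_old ^ D_new.  ops_run_prexor5() performs the first XOR
 * (removing the old contents of the blocks about to be overwritten, or
 * their journalled orig_page copies, from the existing parity); the new
 * data is XOR-ed back in later by the reconstruct step.  Hypothetical
 * names, one data block shown:
 */
#if 0
static void rmw_parity_sketch(u8 *parity, const u8 *old_data,
			      const u8 *new_data, size_t len)
{
	size_t off;

	for (off = 0; off < len; off++)
		parity[off] ^= old_data[off] ^ new_data[off];
}
#endif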
1832*4882a593Smuzhiyun
1833*4882a593Smuzhiyun static struct dma_async_tx_descriptor *
1834*4882a593Smuzhiyun ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu,
1835*4882a593Smuzhiyun struct dma_async_tx_descriptor *tx)
1836*4882a593Smuzhiyun {
1837*4882a593Smuzhiyun struct page **blocks = to_addr_page(percpu, 0);
1838*4882a593Smuzhiyun unsigned int *offs = to_addr_offs(sh, percpu);
1839*4882a593Smuzhiyun int count;
1840*4882a593Smuzhiyun struct async_submit_ctl submit;
1841*4882a593Smuzhiyun
1842*4882a593Smuzhiyun pr_debug("%s: stripe %llu\n", __func__,
1843*4882a593Smuzhiyun (unsigned long long)sh->sector);
1844*4882a593Smuzhiyun
1845*4882a593Smuzhiyun count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_WANT_DRAIN);
1846*4882a593Smuzhiyun
1847*4882a593Smuzhiyun init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx,
1848*4882a593Smuzhiyun ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
1849*4882a593Smuzhiyun tx = async_gen_syndrome(blocks, offs, count+2,
1850*4882a593Smuzhiyun RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
1851*4882a593Smuzhiyun
1852*4882a593Smuzhiyun return tx;
1853*4882a593Smuzhiyun }
1854*4882a593Smuzhiyun
1855*4882a593Smuzhiyun static struct dma_async_tx_descriptor *
1856*4882a593Smuzhiyun ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
1857*4882a593Smuzhiyun {
1858*4882a593Smuzhiyun struct r5conf *conf = sh->raid_conf;
1859*4882a593Smuzhiyun int disks = sh->disks;
1860*4882a593Smuzhiyun int i;
1861*4882a593Smuzhiyun struct stripe_head *head_sh = sh;
1862*4882a593Smuzhiyun
1863*4882a593Smuzhiyun pr_debug("%s: stripe %llu\n", __func__,
1864*4882a593Smuzhiyun (unsigned long long)sh->sector);
1865*4882a593Smuzhiyun
1866*4882a593Smuzhiyun for (i = disks; i--; ) {
1867*4882a593Smuzhiyun struct r5dev *dev;
1868*4882a593Smuzhiyun struct bio *chosen;
1869*4882a593Smuzhiyun
1870*4882a593Smuzhiyun sh = head_sh;
1871*4882a593Smuzhiyun if (test_and_clear_bit(R5_Wantdrain, &head_sh->dev[i].flags)) {
1872*4882a593Smuzhiyun struct bio *wbi;
1873*4882a593Smuzhiyun
1874*4882a593Smuzhiyun again:
1875*4882a593Smuzhiyun dev = &sh->dev[i];
1876*4882a593Smuzhiyun /*
1877*4882a593Smuzhiyun * clear R5_InJournal, so that when rewriting a page in
1878*4882a593Smuzhiyun * the journal, it is not skipped by r5l_log_stripe()
1879*4882a593Smuzhiyun */
1880*4882a593Smuzhiyun clear_bit(R5_InJournal, &dev->flags);
1881*4882a593Smuzhiyun spin_lock_irq(&sh->stripe_lock);
1882*4882a593Smuzhiyun chosen = dev->towrite;
1883*4882a593Smuzhiyun dev->towrite = NULL;
1884*4882a593Smuzhiyun sh->overwrite_disks = 0;
1885*4882a593Smuzhiyun BUG_ON(dev->written);
1886*4882a593Smuzhiyun wbi = dev->written = chosen;
1887*4882a593Smuzhiyun spin_unlock_irq(&sh->stripe_lock);
1888*4882a593Smuzhiyun WARN_ON(dev->page != dev->orig_page);
1889*4882a593Smuzhiyun
1890*4882a593Smuzhiyun while (wbi && wbi->bi_iter.bi_sector <
1891*4882a593Smuzhiyun dev->sector + RAID5_STRIPE_SECTORS(conf)) {
1892*4882a593Smuzhiyun if (wbi->bi_opf & REQ_FUA)
1893*4882a593Smuzhiyun set_bit(R5_WantFUA, &dev->flags);
1894*4882a593Smuzhiyun if (wbi->bi_opf & REQ_SYNC)
1895*4882a593Smuzhiyun set_bit(R5_SyncIO, &dev->flags);
1896*4882a593Smuzhiyun if (bio_op(wbi) == REQ_OP_DISCARD)
1897*4882a593Smuzhiyun set_bit(R5_Discard, &dev->flags);
1898*4882a593Smuzhiyun else {
1899*4882a593Smuzhiyun tx = async_copy_data(1, wbi, &dev->page,
1900*4882a593Smuzhiyun dev->offset,
1901*4882a593Smuzhiyun dev->sector, tx, sh,
1902*4882a593Smuzhiyun r5c_is_writeback(conf->log));
1903*4882a593Smuzhiyun if (dev->page != dev->orig_page &&
1904*4882a593Smuzhiyun !r5c_is_writeback(conf->log)) {
1905*4882a593Smuzhiyun set_bit(R5_SkipCopy, &dev->flags);
1906*4882a593Smuzhiyun clear_bit(R5_UPTODATE, &dev->flags);
1907*4882a593Smuzhiyun clear_bit(R5_OVERWRITE, &dev->flags);
1908*4882a593Smuzhiyun }
1909*4882a593Smuzhiyun }
1910*4882a593Smuzhiyun wbi = r5_next_bio(conf, wbi, dev->sector);
1911*4882a593Smuzhiyun }
1912*4882a593Smuzhiyun
1913*4882a593Smuzhiyun if (head_sh->batch_head) {
1914*4882a593Smuzhiyun sh = list_first_entry(&sh->batch_list,
1915*4882a593Smuzhiyun struct stripe_head,
1916*4882a593Smuzhiyun batch_list);
1917*4882a593Smuzhiyun if (sh == head_sh)
1918*4882a593Smuzhiyun continue;
1919*4882a593Smuzhiyun goto again;
1920*4882a593Smuzhiyun }
1921*4882a593Smuzhiyun }
1922*4882a593Smuzhiyun }
1923*4882a593Smuzhiyun
1924*4882a593Smuzhiyun return tx;
1925*4882a593Smuzhiyun }
1926*4882a593Smuzhiyun
1927*4882a593Smuzhiyun static void ops_complete_reconstruct(void *stripe_head_ref)
1928*4882a593Smuzhiyun {
1929*4882a593Smuzhiyun struct stripe_head *sh = stripe_head_ref;
1930*4882a593Smuzhiyun int disks = sh->disks;
1931*4882a593Smuzhiyun int pd_idx = sh->pd_idx;
1932*4882a593Smuzhiyun int qd_idx = sh->qd_idx;
1933*4882a593Smuzhiyun int i;
1934*4882a593Smuzhiyun bool fua = false, sync = false, discard = false;
1935*4882a593Smuzhiyun
1936*4882a593Smuzhiyun pr_debug("%s: stripe %llu\n", __func__,
1937*4882a593Smuzhiyun (unsigned long long)sh->sector);
1938*4882a593Smuzhiyun
1939*4882a593Smuzhiyun for (i = disks; i--; ) {
1940*4882a593Smuzhiyun fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
1941*4882a593Smuzhiyun sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
1942*4882a593Smuzhiyun discard |= test_bit(R5_Discard, &sh->dev[i].flags);
1943*4882a593Smuzhiyun }
1944*4882a593Smuzhiyun
1945*4882a593Smuzhiyun for (i = disks; i--; ) {
1946*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[i];
1947*4882a593Smuzhiyun
1948*4882a593Smuzhiyun if (dev->written || i == pd_idx || i == qd_idx) {
1949*4882a593Smuzhiyun if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) {
1950*4882a593Smuzhiyun set_bit(R5_UPTODATE, &dev->flags);
1951*4882a593Smuzhiyun if (test_bit(STRIPE_EXPAND_READY, &sh->state))
1952*4882a593Smuzhiyun set_bit(R5_Expanded, &dev->flags);
1953*4882a593Smuzhiyun }
1954*4882a593Smuzhiyun if (fua)
1955*4882a593Smuzhiyun set_bit(R5_WantFUA, &dev->flags);
1956*4882a593Smuzhiyun if (sync)
1957*4882a593Smuzhiyun set_bit(R5_SyncIO, &dev->flags);
1958*4882a593Smuzhiyun }
1959*4882a593Smuzhiyun }
1960*4882a593Smuzhiyun
1961*4882a593Smuzhiyun if (sh->reconstruct_state == reconstruct_state_drain_run)
1962*4882a593Smuzhiyun sh->reconstruct_state = reconstruct_state_drain_result;
1963*4882a593Smuzhiyun else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
1964*4882a593Smuzhiyun sh->reconstruct_state = reconstruct_state_prexor_drain_result;
1965*4882a593Smuzhiyun else {
1966*4882a593Smuzhiyun BUG_ON(sh->reconstruct_state != reconstruct_state_run);
1967*4882a593Smuzhiyun sh->reconstruct_state = reconstruct_state_result;
1968*4882a593Smuzhiyun }
1969*4882a593Smuzhiyun
1970*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
1971*4882a593Smuzhiyun raid5_release_stripe(sh);
1972*4882a593Smuzhiyun }
1973*4882a593Smuzhiyun
1974*4882a593Smuzhiyun static void
1975*4882a593Smuzhiyun ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
1976*4882a593Smuzhiyun struct dma_async_tx_descriptor *tx)
1977*4882a593Smuzhiyun {
1978*4882a593Smuzhiyun int disks = sh->disks;
1979*4882a593Smuzhiyun struct page **xor_srcs;
1980*4882a593Smuzhiyun unsigned int *off_srcs;
1981*4882a593Smuzhiyun struct async_submit_ctl submit;
1982*4882a593Smuzhiyun int count, pd_idx = sh->pd_idx, i;
1983*4882a593Smuzhiyun struct page *xor_dest;
1984*4882a593Smuzhiyun unsigned int off_dest;
1985*4882a593Smuzhiyun int prexor = 0;
1986*4882a593Smuzhiyun unsigned long flags;
1987*4882a593Smuzhiyun int j = 0;
1988*4882a593Smuzhiyun struct stripe_head *head_sh = sh;
1989*4882a593Smuzhiyun int last_stripe;
1990*4882a593Smuzhiyun
1991*4882a593Smuzhiyun pr_debug("%s: stripe %llu\n", __func__,
1992*4882a593Smuzhiyun (unsigned long long)sh->sector);
1993*4882a593Smuzhiyun
1994*4882a593Smuzhiyun for (i = 0; i < sh->disks; i++) {
1995*4882a593Smuzhiyun if (pd_idx == i)
1996*4882a593Smuzhiyun continue;
1997*4882a593Smuzhiyun if (!test_bit(R5_Discard, &sh->dev[i].flags))
1998*4882a593Smuzhiyun break;
1999*4882a593Smuzhiyun }
2000*4882a593Smuzhiyun if (i >= sh->disks) {
2001*4882a593Smuzhiyun atomic_inc(&sh->count);
2002*4882a593Smuzhiyun set_bit(R5_Discard, &sh->dev[pd_idx].flags);
2003*4882a593Smuzhiyun ops_complete_reconstruct(sh);
2004*4882a593Smuzhiyun return;
2005*4882a593Smuzhiyun }
2006*4882a593Smuzhiyun again:
2007*4882a593Smuzhiyun count = 0;
2008*4882a593Smuzhiyun xor_srcs = to_addr_page(percpu, j);
2009*4882a593Smuzhiyun off_srcs = to_addr_offs(sh, percpu);
2010*4882a593Smuzhiyun /* check if prexor is active, which means we only process blocks
2011*4882a593Smuzhiyun * that are part of a read-modify-write (written)
2012*4882a593Smuzhiyun */
2013*4882a593Smuzhiyun if (head_sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
2014*4882a593Smuzhiyun prexor = 1;
2015*4882a593Smuzhiyun off_dest = off_srcs[count] = sh->dev[pd_idx].offset;
2016*4882a593Smuzhiyun xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
2017*4882a593Smuzhiyun for (i = disks; i--; ) {
2018*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[i];
2019*4882a593Smuzhiyun if (head_sh->dev[i].written ||
2020*4882a593Smuzhiyun test_bit(R5_InJournal, &head_sh->dev[i].flags)) {
2021*4882a593Smuzhiyun off_srcs[count] = dev->offset;
2022*4882a593Smuzhiyun xor_srcs[count++] = dev->page;
2023*4882a593Smuzhiyun }
2024*4882a593Smuzhiyun }
2025*4882a593Smuzhiyun } else {
2026*4882a593Smuzhiyun xor_dest = sh->dev[pd_idx].page;
2027*4882a593Smuzhiyun off_dest = sh->dev[pd_idx].offset;
2028*4882a593Smuzhiyun for (i = disks; i--; ) {
2029*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[i];
2030*4882a593Smuzhiyun if (i != pd_idx) {
2031*4882a593Smuzhiyun off_srcs[count] = dev->offset;
2032*4882a593Smuzhiyun xor_srcs[count++] = dev->page;
2033*4882a593Smuzhiyun }
2034*4882a593Smuzhiyun }
2035*4882a593Smuzhiyun }
2036*4882a593Smuzhiyun
2037*4882a593Smuzhiyun /* 1/ if we prexor'd then the dest is reused as a source
2038*4882a593Smuzhiyun * 2/ if we did not prexor then we are redoing the parity
2039*4882a593Smuzhiyun * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
2040*4882a593Smuzhiyun * for the synchronous xor case
2041*4882a593Smuzhiyun */
2042*4882a593Smuzhiyun last_stripe = !head_sh->batch_head ||
2043*4882a593Smuzhiyun list_first_entry(&sh->batch_list,
2044*4882a593Smuzhiyun struct stripe_head, batch_list) == head_sh;
2045*4882a593Smuzhiyun if (last_stripe) {
2046*4882a593Smuzhiyun flags = ASYNC_TX_ACK |
2047*4882a593Smuzhiyun (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
2048*4882a593Smuzhiyun
2049*4882a593Smuzhiyun atomic_inc(&head_sh->count);
2050*4882a593Smuzhiyun init_async_submit(&submit, flags, tx, ops_complete_reconstruct, head_sh,
2051*4882a593Smuzhiyun to_addr_conv(sh, percpu, j));
2052*4882a593Smuzhiyun } else {
2053*4882a593Smuzhiyun flags = prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST;
2054*4882a593Smuzhiyun init_async_submit(&submit, flags, tx, NULL, NULL,
2055*4882a593Smuzhiyun to_addr_conv(sh, percpu, j));
2056*4882a593Smuzhiyun }
2057*4882a593Smuzhiyun
2058*4882a593Smuzhiyun if (unlikely(count == 1))
2059*4882a593Smuzhiyun tx = async_memcpy(xor_dest, xor_srcs[0], off_dest, off_srcs[0],
2060*4882a593Smuzhiyun RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
2061*4882a593Smuzhiyun else
2062*4882a593Smuzhiyun tx = async_xor_offs(xor_dest, off_dest, xor_srcs, off_srcs, count,
2063*4882a593Smuzhiyun RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
2064*4882a593Smuzhiyun if (!last_stripe) {
2065*4882a593Smuzhiyun j++;
2066*4882a593Smuzhiyun sh = list_first_entry(&sh->batch_list, struct stripe_head,
2067*4882a593Smuzhiyun batch_list);
2068*4882a593Smuzhiyun goto again;
2069*4882a593Smuzhiyun }
2070*4882a593Smuzhiyun }
2071*4882a593Smuzhiyun
2072*4882a593Smuzhiyun static void
2073*4882a593Smuzhiyun ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
2074*4882a593Smuzhiyun struct dma_async_tx_descriptor *tx)
2075*4882a593Smuzhiyun {
2076*4882a593Smuzhiyun struct async_submit_ctl submit;
2077*4882a593Smuzhiyun struct page **blocks;
2078*4882a593Smuzhiyun unsigned int *offs;
2079*4882a593Smuzhiyun int count, i, j = 0;
2080*4882a593Smuzhiyun struct stripe_head *head_sh = sh;
2081*4882a593Smuzhiyun int last_stripe;
2082*4882a593Smuzhiyun int synflags;
2083*4882a593Smuzhiyun unsigned long txflags;
2084*4882a593Smuzhiyun
2085*4882a593Smuzhiyun pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
2086*4882a593Smuzhiyun
2087*4882a593Smuzhiyun for (i = 0; i < sh->disks; i++) {
2088*4882a593Smuzhiyun if (sh->pd_idx == i || sh->qd_idx == i)
2089*4882a593Smuzhiyun continue;
2090*4882a593Smuzhiyun if (!test_bit(R5_Discard, &sh->dev[i].flags))
2091*4882a593Smuzhiyun break;
2092*4882a593Smuzhiyun }
2093*4882a593Smuzhiyun if (i >= sh->disks) {
2094*4882a593Smuzhiyun atomic_inc(&sh->count);
2095*4882a593Smuzhiyun set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
2096*4882a593Smuzhiyun set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
2097*4882a593Smuzhiyun ops_complete_reconstruct(sh);
2098*4882a593Smuzhiyun return;
2099*4882a593Smuzhiyun }
2100*4882a593Smuzhiyun
2101*4882a593Smuzhiyun again:
2102*4882a593Smuzhiyun blocks = to_addr_page(percpu, j);
2103*4882a593Smuzhiyun offs = to_addr_offs(sh, percpu);
2104*4882a593Smuzhiyun
2105*4882a593Smuzhiyun if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
2106*4882a593Smuzhiyun synflags = SYNDROME_SRC_WRITTEN;
2107*4882a593Smuzhiyun txflags = ASYNC_TX_ACK | ASYNC_TX_PQ_XOR_DST;
2108*4882a593Smuzhiyun } else {
2109*4882a593Smuzhiyun synflags = SYNDROME_SRC_ALL;
2110*4882a593Smuzhiyun txflags = ASYNC_TX_ACK;
2111*4882a593Smuzhiyun }
2112*4882a593Smuzhiyun
2113*4882a593Smuzhiyun count = set_syndrome_sources(blocks, offs, sh, synflags);
2114*4882a593Smuzhiyun last_stripe = !head_sh->batch_head ||
2115*4882a593Smuzhiyun list_first_entry(&sh->batch_list,
2116*4882a593Smuzhiyun struct stripe_head, batch_list) == head_sh;
2117*4882a593Smuzhiyun
2118*4882a593Smuzhiyun if (last_stripe) {
2119*4882a593Smuzhiyun atomic_inc(&head_sh->count);
2120*4882a593Smuzhiyun init_async_submit(&submit, txflags, tx, ops_complete_reconstruct,
2121*4882a593Smuzhiyun head_sh, to_addr_conv(sh, percpu, j));
2122*4882a593Smuzhiyun } else
2123*4882a593Smuzhiyun init_async_submit(&submit, 0, tx, NULL, NULL,
2124*4882a593Smuzhiyun to_addr_conv(sh, percpu, j));
2125*4882a593Smuzhiyun tx = async_gen_syndrome(blocks, offs, count+2,
2126*4882a593Smuzhiyun RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
2127*4882a593Smuzhiyun if (!last_stripe) {
2128*4882a593Smuzhiyun j++;
2129*4882a593Smuzhiyun sh = list_first_entry(&sh->batch_list, struct stripe_head,
2130*4882a593Smuzhiyun batch_list);
2131*4882a593Smuzhiyun goto again;
2132*4882a593Smuzhiyun }
2133*4882a593Smuzhiyun }
2134*4882a593Smuzhiyun
2135*4882a593Smuzhiyun static void ops_complete_check(void *stripe_head_ref)
2136*4882a593Smuzhiyun {
2137*4882a593Smuzhiyun struct stripe_head *sh = stripe_head_ref;
2138*4882a593Smuzhiyun
2139*4882a593Smuzhiyun pr_debug("%s: stripe %llu\n", __func__,
2140*4882a593Smuzhiyun (unsigned long long)sh->sector);
2141*4882a593Smuzhiyun
2142*4882a593Smuzhiyun sh->check_state = check_state_check_result;
2143*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
2144*4882a593Smuzhiyun raid5_release_stripe(sh);
2145*4882a593Smuzhiyun }
2146*4882a593Smuzhiyun
2147*4882a593Smuzhiyun static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
2148*4882a593Smuzhiyun {
2149*4882a593Smuzhiyun int disks = sh->disks;
2150*4882a593Smuzhiyun int pd_idx = sh->pd_idx;
2151*4882a593Smuzhiyun int qd_idx = sh->qd_idx;
2152*4882a593Smuzhiyun struct page *xor_dest;
2153*4882a593Smuzhiyun unsigned int off_dest;
2154*4882a593Smuzhiyun struct page **xor_srcs = to_addr_page(percpu, 0);
2155*4882a593Smuzhiyun unsigned int *off_srcs = to_addr_offs(sh, percpu);
2156*4882a593Smuzhiyun struct dma_async_tx_descriptor *tx;
2157*4882a593Smuzhiyun struct async_submit_ctl submit;
2158*4882a593Smuzhiyun int count;
2159*4882a593Smuzhiyun int i;
2160*4882a593Smuzhiyun
2161*4882a593Smuzhiyun pr_debug("%s: stripe %llu\n", __func__,
2162*4882a593Smuzhiyun (unsigned long long)sh->sector);
2163*4882a593Smuzhiyun
2164*4882a593Smuzhiyun BUG_ON(sh->batch_head);
2165*4882a593Smuzhiyun count = 0;
2166*4882a593Smuzhiyun xor_dest = sh->dev[pd_idx].page;
2167*4882a593Smuzhiyun off_dest = sh->dev[pd_idx].offset;
2168*4882a593Smuzhiyun off_srcs[count] = off_dest;
2169*4882a593Smuzhiyun xor_srcs[count++] = xor_dest;
2170*4882a593Smuzhiyun for (i = disks; i--; ) {
2171*4882a593Smuzhiyun if (i == pd_idx || i == qd_idx)
2172*4882a593Smuzhiyun continue;
2173*4882a593Smuzhiyun off_srcs[count] = sh->dev[i].offset;
2174*4882a593Smuzhiyun xor_srcs[count++] = sh->dev[i].page;
2175*4882a593Smuzhiyun }
2176*4882a593Smuzhiyun
2177*4882a593Smuzhiyun init_async_submit(&submit, 0, NULL, NULL, NULL,
2178*4882a593Smuzhiyun to_addr_conv(sh, percpu, 0));
2179*4882a593Smuzhiyun tx = async_xor_val_offs(xor_dest, off_dest, xor_srcs, off_srcs, count,
2180*4882a593Smuzhiyun RAID5_STRIPE_SIZE(sh->raid_conf),
2181*4882a593Smuzhiyun &sh->ops.zero_sum_result, &submit);
2182*4882a593Smuzhiyun
2183*4882a593Smuzhiyun atomic_inc(&sh->count);
2184*4882a593Smuzhiyun init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
2185*4882a593Smuzhiyun tx = async_trigger_callback(&submit);
2186*4882a593Smuzhiyun }
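
/*
 * Illustrative sketch only: the check above XORs P together with all data
 * blocks and reports whether the result is all zero through
 * sh->ops.zero_sum_result.  A synchronous model with hypothetical names:
 */
#if 0
static bool parity_ok_sketch(const u8 *parity, u8 * const *data,
			     int ndata, size_t len)
{
	size_t off;
	int i;

	for (off = 0; off < len; off++) {
		u8 acc = parity[off];

		for (i = 0; i < ndata; i++)
			acc ^= data[i][off];
		if (acc)
			return false;
	}
	return true;
}
#endif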
2187*4882a593Smuzhiyun
2188*4882a593Smuzhiyun static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
2189*4882a593Smuzhiyun {
2190*4882a593Smuzhiyun struct page **srcs = to_addr_page(percpu, 0);
2191*4882a593Smuzhiyun unsigned int *offs = to_addr_offs(sh, percpu);
2192*4882a593Smuzhiyun struct async_submit_ctl submit;
2193*4882a593Smuzhiyun int count;
2194*4882a593Smuzhiyun
2195*4882a593Smuzhiyun pr_debug("%s: stripe %llu checkp: %d\n", __func__,
2196*4882a593Smuzhiyun (unsigned long long)sh->sector, checkp);
2197*4882a593Smuzhiyun
2198*4882a593Smuzhiyun BUG_ON(sh->batch_head);
2199*4882a593Smuzhiyun count = set_syndrome_sources(srcs, offs, sh, SYNDROME_SRC_ALL);
2200*4882a593Smuzhiyun if (!checkp)
2201*4882a593Smuzhiyun srcs[count] = NULL;
2202*4882a593Smuzhiyun
2203*4882a593Smuzhiyun atomic_inc(&sh->count);
2204*4882a593Smuzhiyun init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
2205*4882a593Smuzhiyun sh, to_addr_conv(sh, percpu, 0));
2206*4882a593Smuzhiyun async_syndrome_val(srcs, offs, count+2,
2207*4882a593Smuzhiyun RAID5_STRIPE_SIZE(sh->raid_conf),
2208*4882a593Smuzhiyun &sh->ops.zero_sum_result, percpu->spare_page, 0, &submit);
2209*4882a593Smuzhiyun }
2210*4882a593Smuzhiyun
2211*4882a593Smuzhiyun static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
2212*4882a593Smuzhiyun {
2213*4882a593Smuzhiyun int overlap_clear = 0, i, disks = sh->disks;
2214*4882a593Smuzhiyun struct dma_async_tx_descriptor *tx = NULL;
2215*4882a593Smuzhiyun struct r5conf *conf = sh->raid_conf;
2216*4882a593Smuzhiyun int level = conf->level;
2217*4882a593Smuzhiyun struct raid5_percpu *percpu;
2218*4882a593Smuzhiyun unsigned long cpu;
2219*4882a593Smuzhiyun
2220*4882a593Smuzhiyun cpu = get_cpu();
2221*4882a593Smuzhiyun percpu = per_cpu_ptr(conf->percpu, cpu);
2222*4882a593Smuzhiyun if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
2223*4882a593Smuzhiyun ops_run_biofill(sh);
2224*4882a593Smuzhiyun overlap_clear++;
2225*4882a593Smuzhiyun }
2226*4882a593Smuzhiyun
2227*4882a593Smuzhiyun if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
2228*4882a593Smuzhiyun if (level < 6)
2229*4882a593Smuzhiyun tx = ops_run_compute5(sh, percpu);
2230*4882a593Smuzhiyun else {
2231*4882a593Smuzhiyun if (sh->ops.target2 < 0 || sh->ops.target < 0)
2232*4882a593Smuzhiyun tx = ops_run_compute6_1(sh, percpu);
2233*4882a593Smuzhiyun else
2234*4882a593Smuzhiyun tx = ops_run_compute6_2(sh, percpu);
2235*4882a593Smuzhiyun }
2236*4882a593Smuzhiyun /* terminate the chain if reconstruct is not set to be run */
2237*4882a593Smuzhiyun if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
2238*4882a593Smuzhiyun async_tx_ack(tx);
2239*4882a593Smuzhiyun }
2240*4882a593Smuzhiyun
2241*4882a593Smuzhiyun if (test_bit(STRIPE_OP_PREXOR, &ops_request)) {
2242*4882a593Smuzhiyun if (level < 6)
2243*4882a593Smuzhiyun tx = ops_run_prexor5(sh, percpu, tx);
2244*4882a593Smuzhiyun else
2245*4882a593Smuzhiyun tx = ops_run_prexor6(sh, percpu, tx);
2246*4882a593Smuzhiyun }
2247*4882a593Smuzhiyun
2248*4882a593Smuzhiyun if (test_bit(STRIPE_OP_PARTIAL_PARITY, &ops_request))
2249*4882a593Smuzhiyun tx = ops_run_partial_parity(sh, percpu, tx);
2250*4882a593Smuzhiyun
2251*4882a593Smuzhiyun if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
2252*4882a593Smuzhiyun tx = ops_run_biodrain(sh, tx);
2253*4882a593Smuzhiyun overlap_clear++;
2254*4882a593Smuzhiyun }
2255*4882a593Smuzhiyun
2256*4882a593Smuzhiyun if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
2257*4882a593Smuzhiyun if (level < 6)
2258*4882a593Smuzhiyun ops_run_reconstruct5(sh, percpu, tx);
2259*4882a593Smuzhiyun else
2260*4882a593Smuzhiyun ops_run_reconstruct6(sh, percpu, tx);
2261*4882a593Smuzhiyun }
2262*4882a593Smuzhiyun
2263*4882a593Smuzhiyun if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
2264*4882a593Smuzhiyun if (sh->check_state == check_state_run)
2265*4882a593Smuzhiyun ops_run_check_p(sh, percpu);
2266*4882a593Smuzhiyun else if (sh->check_state == check_state_run_q)
2267*4882a593Smuzhiyun ops_run_check_pq(sh, percpu, 0);
2268*4882a593Smuzhiyun else if (sh->check_state == check_state_run_pq)
2269*4882a593Smuzhiyun ops_run_check_pq(sh, percpu, 1);
2270*4882a593Smuzhiyun else
2271*4882a593Smuzhiyun BUG();
2272*4882a593Smuzhiyun }
2273*4882a593Smuzhiyun
2274*4882a593Smuzhiyun if (overlap_clear && !sh->batch_head)
2275*4882a593Smuzhiyun for (i = disks; i--; ) {
2276*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[i];
2277*4882a593Smuzhiyun if (test_and_clear_bit(R5_Overlap, &dev->flags))
2278*4882a593Smuzhiyun wake_up(&sh->raid_conf->wait_for_overlap);
2279*4882a593Smuzhiyun }
2280*4882a593Smuzhiyun put_cpu();
2281*4882a593Smuzhiyun }
2282*4882a593Smuzhiyun
2283*4882a593Smuzhiyun static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)
2284*4882a593Smuzhiyun {
2285*4882a593Smuzhiyun #if PAGE_SIZE != DEFAULT_STRIPE_SIZE
2286*4882a593Smuzhiyun kfree(sh->pages);
2287*4882a593Smuzhiyun #endif
2288*4882a593Smuzhiyun if (sh->ppl_page)
2289*4882a593Smuzhiyun __free_page(sh->ppl_page);
2290*4882a593Smuzhiyun kmem_cache_free(sc, sh);
2291*4882a593Smuzhiyun }
2292*4882a593Smuzhiyun
2293*4882a593Smuzhiyun static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
2294*4882a593Smuzhiyun int disks, struct r5conf *conf)
2295*4882a593Smuzhiyun {
2296*4882a593Smuzhiyun struct stripe_head *sh;
2297*4882a593Smuzhiyun int i;
2298*4882a593Smuzhiyun
2299*4882a593Smuzhiyun sh = kmem_cache_zalloc(sc, gfp);
2300*4882a593Smuzhiyun if (sh) {
2301*4882a593Smuzhiyun spin_lock_init(&sh->stripe_lock);
2302*4882a593Smuzhiyun spin_lock_init(&sh->batch_lock);
2303*4882a593Smuzhiyun INIT_LIST_HEAD(&sh->batch_list);
2304*4882a593Smuzhiyun INIT_LIST_HEAD(&sh->lru);
2305*4882a593Smuzhiyun INIT_LIST_HEAD(&sh->r5c);
2306*4882a593Smuzhiyun INIT_LIST_HEAD(&sh->log_list);
2307*4882a593Smuzhiyun atomic_set(&sh->count, 1);
2308*4882a593Smuzhiyun sh->raid_conf = conf;
2309*4882a593Smuzhiyun sh->log_start = MaxSector;
2310*4882a593Smuzhiyun for (i = 0; i < disks; i++) {
2311*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[i];
2312*4882a593Smuzhiyun
2313*4882a593Smuzhiyun bio_init(&dev->req, &dev->vec, 1);
2314*4882a593Smuzhiyun bio_init(&dev->rreq, &dev->rvec, 1);
2315*4882a593Smuzhiyun }
2316*4882a593Smuzhiyun
2317*4882a593Smuzhiyun if (raid5_has_ppl(conf)) {
2318*4882a593Smuzhiyun sh->ppl_page = alloc_page(gfp);
2319*4882a593Smuzhiyun if (!sh->ppl_page) {
2320*4882a593Smuzhiyun free_stripe(sc, sh);
2321*4882a593Smuzhiyun return NULL;
2322*4882a593Smuzhiyun }
2323*4882a593Smuzhiyun }
2324*4882a593Smuzhiyun #if PAGE_SIZE != DEFAULT_STRIPE_SIZE
2325*4882a593Smuzhiyun if (init_stripe_shared_pages(sh, conf, disks)) {
2326*4882a593Smuzhiyun free_stripe(sc, sh);
2327*4882a593Smuzhiyun return NULL;
2328*4882a593Smuzhiyun }
2329*4882a593Smuzhiyun #endif
2330*4882a593Smuzhiyun }
2331*4882a593Smuzhiyun return sh;
2332*4882a593Smuzhiyun }
2333*4882a593Smuzhiyun static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
2334*4882a593Smuzhiyun {
2335*4882a593Smuzhiyun struct stripe_head *sh;
2336*4882a593Smuzhiyun
2337*4882a593Smuzhiyun sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf);
2338*4882a593Smuzhiyun if (!sh)
2339*4882a593Smuzhiyun return 0;
2340*4882a593Smuzhiyun
2341*4882a593Smuzhiyun if (grow_buffers(sh, gfp)) {
2342*4882a593Smuzhiyun shrink_buffers(sh);
2343*4882a593Smuzhiyun free_stripe(conf->slab_cache, sh);
2344*4882a593Smuzhiyun return 0;
2345*4882a593Smuzhiyun }
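/*
 * Note added for clarity: max_nr_stripes has not been incremented yet,
 * so the modulo below hands newly grown stripes to the hash locks in
 * round-robin order.
 */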
2346*4882a593Smuzhiyun sh->hash_lock_index =
2347*4882a593Smuzhiyun conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
2348*4882a593Smuzhiyun /* we just created an active stripe so... */
2349*4882a593Smuzhiyun atomic_inc(&conf->active_stripes);
2350*4882a593Smuzhiyun
2351*4882a593Smuzhiyun raid5_release_stripe(sh);
2352*4882a593Smuzhiyun conf->max_nr_stripes++;
2353*4882a593Smuzhiyun return 1;
2354*4882a593Smuzhiyun }
2355*4882a593Smuzhiyun
2356*4882a593Smuzhiyun static int grow_stripes(struct r5conf *conf, int num)
2357*4882a593Smuzhiyun {
2358*4882a593Smuzhiyun struct kmem_cache *sc;
2359*4882a593Smuzhiyun size_t namelen = sizeof(conf->cache_name[0]);
2360*4882a593Smuzhiyun int devs = max(conf->raid_disks, conf->previous_raid_disks);
2361*4882a593Smuzhiyun
2362*4882a593Smuzhiyun if (conf->mddev->gendisk)
2363*4882a593Smuzhiyun snprintf(conf->cache_name[0], namelen,
2364*4882a593Smuzhiyun "raid%d-%s", conf->level, mdname(conf->mddev));
2365*4882a593Smuzhiyun else
2366*4882a593Smuzhiyun snprintf(conf->cache_name[0], namelen,
2367*4882a593Smuzhiyun "raid%d-%p", conf->level, conf->mddev);
2368*4882a593Smuzhiyun snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);
2369*4882a593Smuzhiyun
2370*4882a593Smuzhiyun conf->active_name = 0;
2371*4882a593Smuzhiyun sc = kmem_cache_create(conf->cache_name[conf->active_name],
2372*4882a593Smuzhiyun sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
2373*4882a593Smuzhiyun 0, 0, NULL);
2374*4882a593Smuzhiyun if (!sc)
2375*4882a593Smuzhiyun return 1;
2376*4882a593Smuzhiyun conf->slab_cache = sc;
2377*4882a593Smuzhiyun conf->pool_size = devs;
2378*4882a593Smuzhiyun while (num--)
2379*4882a593Smuzhiyun if (!grow_one_stripe(conf, GFP_KERNEL))
2380*4882a593Smuzhiyun return 1;
2381*4882a593Smuzhiyun
2382*4882a593Smuzhiyun return 0;
2383*4882a593Smuzhiyun }
2384*4882a593Smuzhiyun
2385*4882a593Smuzhiyun /**
2386*4882a593Smuzhiyun * scribble_alloc - allocate percpu scribble buffer for required size
2387*4882a593Smuzhiyun * of the scribble region
2388*4882a593Smuzhiyun * @percpu: from for_each_present_cpu() of the caller
2389*4882a593Smuzhiyun * @num: total number of disks in the array
2390*4882a593Smuzhiyun * @cnt: scribble objs count for required size of the scribble region
2391*4882a593Smuzhiyun *
2392*4882a593Smuzhiyun * The scribble buffer size must be enough to contain:
2393*4882a593Smuzhiyun * 1/ a struct page pointer for each device in the array +2
2394*4882a593Smuzhiyun * 2/ room to convert each entry in (1) to its corresponding dma
2395*4882a593Smuzhiyun * (dma_map_page()) or page (page_address()) address.
2396*4882a593Smuzhiyun *
2397*4882a593Smuzhiyun * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
2398*4882a593Smuzhiyun * calculate over all devices (not just the data blocks), using zeros in place
2399*4882a593Smuzhiyun * of the P and Q blocks.
2400*4882a593Smuzhiyun */
2401*4882a593Smuzhiyun static int scribble_alloc(struct raid5_percpu *percpu,
2402*4882a593Smuzhiyun int num, int cnt)
2403*4882a593Smuzhiyun {
2404*4882a593Smuzhiyun size_t obj_size =
2405*4882a593Smuzhiyun sizeof(struct page *) * (num + 2) +
2406*4882a593Smuzhiyun sizeof(addr_conv_t) * (num + 2) +
2407*4882a593Smuzhiyun sizeof(unsigned int) * (num + 2);
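/*
 * Worked example (illustrative figures, not from the original source):
 * for an 8-device array, num + 2 == 10, so one scribble object holds
 * 10 struct page pointers, 10 addr_conv_t entries and 10 unsigned int
 * offsets back to back; kvmalloc_array() below then allocates 'cnt'
 * such objects in a single buffer.
 */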
2408*4882a593Smuzhiyun void *scribble;
2409*4882a593Smuzhiyun
2410*4882a593Smuzhiyun /*
2411*4882a593Smuzhiyun * If here is in raid array suspend context, it is in memalloc noio
2412*4882a593Smuzhiyun * If this is called from the raid array suspend context, we are already
2413*4882a593Smuzhiyun * in memalloc noio context as well, so there is no risk of recursive
2414*4882a593Smuzhiyun * memory-reclaim I/O even with the GFP_KERNEL flag.
2415*4882a593Smuzhiyun scribble = kvmalloc_array(cnt, obj_size, GFP_KERNEL);
2416*4882a593Smuzhiyun if (!scribble)
2417*4882a593Smuzhiyun return -ENOMEM;
2418*4882a593Smuzhiyun
2419*4882a593Smuzhiyun kvfree(percpu->scribble);
2420*4882a593Smuzhiyun
2421*4882a593Smuzhiyun percpu->scribble = scribble;
2422*4882a593Smuzhiyun percpu->scribble_obj_size = obj_size;
2423*4882a593Smuzhiyun return 0;
2424*4882a593Smuzhiyun }
2425*4882a593Smuzhiyun
2426*4882a593Smuzhiyun static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
2427*4882a593Smuzhiyun {
2428*4882a593Smuzhiyun unsigned long cpu;
2429*4882a593Smuzhiyun int err = 0;
2430*4882a593Smuzhiyun
2431*4882a593Smuzhiyun /*
2432*4882a593Smuzhiyun * Never shrink. And mddev_suspend() could deadlock if this is called
2433*4882a593Smuzhiyun * from raid5d. In that case, scribble_disks and scribble_sectors
2434*4882a593Smuzhiyun * should be equal to new_disks and new_sectors.
2435*4882a593Smuzhiyun */
2436*4882a593Smuzhiyun if (conf->scribble_disks >= new_disks &&
2437*4882a593Smuzhiyun conf->scribble_sectors >= new_sectors)
2438*4882a593Smuzhiyun return 0;
2439*4882a593Smuzhiyun mddev_suspend(conf->mddev);
2440*4882a593Smuzhiyun get_online_cpus();
2441*4882a593Smuzhiyun
2442*4882a593Smuzhiyun for_each_present_cpu(cpu) {
2443*4882a593Smuzhiyun struct raid5_percpu *percpu;
2444*4882a593Smuzhiyun
2445*4882a593Smuzhiyun percpu = per_cpu_ptr(conf->percpu, cpu);
2446*4882a593Smuzhiyun err = scribble_alloc(percpu, new_disks,
2447*4882a593Smuzhiyun new_sectors / RAID5_STRIPE_SECTORS(conf));
2448*4882a593Smuzhiyun if (err)
2449*4882a593Smuzhiyun break;
2450*4882a593Smuzhiyun }
2451*4882a593Smuzhiyun
2452*4882a593Smuzhiyun put_online_cpus();
2453*4882a593Smuzhiyun mddev_resume(conf->mddev);
2454*4882a593Smuzhiyun if (!err) {
2455*4882a593Smuzhiyun conf->scribble_disks = new_disks;
2456*4882a593Smuzhiyun conf->scribble_sectors = new_sectors;
2457*4882a593Smuzhiyun }
2458*4882a593Smuzhiyun return err;
2459*4882a593Smuzhiyun }
2460*4882a593Smuzhiyun
2461*4882a593Smuzhiyun static int resize_stripes(struct r5conf *conf, int newsize)
2462*4882a593Smuzhiyun {
2463*4882a593Smuzhiyun /* Make all the stripes able to hold 'newsize' devices.
2464*4882a593Smuzhiyun * New slots in each stripe get 'page' set to a new page.
2465*4882a593Smuzhiyun *
2466*4882a593Smuzhiyun * This happens in stages:
2467*4882a593Smuzhiyun * 1/ create a new kmem_cache and allocate the required number of
2468*4882a593Smuzhiyun * stripe_heads.
2469*4882a593Smuzhiyun * 2/ gather all the old stripe_heads and transfer the pages across
2470*4882a593Smuzhiyun * to the new stripe_heads. This will have the side effect of
2471*4882a593Smuzhiyun * freezing the array as once all stripe_heads have been collected,
2472*4882a593Smuzhiyun * no IO will be possible. Old stripe heads are freed once their
2473*4882a593Smuzhiyun * pages have been transferred over, and the old kmem_cache is
2474*4882a593Smuzhiyun * freed when all stripes are done.
2475*4882a593Smuzhiyun * 3/ reallocate conf->disks to be suitably bigger. If this fails,
2476*4882a593Smuzhiyun * we simply return a failure status - no need to clean anything up.
2477*4882a593Smuzhiyun * 4/ allocate new pages for the new slots in the new stripe_heads.
2478*4882a593Smuzhiyun * If this fails, we don't bother trying to shrink the
2479*4882a593Smuzhiyun * stripe_heads down again, we just leave them as they are.
2480*4882a593Smuzhiyun * As each stripe_head is processed the new one is released into
2481*4882a593Smuzhiyun * active service.
2482*4882a593Smuzhiyun *
2483*4882a593Smuzhiyun * Once step2 is started, we cannot afford to wait for a write,
2484*4882a593Smuzhiyun * so we use GFP_NOIO allocations.
2485*4882a593Smuzhiyun */
2486*4882a593Smuzhiyun struct stripe_head *osh, *nsh;
2487*4882a593Smuzhiyun LIST_HEAD(newstripes);
2488*4882a593Smuzhiyun struct disk_info *ndisks;
2489*4882a593Smuzhiyun int err = 0;
2490*4882a593Smuzhiyun struct kmem_cache *sc;
2491*4882a593Smuzhiyun int i;
2492*4882a593Smuzhiyun int hash, cnt;
2493*4882a593Smuzhiyun
2494*4882a593Smuzhiyun md_allow_write(conf->mddev);
2495*4882a593Smuzhiyun
2496*4882a593Smuzhiyun /* Step 1 */
2497*4882a593Smuzhiyun sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
2498*4882a593Smuzhiyun sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
2499*4882a593Smuzhiyun 0, 0, NULL);
2500*4882a593Smuzhiyun if (!sc)
2501*4882a593Smuzhiyun return -ENOMEM;
2502*4882a593Smuzhiyun
2503*4882a593Smuzhiyun /* Need to ensure auto-resizing doesn't interfere */
2504*4882a593Smuzhiyun mutex_lock(&conf->cache_size_mutex);
2505*4882a593Smuzhiyun
2506*4882a593Smuzhiyun for (i = conf->max_nr_stripes; i; i--) {
2507*4882a593Smuzhiyun nsh = alloc_stripe(sc, GFP_KERNEL, newsize, conf);
2508*4882a593Smuzhiyun if (!nsh)
2509*4882a593Smuzhiyun break;
2510*4882a593Smuzhiyun
2511*4882a593Smuzhiyun list_add(&nsh->lru, &newstripes);
2512*4882a593Smuzhiyun }
2513*4882a593Smuzhiyun if (i) {
2514*4882a593Smuzhiyun /* didn't get enough, give up */
2515*4882a593Smuzhiyun while (!list_empty(&newstripes)) {
2516*4882a593Smuzhiyun nsh = list_entry(newstripes.next, struct stripe_head, lru);
2517*4882a593Smuzhiyun list_del(&nsh->lru);
2518*4882a593Smuzhiyun free_stripe(sc, nsh);
2519*4882a593Smuzhiyun }
2520*4882a593Smuzhiyun kmem_cache_destroy(sc);
2521*4882a593Smuzhiyun mutex_unlock(&conf->cache_size_mutex);
2522*4882a593Smuzhiyun return -ENOMEM;
2523*4882a593Smuzhiyun }
2524*4882a593Smuzhiyun /* Step 2 - Must use GFP_NOIO now.
2525*4882a593Smuzhiyun * OK, we have enough stripes, start collecting inactive
2526*4882a593Smuzhiyun * stripes and copying them over
2527*4882a593Smuzhiyun */
2528*4882a593Smuzhiyun hash = 0;
2529*4882a593Smuzhiyun cnt = 0;
2530*4882a593Smuzhiyun list_for_each_entry(nsh, &newstripes, lru) {
2531*4882a593Smuzhiyun lock_device_hash_lock(conf, hash);
2532*4882a593Smuzhiyun wait_event_cmd(conf->wait_for_stripe,
2533*4882a593Smuzhiyun !list_empty(conf->inactive_list + hash),
2534*4882a593Smuzhiyun unlock_device_hash_lock(conf, hash),
2535*4882a593Smuzhiyun lock_device_hash_lock(conf, hash));
2536*4882a593Smuzhiyun osh = get_free_stripe(conf, hash);
2537*4882a593Smuzhiyun unlock_device_hash_lock(conf, hash);
2538*4882a593Smuzhiyun
2539*4882a593Smuzhiyun #if PAGE_SIZE != DEFAULT_STRIPE_SIZE
2540*4882a593Smuzhiyun for (i = 0; i < osh->nr_pages; i++) {
2541*4882a593Smuzhiyun nsh->pages[i] = osh->pages[i];
2542*4882a593Smuzhiyun osh->pages[i] = NULL;
2543*4882a593Smuzhiyun }
2544*4882a593Smuzhiyun #endif
2545*4882a593Smuzhiyun for(i=0; i<conf->pool_size; i++) {
2546*4882a593Smuzhiyun nsh->dev[i].page = osh->dev[i].page;
2547*4882a593Smuzhiyun nsh->dev[i].orig_page = osh->dev[i].page;
2548*4882a593Smuzhiyun nsh->dev[i].offset = osh->dev[i].offset;
2549*4882a593Smuzhiyun }
2550*4882a593Smuzhiyun nsh->hash_lock_index = hash;
2551*4882a593Smuzhiyun free_stripe(conf->slab_cache, osh);
2552*4882a593Smuzhiyun cnt++;
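/*
 * Note added for clarity: each hash lock receives
 * max_nr_stripes / NR_STRIPE_HASH_LOCKS stripes, and the first
 * (max_nr_stripes % NR_STRIPE_HASH_LOCKS) locks take one extra, so the
 * transferred stripes stay evenly spread across the locks.
 */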
2553*4882a593Smuzhiyun if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS +
2554*4882a593Smuzhiyun !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) {
2555*4882a593Smuzhiyun hash++;
2556*4882a593Smuzhiyun cnt = 0;
2557*4882a593Smuzhiyun }
2558*4882a593Smuzhiyun }
2559*4882a593Smuzhiyun kmem_cache_destroy(conf->slab_cache);
2560*4882a593Smuzhiyun
2561*4882a593Smuzhiyun /* Step 3.
2562*4882a593Smuzhiyun * At this point, we are holding all the stripes so the array
2563*4882a593Smuzhiyun * is completely stalled, so now is a good time to resize
2564*4882a593Smuzhiyun * conf->disks and the scribble region
2565*4882a593Smuzhiyun */
2566*4882a593Smuzhiyun ndisks = kcalloc(newsize, sizeof(struct disk_info), GFP_NOIO);
2567*4882a593Smuzhiyun if (ndisks) {
2568*4882a593Smuzhiyun for (i = 0; i < conf->pool_size; i++)
2569*4882a593Smuzhiyun ndisks[i] = conf->disks[i];
2570*4882a593Smuzhiyun
2571*4882a593Smuzhiyun for (i = conf->pool_size; i < newsize; i++) {
2572*4882a593Smuzhiyun ndisks[i].extra_page = alloc_page(GFP_NOIO);
2573*4882a593Smuzhiyun if (!ndisks[i].extra_page)
2574*4882a593Smuzhiyun err = -ENOMEM;
2575*4882a593Smuzhiyun }
2576*4882a593Smuzhiyun
2577*4882a593Smuzhiyun if (err) {
2578*4882a593Smuzhiyun for (i = conf->pool_size; i < newsize; i++)
2579*4882a593Smuzhiyun if (ndisks[i].extra_page)
2580*4882a593Smuzhiyun put_page(ndisks[i].extra_page);
2581*4882a593Smuzhiyun kfree(ndisks);
2582*4882a593Smuzhiyun } else {
2583*4882a593Smuzhiyun kfree(conf->disks);
2584*4882a593Smuzhiyun conf->disks = ndisks;
2585*4882a593Smuzhiyun }
2586*4882a593Smuzhiyun } else
2587*4882a593Smuzhiyun err = -ENOMEM;
2588*4882a593Smuzhiyun
2589*4882a593Smuzhiyun conf->slab_cache = sc;
2590*4882a593Smuzhiyun conf->active_name = 1-conf->active_name;
2591*4882a593Smuzhiyun
2592*4882a593Smuzhiyun /* Step 4, return new stripes to service */
2593*4882a593Smuzhiyun while(!list_empty(&newstripes)) {
2594*4882a593Smuzhiyun nsh = list_entry(newstripes.next, struct stripe_head, lru);
2595*4882a593Smuzhiyun list_del_init(&nsh->lru);
2596*4882a593Smuzhiyun
2597*4882a593Smuzhiyun #if PAGE_SIZE != DEFAULT_STRIPE_SIZE
2598*4882a593Smuzhiyun for (i = 0; i < nsh->nr_pages; i++) {
2599*4882a593Smuzhiyun if (nsh->pages[i])
2600*4882a593Smuzhiyun continue;
2601*4882a593Smuzhiyun nsh->pages[i] = alloc_page(GFP_NOIO);
2602*4882a593Smuzhiyun if (!nsh->pages[i])
2603*4882a593Smuzhiyun err = -ENOMEM;
2604*4882a593Smuzhiyun }
2605*4882a593Smuzhiyun
2606*4882a593Smuzhiyun for (i = conf->raid_disks; i < newsize; i++) {
2607*4882a593Smuzhiyun if (nsh->dev[i].page)
2608*4882a593Smuzhiyun continue;
2609*4882a593Smuzhiyun nsh->dev[i].page = raid5_get_dev_page(nsh, i);
2610*4882a593Smuzhiyun nsh->dev[i].orig_page = nsh->dev[i].page;
2611*4882a593Smuzhiyun nsh->dev[i].offset = raid5_get_page_offset(nsh, i);
2612*4882a593Smuzhiyun }
2613*4882a593Smuzhiyun #else
2614*4882a593Smuzhiyun for (i=conf->raid_disks; i < newsize; i++)
2615*4882a593Smuzhiyun if (nsh->dev[i].page == NULL) {
2616*4882a593Smuzhiyun struct page *p = alloc_page(GFP_NOIO);
2617*4882a593Smuzhiyun nsh->dev[i].page = p;
2618*4882a593Smuzhiyun nsh->dev[i].orig_page = p;
2619*4882a593Smuzhiyun nsh->dev[i].offset = 0;
2620*4882a593Smuzhiyun if (!p)
2621*4882a593Smuzhiyun err = -ENOMEM;
2622*4882a593Smuzhiyun }
2623*4882a593Smuzhiyun #endif
2624*4882a593Smuzhiyun raid5_release_stripe(nsh);
2625*4882a593Smuzhiyun }
2626*4882a593Smuzhiyun /* critical section passed, GFP_NOIO no longer needed */
2627*4882a593Smuzhiyun
2628*4882a593Smuzhiyun if (!err)
2629*4882a593Smuzhiyun conf->pool_size = newsize;
2630*4882a593Smuzhiyun mutex_unlock(&conf->cache_size_mutex);
2631*4882a593Smuzhiyun
2632*4882a593Smuzhiyun return err;
2633*4882a593Smuzhiyun }
2634*4882a593Smuzhiyun
2635*4882a593Smuzhiyun static int drop_one_stripe(struct r5conf *conf)
2636*4882a593Smuzhiyun {
2637*4882a593Smuzhiyun struct stripe_head *sh;
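/*
 * Note added for clarity (assuming NR_STRIPE_HASH_LOCKS is a power of
 * two): this selects the hash bucket that the most recently grown
 * stripe was assigned in grow_one_stripe().
 */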
2638*4882a593Smuzhiyun int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
2639*4882a593Smuzhiyun
2640*4882a593Smuzhiyun spin_lock_irq(conf->hash_locks + hash);
2641*4882a593Smuzhiyun sh = get_free_stripe(conf, hash);
2642*4882a593Smuzhiyun spin_unlock_irq(conf->hash_locks + hash);
2643*4882a593Smuzhiyun if (!sh)
2644*4882a593Smuzhiyun return 0;
2645*4882a593Smuzhiyun BUG_ON(atomic_read(&sh->count));
2646*4882a593Smuzhiyun shrink_buffers(sh);
2647*4882a593Smuzhiyun free_stripe(conf->slab_cache, sh);
2648*4882a593Smuzhiyun atomic_dec(&conf->active_stripes);
2649*4882a593Smuzhiyun conf->max_nr_stripes--;
2650*4882a593Smuzhiyun return 1;
2651*4882a593Smuzhiyun }
2652*4882a593Smuzhiyun
2653*4882a593Smuzhiyun static void shrink_stripes(struct r5conf *conf)
2654*4882a593Smuzhiyun {
2655*4882a593Smuzhiyun while (conf->max_nr_stripes &&
2656*4882a593Smuzhiyun drop_one_stripe(conf))
2657*4882a593Smuzhiyun ;
2658*4882a593Smuzhiyun
2659*4882a593Smuzhiyun kmem_cache_destroy(conf->slab_cache);
2660*4882a593Smuzhiyun conf->slab_cache = NULL;
2661*4882a593Smuzhiyun }
2662*4882a593Smuzhiyun
2663*4882a593Smuzhiyun static void raid5_end_read_request(struct bio * bi)
2664*4882a593Smuzhiyun {
2665*4882a593Smuzhiyun struct stripe_head *sh = bi->bi_private;
2666*4882a593Smuzhiyun struct r5conf *conf = sh->raid_conf;
2667*4882a593Smuzhiyun int disks = sh->disks, i;
2668*4882a593Smuzhiyun char b[BDEVNAME_SIZE];
2669*4882a593Smuzhiyun struct md_rdev *rdev = NULL;
2670*4882a593Smuzhiyun sector_t s;
2671*4882a593Smuzhiyun
2672*4882a593Smuzhiyun for (i=0 ; i<disks; i++)
2673*4882a593Smuzhiyun if (bi == &sh->dev[i].req)
2674*4882a593Smuzhiyun break;
2675*4882a593Smuzhiyun
2676*4882a593Smuzhiyun pr_debug("end_read_request %llu/%d, count: %d, error %d.\n",
2677*4882a593Smuzhiyun (unsigned long long)sh->sector, i, atomic_read(&sh->count),
2678*4882a593Smuzhiyun bi->bi_status);
2679*4882a593Smuzhiyun if (i == disks) {
2680*4882a593Smuzhiyun bio_reset(bi);
2681*4882a593Smuzhiyun BUG();
2682*4882a593Smuzhiyun return;
2683*4882a593Smuzhiyun }
2684*4882a593Smuzhiyun if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
2685*4882a593Smuzhiyun /* If replacement finished while this request was outstanding,
2686*4882a593Smuzhiyun * 'replacement' might be NULL already.
2687*4882a593Smuzhiyun * In that case it moved down to 'rdev'.
2688*4882a593Smuzhiyun * rdev is not removed until all requests are finished.
2689*4882a593Smuzhiyun */
2690*4882a593Smuzhiyun rdev = conf->disks[i].replacement;
2691*4882a593Smuzhiyun if (!rdev)
2692*4882a593Smuzhiyun rdev = conf->disks[i].rdev;
2693*4882a593Smuzhiyun
2694*4882a593Smuzhiyun if (use_new_offset(conf, sh))
2695*4882a593Smuzhiyun s = sh->sector + rdev->new_data_offset;
2696*4882a593Smuzhiyun else
2697*4882a593Smuzhiyun s = sh->sector + rdev->data_offset;
2698*4882a593Smuzhiyun if (!bi->bi_status) {
2699*4882a593Smuzhiyun set_bit(R5_UPTODATE, &sh->dev[i].flags);
2700*4882a593Smuzhiyun if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
2701*4882a593Smuzhiyun /* Note that this cannot happen on a
2702*4882a593Smuzhiyun * replacement device. We just fail those on
2703*4882a593Smuzhiyun * any error
2704*4882a593Smuzhiyun */
2705*4882a593Smuzhiyun pr_info_ratelimited(
2706*4882a593Smuzhiyun "md/raid:%s: read error corrected (%lu sectors at %llu on %s)\n",
2707*4882a593Smuzhiyun mdname(conf->mddev), RAID5_STRIPE_SECTORS(conf),
2708*4882a593Smuzhiyun (unsigned long long)s,
2709*4882a593Smuzhiyun bdevname(rdev->bdev, b));
2710*4882a593Smuzhiyun atomic_add(RAID5_STRIPE_SECTORS(conf), &rdev->corrected_errors);
2711*4882a593Smuzhiyun clear_bit(R5_ReadError, &sh->dev[i].flags);
2712*4882a593Smuzhiyun clear_bit(R5_ReWrite, &sh->dev[i].flags);
2713*4882a593Smuzhiyun } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
2714*4882a593Smuzhiyun clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
2715*4882a593Smuzhiyun
2716*4882a593Smuzhiyun if (test_bit(R5_InJournal, &sh->dev[i].flags))
2717*4882a593Smuzhiyun /*
2718*4882a593Smuzhiyun * end read for a page in journal, this
2719*4882a593Smuzhiyun * must be preparing for prexor in rmw
2720*4882a593Smuzhiyun */
2721*4882a593Smuzhiyun set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
2722*4882a593Smuzhiyun
2723*4882a593Smuzhiyun if (atomic_read(&rdev->read_errors))
2724*4882a593Smuzhiyun atomic_set(&rdev->read_errors, 0);
2725*4882a593Smuzhiyun } else {
2726*4882a593Smuzhiyun const char *bdn = bdevname(rdev->bdev, b);
2727*4882a593Smuzhiyun int retry = 0;
2728*4882a593Smuzhiyun int set_bad = 0;
2729*4882a593Smuzhiyun
2730*4882a593Smuzhiyun clear_bit(R5_UPTODATE, &sh->dev[i].flags);
2731*4882a593Smuzhiyun if (!(bi->bi_status == BLK_STS_PROTECTION))
2732*4882a593Smuzhiyun atomic_inc(&rdev->read_errors);
2733*4882a593Smuzhiyun if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
2734*4882a593Smuzhiyun pr_warn_ratelimited(
2735*4882a593Smuzhiyun "md/raid:%s: read error on replacement device (sector %llu on %s).\n",
2736*4882a593Smuzhiyun mdname(conf->mddev),
2737*4882a593Smuzhiyun (unsigned long long)s,
2738*4882a593Smuzhiyun bdn);
2739*4882a593Smuzhiyun else if (conf->mddev->degraded >= conf->max_degraded) {
2740*4882a593Smuzhiyun set_bad = 1;
2741*4882a593Smuzhiyun pr_warn_ratelimited(
2742*4882a593Smuzhiyun "md/raid:%s: read error not correctable (sector %llu on %s).\n",
2743*4882a593Smuzhiyun mdname(conf->mddev),
2744*4882a593Smuzhiyun (unsigned long long)s,
2745*4882a593Smuzhiyun bdn);
2746*4882a593Smuzhiyun } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
2747*4882a593Smuzhiyun /* Oh, no!!! */
2748*4882a593Smuzhiyun set_bad = 1;
2749*4882a593Smuzhiyun pr_warn_ratelimited(
2750*4882a593Smuzhiyun "md/raid:%s: read error NOT corrected!! (sector %llu on %s).\n",
2751*4882a593Smuzhiyun mdname(conf->mddev),
2752*4882a593Smuzhiyun (unsigned long long)s,
2753*4882a593Smuzhiyun bdn);
2754*4882a593Smuzhiyun } else if (atomic_read(&rdev->read_errors)
2755*4882a593Smuzhiyun > conf->max_nr_stripes) {
2756*4882a593Smuzhiyun if (!test_bit(Faulty, &rdev->flags)) {
2757*4882a593Smuzhiyun pr_warn("md/raid:%s: %d read_errors > %d stripes\n",
2758*4882a593Smuzhiyun mdname(conf->mddev),
2759*4882a593Smuzhiyun atomic_read(&rdev->read_errors),
2760*4882a593Smuzhiyun conf->max_nr_stripes);
2761*4882a593Smuzhiyun pr_warn("md/raid:%s: Too many read errors, failing device %s.\n",
2762*4882a593Smuzhiyun mdname(conf->mddev), bdn);
2763*4882a593Smuzhiyun }
2764*4882a593Smuzhiyun } else
2765*4882a593Smuzhiyun retry = 1;
2766*4882a593Smuzhiyun if (set_bad && test_bit(In_sync, &rdev->flags)
2767*4882a593Smuzhiyun && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
2768*4882a593Smuzhiyun retry = 1;
2769*4882a593Smuzhiyun if (retry)
2770*4882a593Smuzhiyun if (sh->qd_idx >= 0 && sh->pd_idx == i)
2771*4882a593Smuzhiyun set_bit(R5_ReadError, &sh->dev[i].flags);
2772*4882a593Smuzhiyun else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
2773*4882a593Smuzhiyun set_bit(R5_ReadError, &sh->dev[i].flags);
2774*4882a593Smuzhiyun clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
2775*4882a593Smuzhiyun } else
2776*4882a593Smuzhiyun set_bit(R5_ReadNoMerge, &sh->dev[i].flags);
2777*4882a593Smuzhiyun else {
2778*4882a593Smuzhiyun clear_bit(R5_ReadError, &sh->dev[i].flags);
2779*4882a593Smuzhiyun clear_bit(R5_ReWrite, &sh->dev[i].flags);
2780*4882a593Smuzhiyun if (!(set_bad
2781*4882a593Smuzhiyun && test_bit(In_sync, &rdev->flags)
2782*4882a593Smuzhiyun && rdev_set_badblocks(
2783*4882a593Smuzhiyun rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), 0)))
2784*4882a593Smuzhiyun md_error(conf->mddev, rdev);
2785*4882a593Smuzhiyun }
2786*4882a593Smuzhiyun }
2787*4882a593Smuzhiyun rdev_dec_pending(rdev, conf->mddev);
2788*4882a593Smuzhiyun bio_reset(bi);
2789*4882a593Smuzhiyun clear_bit(R5_LOCKED, &sh->dev[i].flags);
2790*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
2791*4882a593Smuzhiyun raid5_release_stripe(sh);
2792*4882a593Smuzhiyun }
2793*4882a593Smuzhiyun
2794*4882a593Smuzhiyun static void raid5_end_write_request(struct bio *bi)
2795*4882a593Smuzhiyun {
2796*4882a593Smuzhiyun struct stripe_head *sh = bi->bi_private;
2797*4882a593Smuzhiyun struct r5conf *conf = sh->raid_conf;
2798*4882a593Smuzhiyun int disks = sh->disks, i;
2799*4882a593Smuzhiyun struct md_rdev *rdev;
2800*4882a593Smuzhiyun sector_t first_bad;
2801*4882a593Smuzhiyun int bad_sectors;
2802*4882a593Smuzhiyun int replacement = 0;
2803*4882a593Smuzhiyun
2804*4882a593Smuzhiyun for (i = 0 ; i < disks; i++) {
2805*4882a593Smuzhiyun if (bi == &sh->dev[i].req) {
2806*4882a593Smuzhiyun rdev = conf->disks[i].rdev;
2807*4882a593Smuzhiyun break;
2808*4882a593Smuzhiyun }
2809*4882a593Smuzhiyun if (bi == &sh->dev[i].rreq) {
2810*4882a593Smuzhiyun rdev = conf->disks[i].replacement;
2811*4882a593Smuzhiyun if (rdev)
2812*4882a593Smuzhiyun replacement = 1;
2813*4882a593Smuzhiyun else
2814*4882a593Smuzhiyun /* rdev was removed and 'replacement'
2815*4882a593Smuzhiyun * replaced it. rdev is not removed
2816*4882a593Smuzhiyun * until all requests are finished.
2817*4882a593Smuzhiyun */
2818*4882a593Smuzhiyun rdev = conf->disks[i].rdev;
2819*4882a593Smuzhiyun break;
2820*4882a593Smuzhiyun }
2821*4882a593Smuzhiyun }
2822*4882a593Smuzhiyun pr_debug("end_write_request %llu/%d, count %d, error: %d.\n",
2823*4882a593Smuzhiyun (unsigned long long)sh->sector, i, atomic_read(&sh->count),
2824*4882a593Smuzhiyun bi->bi_status);
2825*4882a593Smuzhiyun if (i == disks) {
2826*4882a593Smuzhiyun bio_reset(bi);
2827*4882a593Smuzhiyun BUG();
2828*4882a593Smuzhiyun return;
2829*4882a593Smuzhiyun }
2830*4882a593Smuzhiyun
2831*4882a593Smuzhiyun if (replacement) {
2832*4882a593Smuzhiyun if (bi->bi_status)
2833*4882a593Smuzhiyun md_error(conf->mddev, rdev);
2834*4882a593Smuzhiyun else if (is_badblock(rdev, sh->sector,
2835*4882a593Smuzhiyun RAID5_STRIPE_SECTORS(conf),
2836*4882a593Smuzhiyun &first_bad, &bad_sectors))
2837*4882a593Smuzhiyun set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
2838*4882a593Smuzhiyun } else {
2839*4882a593Smuzhiyun if (bi->bi_status) {
2840*4882a593Smuzhiyun set_bit(STRIPE_DEGRADED, &sh->state);
2841*4882a593Smuzhiyun set_bit(WriteErrorSeen, &rdev->flags);
2842*4882a593Smuzhiyun set_bit(R5_WriteError, &sh->dev[i].flags);
2843*4882a593Smuzhiyun if (!test_and_set_bit(WantReplacement, &rdev->flags))
2844*4882a593Smuzhiyun set_bit(MD_RECOVERY_NEEDED,
2845*4882a593Smuzhiyun &rdev->mddev->recovery);
2846*4882a593Smuzhiyun } else if (is_badblock(rdev, sh->sector,
2847*4882a593Smuzhiyun RAID5_STRIPE_SECTORS(conf),
2848*4882a593Smuzhiyun &first_bad, &bad_sectors)) {
2849*4882a593Smuzhiyun set_bit(R5_MadeGood, &sh->dev[i].flags);
2850*4882a593Smuzhiyun if (test_bit(R5_ReadError, &sh->dev[i].flags))
2851*4882a593Smuzhiyun /* That was a successful write so make
2852*4882a593Smuzhiyun * sure it looks like we already did
2853*4882a593Smuzhiyun * a re-write.
2854*4882a593Smuzhiyun */
2855*4882a593Smuzhiyun set_bit(R5_ReWrite, &sh->dev[i].flags);
2856*4882a593Smuzhiyun }
2857*4882a593Smuzhiyun }
2858*4882a593Smuzhiyun rdev_dec_pending(rdev, conf->mddev);
2859*4882a593Smuzhiyun
2860*4882a593Smuzhiyun if (sh->batch_head && bi->bi_status && !replacement)
2861*4882a593Smuzhiyun set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
2862*4882a593Smuzhiyun
2863*4882a593Smuzhiyun bio_reset(bi);
2864*4882a593Smuzhiyun if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
2865*4882a593Smuzhiyun clear_bit(R5_LOCKED, &sh->dev[i].flags);
2866*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
2867*4882a593Smuzhiyun
2868*4882a593Smuzhiyun if (sh->batch_head && sh != sh->batch_head)
2869*4882a593Smuzhiyun raid5_release_stripe(sh->batch_head);
2870*4882a593Smuzhiyun raid5_release_stripe(sh);
2871*4882a593Smuzhiyun }
2872*4882a593Smuzhiyun
2873*4882a593Smuzhiyun static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
2874*4882a593Smuzhiyun {
2875*4882a593Smuzhiyun char b[BDEVNAME_SIZE];
2876*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
2877*4882a593Smuzhiyun unsigned long flags;
2878*4882a593Smuzhiyun pr_debug("raid456: error called\n");
2879*4882a593Smuzhiyun
2880*4882a593Smuzhiyun pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n",
2881*4882a593Smuzhiyun mdname(mddev), bdevname(rdev->bdev, b));
2882*4882a593Smuzhiyun
2883*4882a593Smuzhiyun spin_lock_irqsave(&conf->device_lock, flags);
2884*4882a593Smuzhiyun set_bit(Faulty, &rdev->flags);
2885*4882a593Smuzhiyun clear_bit(In_sync, &rdev->flags);
2886*4882a593Smuzhiyun mddev->degraded = raid5_calc_degraded(conf);
2887*4882a593Smuzhiyun
2888*4882a593Smuzhiyun if (has_failed(conf)) {
2889*4882a593Smuzhiyun set_bit(MD_BROKEN, &conf->mddev->flags);
2890*4882a593Smuzhiyun conf->recovery_disabled = mddev->recovery_disabled;
2891*4882a593Smuzhiyun
2892*4882a593Smuzhiyun pr_crit("md/raid:%s: Cannot continue operation (%d/%d failed).\n",
2893*4882a593Smuzhiyun mdname(mddev), mddev->degraded, conf->raid_disks);
2894*4882a593Smuzhiyun } else {
2895*4882a593Smuzhiyun pr_crit("md/raid:%s: Operation continuing on %d devices.\n",
2896*4882a593Smuzhiyun mdname(mddev), conf->raid_disks - mddev->degraded);
2897*4882a593Smuzhiyun }
2898*4882a593Smuzhiyun
2899*4882a593Smuzhiyun spin_unlock_irqrestore(&conf->device_lock, flags);
2900*4882a593Smuzhiyun set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2901*4882a593Smuzhiyun
2902*4882a593Smuzhiyun set_bit(Blocked, &rdev->flags);
2903*4882a593Smuzhiyun set_mask_bits(&mddev->sb_flags, 0,
2904*4882a593Smuzhiyun BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
2905*4882a593Smuzhiyun r5c_update_on_rdev_error(mddev, rdev);
2906*4882a593Smuzhiyun }
2907*4882a593Smuzhiyun
2908*4882a593Smuzhiyun /*
2909*4882a593Smuzhiyun * Input: a 'big' sector number,
2910*4882a593Smuzhiyun * Output: index of the data and parity disk, and the sector # in them.
2911*4882a593Smuzhiyun */
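/*
 * Worked example (illustrative, parameters assumed): on a 5-device RAID5
 * using the left-symmetric layout with 64 KiB (128-sector) chunks,
 * r_sector 1000 splits into chunk_number 7 and chunk_offset 104. That
 * chunk sits on data disk dd_idx 2 of stripe 1, the parity disk is
 * pd_idx 3, and the returned per-device sector is 1 * 128 + 104 = 232.
 */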
2912*4882a593Smuzhiyun sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
2913*4882a593Smuzhiyun int previous, int *dd_idx,
2914*4882a593Smuzhiyun struct stripe_head *sh)
2915*4882a593Smuzhiyun {
2916*4882a593Smuzhiyun sector_t stripe, stripe2;
2917*4882a593Smuzhiyun sector_t chunk_number;
2918*4882a593Smuzhiyun unsigned int chunk_offset;
2919*4882a593Smuzhiyun int pd_idx, qd_idx;
2920*4882a593Smuzhiyun int ddf_layout = 0;
2921*4882a593Smuzhiyun sector_t new_sector;
2922*4882a593Smuzhiyun int algorithm = previous ? conf->prev_algo
2923*4882a593Smuzhiyun : conf->algorithm;
2924*4882a593Smuzhiyun int sectors_per_chunk = previous ? conf->prev_chunk_sectors
2925*4882a593Smuzhiyun : conf->chunk_sectors;
2926*4882a593Smuzhiyun int raid_disks = previous ? conf->previous_raid_disks
2927*4882a593Smuzhiyun : conf->raid_disks;
2928*4882a593Smuzhiyun int data_disks = raid_disks - conf->max_degraded;
2929*4882a593Smuzhiyun
2930*4882a593Smuzhiyun /* First compute the information on this sector */
2931*4882a593Smuzhiyun
2932*4882a593Smuzhiyun /*
2933*4882a593Smuzhiyun * Compute the chunk number and the sector offset inside the chunk
2934*4882a593Smuzhiyun */
2935*4882a593Smuzhiyun chunk_offset = sector_div(r_sector, sectors_per_chunk);
2936*4882a593Smuzhiyun chunk_number = r_sector;
2937*4882a593Smuzhiyun
2938*4882a593Smuzhiyun /*
2939*4882a593Smuzhiyun * Compute the stripe number
2940*4882a593Smuzhiyun */
2941*4882a593Smuzhiyun stripe = chunk_number;
2942*4882a593Smuzhiyun *dd_idx = sector_div(stripe, data_disks);
2943*4882a593Smuzhiyun stripe2 = stripe;
2944*4882a593Smuzhiyun /*
2945*4882a593Smuzhiyun * Select the parity disk based on the user selected algorithm.
2946*4882a593Smuzhiyun */
2947*4882a593Smuzhiyun pd_idx = qd_idx = -1;
2948*4882a593Smuzhiyun switch(conf->level) {
2949*4882a593Smuzhiyun case 4:
2950*4882a593Smuzhiyun pd_idx = data_disks;
2951*4882a593Smuzhiyun break;
2952*4882a593Smuzhiyun case 5:
2953*4882a593Smuzhiyun switch (algorithm) {
2954*4882a593Smuzhiyun case ALGORITHM_LEFT_ASYMMETRIC:
2955*4882a593Smuzhiyun pd_idx = data_disks - sector_div(stripe2, raid_disks);
2956*4882a593Smuzhiyun if (*dd_idx >= pd_idx)
2957*4882a593Smuzhiyun (*dd_idx)++;
2958*4882a593Smuzhiyun break;
2959*4882a593Smuzhiyun case ALGORITHM_RIGHT_ASYMMETRIC:
2960*4882a593Smuzhiyun pd_idx = sector_div(stripe2, raid_disks);
2961*4882a593Smuzhiyun if (*dd_idx >= pd_idx)
2962*4882a593Smuzhiyun (*dd_idx)++;
2963*4882a593Smuzhiyun break;
2964*4882a593Smuzhiyun case ALGORITHM_LEFT_SYMMETRIC:
2965*4882a593Smuzhiyun pd_idx = data_disks - sector_div(stripe2, raid_disks);
2966*4882a593Smuzhiyun *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
2967*4882a593Smuzhiyun break;
2968*4882a593Smuzhiyun case ALGORITHM_RIGHT_SYMMETRIC:
2969*4882a593Smuzhiyun pd_idx = sector_div(stripe2, raid_disks);
2970*4882a593Smuzhiyun *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
2971*4882a593Smuzhiyun break;
2972*4882a593Smuzhiyun case ALGORITHM_PARITY_0:
2973*4882a593Smuzhiyun pd_idx = 0;
2974*4882a593Smuzhiyun (*dd_idx)++;
2975*4882a593Smuzhiyun break;
2976*4882a593Smuzhiyun case ALGORITHM_PARITY_N:
2977*4882a593Smuzhiyun pd_idx = data_disks;
2978*4882a593Smuzhiyun break;
2979*4882a593Smuzhiyun default:
2980*4882a593Smuzhiyun BUG();
2981*4882a593Smuzhiyun }
2982*4882a593Smuzhiyun break;
2983*4882a593Smuzhiyun case 6:
2984*4882a593Smuzhiyun
2985*4882a593Smuzhiyun switch (algorithm) {
2986*4882a593Smuzhiyun case ALGORITHM_LEFT_ASYMMETRIC:
2987*4882a593Smuzhiyun pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
2988*4882a593Smuzhiyun qd_idx = pd_idx + 1;
2989*4882a593Smuzhiyun if (pd_idx == raid_disks-1) {
2990*4882a593Smuzhiyun (*dd_idx)++; /* Q D D D P */
2991*4882a593Smuzhiyun qd_idx = 0;
2992*4882a593Smuzhiyun } else if (*dd_idx >= pd_idx)
2993*4882a593Smuzhiyun (*dd_idx) += 2; /* D D P Q D */
2994*4882a593Smuzhiyun break;
2995*4882a593Smuzhiyun case ALGORITHM_RIGHT_ASYMMETRIC:
2996*4882a593Smuzhiyun pd_idx = sector_div(stripe2, raid_disks);
2997*4882a593Smuzhiyun qd_idx = pd_idx + 1;
2998*4882a593Smuzhiyun if (pd_idx == raid_disks-1) {
2999*4882a593Smuzhiyun (*dd_idx)++; /* Q D D D P */
3000*4882a593Smuzhiyun qd_idx = 0;
3001*4882a593Smuzhiyun } else if (*dd_idx >= pd_idx)
3002*4882a593Smuzhiyun (*dd_idx) += 2; /* D D P Q D */
3003*4882a593Smuzhiyun break;
3004*4882a593Smuzhiyun case ALGORITHM_LEFT_SYMMETRIC:
3005*4882a593Smuzhiyun pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
3006*4882a593Smuzhiyun qd_idx = (pd_idx + 1) % raid_disks;
3007*4882a593Smuzhiyun *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
3008*4882a593Smuzhiyun break;
3009*4882a593Smuzhiyun case ALGORITHM_RIGHT_SYMMETRIC:
3010*4882a593Smuzhiyun pd_idx = sector_div(stripe2, raid_disks);
3011*4882a593Smuzhiyun qd_idx = (pd_idx + 1) % raid_disks;
3012*4882a593Smuzhiyun *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
3013*4882a593Smuzhiyun break;
3014*4882a593Smuzhiyun
3015*4882a593Smuzhiyun case ALGORITHM_PARITY_0:
3016*4882a593Smuzhiyun pd_idx = 0;
3017*4882a593Smuzhiyun qd_idx = 1;
3018*4882a593Smuzhiyun (*dd_idx) += 2;
3019*4882a593Smuzhiyun break;
3020*4882a593Smuzhiyun case ALGORITHM_PARITY_N:
3021*4882a593Smuzhiyun pd_idx = data_disks;
3022*4882a593Smuzhiyun qd_idx = data_disks + 1;
3023*4882a593Smuzhiyun break;
3024*4882a593Smuzhiyun
3025*4882a593Smuzhiyun case ALGORITHM_ROTATING_ZERO_RESTART:
3026*4882a593Smuzhiyun /* Exactly the same as RIGHT_ASYMMETRIC, but the
3027*4882a593Smuzhiyun * order of blocks for computing Q is different.
3028*4882a593Smuzhiyun */
3029*4882a593Smuzhiyun pd_idx = sector_div(stripe2, raid_disks);
3030*4882a593Smuzhiyun qd_idx = pd_idx + 1;
3031*4882a593Smuzhiyun if (pd_idx == raid_disks-1) {
3032*4882a593Smuzhiyun (*dd_idx)++; /* Q D D D P */
3033*4882a593Smuzhiyun qd_idx = 0;
3034*4882a593Smuzhiyun } else if (*dd_idx >= pd_idx)
3035*4882a593Smuzhiyun (*dd_idx) += 2; /* D D P Q D */
3036*4882a593Smuzhiyun ddf_layout = 1;
3037*4882a593Smuzhiyun break;
3038*4882a593Smuzhiyun
3039*4882a593Smuzhiyun case ALGORITHM_ROTATING_N_RESTART:
3040*4882a593Smuzhiyun /* Same as left_asymmetric, but the first stripe is
3041*4882a593Smuzhiyun * D D D P Q rather than
3042*4882a593Smuzhiyun * Q D D D P
3043*4882a593Smuzhiyun */
3044*4882a593Smuzhiyun stripe2 += 1;
3045*4882a593Smuzhiyun pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
3046*4882a593Smuzhiyun qd_idx = pd_idx + 1;
3047*4882a593Smuzhiyun if (pd_idx == raid_disks-1) {
3048*4882a593Smuzhiyun (*dd_idx)++; /* Q D D D P */
3049*4882a593Smuzhiyun qd_idx = 0;
3050*4882a593Smuzhiyun } else if (*dd_idx >= pd_idx)
3051*4882a593Smuzhiyun (*dd_idx) += 2; /* D D P Q D */
3052*4882a593Smuzhiyun ddf_layout = 1;
3053*4882a593Smuzhiyun break;
3054*4882a593Smuzhiyun
3055*4882a593Smuzhiyun case ALGORITHM_ROTATING_N_CONTINUE:
3056*4882a593Smuzhiyun /* Same as left_symmetric but Q is before P */
3057*4882a593Smuzhiyun pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
3058*4882a593Smuzhiyun qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
3059*4882a593Smuzhiyun *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
3060*4882a593Smuzhiyun ddf_layout = 1;
3061*4882a593Smuzhiyun break;
3062*4882a593Smuzhiyun
3063*4882a593Smuzhiyun case ALGORITHM_LEFT_ASYMMETRIC_6:
3064*4882a593Smuzhiyun /* RAID5 left_asymmetric, with Q on last device */
3065*4882a593Smuzhiyun pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
3066*4882a593Smuzhiyun if (*dd_idx >= pd_idx)
3067*4882a593Smuzhiyun (*dd_idx)++;
3068*4882a593Smuzhiyun qd_idx = raid_disks - 1;
3069*4882a593Smuzhiyun break;
3070*4882a593Smuzhiyun
3071*4882a593Smuzhiyun case ALGORITHM_RIGHT_ASYMMETRIC_6:
3072*4882a593Smuzhiyun pd_idx = sector_div(stripe2, raid_disks-1);
3073*4882a593Smuzhiyun if (*dd_idx >= pd_idx)
3074*4882a593Smuzhiyun (*dd_idx)++;
3075*4882a593Smuzhiyun qd_idx = raid_disks - 1;
3076*4882a593Smuzhiyun break;
3077*4882a593Smuzhiyun
3078*4882a593Smuzhiyun case ALGORITHM_LEFT_SYMMETRIC_6:
3079*4882a593Smuzhiyun pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
3080*4882a593Smuzhiyun *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
3081*4882a593Smuzhiyun qd_idx = raid_disks - 1;
3082*4882a593Smuzhiyun break;
3083*4882a593Smuzhiyun
3084*4882a593Smuzhiyun case ALGORITHM_RIGHT_SYMMETRIC_6:
3085*4882a593Smuzhiyun pd_idx = sector_div(stripe2, raid_disks-1);
3086*4882a593Smuzhiyun *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
3087*4882a593Smuzhiyun qd_idx = raid_disks - 1;
3088*4882a593Smuzhiyun break;
3089*4882a593Smuzhiyun
3090*4882a593Smuzhiyun case ALGORITHM_PARITY_0_6:
3091*4882a593Smuzhiyun pd_idx = 0;
3092*4882a593Smuzhiyun (*dd_idx)++;
3093*4882a593Smuzhiyun qd_idx = raid_disks - 1;
3094*4882a593Smuzhiyun break;
3095*4882a593Smuzhiyun
3096*4882a593Smuzhiyun default:
3097*4882a593Smuzhiyun BUG();
3098*4882a593Smuzhiyun }
3099*4882a593Smuzhiyun break;
3100*4882a593Smuzhiyun }
3101*4882a593Smuzhiyun
3102*4882a593Smuzhiyun if (sh) {
3103*4882a593Smuzhiyun sh->pd_idx = pd_idx;
3104*4882a593Smuzhiyun sh->qd_idx = qd_idx;
3105*4882a593Smuzhiyun sh->ddf_layout = ddf_layout;
3106*4882a593Smuzhiyun }
3107*4882a593Smuzhiyun /*
3108*4882a593Smuzhiyun * Finally, compute the new sector number
3109*4882a593Smuzhiyun */
3110*4882a593Smuzhiyun new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
3111*4882a593Smuzhiyun return new_sector;
3112*4882a593Smuzhiyun }
3113*4882a593Smuzhiyun
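/*
 * raid5_compute_blocknr() is the inverse of raid5_compute_sector():
 * continuing the illustrative example above, device slot i == 2 of the
 * stripe at sector 232 (pd_idx 3) maps back to array sector 1000. The
 * result is sanity-checked by recomputing the forward mapping.
 */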
3114*4882a593Smuzhiyun sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
3115*4882a593Smuzhiyun {
3116*4882a593Smuzhiyun struct r5conf *conf = sh->raid_conf;
3117*4882a593Smuzhiyun int raid_disks = sh->disks;
3118*4882a593Smuzhiyun int data_disks = raid_disks - conf->max_degraded;
3119*4882a593Smuzhiyun sector_t new_sector = sh->sector, check;
3120*4882a593Smuzhiyun int sectors_per_chunk = previous ? conf->prev_chunk_sectors
3121*4882a593Smuzhiyun : conf->chunk_sectors;
3122*4882a593Smuzhiyun int algorithm = previous ? conf->prev_algo
3123*4882a593Smuzhiyun : conf->algorithm;
3124*4882a593Smuzhiyun sector_t stripe;
3125*4882a593Smuzhiyun int chunk_offset;
3126*4882a593Smuzhiyun sector_t chunk_number;
3127*4882a593Smuzhiyun int dummy1, dd_idx = i;
3128*4882a593Smuzhiyun sector_t r_sector;
3129*4882a593Smuzhiyun struct stripe_head sh2;
3130*4882a593Smuzhiyun
3131*4882a593Smuzhiyun chunk_offset = sector_div(new_sector, sectors_per_chunk);
3132*4882a593Smuzhiyun stripe = new_sector;
3133*4882a593Smuzhiyun
3134*4882a593Smuzhiyun if (i == sh->pd_idx)
3135*4882a593Smuzhiyun return 0;
3136*4882a593Smuzhiyun switch(conf->level) {
3137*4882a593Smuzhiyun case 4: break;
3138*4882a593Smuzhiyun case 5:
3139*4882a593Smuzhiyun switch (algorithm) {
3140*4882a593Smuzhiyun case ALGORITHM_LEFT_ASYMMETRIC:
3141*4882a593Smuzhiyun case ALGORITHM_RIGHT_ASYMMETRIC:
3142*4882a593Smuzhiyun if (i > sh->pd_idx)
3143*4882a593Smuzhiyun i--;
3144*4882a593Smuzhiyun break;
3145*4882a593Smuzhiyun case ALGORITHM_LEFT_SYMMETRIC:
3146*4882a593Smuzhiyun case ALGORITHM_RIGHT_SYMMETRIC:
3147*4882a593Smuzhiyun if (i < sh->pd_idx)
3148*4882a593Smuzhiyun i += raid_disks;
3149*4882a593Smuzhiyun i -= (sh->pd_idx + 1);
3150*4882a593Smuzhiyun break;
3151*4882a593Smuzhiyun case ALGORITHM_PARITY_0:
3152*4882a593Smuzhiyun i -= 1;
3153*4882a593Smuzhiyun break;
3154*4882a593Smuzhiyun case ALGORITHM_PARITY_N:
3155*4882a593Smuzhiyun break;
3156*4882a593Smuzhiyun default:
3157*4882a593Smuzhiyun BUG();
3158*4882a593Smuzhiyun }
3159*4882a593Smuzhiyun break;
3160*4882a593Smuzhiyun case 6:
3161*4882a593Smuzhiyun if (i == sh->qd_idx)
3162*4882a593Smuzhiyun return 0; /* It is the Q disk */
3163*4882a593Smuzhiyun switch (algorithm) {
3164*4882a593Smuzhiyun case ALGORITHM_LEFT_ASYMMETRIC:
3165*4882a593Smuzhiyun case ALGORITHM_RIGHT_ASYMMETRIC:
3166*4882a593Smuzhiyun case ALGORITHM_ROTATING_ZERO_RESTART:
3167*4882a593Smuzhiyun case ALGORITHM_ROTATING_N_RESTART:
3168*4882a593Smuzhiyun if (sh->pd_idx == raid_disks-1)
3169*4882a593Smuzhiyun i--; /* Q D D D P */
3170*4882a593Smuzhiyun else if (i > sh->pd_idx)
3171*4882a593Smuzhiyun i -= 2; /* D D P Q D */
3172*4882a593Smuzhiyun break;
3173*4882a593Smuzhiyun case ALGORITHM_LEFT_SYMMETRIC:
3174*4882a593Smuzhiyun case ALGORITHM_RIGHT_SYMMETRIC:
3175*4882a593Smuzhiyun if (sh->pd_idx == raid_disks-1)
3176*4882a593Smuzhiyun i--; /* Q D D D P */
3177*4882a593Smuzhiyun else {
3178*4882a593Smuzhiyun /* D D P Q D */
3179*4882a593Smuzhiyun if (i < sh->pd_idx)
3180*4882a593Smuzhiyun i += raid_disks;
3181*4882a593Smuzhiyun i -= (sh->pd_idx + 2);
3182*4882a593Smuzhiyun }
3183*4882a593Smuzhiyun break;
3184*4882a593Smuzhiyun case ALGORITHM_PARITY_0:
3185*4882a593Smuzhiyun i -= 2;
3186*4882a593Smuzhiyun break;
3187*4882a593Smuzhiyun case ALGORITHM_PARITY_N:
3188*4882a593Smuzhiyun break;
3189*4882a593Smuzhiyun case ALGORITHM_ROTATING_N_CONTINUE:
3190*4882a593Smuzhiyun /* Like left_symmetric, but P is before Q */
3191*4882a593Smuzhiyun if (sh->pd_idx == 0)
3192*4882a593Smuzhiyun i--; /* P D D D Q */
3193*4882a593Smuzhiyun else {
3194*4882a593Smuzhiyun /* D D Q P D */
3195*4882a593Smuzhiyun if (i < sh->pd_idx)
3196*4882a593Smuzhiyun i += raid_disks;
3197*4882a593Smuzhiyun i -= (sh->pd_idx + 1);
3198*4882a593Smuzhiyun }
3199*4882a593Smuzhiyun break;
3200*4882a593Smuzhiyun case ALGORITHM_LEFT_ASYMMETRIC_6:
3201*4882a593Smuzhiyun case ALGORITHM_RIGHT_ASYMMETRIC_6:
3202*4882a593Smuzhiyun if (i > sh->pd_idx)
3203*4882a593Smuzhiyun i--;
3204*4882a593Smuzhiyun break;
3205*4882a593Smuzhiyun case ALGORITHM_LEFT_SYMMETRIC_6:
3206*4882a593Smuzhiyun case ALGORITHM_RIGHT_SYMMETRIC_6:
3207*4882a593Smuzhiyun if (i < sh->pd_idx)
3208*4882a593Smuzhiyun i += data_disks + 1;
3209*4882a593Smuzhiyun i -= (sh->pd_idx + 1);
3210*4882a593Smuzhiyun break;
3211*4882a593Smuzhiyun case ALGORITHM_PARITY_0_6:
3212*4882a593Smuzhiyun i -= 1;
3213*4882a593Smuzhiyun break;
3214*4882a593Smuzhiyun default:
3215*4882a593Smuzhiyun BUG();
3216*4882a593Smuzhiyun }
3217*4882a593Smuzhiyun break;
3218*4882a593Smuzhiyun }
3219*4882a593Smuzhiyun
3220*4882a593Smuzhiyun chunk_number = stripe * data_disks + i;
3221*4882a593Smuzhiyun r_sector = chunk_number * sectors_per_chunk + chunk_offset;
3222*4882a593Smuzhiyun
3223*4882a593Smuzhiyun check = raid5_compute_sector(conf, r_sector,
3224*4882a593Smuzhiyun previous, &dummy1, &sh2);
3225*4882a593Smuzhiyun if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
3226*4882a593Smuzhiyun || sh2.qd_idx != sh->qd_idx) {
3227*4882a593Smuzhiyun pr_warn("md/raid:%s: compute_blocknr: map not correct\n",
3228*4882a593Smuzhiyun mdname(conf->mddev));
3229*4882a593Smuzhiyun return 0;
3230*4882a593Smuzhiyun }
3231*4882a593Smuzhiyun return r_sector;
3232*4882a593Smuzhiyun }
3233*4882a593Smuzhiyun
3234*4882a593Smuzhiyun /*
3235*4882a593Smuzhiyun * There are cases where we want handle_stripe_dirtying() and
3236*4882a593Smuzhiyun * schedule_reconstruction() to delay towrite to some dev of a stripe.
3237*4882a593Smuzhiyun *
3238*4882a593Smuzhiyun * This function checks whether we want to delay the towrite. Specifically,
3239*4882a593Smuzhiyun * we delay the towrite when:
3240*4882a593Smuzhiyun *
3241*4882a593Smuzhiyun * 1. degraded stripe has a non-overwrite to the missing dev, AND this
3242*4882a593Smuzhiyun * stripe has data in journal (for other devices).
3243*4882a593Smuzhiyun *
3244*4882a593Smuzhiyun * In this case, when reading data for the non-overwrite dev, it is
3245*4882a593Smuzhiyun * necessary to handle complex rmw of write back cache (prexor with
3246*4882a593Smuzhiyun * orig_page, and xor with page). To keep read path simple, we would
3247*4882a593Smuzhiyun * like to flush data in journal to RAID disks first, so complex rmw
3248*4882a593Smuzhiyun * is handled in the write path (handle_stripe_dirtying).
3249*4882a593Smuzhiyun *
3250*4882a593Smuzhiyun * 2. when journal space is critical (R5C_LOG_CRITICAL=1)
3251*4882a593Smuzhiyun *
3252*4882a593Smuzhiyun * It is important to be able to flush all stripes in raid5-cache.
3253*4882a593Smuzhiyun * Therefore, we need reserve some space on the journal device for
3254*4882a593Smuzhiyun * these flushes. If flush operation includes pending writes to the
3255*4882a593Smuzhiyun * stripe, we need to reserve (conf->raid_disks + 1) pages per stripe
3256*4882a593Smuzhiyun * for the flush out. If we exclude these pending writes from flush
3257*4882a593Smuzhiyun * operation, we only need (conf->max_degraded + 1) pages per stripe.
3258*4882a593Smuzhiyun * Therefore, excluding pending writes in these cases enables more
3259*4882a593Smuzhiyun * efficient use of the journal device.
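* For example (illustrative figures): on an 8-device RAID6 array
* (max_degraded == 2), flushing a stripe together with its pending
* writes needs 9 pages of journal space, but only 3 when the pending
* writes are excluded.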
3260*4882a593Smuzhiyun *
3261*4882a593Smuzhiyun * Note: To make sure the stripe makes progress, we only delay
3262*4882a593Smuzhiyun * towrite for stripes with data already in journal (injournal > 0).
3263*4882a593Smuzhiyun * When LOG_CRITICAL, stripes with injournal == 0 will be sent to
3264*4882a593Smuzhiyun * no_space_stripes list.
3265*4882a593Smuzhiyun *
3266*4882a593Smuzhiyun * 3. during journal failure
3267*4882a593Smuzhiyun * In journal failure, we try to flush all cached data to raid disks
3268*4882a593Smuzhiyun * based on data in stripe cache. The array is read-only to upper
3269*4882a593Smuzhiyun * layers, so we would skip all pending writes.
3270*4882a593Smuzhiyun *
3271*4882a593Smuzhiyun */
3272*4882a593Smuzhiyun static inline bool delay_towrite(struct r5conf *conf,
3273*4882a593Smuzhiyun struct r5dev *dev,
3274*4882a593Smuzhiyun struct stripe_head_state *s)
3275*4882a593Smuzhiyun {
3276*4882a593Smuzhiyun /* case 1 above */
3277*4882a593Smuzhiyun if (!test_bit(R5_OVERWRITE, &dev->flags) &&
3278*4882a593Smuzhiyun !test_bit(R5_Insync, &dev->flags) && s->injournal)
3279*4882a593Smuzhiyun return true;
3280*4882a593Smuzhiyun /* case 2 above */
3281*4882a593Smuzhiyun if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
3282*4882a593Smuzhiyun s->injournal > 0)
3283*4882a593Smuzhiyun return true;
3284*4882a593Smuzhiyun /* case 3 above */
3285*4882a593Smuzhiyun if (s->log_failed && s->injournal)
3286*4882a593Smuzhiyun return true;
3287*4882a593Smuzhiyun return false;
3288*4882a593Smuzhiyun }
3289*4882a593Smuzhiyun
3290*4882a593Smuzhiyun static void
3291*4882a593Smuzhiyun schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
3292*4882a593Smuzhiyun int rcw, int expand)
3293*4882a593Smuzhiyun {
3294*4882a593Smuzhiyun int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks;
3295*4882a593Smuzhiyun struct r5conf *conf = sh->raid_conf;
3296*4882a593Smuzhiyun int level = conf->level;
3297*4882a593Smuzhiyun
3298*4882a593Smuzhiyun if (rcw) {
3299*4882a593Smuzhiyun /*
3300*4882a593Smuzhiyun * In some cases, handle_stripe_dirtying initially decided to
3301*4882a593Smuzhiyun * run rmw and allocates extra page for prexor. However, rcw is
3302*4882a593Smuzhiyun * cheaper later on. We need to free the extra page now,
3303*4882a593Smuzhiyun * because we won't be able to do that in ops_complete_prexor().
3304*4882a593Smuzhiyun */
3305*4882a593Smuzhiyun r5c_release_extra_page(sh);
3306*4882a593Smuzhiyun
3307*4882a593Smuzhiyun for (i = disks; i--; ) {
3308*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[i];
3309*4882a593Smuzhiyun
3310*4882a593Smuzhiyun if (dev->towrite && !delay_towrite(conf, dev, s)) {
3311*4882a593Smuzhiyun set_bit(R5_LOCKED, &dev->flags);
3312*4882a593Smuzhiyun set_bit(R5_Wantdrain, &dev->flags);
3313*4882a593Smuzhiyun if (!expand)
3314*4882a593Smuzhiyun clear_bit(R5_UPTODATE, &dev->flags);
3315*4882a593Smuzhiyun s->locked++;
3316*4882a593Smuzhiyun } else if (test_bit(R5_InJournal, &dev->flags)) {
3317*4882a593Smuzhiyun set_bit(R5_LOCKED, &dev->flags);
3318*4882a593Smuzhiyun s->locked++;
3319*4882a593Smuzhiyun }
3320*4882a593Smuzhiyun }
3321*4882a593Smuzhiyun /* if we are not expanding this is a proper write request, and
3322*4882a593Smuzhiyun * there will be bios with new data to be drained into the
3323*4882a593Smuzhiyun * stripe cache
3324*4882a593Smuzhiyun */
3325*4882a593Smuzhiyun if (!expand) {
3326*4882a593Smuzhiyun if (!s->locked)
3327*4882a593Smuzhiyun /* False alarm, nothing to do */
3328*4882a593Smuzhiyun return;
3329*4882a593Smuzhiyun sh->reconstruct_state = reconstruct_state_drain_run;
3330*4882a593Smuzhiyun set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
3331*4882a593Smuzhiyun } else
3332*4882a593Smuzhiyun sh->reconstruct_state = reconstruct_state_run;
3333*4882a593Smuzhiyun
3334*4882a593Smuzhiyun set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
3335*4882a593Smuzhiyun
3336*4882a593Smuzhiyun if (s->locked + conf->max_degraded == disks)
3337*4882a593Smuzhiyun if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
3338*4882a593Smuzhiyun atomic_inc(&conf->pending_full_writes);
3339*4882a593Smuzhiyun } else {
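/*
 * Note added for clarity: this branch schedules a read-modify-write.
 * The existing data blocks are XORed out of the parity
 * (STRIPE_OP_PREXOR) before the new data is drained in and the parity
 * is recomputed.
 */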
3340*4882a593Smuzhiyun BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
3341*4882a593Smuzhiyun test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
3342*4882a593Smuzhiyun BUG_ON(level == 6 &&
3343*4882a593Smuzhiyun (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) ||
3344*4882a593Smuzhiyun test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags))));
3345*4882a593Smuzhiyun
3346*4882a593Smuzhiyun for (i = disks; i--; ) {
3347*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[i];
3348*4882a593Smuzhiyun if (i == pd_idx || i == qd_idx)
3349*4882a593Smuzhiyun continue;
3350*4882a593Smuzhiyun
3351*4882a593Smuzhiyun if (dev->towrite &&
3352*4882a593Smuzhiyun (test_bit(R5_UPTODATE, &dev->flags) ||
3353*4882a593Smuzhiyun test_bit(R5_Wantcompute, &dev->flags))) {
3354*4882a593Smuzhiyun set_bit(R5_Wantdrain, &dev->flags);
3355*4882a593Smuzhiyun set_bit(R5_LOCKED, &dev->flags);
3356*4882a593Smuzhiyun clear_bit(R5_UPTODATE, &dev->flags);
3357*4882a593Smuzhiyun s->locked++;
3358*4882a593Smuzhiyun } else if (test_bit(R5_InJournal, &dev->flags)) {
3359*4882a593Smuzhiyun set_bit(R5_LOCKED, &dev->flags);
3360*4882a593Smuzhiyun s->locked++;
3361*4882a593Smuzhiyun }
3362*4882a593Smuzhiyun }
3363*4882a593Smuzhiyun if (!s->locked)
3364*4882a593Smuzhiyun /* False alarm - nothing to do */
3365*4882a593Smuzhiyun return;
3366*4882a593Smuzhiyun sh->reconstruct_state = reconstruct_state_prexor_drain_run;
3367*4882a593Smuzhiyun set_bit(STRIPE_OP_PREXOR, &s->ops_request);
3368*4882a593Smuzhiyun set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
3369*4882a593Smuzhiyun set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
3370*4882a593Smuzhiyun }
3371*4882a593Smuzhiyun
3372*4882a593Smuzhiyun /* keep the parity disk(s) locked while asynchronous operations
3373*4882a593Smuzhiyun * are in flight
3374*4882a593Smuzhiyun */
3375*4882a593Smuzhiyun set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
3376*4882a593Smuzhiyun clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
3377*4882a593Smuzhiyun s->locked++;
3378*4882a593Smuzhiyun
3379*4882a593Smuzhiyun if (level == 6) {
3380*4882a593Smuzhiyun int qd_idx = sh->qd_idx;
3381*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[qd_idx];
3382*4882a593Smuzhiyun
3383*4882a593Smuzhiyun set_bit(R5_LOCKED, &dev->flags);
3384*4882a593Smuzhiyun clear_bit(R5_UPTODATE, &dev->flags);
3385*4882a593Smuzhiyun s->locked++;
3386*4882a593Smuzhiyun }
3387*4882a593Smuzhiyun
3388*4882a593Smuzhiyun if (raid5_has_ppl(sh->raid_conf) && sh->ppl_page &&
3389*4882a593Smuzhiyun test_bit(STRIPE_OP_BIODRAIN, &s->ops_request) &&
3390*4882a593Smuzhiyun !test_bit(STRIPE_FULL_WRITE, &sh->state) &&
3391*4882a593Smuzhiyun test_bit(R5_Insync, &sh->dev[pd_idx].flags))
3392*4882a593Smuzhiyun set_bit(STRIPE_OP_PARTIAL_PARITY, &s->ops_request);
3393*4882a593Smuzhiyun
3394*4882a593Smuzhiyun pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
3395*4882a593Smuzhiyun __func__, (unsigned long long)sh->sector,
3396*4882a593Smuzhiyun s->locked, s->ops_request);
3397*4882a593Smuzhiyun }
3398*4882a593Smuzhiyun
3399*4882a593Smuzhiyun /*
3400*4882a593Smuzhiyun * Each stripe/dev can have one or more bion attached.
3401*4882a593Smuzhiyun * toread/towrite point to the first in a chain.
3402*4882a593Smuzhiyun * The bi_next chain must be in order.
3403*4882a593Smuzhiyun */
3404*4882a593Smuzhiyun static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
3405*4882a593Smuzhiyun int forwrite, int previous)
3406*4882a593Smuzhiyun {
3407*4882a593Smuzhiyun struct bio **bip;
3408*4882a593Smuzhiyun struct r5conf *conf = sh->raid_conf;
3409*4882a593Smuzhiyun int firstwrite=0;
3410*4882a593Smuzhiyun
3411*4882a593Smuzhiyun pr_debug("adding bi b#%llu to stripe s#%llu\n",
3412*4882a593Smuzhiyun (unsigned long long)bi->bi_iter.bi_sector,
3413*4882a593Smuzhiyun (unsigned long long)sh->sector);
3414*4882a593Smuzhiyun
3415*4882a593Smuzhiyun spin_lock_irq(&sh->stripe_lock);
3416*4882a593Smuzhiyun sh->dev[dd_idx].write_hint = bi->bi_write_hint;
3417*4882a593Smuzhiyun /* Don't allow new IO added to stripes in batch list */
3418*4882a593Smuzhiyun if (sh->batch_head)
3419*4882a593Smuzhiyun goto overlap;
3420*4882a593Smuzhiyun if (forwrite) {
3421*4882a593Smuzhiyun bip = &sh->dev[dd_idx].towrite;
3422*4882a593Smuzhiyun if (*bip == NULL)
3423*4882a593Smuzhiyun firstwrite = 1;
3424*4882a593Smuzhiyun } else
3425*4882a593Smuzhiyun bip = &sh->dev[dd_idx].toread;
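/*
 * The chain hanging off *bip is kept sorted by bi_iter.bi_sector, so this
 * scan is an insertion-sort step: walk forward to the slot where the new
 * bio belongs, bailing out if a neighbouring bio's range would overlap it.
 */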
3426*4882a593Smuzhiyun while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
3427*4882a593Smuzhiyun if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
3428*4882a593Smuzhiyun goto overlap;
3429*4882a593Smuzhiyun bip = & (*bip)->bi_next;
3430*4882a593Smuzhiyun }
3431*4882a593Smuzhiyun if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
3432*4882a593Smuzhiyun goto overlap;
3433*4882a593Smuzhiyun
3434*4882a593Smuzhiyun if (forwrite && raid5_has_ppl(conf)) {
3435*4882a593Smuzhiyun /*
3436*4882a593Smuzhiyun * With PPL only writes to consecutive data chunks within a
3437*4882a593Smuzhiyun * stripe are allowed because for a single stripe_head we can
3438*4882a593Smuzhiyun * only have one PPL entry at a time, which describes one data
3439*4882a593Smuzhiyun * range. Not really an overlap, but wait_for_overlap can be
3440*4882a593Smuzhiyun * used to handle this.
3441*4882a593Smuzhiyun */
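/*
 * Sketch of the consecutiveness check below (numbers are illustrative
 * only): with chunk_sectors == 8 and writes pending on three consecutive
 * data chunks, the dev[].sector values might be 0, 8 and 16, so first == 0,
 * last == 16, count == 3 and first + 8 * (3 - 1) == last, which passes.
 * A gap in the middle leaves 'last' too large and the write is treated
 * like an overlap.
 */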
3442*4882a593Smuzhiyun sector_t sector;
3443*4882a593Smuzhiyun sector_t first = 0;
3444*4882a593Smuzhiyun sector_t last = 0;
3445*4882a593Smuzhiyun int count = 0;
3446*4882a593Smuzhiyun int i;
3447*4882a593Smuzhiyun
3448*4882a593Smuzhiyun for (i = 0; i < sh->disks; i++) {
3449*4882a593Smuzhiyun if (i != sh->pd_idx &&
3450*4882a593Smuzhiyun (i == dd_idx || sh->dev[i].towrite)) {
3451*4882a593Smuzhiyun sector = sh->dev[i].sector;
3452*4882a593Smuzhiyun if (count == 0 || sector < first)
3453*4882a593Smuzhiyun first = sector;
3454*4882a593Smuzhiyun if (sector > last)
3455*4882a593Smuzhiyun last = sector;
3456*4882a593Smuzhiyun count++;
3457*4882a593Smuzhiyun }
3458*4882a593Smuzhiyun }
3459*4882a593Smuzhiyun
3460*4882a593Smuzhiyun if (first + conf->chunk_sectors * (count - 1) != last)
3461*4882a593Smuzhiyun goto overlap;
3462*4882a593Smuzhiyun }
3463*4882a593Smuzhiyun
3464*4882a593Smuzhiyun if (!forwrite || previous)
3465*4882a593Smuzhiyun clear_bit(STRIPE_BATCH_READY, &sh->state);
3466*4882a593Smuzhiyun
3467*4882a593Smuzhiyun BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
3468*4882a593Smuzhiyun if (*bip)
3469*4882a593Smuzhiyun bi->bi_next = *bip;
3470*4882a593Smuzhiyun *bip = bi;
3471*4882a593Smuzhiyun bio_inc_remaining(bi);
3472*4882a593Smuzhiyun md_write_inc(conf->mddev, bi);
3473*4882a593Smuzhiyun
3474*4882a593Smuzhiyun if (forwrite) {
3475*4882a593Smuzhiyun /* check if page is covered */
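/*
 * Walk the sorted towrite chain from the start of this stripe chunk;
 * 'sector' only advances while each bio begins at or before it, so it
 * can reach the end of the chunk only if the queued writes leave no gap.
 */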
3476*4882a593Smuzhiyun sector_t sector = sh->dev[dd_idx].sector;
3477*4882a593Smuzhiyun for (bi=sh->dev[dd_idx].towrite;
3478*4882a593Smuzhiyun sector < sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf) &&
3479*4882a593Smuzhiyun bi && bi->bi_iter.bi_sector <= sector;
3480*4882a593Smuzhiyun bi = r5_next_bio(conf, bi, sh->dev[dd_idx].sector)) {
3481*4882a593Smuzhiyun if (bio_end_sector(bi) >= sector)
3482*4882a593Smuzhiyun sector = bio_end_sector(bi);
3483*4882a593Smuzhiyun }
3484*4882a593Smuzhiyun if (sector >= sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf))
3485*4882a593Smuzhiyun if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags))
3486*4882a593Smuzhiyun sh->overwrite_disks++;
3487*4882a593Smuzhiyun }
3488*4882a593Smuzhiyun
3489*4882a593Smuzhiyun pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
3490*4882a593Smuzhiyun (unsigned long long)(*bip)->bi_iter.bi_sector,
3491*4882a593Smuzhiyun (unsigned long long)sh->sector, dd_idx);
3492*4882a593Smuzhiyun
3493*4882a593Smuzhiyun if (conf->mddev->bitmap && firstwrite) {
3494*4882a593Smuzhiyun /* Cannot hold spinlock over bitmap_startwrite,
3495*4882a593Smuzhiyun * but must ensure this isn't added to a batch until
3496*4882a593Smuzhiyun * we have added to the bitmap and set bm_seq.
3497*4882a593Smuzhiyun * So set STRIPE_BITMAP_PENDING to prevent
3498*4882a593Smuzhiyun * batching.
3499*4882a593Smuzhiyun * If multiple add_stripe_bio() calls race here they
3500*4882a593Smuzhiyun * must all set STRIPE_BITMAP_PENDING. So only the first one
3501*4882a593Smuzhiyun * to complete "bitmap_startwrite" gets to set
3502*4882a593Smuzhiyun * STRIPE_BIT_DELAY. This is important as once a stripe
3503*4882a593Smuzhiyun * is added to a batch, STRIPE_BIT_DELAY cannot be changed
3504*4882a593Smuzhiyun * any more.
3505*4882a593Smuzhiyun */
3506*4882a593Smuzhiyun set_bit(STRIPE_BITMAP_PENDING, &sh->state);
3507*4882a593Smuzhiyun spin_unlock_irq(&sh->stripe_lock);
3508*4882a593Smuzhiyun md_bitmap_startwrite(conf->mddev->bitmap, sh->sector,
3509*4882a593Smuzhiyun RAID5_STRIPE_SECTORS(conf), 0);
3510*4882a593Smuzhiyun spin_lock_irq(&sh->stripe_lock);
3511*4882a593Smuzhiyun clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
3512*4882a593Smuzhiyun if (!sh->batch_head) {
3513*4882a593Smuzhiyun sh->bm_seq = conf->seq_flush+1;
3514*4882a593Smuzhiyun set_bit(STRIPE_BIT_DELAY, &sh->state);
3515*4882a593Smuzhiyun }
3516*4882a593Smuzhiyun }
3517*4882a593Smuzhiyun spin_unlock_irq(&sh->stripe_lock);
3518*4882a593Smuzhiyun
3519*4882a593Smuzhiyun if (stripe_can_batch(sh))
3520*4882a593Smuzhiyun stripe_add_to_batch_list(conf, sh);
3521*4882a593Smuzhiyun return 1;
3522*4882a593Smuzhiyun
3523*4882a593Smuzhiyun overlap:
3524*4882a593Smuzhiyun set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
3525*4882a593Smuzhiyun spin_unlock_irq(&sh->stripe_lock);
3526*4882a593Smuzhiyun return 0;
3527*4882a593Smuzhiyun }
3528*4882a593Smuzhiyun
3529*4882a593Smuzhiyun static void end_reshape(struct r5conf *conf);
3530*4882a593Smuzhiyun
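/*
 * stripe_set_idx - translate a stripe's position down the device into the
 * matching array sector and let raid5_compute_sector() fill in sh->pd_idx
 * and sh->qd_idx for the requested geometry ('previous' selects the
 * pre-reshape layout); the dd_idx result is not needed here.
 */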
3531*4882a593Smuzhiyun static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
3532*4882a593Smuzhiyun struct stripe_head *sh)
3533*4882a593Smuzhiyun {
3534*4882a593Smuzhiyun int sectors_per_chunk =
3535*4882a593Smuzhiyun previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
3536*4882a593Smuzhiyun int dd_idx;
3537*4882a593Smuzhiyun int chunk_offset = sector_div(stripe, sectors_per_chunk);
3538*4882a593Smuzhiyun int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
3539*4882a593Smuzhiyun
3540*4882a593Smuzhiyun raid5_compute_sector(conf,
3541*4882a593Smuzhiyun stripe * (disks - conf->max_degraded)
3542*4882a593Smuzhiyun *sectors_per_chunk + chunk_offset,
3543*4882a593Smuzhiyun previous,
3544*4882a593Smuzhiyun &dd_idx, sh);
3545*4882a593Smuzhiyun }
3546*4882a593Smuzhiyun
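/*
 * handle_failed_stripe - too many devices have failed for this stripe to
 * make progress: return all pending writes and unfetchable reads with an
 * error, record bad blocks on devices that reported read errors, and drop
 * the per-stripe write accounting so the stripe can be retired.
 */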
3547*4882a593Smuzhiyun static void
3548*4882a593Smuzhiyun handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3549*4882a593Smuzhiyun struct stripe_head_state *s, int disks)
3550*4882a593Smuzhiyun {
3551*4882a593Smuzhiyun int i;
3552*4882a593Smuzhiyun BUG_ON(sh->batch_head);
3553*4882a593Smuzhiyun for (i = disks; i--; ) {
3554*4882a593Smuzhiyun struct bio *bi;
3555*4882a593Smuzhiyun int bitmap_end = 0;
3556*4882a593Smuzhiyun
3557*4882a593Smuzhiyun if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
3558*4882a593Smuzhiyun struct md_rdev *rdev;
3559*4882a593Smuzhiyun rcu_read_lock();
3560*4882a593Smuzhiyun rdev = rcu_dereference(conf->disks[i].rdev);
3561*4882a593Smuzhiyun if (rdev && test_bit(In_sync, &rdev->flags) &&
3562*4882a593Smuzhiyun !test_bit(Faulty, &rdev->flags))
3563*4882a593Smuzhiyun atomic_inc(&rdev->nr_pending);
3564*4882a593Smuzhiyun else
3565*4882a593Smuzhiyun rdev = NULL;
3566*4882a593Smuzhiyun rcu_read_unlock();
3567*4882a593Smuzhiyun if (rdev) {
3568*4882a593Smuzhiyun if (!rdev_set_badblocks(
3569*4882a593Smuzhiyun rdev,
3570*4882a593Smuzhiyun sh->sector,
3571*4882a593Smuzhiyun RAID5_STRIPE_SECTORS(conf), 0))
3572*4882a593Smuzhiyun md_error(conf->mddev, rdev);
3573*4882a593Smuzhiyun rdev_dec_pending(rdev, conf->mddev);
3574*4882a593Smuzhiyun }
3575*4882a593Smuzhiyun }
3576*4882a593Smuzhiyun spin_lock_irq(&sh->stripe_lock);
3577*4882a593Smuzhiyun /* fail all writes first */
3578*4882a593Smuzhiyun bi = sh->dev[i].towrite;
3579*4882a593Smuzhiyun sh->dev[i].towrite = NULL;
3580*4882a593Smuzhiyun sh->overwrite_disks = 0;
3581*4882a593Smuzhiyun spin_unlock_irq(&sh->stripe_lock);
3582*4882a593Smuzhiyun if (bi)
3583*4882a593Smuzhiyun bitmap_end = 1;
3584*4882a593Smuzhiyun
3585*4882a593Smuzhiyun log_stripe_write_finished(sh);
3586*4882a593Smuzhiyun
3587*4882a593Smuzhiyun if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
3588*4882a593Smuzhiyun wake_up(&conf->wait_for_overlap);
3589*4882a593Smuzhiyun
3590*4882a593Smuzhiyun while (bi && bi->bi_iter.bi_sector <
3591*4882a593Smuzhiyun sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
3592*4882a593Smuzhiyun struct bio *nextbi = r5_next_bio(conf, bi, sh->dev[i].sector);
3593*4882a593Smuzhiyun
3594*4882a593Smuzhiyun md_write_end(conf->mddev);
3595*4882a593Smuzhiyun bio_io_error(bi);
3596*4882a593Smuzhiyun bi = nextbi;
3597*4882a593Smuzhiyun }
3598*4882a593Smuzhiyun if (bitmap_end)
3599*4882a593Smuzhiyun md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
3600*4882a593Smuzhiyun RAID5_STRIPE_SECTORS(conf), 0, 0);
3601*4882a593Smuzhiyun bitmap_end = 0;
3602*4882a593Smuzhiyun /* and fail all 'written' */
3603*4882a593Smuzhiyun bi = sh->dev[i].written;
3604*4882a593Smuzhiyun sh->dev[i].written = NULL;
3605*4882a593Smuzhiyun if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) {
3606*4882a593Smuzhiyun WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
3607*4882a593Smuzhiyun sh->dev[i].page = sh->dev[i].orig_page;
3608*4882a593Smuzhiyun }
3609*4882a593Smuzhiyun
3610*4882a593Smuzhiyun if (bi) bitmap_end = 1;
3611*4882a593Smuzhiyun while (bi && bi->bi_iter.bi_sector <
3612*4882a593Smuzhiyun sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
3613*4882a593Smuzhiyun struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector);
3614*4882a593Smuzhiyun
3615*4882a593Smuzhiyun md_write_end(conf->mddev);
3616*4882a593Smuzhiyun bio_io_error(bi);
3617*4882a593Smuzhiyun bi = bi2;
3618*4882a593Smuzhiyun }
3619*4882a593Smuzhiyun
3620*4882a593Smuzhiyun /* fail any reads if this device is non-operational and
3621*4882a593Smuzhiyun * the data has not reached the cache yet.
3622*4882a593Smuzhiyun */
3623*4882a593Smuzhiyun if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
3624*4882a593Smuzhiyun s->failed > conf->max_degraded &&
3625*4882a593Smuzhiyun (!test_bit(R5_Insync, &sh->dev[i].flags) ||
3626*4882a593Smuzhiyun test_bit(R5_ReadError, &sh->dev[i].flags))) {
3627*4882a593Smuzhiyun spin_lock_irq(&sh->stripe_lock);
3628*4882a593Smuzhiyun bi = sh->dev[i].toread;
3629*4882a593Smuzhiyun sh->dev[i].toread = NULL;
3630*4882a593Smuzhiyun spin_unlock_irq(&sh->stripe_lock);
3631*4882a593Smuzhiyun if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
3632*4882a593Smuzhiyun wake_up(&conf->wait_for_overlap);
3633*4882a593Smuzhiyun if (bi)
3634*4882a593Smuzhiyun s->to_read--;
3635*4882a593Smuzhiyun while (bi && bi->bi_iter.bi_sector <
3636*4882a593Smuzhiyun sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
3637*4882a593Smuzhiyun struct bio *nextbi =
3638*4882a593Smuzhiyun r5_next_bio(conf, bi, sh->dev[i].sector);
3639*4882a593Smuzhiyun
3640*4882a593Smuzhiyun bio_io_error(bi);
3641*4882a593Smuzhiyun bi = nextbi;
3642*4882a593Smuzhiyun }
3643*4882a593Smuzhiyun }
3644*4882a593Smuzhiyun if (bitmap_end)
3645*4882a593Smuzhiyun md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
3646*4882a593Smuzhiyun RAID5_STRIPE_SECTORS(conf), 0, 0);
3647*4882a593Smuzhiyun /* If we were in the middle of a write the parity block might
3648*4882a593Smuzhiyun * still be locked - so just clear all R5_LOCKED flags
3649*4882a593Smuzhiyun */
3650*4882a593Smuzhiyun clear_bit(R5_LOCKED, &sh->dev[i].flags);
3651*4882a593Smuzhiyun }
3652*4882a593Smuzhiyun s->to_write = 0;
3653*4882a593Smuzhiyun s->written = 0;
3654*4882a593Smuzhiyun
3655*4882a593Smuzhiyun if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
3656*4882a593Smuzhiyun if (atomic_dec_and_test(&conf->pending_full_writes))
3657*4882a593Smuzhiyun md_wakeup_thread(conf->mddev->thread);
3658*4882a593Smuzhiyun }
3659*4882a593Smuzhiyun
3660*4882a593Smuzhiyun static void
3661*4882a593Smuzhiyun handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
3662*4882a593Smuzhiyun struct stripe_head_state *s)
3663*4882a593Smuzhiyun {
3664*4882a593Smuzhiyun int abort = 0;
3665*4882a593Smuzhiyun int i;
3666*4882a593Smuzhiyun
3667*4882a593Smuzhiyun BUG_ON(sh->batch_head);
3668*4882a593Smuzhiyun clear_bit(STRIPE_SYNCING, &sh->state);
3669*4882a593Smuzhiyun if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
3670*4882a593Smuzhiyun wake_up(&conf->wait_for_overlap);
3671*4882a593Smuzhiyun s->syncing = 0;
3672*4882a593Smuzhiyun s->replacing = 0;
3673*4882a593Smuzhiyun /* There is nothing more to do for sync/check/repair.
3674*4882a593Smuzhiyun * Don't even need to abort as that is handled elsewhere
3675*4882a593Smuzhiyun * if needed, and not always wanted e.g. if there is a known
3676*4882a593Smuzhiyun * bad block here.
3677*4882a593Smuzhiyun * For recover/replace we need to record a bad block on all
3678*4882a593Smuzhiyun * non-sync devices, or abort the recovery
3679*4882a593Smuzhiyun */
3680*4882a593Smuzhiyun if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
3681*4882a593Smuzhiyun /* During recovery devices cannot be removed, so
3682*4882a593Smuzhiyun * locking and refcounting of rdevs is not needed
3683*4882a593Smuzhiyun */
3684*4882a593Smuzhiyun rcu_read_lock();
3685*4882a593Smuzhiyun for (i = 0; i < conf->raid_disks; i++) {
3686*4882a593Smuzhiyun struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
3687*4882a593Smuzhiyun if (rdev
3688*4882a593Smuzhiyun && !test_bit(Faulty, &rdev->flags)
3689*4882a593Smuzhiyun && !test_bit(In_sync, &rdev->flags)
3690*4882a593Smuzhiyun && !rdev_set_badblocks(rdev, sh->sector,
3691*4882a593Smuzhiyun RAID5_STRIPE_SECTORS(conf), 0))
3692*4882a593Smuzhiyun abort = 1;
3693*4882a593Smuzhiyun rdev = rcu_dereference(conf->disks[i].replacement);
3694*4882a593Smuzhiyun if (rdev
3695*4882a593Smuzhiyun && !test_bit(Faulty, &rdev->flags)
3696*4882a593Smuzhiyun && !test_bit(In_sync, &rdev->flags)
3697*4882a593Smuzhiyun && !rdev_set_badblocks(rdev, sh->sector,
3698*4882a593Smuzhiyun RAID5_STRIPE_SECTORS(conf), 0))
3699*4882a593Smuzhiyun abort = 1;
3700*4882a593Smuzhiyun }
3701*4882a593Smuzhiyun rcu_read_unlock();
3702*4882a593Smuzhiyun if (abort)
3703*4882a593Smuzhiyun conf->recovery_disabled =
3704*4882a593Smuzhiyun conf->mddev->recovery_disabled;
3705*4882a593Smuzhiyun }
3706*4882a593Smuzhiyun md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), !abort);
3707*4882a593Smuzhiyun }
3708*4882a593Smuzhiyun
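/*
 * want_replace - return 1 if this slot has a live replacement device that
 * has not yet been recovered past this stripe's sector (or the array's
 * resync has not passed it), i.e. the replaced block still needs to be
 * read so it can be copied onto the replacement.
 */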
3709*4882a593Smuzhiyun static int want_replace(struct stripe_head *sh, int disk_idx)
3710*4882a593Smuzhiyun {
3711*4882a593Smuzhiyun struct md_rdev *rdev;
3712*4882a593Smuzhiyun int rv = 0;
3713*4882a593Smuzhiyun
3714*4882a593Smuzhiyun rcu_read_lock();
3715*4882a593Smuzhiyun rdev = rcu_dereference(sh->raid_conf->disks[disk_idx].replacement);
3716*4882a593Smuzhiyun if (rdev
3717*4882a593Smuzhiyun && !test_bit(Faulty, &rdev->flags)
3718*4882a593Smuzhiyun && !test_bit(In_sync, &rdev->flags)
3719*4882a593Smuzhiyun && (rdev->recovery_offset <= sh->sector
3720*4882a593Smuzhiyun || rdev->mddev->recovery_cp <= sh->sector))
3721*4882a593Smuzhiyun rv = 1;
3722*4882a593Smuzhiyun rcu_read_unlock();
3723*4882a593Smuzhiyun return rv;
3724*4882a593Smuzhiyun }
3725*4882a593Smuzhiyun
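/*
 * need_this_block - decide whether the data in sh->dev[disk_idx] must be
 * read or computed before the stripe can make progress: blocks directly
 * requested by I/O, everything during sync/expand/replace, and whatever a
 * forced reconstruct-write will need when a device has failed.
 */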
3726*4882a593Smuzhiyun static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
3727*4882a593Smuzhiyun int disk_idx, int disks)
3728*4882a593Smuzhiyun {
3729*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[disk_idx];
3730*4882a593Smuzhiyun struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
3731*4882a593Smuzhiyun &sh->dev[s->failed_num[1]] };
3732*4882a593Smuzhiyun int i;
3733*4882a593Smuzhiyun bool force_rcw = (sh->raid_conf->rmw_level == PARITY_DISABLE_RMW);
3734*4882a593Smuzhiyun
3735*4882a593Smuzhiyun
3736*4882a593Smuzhiyun if (test_bit(R5_LOCKED, &dev->flags) ||
3737*4882a593Smuzhiyun test_bit(R5_UPTODATE, &dev->flags))
3738*4882a593Smuzhiyun /* No point reading this as we already have it or have
3739*4882a593Smuzhiyun * decided to get it.
3740*4882a593Smuzhiyun */
3741*4882a593Smuzhiyun return 0;
3742*4882a593Smuzhiyun
3743*4882a593Smuzhiyun if (dev->toread ||
3744*4882a593Smuzhiyun (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)))
3745*4882a593Smuzhiyun /* We need this block to directly satisfy a request */
3746*4882a593Smuzhiyun return 1;
3747*4882a593Smuzhiyun
3748*4882a593Smuzhiyun if (s->syncing || s->expanding ||
3749*4882a593Smuzhiyun (s->replacing && want_replace(sh, disk_idx)))
3750*4882a593Smuzhiyun /* When syncing or expanding, we read everything.
3751*4882a593Smuzhiyun * When replacing, we need the replaced block.
3752*4882a593Smuzhiyun */
3753*4882a593Smuzhiyun return 1;
3754*4882a593Smuzhiyun
3755*4882a593Smuzhiyun if ((s->failed >= 1 && fdev[0]->toread) ||
3756*4882a593Smuzhiyun (s->failed >= 2 && fdev[1]->toread))
3757*4882a593Smuzhiyun /* If we want to read from a failed device, then
3758*4882a593Smuzhiyun * we need to actually read every other device.
3759*4882a593Smuzhiyun */
3760*4882a593Smuzhiyun return 1;
3761*4882a593Smuzhiyun
3762*4882a593Smuzhiyun /* Sometimes neither read-modify-write nor reconstruct-write
3763*4882a593Smuzhiyun * cycles can work. In those cases we read every block we
3764*4882a593Smuzhiyun * can. Then the parity-update is certain to have enough to
3765*4882a593Smuzhiyun * work with.
3766*4882a593Smuzhiyun * This can only be a problem when we need to write something,
3767*4882a593Smuzhiyun * and some device has failed. If either of those tests
3768*4882a593Smuzhiyun * fails, we need look no further.
3769*4882a593Smuzhiyun */
3770*4882a593Smuzhiyun if (!s->failed || !s->to_write)
3771*4882a593Smuzhiyun return 0;
3772*4882a593Smuzhiyun
3773*4882a593Smuzhiyun if (test_bit(R5_Insync, &dev->flags) &&
3774*4882a593Smuzhiyun !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3775*4882a593Smuzhiyun /* Pre-reads are not permitted until after a short delay
3776*4882a593Smuzhiyun * to gather multiple requests. However if this
3777*4882a593Smuzhiyun * device is not Insync, the block can only be computed,
3778*4882a593Smuzhiyun * so there is no need to delay that.
3779*4882a593Smuzhiyun */
3780*4882a593Smuzhiyun return 0;
3781*4882a593Smuzhiyun
3782*4882a593Smuzhiyun for (i = 0; i < s->failed && i < 2; i++) {
3783*4882a593Smuzhiyun if (fdev[i]->towrite &&
3784*4882a593Smuzhiyun !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
3785*4882a593Smuzhiyun !test_bit(R5_OVERWRITE, &fdev[i]->flags))
3786*4882a593Smuzhiyun /* If we have a partial write to a failed
3787*4882a593Smuzhiyun * device, then we will need to reconstruct
3788*4882a593Smuzhiyun * the content of that device, so all other
3789*4882a593Smuzhiyun * devices must be read.
3790*4882a593Smuzhiyun */
3791*4882a593Smuzhiyun return 1;
3792*4882a593Smuzhiyun
3793*4882a593Smuzhiyun if (s->failed >= 2 &&
3794*4882a593Smuzhiyun (fdev[i]->towrite ||
3795*4882a593Smuzhiyun s->failed_num[i] == sh->pd_idx ||
3796*4882a593Smuzhiyun s->failed_num[i] == sh->qd_idx) &&
3797*4882a593Smuzhiyun !test_bit(R5_UPTODATE, &fdev[i]->flags))
3798*4882a593Smuzhiyun /* In max degraded raid6, if the failed disk is P, Q,
3799*4882a593Smuzhiyun * or we want to read from the failed disk, we need to do
3800*4882a593Smuzhiyun * reconstruct-write.
3801*4882a593Smuzhiyun */
3802*4882a593Smuzhiyun force_rcw = true;
3803*4882a593Smuzhiyun }
3804*4882a593Smuzhiyun
3805*4882a593Smuzhiyun /* If we are forced to do a reconstruct-write, because parity
3806*4882a593Smuzhiyun * cannot be trusted and we are currently recovering it, there
3807*4882a593Smuzhiyun * is extra need to be careful.
3808*4882a593Smuzhiyun * If one of the devices that we would need to read, because
3809*4882a593Smuzhiyun * it is not being overwritten (and maybe not written at all)
3810*4882a593Smuzhiyun * is missing/faulty, then we need to read everything we can.
3811*4882a593Smuzhiyun */
3812*4882a593Smuzhiyun if (!force_rcw &&
3813*4882a593Smuzhiyun sh->sector < sh->raid_conf->mddev->recovery_cp)
3814*4882a593Smuzhiyun /* reconstruct-write isn't being forced */
3815*4882a593Smuzhiyun return 0;
3816*4882a593Smuzhiyun for (i = 0; i < s->failed && i < 2; i++) {
3817*4882a593Smuzhiyun if (s->failed_num[i] != sh->pd_idx &&
3818*4882a593Smuzhiyun s->failed_num[i] != sh->qd_idx &&
3819*4882a593Smuzhiyun !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
3820*4882a593Smuzhiyun !test_bit(R5_OVERWRITE, &fdev[i]->flags))
3821*4882a593Smuzhiyun return 1;
3822*4882a593Smuzhiyun }
3823*4882a593Smuzhiyun
3824*4882a593Smuzhiyun return 0;
3825*4882a593Smuzhiyun }
3826*4882a593Smuzhiyun
3827*4882a593Smuzhiyun /* fetch_block - checks the given member device to see if its data needs
3828*4882a593Smuzhiyun * to be read or computed to satisfy a request.
3829*4882a593Smuzhiyun *
3830*4882a593Smuzhiyun * Returns 1 when no more member devices need to be checked, otherwise returns
3831*4882a593Smuzhiyun * 0 to tell the loop in handle_stripe_fill to continue
3832*4882a593Smuzhiyun */
3833*4882a593Smuzhiyun static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
3834*4882a593Smuzhiyun int disk_idx, int disks)
3835*4882a593Smuzhiyun {
3836*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[disk_idx];
3837*4882a593Smuzhiyun
3838*4882a593Smuzhiyun /* is the data in this block needed, and can we get it? */
3839*4882a593Smuzhiyun if (need_this_block(sh, s, disk_idx, disks)) {
3840*4882a593Smuzhiyun /* we would like to get this block, possibly by computing it,
3841*4882a593Smuzhiyun * otherwise read it if the backing disk is insync
3842*4882a593Smuzhiyun */
3843*4882a593Smuzhiyun BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
3844*4882a593Smuzhiyun BUG_ON(test_bit(R5_Wantread, &dev->flags));
3845*4882a593Smuzhiyun BUG_ON(sh->batch_head);
3846*4882a593Smuzhiyun
3847*4882a593Smuzhiyun /*
3848*4882a593Smuzhiyun * In the raid6 case if the only non-uptodate disk is P
3849*4882a593Smuzhiyun * then we already trusted P to compute the other failed
3850*4882a593Smuzhiyun * drives. It is safe to compute rather than re-read P.
3851*4882a593Smuzhiyun * In other cases we only compute blocks from failed
3852*4882a593Smuzhiyun * devices, otherwise check/repair might fail to detect
3853*4882a593Smuzhiyun * a real inconsistency.
3854*4882a593Smuzhiyun */
3855*4882a593Smuzhiyun
3856*4882a593Smuzhiyun if ((s->uptodate == disks - 1) &&
3857*4882a593Smuzhiyun ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) ||
3858*4882a593Smuzhiyun (s->failed && (disk_idx == s->failed_num[0] ||
3859*4882a593Smuzhiyun disk_idx == s->failed_num[1])))) {
3860*4882a593Smuzhiyun /* a disk has failed and we're requested to fetch its block,
3861*4882a593Smuzhiyun * so compute it
3862*4882a593Smuzhiyun */
3863*4882a593Smuzhiyun pr_debug("Computing stripe %llu block %d\n",
3864*4882a593Smuzhiyun (unsigned long long)sh->sector, disk_idx);
3865*4882a593Smuzhiyun set_bit(STRIPE_COMPUTE_RUN, &sh->state);
3866*4882a593Smuzhiyun set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
3867*4882a593Smuzhiyun set_bit(R5_Wantcompute, &dev->flags);
3868*4882a593Smuzhiyun sh->ops.target = disk_idx;
3869*4882a593Smuzhiyun sh->ops.target2 = -1; /* no 2nd target */
3870*4882a593Smuzhiyun s->req_compute = 1;
3871*4882a593Smuzhiyun /* Careful: from this point on 'uptodate' is in the eye
3872*4882a593Smuzhiyun * of raid_run_ops which services 'compute' operations
3873*4882a593Smuzhiyun * before writes. R5_Wantcompute flags a block that will
3874*4882a593Smuzhiyun * be R5_UPTODATE by the time it is needed for a
3875*4882a593Smuzhiyun * subsequent operation.
3876*4882a593Smuzhiyun */
3877*4882a593Smuzhiyun s->uptodate++;
3878*4882a593Smuzhiyun return 1;
3879*4882a593Smuzhiyun } else if (s->uptodate == disks-2 && s->failed >= 2) {
3880*4882a593Smuzhiyun /* Computing 2-failure is *very* expensive; only
3881*4882a593Smuzhiyun * do it if failed >= 2
3882*4882a593Smuzhiyun */
3883*4882a593Smuzhiyun int other;
3884*4882a593Smuzhiyun for (other = disks; other--; ) {
3885*4882a593Smuzhiyun if (other == disk_idx)
3886*4882a593Smuzhiyun continue;
3887*4882a593Smuzhiyun if (!test_bit(R5_UPTODATE,
3888*4882a593Smuzhiyun &sh->dev[other].flags))
3889*4882a593Smuzhiyun break;
3890*4882a593Smuzhiyun }
3891*4882a593Smuzhiyun BUG_ON(other < 0);
3892*4882a593Smuzhiyun pr_debug("Computing stripe %llu blocks %d,%d\n",
3893*4882a593Smuzhiyun (unsigned long long)sh->sector,
3894*4882a593Smuzhiyun disk_idx, other);
3895*4882a593Smuzhiyun set_bit(STRIPE_COMPUTE_RUN, &sh->state);
3896*4882a593Smuzhiyun set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
3897*4882a593Smuzhiyun set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
3898*4882a593Smuzhiyun set_bit(R5_Wantcompute, &sh->dev[other].flags);
3899*4882a593Smuzhiyun sh->ops.target = disk_idx;
3900*4882a593Smuzhiyun sh->ops.target2 = other;
3901*4882a593Smuzhiyun s->uptodate += 2;
3902*4882a593Smuzhiyun s->req_compute = 1;
3903*4882a593Smuzhiyun return 1;
3904*4882a593Smuzhiyun } else if (test_bit(R5_Insync, &dev->flags)) {
3905*4882a593Smuzhiyun set_bit(R5_LOCKED, &dev->flags);
3906*4882a593Smuzhiyun set_bit(R5_Wantread, &dev->flags);
3907*4882a593Smuzhiyun s->locked++;
3908*4882a593Smuzhiyun pr_debug("Reading block %d (sync=%d)\n",
3909*4882a593Smuzhiyun disk_idx, s->syncing);
3910*4882a593Smuzhiyun }
3911*4882a593Smuzhiyun }
3912*4882a593Smuzhiyun
3913*4882a593Smuzhiyun return 0;
3914*4882a593Smuzhiyun }
3915*4882a593Smuzhiyun
3916*4882a593Smuzhiyun /*
3917*4882a593Smuzhiyun * handle_stripe_fill - read or compute data to satisfy pending requests.
3918*4882a593Smuzhiyun */
3919*4882a593Smuzhiyun static void handle_stripe_fill(struct stripe_head *sh,
3920*4882a593Smuzhiyun struct stripe_head_state *s,
3921*4882a593Smuzhiyun int disks)
3922*4882a593Smuzhiyun {
3923*4882a593Smuzhiyun int i;
3924*4882a593Smuzhiyun
3925*4882a593Smuzhiyun /* look for blocks to read/compute, skip this if a compute
3926*4882a593Smuzhiyun * is already in flight, or if the stripe contents are in the
3927*4882a593Smuzhiyun * midst of changing due to a write
3928*4882a593Smuzhiyun */
3929*4882a593Smuzhiyun if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
3930*4882a593Smuzhiyun !sh->reconstruct_state) {
3931*4882a593Smuzhiyun
3932*4882a593Smuzhiyun /*
3933*4882a593Smuzhiyun * For degraded stripe with data in journal, do not handle
3934*4882a593Smuzhiyun * read requests yet, instead, flush the stripe to raid
3935*4882a593Smuzhiyun * disks first, this avoids handling complex rmw of write
3936*4882a593Smuzhiyun * back cache (prexor with orig_page, and then xor with
3937*4882a593Smuzhiyun * page) in the read path
3938*4882a593Smuzhiyun */
3939*4882a593Smuzhiyun if (s->to_read && s->injournal && s->failed) {
3940*4882a593Smuzhiyun if (test_bit(STRIPE_R5C_CACHING, &sh->state))
3941*4882a593Smuzhiyun r5c_make_stripe_write_out(sh);
3942*4882a593Smuzhiyun goto out;
3943*4882a593Smuzhiyun }
3944*4882a593Smuzhiyun
3945*4882a593Smuzhiyun for (i = disks; i--; )
3946*4882a593Smuzhiyun if (fetch_block(sh, s, i, disks))
3947*4882a593Smuzhiyun break;
3948*4882a593Smuzhiyun }
3949*4882a593Smuzhiyun out:
3950*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
3951*4882a593Smuzhiyun }
3952*4882a593Smuzhiyun
3953*4882a593Smuzhiyun static void break_stripe_batch_list(struct stripe_head *head_sh,
3954*4882a593Smuzhiyun unsigned long handle_flags);
3955*4882a593Smuzhiyun /* handle_stripe_clean_event
3956*4882a593Smuzhiyun * any written block on an uptodate or failed drive can be returned.
3957*4882a593Smuzhiyun * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
3958*4882a593Smuzhiyun * never LOCKED, so we don't need to test 'failed' directly.
3959*4882a593Smuzhiyun */
3960*4882a593Smuzhiyun static void handle_stripe_clean_event(struct r5conf *conf,
3961*4882a593Smuzhiyun struct stripe_head *sh, int disks)
3962*4882a593Smuzhiyun {
3963*4882a593Smuzhiyun int i;
3964*4882a593Smuzhiyun struct r5dev *dev;
3965*4882a593Smuzhiyun int discard_pending = 0;
3966*4882a593Smuzhiyun struct stripe_head *head_sh = sh;
3967*4882a593Smuzhiyun bool do_endio = false;
3968*4882a593Smuzhiyun
3969*4882a593Smuzhiyun for (i = disks; i--; )
3970*4882a593Smuzhiyun if (sh->dev[i].written) {
3971*4882a593Smuzhiyun dev = &sh->dev[i];
3972*4882a593Smuzhiyun if (!test_bit(R5_LOCKED, &dev->flags) &&
3973*4882a593Smuzhiyun (test_bit(R5_UPTODATE, &dev->flags) ||
3974*4882a593Smuzhiyun test_bit(R5_Discard, &dev->flags) ||
3975*4882a593Smuzhiyun test_bit(R5_SkipCopy, &dev->flags))) {
3976*4882a593Smuzhiyun /* We can return any write requests */
3977*4882a593Smuzhiyun struct bio *wbi, *wbi2;
3978*4882a593Smuzhiyun pr_debug("Return write for disc %d\n", i);
3979*4882a593Smuzhiyun if (test_and_clear_bit(R5_Discard, &dev->flags))
3980*4882a593Smuzhiyun clear_bit(R5_UPTODATE, &dev->flags);
3981*4882a593Smuzhiyun if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) {
3982*4882a593Smuzhiyun WARN_ON(test_bit(R5_UPTODATE, &dev->flags));
3983*4882a593Smuzhiyun }
3984*4882a593Smuzhiyun do_endio = true;
3985*4882a593Smuzhiyun
3986*4882a593Smuzhiyun returnbi:
3987*4882a593Smuzhiyun dev->page = dev->orig_page;
3988*4882a593Smuzhiyun wbi = dev->written;
3989*4882a593Smuzhiyun dev->written = NULL;
3990*4882a593Smuzhiyun while (wbi && wbi->bi_iter.bi_sector <
3991*4882a593Smuzhiyun dev->sector + RAID5_STRIPE_SECTORS(conf)) {
3992*4882a593Smuzhiyun wbi2 = r5_next_bio(conf, wbi, dev->sector);
3993*4882a593Smuzhiyun md_write_end(conf->mddev);
3994*4882a593Smuzhiyun bio_endio(wbi);
3995*4882a593Smuzhiyun wbi = wbi2;
3996*4882a593Smuzhiyun }
3997*4882a593Smuzhiyun md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
3998*4882a593Smuzhiyun RAID5_STRIPE_SECTORS(conf),
3999*4882a593Smuzhiyun !test_bit(STRIPE_DEGRADED, &sh->state),
4000*4882a593Smuzhiyun 0);
4001*4882a593Smuzhiyun if (head_sh->batch_head) {
4002*4882a593Smuzhiyun sh = list_first_entry(&sh->batch_list,
4003*4882a593Smuzhiyun struct stripe_head,
4004*4882a593Smuzhiyun batch_list);
4005*4882a593Smuzhiyun if (sh != head_sh) {
4006*4882a593Smuzhiyun dev = &sh->dev[i];
4007*4882a593Smuzhiyun goto returnbi;
4008*4882a593Smuzhiyun }
4009*4882a593Smuzhiyun }
4010*4882a593Smuzhiyun sh = head_sh;
4011*4882a593Smuzhiyun dev = &sh->dev[i];
4012*4882a593Smuzhiyun } else if (test_bit(R5_Discard, &dev->flags))
4013*4882a593Smuzhiyun discard_pending = 1;
4014*4882a593Smuzhiyun }
4015*4882a593Smuzhiyun
4016*4882a593Smuzhiyun log_stripe_write_finished(sh);
4017*4882a593Smuzhiyun
4018*4882a593Smuzhiyun if (!discard_pending &&
4019*4882a593Smuzhiyun test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
4020*4882a593Smuzhiyun int hash;
4021*4882a593Smuzhiyun clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
4022*4882a593Smuzhiyun clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
4023*4882a593Smuzhiyun if (sh->qd_idx >= 0) {
4024*4882a593Smuzhiyun clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
4025*4882a593Smuzhiyun clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags);
4026*4882a593Smuzhiyun }
4027*4882a593Smuzhiyun /* now that discard is done we can proceed with any sync */
4028*4882a593Smuzhiyun clear_bit(STRIPE_DISCARD, &sh->state);
4029*4882a593Smuzhiyun /*
4030*4882a593Smuzhiyun * SCSI discard will change some bio fields and the stripe has
4031*4882a593Smuzhiyun * no updated data, so remove it from hash list and the stripe
4032*4882a593Smuzhiyun * will be reinitialized
4033*4882a593Smuzhiyun */
4034*4882a593Smuzhiyun unhash:
4035*4882a593Smuzhiyun hash = sh->hash_lock_index;
4036*4882a593Smuzhiyun spin_lock_irq(conf->hash_locks + hash);
4037*4882a593Smuzhiyun remove_hash(sh);
4038*4882a593Smuzhiyun spin_unlock_irq(conf->hash_locks + hash);
4039*4882a593Smuzhiyun if (head_sh->batch_head) {
4040*4882a593Smuzhiyun sh = list_first_entry(&sh->batch_list,
4041*4882a593Smuzhiyun struct stripe_head, batch_list);
4042*4882a593Smuzhiyun if (sh != head_sh)
4043*4882a593Smuzhiyun goto unhash;
4044*4882a593Smuzhiyun }
4045*4882a593Smuzhiyun sh = head_sh;
4046*4882a593Smuzhiyun
4047*4882a593Smuzhiyun if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
4048*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
4049*4882a593Smuzhiyun
4050*4882a593Smuzhiyun }
4051*4882a593Smuzhiyun
4052*4882a593Smuzhiyun if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
4053*4882a593Smuzhiyun if (atomic_dec_and_test(&conf->pending_full_writes))
4054*4882a593Smuzhiyun md_wakeup_thread(conf->mddev->thread);
4055*4882a593Smuzhiyun
4056*4882a593Smuzhiyun if (head_sh->batch_head && do_endio)
4057*4882a593Smuzhiyun break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
4058*4882a593Smuzhiyun }
4059*4882a593Smuzhiyun
4060*4882a593Smuzhiyun /*
4061*4882a593Smuzhiyun * For RMW in write back cache, we need extra page in prexor to store the
4062*4882a593Smuzhiyun * old data. This page is stored in dev->orig_page.
4063*4882a593Smuzhiyun *
4064*4882a593Smuzhiyun * This function checks whether we have data for prexor. The exact logic
4065*4882a593Smuzhiyun * is:
4066*4882a593Smuzhiyun * R5_UPTODATE && (!R5_InJournal || R5_OrigPageUPTDODATE)
4067*4882a593Smuzhiyun */
4068*4882a593Smuzhiyun static inline bool uptodate_for_rmw(struct r5dev *dev)
4069*4882a593Smuzhiyun {
4070*4882a593Smuzhiyun return (test_bit(R5_UPTODATE, &dev->flags)) &&
4071*4882a593Smuzhiyun (!test_bit(R5_InJournal, &dev->flags) ||
4072*4882a593Smuzhiyun test_bit(R5_OrigPageUPTDODATE, &dev->flags));
4073*4882a593Smuzhiyun }
4074*4882a593Smuzhiyun
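/*
 * handle_stripe_dirtying - choose between read-modify-write and
 * reconstruct-write for the pending writes on this stripe by counting how
 * many pre-reads each approach would need, then issue those reads (or delay
 * the stripe) and schedule the reconstruction once enough data is in place.
 */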
4075*4882a593Smuzhiyun static int handle_stripe_dirtying(struct r5conf *conf,
4076*4882a593Smuzhiyun struct stripe_head *sh,
4077*4882a593Smuzhiyun struct stripe_head_state *s,
4078*4882a593Smuzhiyun int disks)
4079*4882a593Smuzhiyun {
4080*4882a593Smuzhiyun int rmw = 0, rcw = 0, i;
4081*4882a593Smuzhiyun sector_t recovery_cp = conf->mddev->recovery_cp;
4082*4882a593Smuzhiyun
4083*4882a593Smuzhiyun /* Check whether resync is now happening or should start.
4084*4882a593Smuzhiyun * If yes, then the array is dirty (after unclean shutdown or
4085*4882a593Smuzhiyun * initial creation), so parity in some stripes might be inconsistent.
4086*4882a593Smuzhiyun * In this case, we need to always do reconstruct-write, to ensure
4087*4882a593Smuzhiyun * that in case of drive failure or read-error correction, we
4088*4882a593Smuzhiyun * generate correct data from the parity.
4089*4882a593Smuzhiyun */
4090*4882a593Smuzhiyun if (conf->rmw_level == PARITY_DISABLE_RMW ||
4091*4882a593Smuzhiyun (recovery_cp < MaxSector && sh->sector >= recovery_cp &&
4092*4882a593Smuzhiyun s->failed == 0)) {
4093*4882a593Smuzhiyun /* Calculate the real rcw later - for now make it
4094*4882a593Smuzhiyun * look like rcw is cheaper
4095*4882a593Smuzhiyun */
4096*4882a593Smuzhiyun rcw = 1; rmw = 2;
4097*4882a593Smuzhiyun pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n",
4098*4882a593Smuzhiyun conf->rmw_level, (unsigned long long)recovery_cp,
4099*4882a593Smuzhiyun (unsigned long long)sh->sector);
4100*4882a593Smuzhiyun } else for (i = disks; i--; ) {
4101*4882a593Smuzhiyun /* would I have to read this buffer for read_modify_write */
4102*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[i];
4103*4882a593Smuzhiyun if (((dev->towrite && !delay_towrite(conf, dev, s)) ||
4104*4882a593Smuzhiyun i == sh->pd_idx || i == sh->qd_idx ||
4105*4882a593Smuzhiyun test_bit(R5_InJournal, &dev->flags)) &&
4106*4882a593Smuzhiyun !test_bit(R5_LOCKED, &dev->flags) &&
4107*4882a593Smuzhiyun !(uptodate_for_rmw(dev) ||
4108*4882a593Smuzhiyun test_bit(R5_Wantcompute, &dev->flags))) {
4109*4882a593Smuzhiyun if (test_bit(R5_Insync, &dev->flags))
4110*4882a593Smuzhiyun rmw++;
4111*4882a593Smuzhiyun else
4112*4882a593Smuzhiyun rmw += 2*disks; /* cannot read it */
4113*4882a593Smuzhiyun }
4114*4882a593Smuzhiyun /* Would I have to read this buffer for reconstruct_write */
4115*4882a593Smuzhiyun if (!test_bit(R5_OVERWRITE, &dev->flags) &&
4116*4882a593Smuzhiyun i != sh->pd_idx && i != sh->qd_idx &&
4117*4882a593Smuzhiyun !test_bit(R5_LOCKED, &dev->flags) &&
4118*4882a593Smuzhiyun !(test_bit(R5_UPTODATE, &dev->flags) ||
4119*4882a593Smuzhiyun test_bit(R5_Wantcompute, &dev->flags))) {
4120*4882a593Smuzhiyun if (test_bit(R5_Insync, &dev->flags))
4121*4882a593Smuzhiyun rcw++;
4122*4882a593Smuzhiyun else
4123*4882a593Smuzhiyun rcw += 2*disks;
4124*4882a593Smuzhiyun }
4125*4882a593Smuzhiyun }
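/*
 * At this point rmw/rcw hold the number of pre-reads each strategy would
 * need; blocks that cannot be read at all are charged 2*disks, which
 * effectively rules that strategy out of the comparison below.
 */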
4126*4882a593Smuzhiyun
4127*4882a593Smuzhiyun pr_debug("for sector %llu state 0x%lx, rmw=%d rcw=%d\n",
4128*4882a593Smuzhiyun (unsigned long long)sh->sector, sh->state, rmw, rcw);
4129*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
4130*4882a593Smuzhiyun if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) {
4131*4882a593Smuzhiyun /* prefer read-modify-write, but need to get some data */
4132*4882a593Smuzhiyun if (conf->mddev->queue)
4133*4882a593Smuzhiyun blk_add_trace_msg(conf->mddev->queue,
4134*4882a593Smuzhiyun "raid5 rmw %llu %d",
4135*4882a593Smuzhiyun (unsigned long long)sh->sector, rmw);
4136*4882a593Smuzhiyun for (i = disks; i--; ) {
4137*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[i];
4138*4882a593Smuzhiyun if (test_bit(R5_InJournal, &dev->flags) &&
4139*4882a593Smuzhiyun dev->page == dev->orig_page &&
4140*4882a593Smuzhiyun !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) {
4141*4882a593Smuzhiyun /* alloc page for prexor */
4142*4882a593Smuzhiyun struct page *p = alloc_page(GFP_NOIO);
4143*4882a593Smuzhiyun
4144*4882a593Smuzhiyun if (p) {
4145*4882a593Smuzhiyun dev->orig_page = p;
4146*4882a593Smuzhiyun continue;
4147*4882a593Smuzhiyun }
4148*4882a593Smuzhiyun
4149*4882a593Smuzhiyun /*
4150*4882a593Smuzhiyun * alloc_page() failed, try use
4151*4882a593Smuzhiyun * disk_info->extra_page
4152*4882a593Smuzhiyun */
4153*4882a593Smuzhiyun if (!test_and_set_bit(R5C_EXTRA_PAGE_IN_USE,
4154*4882a593Smuzhiyun &conf->cache_state)) {
4155*4882a593Smuzhiyun r5c_use_extra_page(sh);
4156*4882a593Smuzhiyun break;
4157*4882a593Smuzhiyun }
4158*4882a593Smuzhiyun
4159*4882a593Smuzhiyun /* extra_page in use, add to delayed_list */
4160*4882a593Smuzhiyun set_bit(STRIPE_DELAYED, &sh->state);
4161*4882a593Smuzhiyun s->waiting_extra_page = 1;
4162*4882a593Smuzhiyun return -EAGAIN;
4163*4882a593Smuzhiyun }
4164*4882a593Smuzhiyun }
4165*4882a593Smuzhiyun
4166*4882a593Smuzhiyun for (i = disks; i--; ) {
4167*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[i];
4168*4882a593Smuzhiyun if (((dev->towrite && !delay_towrite(conf, dev, s)) ||
4169*4882a593Smuzhiyun i == sh->pd_idx || i == sh->qd_idx ||
4170*4882a593Smuzhiyun test_bit(R5_InJournal, &dev->flags)) &&
4171*4882a593Smuzhiyun !test_bit(R5_LOCKED, &dev->flags) &&
4172*4882a593Smuzhiyun !(uptodate_for_rmw(dev) ||
4173*4882a593Smuzhiyun test_bit(R5_Wantcompute, &dev->flags)) &&
4174*4882a593Smuzhiyun test_bit(R5_Insync, &dev->flags)) {
4175*4882a593Smuzhiyun if (test_bit(STRIPE_PREREAD_ACTIVE,
4176*4882a593Smuzhiyun &sh->state)) {
4177*4882a593Smuzhiyun pr_debug("Read_old block %d for r-m-w\n",
4178*4882a593Smuzhiyun i);
4179*4882a593Smuzhiyun set_bit(R5_LOCKED, &dev->flags);
4180*4882a593Smuzhiyun set_bit(R5_Wantread, &dev->flags);
4181*4882a593Smuzhiyun s->locked++;
4182*4882a593Smuzhiyun } else
4183*4882a593Smuzhiyun set_bit(STRIPE_DELAYED, &sh->state);
4184*4882a593Smuzhiyun }
4185*4882a593Smuzhiyun }
4186*4882a593Smuzhiyun }
4187*4882a593Smuzhiyun if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_PREFER_RMW)) && rcw > 0) {
4188*4882a593Smuzhiyun /* want reconstruct write, but need to get some data */
4189*4882a593Smuzhiyun int qread =0;
4190*4882a593Smuzhiyun rcw = 0;
4191*4882a593Smuzhiyun for (i = disks; i--; ) {
4192*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[i];
4193*4882a593Smuzhiyun if (!test_bit(R5_OVERWRITE, &dev->flags) &&
4194*4882a593Smuzhiyun i != sh->pd_idx && i != sh->qd_idx &&
4195*4882a593Smuzhiyun !test_bit(R5_LOCKED, &dev->flags) &&
4196*4882a593Smuzhiyun !(test_bit(R5_UPTODATE, &dev->flags) ||
4197*4882a593Smuzhiyun test_bit(R5_Wantcompute, &dev->flags))) {
4198*4882a593Smuzhiyun rcw++;
4199*4882a593Smuzhiyun if (test_bit(R5_Insync, &dev->flags) &&
4200*4882a593Smuzhiyun test_bit(STRIPE_PREREAD_ACTIVE,
4201*4882a593Smuzhiyun &sh->state)) {
4202*4882a593Smuzhiyun pr_debug("Read_old block "
4203*4882a593Smuzhiyun "%d for Reconstruct\n", i);
4204*4882a593Smuzhiyun set_bit(R5_LOCKED, &dev->flags);
4205*4882a593Smuzhiyun set_bit(R5_Wantread, &dev->flags);
4206*4882a593Smuzhiyun s->locked++;
4207*4882a593Smuzhiyun qread++;
4208*4882a593Smuzhiyun } else
4209*4882a593Smuzhiyun set_bit(STRIPE_DELAYED, &sh->state);
4210*4882a593Smuzhiyun }
4211*4882a593Smuzhiyun }
4212*4882a593Smuzhiyun if (rcw && conf->mddev->queue)
4213*4882a593Smuzhiyun blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
4214*4882a593Smuzhiyun (unsigned long long)sh->sector,
4215*4882a593Smuzhiyun rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
4216*4882a593Smuzhiyun }
4217*4882a593Smuzhiyun
4218*4882a593Smuzhiyun if (rcw > disks && rmw > disks &&
4219*4882a593Smuzhiyun !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
4220*4882a593Smuzhiyun set_bit(STRIPE_DELAYED, &sh->state);
4221*4882a593Smuzhiyun
4222*4882a593Smuzhiyun /* now if nothing is locked, and if we have enough data,
4223*4882a593Smuzhiyun * we can start a write request
4224*4882a593Smuzhiyun */
4225*4882a593Smuzhiyun /* since handle_stripe can be called at any time we need to handle the
4226*4882a593Smuzhiyun * case where a compute block operation has been submitted and then a
4227*4882a593Smuzhiyun * subsequent call wants to start a write request. raid_run_ops only
4228*4882a593Smuzhiyun * handles the case where compute block and reconstruct are requested
4229*4882a593Smuzhiyun * simultaneously. If this is not the case then new writes need to be
4230*4882a593Smuzhiyun * held off until the compute completes.
4231*4882a593Smuzhiyun */
4232*4882a593Smuzhiyun if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
4233*4882a593Smuzhiyun (s->locked == 0 && (rcw == 0 || rmw == 0) &&
4234*4882a593Smuzhiyun !test_bit(STRIPE_BIT_DELAY, &sh->state)))
4235*4882a593Smuzhiyun schedule_reconstruction(sh, s, rcw == 0, 0);
4236*4882a593Smuzhiyun return 0;
4237*4882a593Smuzhiyun }
4238*4882a593Smuzhiyun
4239*4882a593Smuzhiyun static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
4240*4882a593Smuzhiyun struct stripe_head_state *s, int disks)
4241*4882a593Smuzhiyun {
4242*4882a593Smuzhiyun struct r5dev *dev = NULL;
4243*4882a593Smuzhiyun
4244*4882a593Smuzhiyun BUG_ON(sh->batch_head);
4245*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
4246*4882a593Smuzhiyun
4247*4882a593Smuzhiyun switch (sh->check_state) {
4248*4882a593Smuzhiyun case check_state_idle:
4249*4882a593Smuzhiyun /* start a new check operation if there are no failures */
4250*4882a593Smuzhiyun if (s->failed == 0) {
4251*4882a593Smuzhiyun BUG_ON(s->uptodate != disks);
4252*4882a593Smuzhiyun sh->check_state = check_state_run;
4253*4882a593Smuzhiyun set_bit(STRIPE_OP_CHECK, &s->ops_request);
4254*4882a593Smuzhiyun clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
4255*4882a593Smuzhiyun s->uptodate--;
4256*4882a593Smuzhiyun break;
4257*4882a593Smuzhiyun }
4258*4882a593Smuzhiyun dev = &sh->dev[s->failed_num[0]];
4259*4882a593Smuzhiyun fallthrough;
4260*4882a593Smuzhiyun case check_state_compute_result:
4261*4882a593Smuzhiyun sh->check_state = check_state_idle;
4262*4882a593Smuzhiyun if (!dev)
4263*4882a593Smuzhiyun dev = &sh->dev[sh->pd_idx];
4264*4882a593Smuzhiyun
4265*4882a593Smuzhiyun /* check that a write has not made the stripe insync */
4266*4882a593Smuzhiyun if (test_bit(STRIPE_INSYNC, &sh->state))
4267*4882a593Smuzhiyun break;
4268*4882a593Smuzhiyun
4269*4882a593Smuzhiyun /* either failed parity check, or recovery is happening */
4270*4882a593Smuzhiyun BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
4271*4882a593Smuzhiyun BUG_ON(s->uptodate != disks);
4272*4882a593Smuzhiyun
4273*4882a593Smuzhiyun set_bit(R5_LOCKED, &dev->flags);
4274*4882a593Smuzhiyun s->locked++;
4275*4882a593Smuzhiyun set_bit(R5_Wantwrite, &dev->flags);
4276*4882a593Smuzhiyun
4277*4882a593Smuzhiyun clear_bit(STRIPE_DEGRADED, &sh->state);
4278*4882a593Smuzhiyun set_bit(STRIPE_INSYNC, &sh->state);
4279*4882a593Smuzhiyun break;
4280*4882a593Smuzhiyun case check_state_run:
4281*4882a593Smuzhiyun break; /* we will be called again upon completion */
4282*4882a593Smuzhiyun case check_state_check_result:
4283*4882a593Smuzhiyun sh->check_state = check_state_idle;
4284*4882a593Smuzhiyun
4285*4882a593Smuzhiyun /* if a failure occurred during the check operation, leave
4286*4882a593Smuzhiyun * STRIPE_INSYNC not set and let the stripe be handled again
4287*4882a593Smuzhiyun */
4288*4882a593Smuzhiyun if (s->failed)
4289*4882a593Smuzhiyun break;
4290*4882a593Smuzhiyun
4291*4882a593Smuzhiyun /* handle a successful check operation, if parity is correct
4292*4882a593Smuzhiyun * we are done. Otherwise update the mismatch count and repair
4293*4882a593Smuzhiyun * parity if !MD_RECOVERY_CHECK
4294*4882a593Smuzhiyun */
4295*4882a593Smuzhiyun if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
4296*4882a593Smuzhiyun /* parity is correct (on disc,
4297*4882a593Smuzhiyun * not in buffer any more)
4298*4882a593Smuzhiyun */
4299*4882a593Smuzhiyun set_bit(STRIPE_INSYNC, &sh->state);
4300*4882a593Smuzhiyun else {
4301*4882a593Smuzhiyun atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches);
4302*4882a593Smuzhiyun if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
4303*4882a593Smuzhiyun /* don't try to repair!! */
4304*4882a593Smuzhiyun set_bit(STRIPE_INSYNC, &sh->state);
4305*4882a593Smuzhiyun pr_warn_ratelimited("%s: mismatch sector in range "
4306*4882a593Smuzhiyun "%llu-%llu\n", mdname(conf->mddev),
4307*4882a593Smuzhiyun (unsigned long long) sh->sector,
4308*4882a593Smuzhiyun (unsigned long long) sh->sector +
4309*4882a593Smuzhiyun RAID5_STRIPE_SECTORS(conf));
4310*4882a593Smuzhiyun } else {
4311*4882a593Smuzhiyun sh->check_state = check_state_compute_run;
4312*4882a593Smuzhiyun set_bit(STRIPE_COMPUTE_RUN, &sh->state);
4313*4882a593Smuzhiyun set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
4314*4882a593Smuzhiyun set_bit(R5_Wantcompute,
4315*4882a593Smuzhiyun &sh->dev[sh->pd_idx].flags);
4316*4882a593Smuzhiyun sh->ops.target = sh->pd_idx;
4317*4882a593Smuzhiyun sh->ops.target2 = -1;
4318*4882a593Smuzhiyun s->uptodate++;
4319*4882a593Smuzhiyun }
4320*4882a593Smuzhiyun }
4321*4882a593Smuzhiyun break;
4322*4882a593Smuzhiyun case check_state_compute_run:
4323*4882a593Smuzhiyun break;
4324*4882a593Smuzhiyun default:
4325*4882a593Smuzhiyun pr_err("%s: unknown check_state: %d sector: %llu\n",
4326*4882a593Smuzhiyun __func__, sh->check_state,
4327*4882a593Smuzhiyun (unsigned long long) sh->sector);
4328*4882a593Smuzhiyun BUG();
4329*4882a593Smuzhiyun }
4330*4882a593Smuzhiyun }
4331*4882a593Smuzhiyun
4332*4882a593Smuzhiyun static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
4333*4882a593Smuzhiyun struct stripe_head_state *s,
4334*4882a593Smuzhiyun int disks)
4335*4882a593Smuzhiyun {
4336*4882a593Smuzhiyun int pd_idx = sh->pd_idx;
4337*4882a593Smuzhiyun int qd_idx = sh->qd_idx;
4338*4882a593Smuzhiyun struct r5dev *dev;
4339*4882a593Smuzhiyun
4340*4882a593Smuzhiyun BUG_ON(sh->batch_head);
4341*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
4342*4882a593Smuzhiyun
4343*4882a593Smuzhiyun BUG_ON(s->failed > 2);
4344*4882a593Smuzhiyun
4345*4882a593Smuzhiyun /* Want to check and possibly repair P and Q.
4346*4882a593Smuzhiyun * However there could be one 'failed' device, in which
4347*4882a593Smuzhiyun * case we can only check one of them, possibly using the
4348*4882a593Smuzhiyun * other to generate missing data
4349*4882a593Smuzhiyun */
4350*4882a593Smuzhiyun
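/*
 * In the idle case below, check_state_run means "check P only" (the single
 * failed device, if any, holds Q), check_state_run_q means "check Q only"
 * (P was needed to regenerate a failed data block), and check_state_run_pq
 * checks both when nothing has failed.
 */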
4351*4882a593Smuzhiyun switch (sh->check_state) {
4352*4882a593Smuzhiyun case check_state_idle:
4353*4882a593Smuzhiyun /* start a new check operation if there are < 2 failures */
4354*4882a593Smuzhiyun if (s->failed == s->q_failed) {
4355*4882a593Smuzhiyun /* The only possible failed device holds Q, so it
4356*4882a593Smuzhiyun * makes sense to check P (if anything else had failed,
4357*4882a593Smuzhiyun * we would have used P to recreate it).
4358*4882a593Smuzhiyun */
4359*4882a593Smuzhiyun sh->check_state = check_state_run;
4360*4882a593Smuzhiyun }
4361*4882a593Smuzhiyun if (!s->q_failed && s->failed < 2) {
4362*4882a593Smuzhiyun /* Q is not failed, and we didn't use it to generate
4363*4882a593Smuzhiyun * anything, so it makes sense to check it
4364*4882a593Smuzhiyun */
4365*4882a593Smuzhiyun if (sh->check_state == check_state_run)
4366*4882a593Smuzhiyun sh->check_state = check_state_run_pq;
4367*4882a593Smuzhiyun else
4368*4882a593Smuzhiyun sh->check_state = check_state_run_q;
4369*4882a593Smuzhiyun }
4370*4882a593Smuzhiyun
4371*4882a593Smuzhiyun /* discard potentially stale zero_sum_result */
4372*4882a593Smuzhiyun sh->ops.zero_sum_result = 0;
4373*4882a593Smuzhiyun
4374*4882a593Smuzhiyun if (sh->check_state == check_state_run) {
4375*4882a593Smuzhiyun /* async_xor_zero_sum destroys the contents of P */
4376*4882a593Smuzhiyun clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
4377*4882a593Smuzhiyun s->uptodate--;
4378*4882a593Smuzhiyun }
4379*4882a593Smuzhiyun if (sh->check_state >= check_state_run &&
4380*4882a593Smuzhiyun sh->check_state <= check_state_run_pq) {
4381*4882a593Smuzhiyun /* async_syndrome_zero_sum preserves P and Q, so
4382*4882a593Smuzhiyun * no need to mark them !uptodate here
4383*4882a593Smuzhiyun */
4384*4882a593Smuzhiyun set_bit(STRIPE_OP_CHECK, &s->ops_request);
4385*4882a593Smuzhiyun break;
4386*4882a593Smuzhiyun }
4387*4882a593Smuzhiyun
4388*4882a593Smuzhiyun /* we have 2-disk failure */
4389*4882a593Smuzhiyun BUG_ON(s->failed != 2);
4390*4882a593Smuzhiyun fallthrough;
4391*4882a593Smuzhiyun case check_state_compute_result:
4392*4882a593Smuzhiyun sh->check_state = check_state_idle;
4393*4882a593Smuzhiyun
4394*4882a593Smuzhiyun /* check that a write has not made the stripe insync */
4395*4882a593Smuzhiyun if (test_bit(STRIPE_INSYNC, &sh->state))
4396*4882a593Smuzhiyun break;
4397*4882a593Smuzhiyun
4398*4882a593Smuzhiyun /* now write out any block on a failed drive,
4399*4882a593Smuzhiyun * or P or Q if they were recomputed
4400*4882a593Smuzhiyun */
4401*4882a593Smuzhiyun dev = NULL;
4402*4882a593Smuzhiyun if (s->failed == 2) {
4403*4882a593Smuzhiyun dev = &sh->dev[s->failed_num[1]];
4404*4882a593Smuzhiyun s->locked++;
4405*4882a593Smuzhiyun set_bit(R5_LOCKED, &dev->flags);
4406*4882a593Smuzhiyun set_bit(R5_Wantwrite, &dev->flags);
4407*4882a593Smuzhiyun }
4408*4882a593Smuzhiyun if (s->failed >= 1) {
4409*4882a593Smuzhiyun dev = &sh->dev[s->failed_num[0]];
4410*4882a593Smuzhiyun s->locked++;
4411*4882a593Smuzhiyun set_bit(R5_LOCKED, &dev->flags);
4412*4882a593Smuzhiyun set_bit(R5_Wantwrite, &dev->flags);
4413*4882a593Smuzhiyun }
4414*4882a593Smuzhiyun if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
4415*4882a593Smuzhiyun dev = &sh->dev[pd_idx];
4416*4882a593Smuzhiyun s->locked++;
4417*4882a593Smuzhiyun set_bit(R5_LOCKED, &dev->flags);
4418*4882a593Smuzhiyun set_bit(R5_Wantwrite, &dev->flags);
4419*4882a593Smuzhiyun }
4420*4882a593Smuzhiyun if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
4421*4882a593Smuzhiyun dev = &sh->dev[qd_idx];
4422*4882a593Smuzhiyun s->locked++;
4423*4882a593Smuzhiyun set_bit(R5_LOCKED, &dev->flags);
4424*4882a593Smuzhiyun set_bit(R5_Wantwrite, &dev->flags);
4425*4882a593Smuzhiyun }
4426*4882a593Smuzhiyun if (WARN_ONCE(dev && !test_bit(R5_UPTODATE, &dev->flags),
4427*4882a593Smuzhiyun "%s: disk%td not up to date\n",
4428*4882a593Smuzhiyun mdname(conf->mddev),
4429*4882a593Smuzhiyun dev - (struct r5dev *) &sh->dev)) {
4430*4882a593Smuzhiyun clear_bit(R5_LOCKED, &dev->flags);
4431*4882a593Smuzhiyun clear_bit(R5_Wantwrite, &dev->flags);
4432*4882a593Smuzhiyun s->locked--;
4433*4882a593Smuzhiyun }
4434*4882a593Smuzhiyun clear_bit(STRIPE_DEGRADED, &sh->state);
4435*4882a593Smuzhiyun
4436*4882a593Smuzhiyun set_bit(STRIPE_INSYNC, &sh->state);
4437*4882a593Smuzhiyun break;
4438*4882a593Smuzhiyun case check_state_run:
4439*4882a593Smuzhiyun case check_state_run_q:
4440*4882a593Smuzhiyun case check_state_run_pq:
4441*4882a593Smuzhiyun break; /* we will be called again upon completion */
4442*4882a593Smuzhiyun case check_state_check_result:
4443*4882a593Smuzhiyun sh->check_state = check_state_idle;
4444*4882a593Smuzhiyun
4445*4882a593Smuzhiyun /* handle a successful check operation, if parity is correct
4446*4882a593Smuzhiyun * we are done. Otherwise update the mismatch count and repair
4447*4882a593Smuzhiyun * parity if !MD_RECOVERY_CHECK
4448*4882a593Smuzhiyun */
4449*4882a593Smuzhiyun if (sh->ops.zero_sum_result == 0) {
4450*4882a593Smuzhiyun /* both parities are correct */
4451*4882a593Smuzhiyun if (!s->failed)
4452*4882a593Smuzhiyun set_bit(STRIPE_INSYNC, &sh->state);
4453*4882a593Smuzhiyun else {
4454*4882a593Smuzhiyun /* in contrast to the raid5 case we can validate
4455*4882a593Smuzhiyun * parity, but still have a failure to write
4456*4882a593Smuzhiyun * back
4457*4882a593Smuzhiyun */
4458*4882a593Smuzhiyun sh->check_state = check_state_compute_result;
4459*4882a593Smuzhiyun /* Returning at this point means that we may go
4460*4882a593Smuzhiyun * off and bring p and/or q uptodate again, so
4461*4882a593Smuzhiyun * we make sure to check zero_sum_result again
4462*4882a593Smuzhiyun * to verify whether p or q need writeback
4463*4882a593Smuzhiyun */
4464*4882a593Smuzhiyun }
4465*4882a593Smuzhiyun } else {
4466*4882a593Smuzhiyun atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches);
4467*4882a593Smuzhiyun if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
4468*4882a593Smuzhiyun /* don't try to repair!! */
4469*4882a593Smuzhiyun set_bit(STRIPE_INSYNC, &sh->state);
4470*4882a593Smuzhiyun pr_warn_ratelimited("%s: mismatch sector in range "
4471*4882a593Smuzhiyun "%llu-%llu\n", mdname(conf->mddev),
4472*4882a593Smuzhiyun (unsigned long long) sh->sector,
4473*4882a593Smuzhiyun (unsigned long long) sh->sector +
4474*4882a593Smuzhiyun RAID5_STRIPE_SECTORS(conf));
4475*4882a593Smuzhiyun } else {
4476*4882a593Smuzhiyun int *target = &sh->ops.target;
4477*4882a593Smuzhiyun
4478*4882a593Smuzhiyun sh->ops.target = -1;
4479*4882a593Smuzhiyun sh->ops.target2 = -1;
4480*4882a593Smuzhiyun sh->check_state = check_state_compute_run;
4481*4882a593Smuzhiyun set_bit(STRIPE_COMPUTE_RUN, &sh->state);
4482*4882a593Smuzhiyun set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
4483*4882a593Smuzhiyun if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
4484*4882a593Smuzhiyun set_bit(R5_Wantcompute,
4485*4882a593Smuzhiyun &sh->dev[pd_idx].flags);
4486*4882a593Smuzhiyun *target = pd_idx;
4487*4882a593Smuzhiyun target = &sh->ops.target2;
4488*4882a593Smuzhiyun s->uptodate++;
4489*4882a593Smuzhiyun }
4490*4882a593Smuzhiyun if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
4491*4882a593Smuzhiyun set_bit(R5_Wantcompute,
4492*4882a593Smuzhiyun &sh->dev[qd_idx].flags);
4493*4882a593Smuzhiyun *target = qd_idx;
4494*4882a593Smuzhiyun s->uptodate++;
4495*4882a593Smuzhiyun }
4496*4882a593Smuzhiyun }
4497*4882a593Smuzhiyun }
4498*4882a593Smuzhiyun break;
4499*4882a593Smuzhiyun case check_state_compute_run:
4500*4882a593Smuzhiyun break;
4501*4882a593Smuzhiyun default:
4502*4882a593Smuzhiyun pr_warn("%s: unknown check_state: %d sector: %llu\n",
4503*4882a593Smuzhiyun __func__, sh->check_state,
4504*4882a593Smuzhiyun (unsigned long long) sh->sector);
4505*4882a593Smuzhiyun BUG();
4506*4882a593Smuzhiyun }
4507*4882a593Smuzhiyun }
4508*4882a593Smuzhiyun
4509*4882a593Smuzhiyun static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
4510*4882a593Smuzhiyun {
4511*4882a593Smuzhiyun int i;
4512*4882a593Smuzhiyun
4513*4882a593Smuzhiyun /* We have read all the blocks in this stripe and now we need to
4514*4882a593Smuzhiyun * copy some of them into a target stripe for expand.
4515*4882a593Smuzhiyun */
4516*4882a593Smuzhiyun struct dma_async_tx_descriptor *tx = NULL;
4517*4882a593Smuzhiyun BUG_ON(sh->batch_head);
4518*4882a593Smuzhiyun clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
4519*4882a593Smuzhiyun for (i = 0; i < sh->disks; i++)
4520*4882a593Smuzhiyun if (i != sh->pd_idx && i != sh->qd_idx) {
4521*4882a593Smuzhiyun int dd_idx, j;
4522*4882a593Smuzhiyun struct stripe_head *sh2;
4523*4882a593Smuzhiyun struct async_submit_ctl submit;
4524*4882a593Smuzhiyun
4525*4882a593Smuzhiyun sector_t bn = raid5_compute_blocknr(sh, i, 1);
4526*4882a593Smuzhiyun sector_t s = raid5_compute_sector(conf, bn, 0,
4527*4882a593Smuzhiyun &dd_idx, NULL);
4528*4882a593Smuzhiyun sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1);
4529*4882a593Smuzhiyun if (sh2 == NULL)
4530*4882a593Smuzhiyun /* so far only the early blocks of this stripe
4531*4882a593Smuzhiyun * have been requested. When later blocks
4532*4882a593Smuzhiyun * get requested, we will try again
4533*4882a593Smuzhiyun */
4534*4882a593Smuzhiyun continue;
4535*4882a593Smuzhiyun if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
4536*4882a593Smuzhiyun test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
4537*4882a593Smuzhiyun /* must have already done this block */
4538*4882a593Smuzhiyun raid5_release_stripe(sh2);
4539*4882a593Smuzhiyun continue;
4540*4882a593Smuzhiyun }
4541*4882a593Smuzhiyun
4542*4882a593Smuzhiyun /* place all the copies on one channel */
4543*4882a593Smuzhiyun init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
4544*4882a593Smuzhiyun tx = async_memcpy(sh2->dev[dd_idx].page,
4545*4882a593Smuzhiyun sh->dev[i].page, sh2->dev[dd_idx].offset,
4546*4882a593Smuzhiyun sh->dev[i].offset, RAID5_STRIPE_SIZE(conf),
4547*4882a593Smuzhiyun &submit);
4548*4882a593Smuzhiyun
4549*4882a593Smuzhiyun set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
4550*4882a593Smuzhiyun set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
4551*4882a593Smuzhiyun for (j = 0; j < conf->raid_disks; j++)
4552*4882a593Smuzhiyun if (j != sh2->pd_idx &&
4553*4882a593Smuzhiyun j != sh2->qd_idx &&
4554*4882a593Smuzhiyun !test_bit(R5_Expanded, &sh2->dev[j].flags))
4555*4882a593Smuzhiyun break;
4556*4882a593Smuzhiyun if (j == conf->raid_disks) {
4557*4882a593Smuzhiyun set_bit(STRIPE_EXPAND_READY, &sh2->state);
4558*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh2->state);
4559*4882a593Smuzhiyun }
4560*4882a593Smuzhiyun raid5_release_stripe(sh2);
4561*4882a593Smuzhiyun
4562*4882a593Smuzhiyun }
4563*4882a593Smuzhiyun /* done submitting copies, wait for them to complete */
4564*4882a593Smuzhiyun async_tx_quiesce(&tx);
4565*4882a593Smuzhiyun }
4566*4882a593Smuzhiyun
4567*4882a593Smuzhiyun /*
4568*4882a593Smuzhiyun * handle_stripe - do things to a stripe.
4569*4882a593Smuzhiyun *
4570*4882a593Smuzhiyun * We lock the stripe by setting STRIPE_ACTIVE and then examine the
4571*4882a593Smuzhiyun * state of various bits to see what needs to be done.
4572*4882a593Smuzhiyun * Possible results:
4573*4882a593Smuzhiyun * return some read requests which now have data
4574*4882a593Smuzhiyun * return some write requests which are safely on storage
4575*4882a593Smuzhiyun * schedule a read on some buffers
4576*4882a593Smuzhiyun * schedule a write of some buffers
4577*4882a593Smuzhiyun * return confirmation of parity correctness
4578*4882a593Smuzhiyun *
4579*4882a593Smuzhiyun */
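/*
 * Rough order of the work done in handle_stripe(), as implemented below:
 * analyse_stripe() gathers per-device state; bad blocks and blocked rdevs
 * are dealt with first; biofill is scheduled for reads that can already be
 * satisfied; the stripe is failed if more than max_degraded devices are
 * lost; completed writes are returned (handle_stripe_clean_event); missing
 * blocks are read in (handle_stripe_fill); new writes or r5c write-back are
 * handled; parity is checked and repaired if needed; replacement devices
 * are written; ReadError blocks are re-written or re-read; reshape and
 * expansion work is finished; and the accumulated ops are finally submitted
 * via raid_run_ops() and ops_run_io().
 */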
4580*4882a593Smuzhiyun
4581*4882a593Smuzhiyun static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
4582*4882a593Smuzhiyun {
4583*4882a593Smuzhiyun struct r5conf *conf = sh->raid_conf;
4584*4882a593Smuzhiyun int disks = sh->disks;
4585*4882a593Smuzhiyun struct r5dev *dev;
4586*4882a593Smuzhiyun int i;
4587*4882a593Smuzhiyun int do_recovery = 0;
4588*4882a593Smuzhiyun
4589*4882a593Smuzhiyun memset(s, 0, sizeof(*s));
4590*4882a593Smuzhiyun
4591*4882a593Smuzhiyun s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head;
4592*4882a593Smuzhiyun s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head;
4593*4882a593Smuzhiyun s->failed_num[0] = -1;
4594*4882a593Smuzhiyun s->failed_num[1] = -1;
4595*4882a593Smuzhiyun s->log_failed = r5l_log_disk_error(conf);
4596*4882a593Smuzhiyun
4597*4882a593Smuzhiyun /* Now to look around and see what can be done */
4598*4882a593Smuzhiyun rcu_read_lock();
4599*4882a593Smuzhiyun for (i=disks; i--; ) {
4600*4882a593Smuzhiyun struct md_rdev *rdev;
4601*4882a593Smuzhiyun sector_t first_bad;
4602*4882a593Smuzhiyun int bad_sectors;
4603*4882a593Smuzhiyun int is_bad = 0;
4604*4882a593Smuzhiyun
4605*4882a593Smuzhiyun dev = &sh->dev[i];
4606*4882a593Smuzhiyun
4607*4882a593Smuzhiyun pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
4608*4882a593Smuzhiyun i, dev->flags,
4609*4882a593Smuzhiyun dev->toread, dev->towrite, dev->written);
4610*4882a593Smuzhiyun /* maybe we can reply to a read
4611*4882a593Smuzhiyun *
4612*4882a593Smuzhiyun * new wantfill requests are only permitted while
4613*4882a593Smuzhiyun * ops_complete_biofill is guaranteed to be inactive
4614*4882a593Smuzhiyun */
4615*4882a593Smuzhiyun if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
4616*4882a593Smuzhiyun !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
4617*4882a593Smuzhiyun set_bit(R5_Wantfill, &dev->flags);
4618*4882a593Smuzhiyun
4619*4882a593Smuzhiyun /* now count some things */
4620*4882a593Smuzhiyun if (test_bit(R5_LOCKED, &dev->flags))
4621*4882a593Smuzhiyun s->locked++;
4622*4882a593Smuzhiyun if (test_bit(R5_UPTODATE, &dev->flags))
4623*4882a593Smuzhiyun s->uptodate++;
4624*4882a593Smuzhiyun if (test_bit(R5_Wantcompute, &dev->flags)) {
4625*4882a593Smuzhiyun s->compute++;
4626*4882a593Smuzhiyun BUG_ON(s->compute > 2);
4627*4882a593Smuzhiyun }
4628*4882a593Smuzhiyun
4629*4882a593Smuzhiyun if (test_bit(R5_Wantfill, &dev->flags))
4630*4882a593Smuzhiyun s->to_fill++;
4631*4882a593Smuzhiyun else if (dev->toread)
4632*4882a593Smuzhiyun s->to_read++;
4633*4882a593Smuzhiyun if (dev->towrite) {
4634*4882a593Smuzhiyun s->to_write++;
4635*4882a593Smuzhiyun if (!test_bit(R5_OVERWRITE, &dev->flags))
4636*4882a593Smuzhiyun s->non_overwrite++;
4637*4882a593Smuzhiyun }
4638*4882a593Smuzhiyun if (dev->written)
4639*4882a593Smuzhiyun s->written++;
4640*4882a593Smuzhiyun /* Prefer to use the replacement for reads, but only
4641*4882a593Smuzhiyun * if it is recovered enough and has no bad blocks.
4642*4882a593Smuzhiyun */
4643*4882a593Smuzhiyun rdev = rcu_dereference(conf->disks[i].replacement);
4644*4882a593Smuzhiyun if (rdev && !test_bit(Faulty, &rdev->flags) &&
4645*4882a593Smuzhiyun rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) &&
4646*4882a593Smuzhiyun !is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
4647*4882a593Smuzhiyun &first_bad, &bad_sectors))
4648*4882a593Smuzhiyun set_bit(R5_ReadRepl, &dev->flags);
4649*4882a593Smuzhiyun else {
4650*4882a593Smuzhiyun if (rdev && !test_bit(Faulty, &rdev->flags))
4651*4882a593Smuzhiyun set_bit(R5_NeedReplace, &dev->flags);
4652*4882a593Smuzhiyun else
4653*4882a593Smuzhiyun clear_bit(R5_NeedReplace, &dev->flags);
4654*4882a593Smuzhiyun rdev = rcu_dereference(conf->disks[i].rdev);
4655*4882a593Smuzhiyun clear_bit(R5_ReadRepl, &dev->flags);
4656*4882a593Smuzhiyun }
4657*4882a593Smuzhiyun if (rdev && test_bit(Faulty, &rdev->flags))
4658*4882a593Smuzhiyun rdev = NULL;
4659*4882a593Smuzhiyun if (rdev) {
4660*4882a593Smuzhiyun is_bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
4661*4882a593Smuzhiyun &first_bad, &bad_sectors);
4662*4882a593Smuzhiyun if (s->blocked_rdev == NULL
4663*4882a593Smuzhiyun && (test_bit(Blocked, &rdev->flags)
4664*4882a593Smuzhiyun || is_bad < 0)) {
4665*4882a593Smuzhiyun if (is_bad < 0)
4666*4882a593Smuzhiyun set_bit(BlockedBadBlocks,
4667*4882a593Smuzhiyun &rdev->flags);
4668*4882a593Smuzhiyun s->blocked_rdev = rdev;
4669*4882a593Smuzhiyun atomic_inc(&rdev->nr_pending);
4670*4882a593Smuzhiyun }
4671*4882a593Smuzhiyun }
4672*4882a593Smuzhiyun clear_bit(R5_Insync, &dev->flags);
4673*4882a593Smuzhiyun if (!rdev)
4674*4882a593Smuzhiyun /* Not in-sync */;
4675*4882a593Smuzhiyun else if (is_bad) {
4676*4882a593Smuzhiyun /* also not in-sync */
4677*4882a593Smuzhiyun if (!test_bit(WriteErrorSeen, &rdev->flags) &&
4678*4882a593Smuzhiyun test_bit(R5_UPTODATE, &dev->flags)) {
4679*4882a593Smuzhiyun /* treat as in-sync, but with a read error
4680*4882a593Smuzhiyun * which we can now try to correct
4681*4882a593Smuzhiyun */
4682*4882a593Smuzhiyun set_bit(R5_Insync, &dev->flags);
4683*4882a593Smuzhiyun set_bit(R5_ReadError, &dev->flags);
4684*4882a593Smuzhiyun }
4685*4882a593Smuzhiyun } else if (test_bit(In_sync, &rdev->flags))
4686*4882a593Smuzhiyun set_bit(R5_Insync, &dev->flags);
4687*4882a593Smuzhiyun else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <= rdev->recovery_offset)
4688*4882a593Smuzhiyun /* in sync if before recovery_offset */
4689*4882a593Smuzhiyun set_bit(R5_Insync, &dev->flags);
4690*4882a593Smuzhiyun else if (test_bit(R5_UPTODATE, &dev->flags) &&
4691*4882a593Smuzhiyun test_bit(R5_Expanded, &dev->flags))
4692*4882a593Smuzhiyun /* If we've reshaped into here, we assume it is Insync.
4693*4882a593Smuzhiyun * We will shortly update recovery_offset to make
4694*4882a593Smuzhiyun * it official.
4695*4882a593Smuzhiyun */
4696*4882a593Smuzhiyun set_bit(R5_Insync, &dev->flags);
4697*4882a593Smuzhiyun
4698*4882a593Smuzhiyun if (test_bit(R5_WriteError, &dev->flags)) {
4699*4882a593Smuzhiyun /* This flag does not apply to '.replacement',
4700*4882a593Smuzhiyun * only to .rdev, so make sure to check that */
4701*4882a593Smuzhiyun struct md_rdev *rdev2 = rcu_dereference(
4702*4882a593Smuzhiyun conf->disks[i].rdev);
4703*4882a593Smuzhiyun if (rdev2 == rdev)
4704*4882a593Smuzhiyun clear_bit(R5_Insync, &dev->flags);
4705*4882a593Smuzhiyun if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
4706*4882a593Smuzhiyun s->handle_bad_blocks = 1;
4707*4882a593Smuzhiyun atomic_inc(&rdev2->nr_pending);
4708*4882a593Smuzhiyun } else
4709*4882a593Smuzhiyun clear_bit(R5_WriteError, &dev->flags);
4710*4882a593Smuzhiyun }
4711*4882a593Smuzhiyun if (test_bit(R5_MadeGood, &dev->flags)) {
4712*4882a593Smuzhiyun /* This flag does not apply to '.replacement',
4713*4882a593Smuzhiyun * only to .rdev, so make sure to check that */
4714*4882a593Smuzhiyun struct md_rdev *rdev2 = rcu_dereference(
4715*4882a593Smuzhiyun conf->disks[i].rdev);
4716*4882a593Smuzhiyun if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
4717*4882a593Smuzhiyun s->handle_bad_blocks = 1;
4718*4882a593Smuzhiyun atomic_inc(&rdev2->nr_pending);
4719*4882a593Smuzhiyun } else
4720*4882a593Smuzhiyun clear_bit(R5_MadeGood, &dev->flags);
4721*4882a593Smuzhiyun }
4722*4882a593Smuzhiyun if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
4723*4882a593Smuzhiyun struct md_rdev *rdev2 = rcu_dereference(
4724*4882a593Smuzhiyun conf->disks[i].replacement);
4725*4882a593Smuzhiyun if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
4726*4882a593Smuzhiyun s->handle_bad_blocks = 1;
4727*4882a593Smuzhiyun atomic_inc(&rdev2->nr_pending);
4728*4882a593Smuzhiyun } else
4729*4882a593Smuzhiyun clear_bit(R5_MadeGoodRepl, &dev->flags);
4730*4882a593Smuzhiyun }
4731*4882a593Smuzhiyun if (!test_bit(R5_Insync, &dev->flags)) {
4732*4882a593Smuzhiyun /* The ReadError flag will just be confusing now */
4733*4882a593Smuzhiyun clear_bit(R5_ReadError, &dev->flags);
4734*4882a593Smuzhiyun clear_bit(R5_ReWrite, &dev->flags);
4735*4882a593Smuzhiyun }
4736*4882a593Smuzhiyun if (test_bit(R5_ReadError, &dev->flags))
4737*4882a593Smuzhiyun clear_bit(R5_Insync, &dev->flags);
4738*4882a593Smuzhiyun if (!test_bit(R5_Insync, &dev->flags)) {
4739*4882a593Smuzhiyun if (s->failed < 2)
4740*4882a593Smuzhiyun s->failed_num[s->failed] = i;
4741*4882a593Smuzhiyun s->failed++;
4742*4882a593Smuzhiyun if (rdev && !test_bit(Faulty, &rdev->flags))
4743*4882a593Smuzhiyun do_recovery = 1;
4744*4882a593Smuzhiyun else if (!rdev) {
4745*4882a593Smuzhiyun rdev = rcu_dereference(
4746*4882a593Smuzhiyun conf->disks[i].replacement);
4747*4882a593Smuzhiyun if (rdev && !test_bit(Faulty, &rdev->flags))
4748*4882a593Smuzhiyun do_recovery = 1;
4749*4882a593Smuzhiyun }
4750*4882a593Smuzhiyun }
4751*4882a593Smuzhiyun
4752*4882a593Smuzhiyun if (test_bit(R5_InJournal, &dev->flags))
4753*4882a593Smuzhiyun s->injournal++;
4754*4882a593Smuzhiyun if (test_bit(R5_InJournal, &dev->flags) && dev->written)
4755*4882a593Smuzhiyun s->just_cached++;
4756*4882a593Smuzhiyun }
4757*4882a593Smuzhiyun if (test_bit(STRIPE_SYNCING, &sh->state)) {
4758*4882a593Smuzhiyun /* If there is a failed device being replaced,
4759*4882a593Smuzhiyun * we must be recovering.
4760*4882a593Smuzhiyun * else if we are after recovery_cp, we must be syncing
4761*4882a593Smuzhiyun * else if MD_RECOVERY_REQUESTED is set, we also are syncing.
4762*4882a593Smuzhiyun * else we can only be replacing
4763*4882a593Smuzhiyun * sync and recovery both need to read all devices, and so
4764*4882a593Smuzhiyun * use the same flag.
4765*4882a593Smuzhiyun */
4766*4882a593Smuzhiyun if (do_recovery ||
4767*4882a593Smuzhiyun sh->sector >= conf->mddev->recovery_cp ||
4768*4882a593Smuzhiyun test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
4769*4882a593Smuzhiyun s->syncing = 1;
4770*4882a593Smuzhiyun else
4771*4882a593Smuzhiyun s->replacing = 1;
4772*4882a593Smuzhiyun }
4773*4882a593Smuzhiyun rcu_read_unlock();
4774*4882a593Smuzhiyun }
4775*4882a593Smuzhiyun
4776*4882a593Smuzhiyun /*
4777*4882a593Smuzhiyun * Return '1' if this is a member of batch, or '0' if it is a lone stripe or
4778*4882a593Smuzhiyun * a head which can now be handled.
4779*4882a593Smuzhiyun */
4780*4882a593Smuzhiyun static int clear_batch_ready(struct stripe_head *sh)
4781*4882a593Smuzhiyun {
4782*4882a593Smuzhiyun struct stripe_head *tmp;
4783*4882a593Smuzhiyun if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state))
4784*4882a593Smuzhiyun return (sh->batch_head && sh->batch_head != sh);
4785*4882a593Smuzhiyun spin_lock(&sh->stripe_lock);
4786*4882a593Smuzhiyun if (!sh->batch_head) {
4787*4882a593Smuzhiyun spin_unlock(&sh->stripe_lock);
4788*4882a593Smuzhiyun return 0;
4789*4882a593Smuzhiyun }
4790*4882a593Smuzhiyun
4791*4882a593Smuzhiyun /*
4792*4882a593Smuzhiyun * this stripe could have been added to a batch list before we
4793*4882a593Smuzhiyun * checked BATCH_READY; skip it
4794*4882a593Smuzhiyun */
4795*4882a593Smuzhiyun if (sh->batch_head != sh) {
4796*4882a593Smuzhiyun spin_unlock(&sh->stripe_lock);
4797*4882a593Smuzhiyun return 1;
4798*4882a593Smuzhiyun }
4799*4882a593Smuzhiyun spin_lock(&sh->batch_lock);
4800*4882a593Smuzhiyun list_for_each_entry(tmp, &sh->batch_list, batch_list)
4801*4882a593Smuzhiyun clear_bit(STRIPE_BATCH_READY, &tmp->state);
4802*4882a593Smuzhiyun spin_unlock(&sh->batch_lock);
4803*4882a593Smuzhiyun spin_unlock(&sh->stripe_lock);
4804*4882a593Smuzhiyun
4805*4882a593Smuzhiyun /*
4806*4882a593Smuzhiyun * BATCH_READY is cleared, no new stripes can be added.
4807*4882a593Smuzhiyun * batch_list can be accessed without lock
4808*4882a593Smuzhiyun */
4809*4882a593Smuzhiyun return 0;
4810*4882a593Smuzhiyun }
4811*4882a593Smuzhiyun
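/*
 * Detach every stripe on head_sh's batch list and hand it back for
 * individual handling: each member inherits check/reconstruct state and
 * most device flags from the head, gets its batch_head cleared, and is
 * queued via STRIPE_HANDLE when handle_flags is 0 or matches its state.
 */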
4812*4882a593Smuzhiyun static void break_stripe_batch_list(struct stripe_head *head_sh,
4813*4882a593Smuzhiyun unsigned long handle_flags)
4814*4882a593Smuzhiyun {
4815*4882a593Smuzhiyun struct stripe_head *sh, *next;
4816*4882a593Smuzhiyun int i;
4817*4882a593Smuzhiyun int do_wakeup = 0;
4818*4882a593Smuzhiyun
4819*4882a593Smuzhiyun list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) {
4820*4882a593Smuzhiyun
4821*4882a593Smuzhiyun list_del_init(&sh->batch_list);
4822*4882a593Smuzhiyun
4823*4882a593Smuzhiyun WARN_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
4824*4882a593Smuzhiyun (1 << STRIPE_SYNCING) |
4825*4882a593Smuzhiyun (1 << STRIPE_REPLACED) |
4826*4882a593Smuzhiyun (1 << STRIPE_DELAYED) |
4827*4882a593Smuzhiyun (1 << STRIPE_BIT_DELAY) |
4828*4882a593Smuzhiyun (1 << STRIPE_FULL_WRITE) |
4829*4882a593Smuzhiyun (1 << STRIPE_BIOFILL_RUN) |
4830*4882a593Smuzhiyun (1 << STRIPE_COMPUTE_RUN) |
4831*4882a593Smuzhiyun (1 << STRIPE_DISCARD) |
4832*4882a593Smuzhiyun (1 << STRIPE_BATCH_READY) |
4833*4882a593Smuzhiyun (1 << STRIPE_BATCH_ERR) |
4834*4882a593Smuzhiyun (1 << STRIPE_BITMAP_PENDING)),
4835*4882a593Smuzhiyun "stripe state: %lx\n", sh->state);
4836*4882a593Smuzhiyun WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
4837*4882a593Smuzhiyun (1 << STRIPE_REPLACED)),
4838*4882a593Smuzhiyun "head stripe state: %lx\n", head_sh->state);
4839*4882a593Smuzhiyun
4840*4882a593Smuzhiyun set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
4841*4882a593Smuzhiyun (1 << STRIPE_PREREAD_ACTIVE) |
4842*4882a593Smuzhiyun (1 << STRIPE_DEGRADED) |
4843*4882a593Smuzhiyun (1 << STRIPE_ON_UNPLUG_LIST)),
4844*4882a593Smuzhiyun head_sh->state & (1 << STRIPE_INSYNC));
4845*4882a593Smuzhiyun
4846*4882a593Smuzhiyun sh->check_state = head_sh->check_state;
4847*4882a593Smuzhiyun sh->reconstruct_state = head_sh->reconstruct_state;
4848*4882a593Smuzhiyun spin_lock_irq(&sh->stripe_lock);
4849*4882a593Smuzhiyun sh->batch_head = NULL;
4850*4882a593Smuzhiyun spin_unlock_irq(&sh->stripe_lock);
4851*4882a593Smuzhiyun for (i = 0; i < sh->disks; i++) {
4852*4882a593Smuzhiyun if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
4853*4882a593Smuzhiyun do_wakeup = 1;
4854*4882a593Smuzhiyun sh->dev[i].flags = head_sh->dev[i].flags &
4855*4882a593Smuzhiyun (~((1 << R5_WriteError) | (1 << R5_Overlap)));
4856*4882a593Smuzhiyun }
4857*4882a593Smuzhiyun if (handle_flags == 0 ||
4858*4882a593Smuzhiyun sh->state & handle_flags)
4859*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
4860*4882a593Smuzhiyun raid5_release_stripe(sh);
4861*4882a593Smuzhiyun }
4862*4882a593Smuzhiyun spin_lock_irq(&head_sh->stripe_lock);
4863*4882a593Smuzhiyun head_sh->batch_head = NULL;
4864*4882a593Smuzhiyun spin_unlock_irq(&head_sh->stripe_lock);
4865*4882a593Smuzhiyun for (i = 0; i < head_sh->disks; i++)
4866*4882a593Smuzhiyun if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
4867*4882a593Smuzhiyun do_wakeup = 1;
4868*4882a593Smuzhiyun if (head_sh->state & handle_flags)
4869*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &head_sh->state);
4870*4882a593Smuzhiyun
4871*4882a593Smuzhiyun if (do_wakeup)
4872*4882a593Smuzhiyun wake_up(&head_sh->raid_conf->wait_for_overlap);
4873*4882a593Smuzhiyun }
4874*4882a593Smuzhiyun
4875*4882a593Smuzhiyun static void handle_stripe(struct stripe_head *sh)
4876*4882a593Smuzhiyun {
4877*4882a593Smuzhiyun struct stripe_head_state s;
4878*4882a593Smuzhiyun struct r5conf *conf = sh->raid_conf;
4879*4882a593Smuzhiyun int i;
4880*4882a593Smuzhiyun int prexor;
4881*4882a593Smuzhiyun int disks = sh->disks;
4882*4882a593Smuzhiyun struct r5dev *pdev, *qdev;
4883*4882a593Smuzhiyun
4884*4882a593Smuzhiyun clear_bit(STRIPE_HANDLE, &sh->state);
4885*4882a593Smuzhiyun
4886*4882a593Smuzhiyun /*
4887*4882a593Smuzhiyun * handle_stripe should not continue to handle a batched stripe; only
4888*4882a593Smuzhiyun * the head of a batch list or a lone stripe may continue. Otherwise
4889*4882a593Smuzhiyun * break_stripe_batch_list could warn that STRIPE_ACTIVE is set for
4890*4882a593Smuzhiyun * the batched stripe.
4891*4882a593Smuzhiyun */
4892*4882a593Smuzhiyun if (clear_batch_ready(sh))
4893*4882a593Smuzhiyun return;
4894*4882a593Smuzhiyun
4895*4882a593Smuzhiyun if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
4896*4882a593Smuzhiyun /* already being handled, ensure it gets handled
4897*4882a593Smuzhiyun * again when current action finishes */
4898*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
4899*4882a593Smuzhiyun return;
4900*4882a593Smuzhiyun }
4901*4882a593Smuzhiyun
4902*4882a593Smuzhiyun if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
4903*4882a593Smuzhiyun break_stripe_batch_list(sh, 0);
4904*4882a593Smuzhiyun
4905*4882a593Smuzhiyun if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) {
4906*4882a593Smuzhiyun spin_lock(&sh->stripe_lock);
4907*4882a593Smuzhiyun /*
4908*4882a593Smuzhiyun * Cannot process 'sync' concurrently with 'discard'.
4909*4882a593Smuzhiyun * Flush data in r5cache before 'sync'.
4910*4882a593Smuzhiyun */
4911*4882a593Smuzhiyun if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
4912*4882a593Smuzhiyun !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) &&
4913*4882a593Smuzhiyun !test_bit(STRIPE_DISCARD, &sh->state) &&
4914*4882a593Smuzhiyun test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
4915*4882a593Smuzhiyun set_bit(STRIPE_SYNCING, &sh->state);
4916*4882a593Smuzhiyun clear_bit(STRIPE_INSYNC, &sh->state);
4917*4882a593Smuzhiyun clear_bit(STRIPE_REPLACED, &sh->state);
4918*4882a593Smuzhiyun }
4919*4882a593Smuzhiyun spin_unlock(&sh->stripe_lock);
4920*4882a593Smuzhiyun }
4921*4882a593Smuzhiyun clear_bit(STRIPE_DELAYED, &sh->state);
4922*4882a593Smuzhiyun
4923*4882a593Smuzhiyun pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
4924*4882a593Smuzhiyun "pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n",
4925*4882a593Smuzhiyun (unsigned long long)sh->sector, sh->state,
4926*4882a593Smuzhiyun atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
4927*4882a593Smuzhiyun sh->check_state, sh->reconstruct_state);
4928*4882a593Smuzhiyun
4929*4882a593Smuzhiyun analyse_stripe(sh, &s);
4930*4882a593Smuzhiyun
4931*4882a593Smuzhiyun if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
4932*4882a593Smuzhiyun goto finish;
4933*4882a593Smuzhiyun
4934*4882a593Smuzhiyun if (s.handle_bad_blocks ||
4935*4882a593Smuzhiyun test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) {
4936*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
4937*4882a593Smuzhiyun goto finish;
4938*4882a593Smuzhiyun }
4939*4882a593Smuzhiyun
4940*4882a593Smuzhiyun if (unlikely(s.blocked_rdev)) {
4941*4882a593Smuzhiyun if (s.syncing || s.expanding || s.expanded ||
4942*4882a593Smuzhiyun s.replacing || s.to_write || s.written) {
4943*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
4944*4882a593Smuzhiyun goto finish;
4945*4882a593Smuzhiyun }
4946*4882a593Smuzhiyun /* There is nothing for the blocked_rdev to block */
4947*4882a593Smuzhiyun rdev_dec_pending(s.blocked_rdev, conf->mddev);
4948*4882a593Smuzhiyun s.blocked_rdev = NULL;
4949*4882a593Smuzhiyun }
4950*4882a593Smuzhiyun
4951*4882a593Smuzhiyun if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
4952*4882a593Smuzhiyun set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
4953*4882a593Smuzhiyun set_bit(STRIPE_BIOFILL_RUN, &sh->state);
4954*4882a593Smuzhiyun }
4955*4882a593Smuzhiyun
4956*4882a593Smuzhiyun pr_debug("locked=%d uptodate=%d to_read=%d"
4957*4882a593Smuzhiyun " to_write=%d failed=%d failed_num=%d,%d\n",
4958*4882a593Smuzhiyun s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
4959*4882a593Smuzhiyun s.failed_num[0], s.failed_num[1]);
4960*4882a593Smuzhiyun /*
4961*4882a593Smuzhiyun * check if the array has lost more than max_degraded devices and,
4962*4882a593Smuzhiyun * if so, some requests might need to be failed.
4963*4882a593Smuzhiyun *
4964*4882a593Smuzhiyun * When the journal device has failed (log_failed), we only process
4965*4882a593Smuzhiyun * the stripe if there is data that needs to be written to the raid disks
4966*4882a593Smuzhiyun */
4967*4882a593Smuzhiyun if (s.failed > conf->max_degraded ||
4968*4882a593Smuzhiyun (s.log_failed && s.injournal == 0)) {
4969*4882a593Smuzhiyun sh->check_state = 0;
4970*4882a593Smuzhiyun sh->reconstruct_state = 0;
4971*4882a593Smuzhiyun break_stripe_batch_list(sh, 0);
4972*4882a593Smuzhiyun if (s.to_read+s.to_write+s.written)
4973*4882a593Smuzhiyun handle_failed_stripe(conf, sh, &s, disks);
4974*4882a593Smuzhiyun if (s.syncing + s.replacing)
4975*4882a593Smuzhiyun handle_failed_sync(conf, sh, &s);
4976*4882a593Smuzhiyun }
4977*4882a593Smuzhiyun
4978*4882a593Smuzhiyun /* Now we check to see if any write operations have recently
4979*4882a593Smuzhiyun * completed
4980*4882a593Smuzhiyun */
4981*4882a593Smuzhiyun prexor = 0;
4982*4882a593Smuzhiyun if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
4983*4882a593Smuzhiyun prexor = 1;
4984*4882a593Smuzhiyun if (sh->reconstruct_state == reconstruct_state_drain_result ||
4985*4882a593Smuzhiyun sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
4986*4882a593Smuzhiyun sh->reconstruct_state = reconstruct_state_idle;
4987*4882a593Smuzhiyun
4988*4882a593Smuzhiyun /* All the 'written' buffers and the parity block are ready to
4989*4882a593Smuzhiyun * be written back to disk
4990*4882a593Smuzhiyun */
4991*4882a593Smuzhiyun BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) &&
4992*4882a593Smuzhiyun !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags));
4993*4882a593Smuzhiyun BUG_ON(sh->qd_idx >= 0 &&
4994*4882a593Smuzhiyun !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) &&
4995*4882a593Smuzhiyun !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags));
4996*4882a593Smuzhiyun for (i = disks; i--; ) {
4997*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[i];
4998*4882a593Smuzhiyun if (test_bit(R5_LOCKED, &dev->flags) &&
4999*4882a593Smuzhiyun (i == sh->pd_idx || i == sh->qd_idx ||
5000*4882a593Smuzhiyun dev->written || test_bit(R5_InJournal,
5001*4882a593Smuzhiyun &dev->flags))) {
5002*4882a593Smuzhiyun pr_debug("Writing block %d\n", i);
5003*4882a593Smuzhiyun set_bit(R5_Wantwrite, &dev->flags);
5004*4882a593Smuzhiyun if (prexor)
5005*4882a593Smuzhiyun continue;
5006*4882a593Smuzhiyun if (s.failed > 1)
5007*4882a593Smuzhiyun continue;
5008*4882a593Smuzhiyun if (!test_bit(R5_Insync, &dev->flags) ||
5009*4882a593Smuzhiyun ((i == sh->pd_idx || i == sh->qd_idx) &&
5010*4882a593Smuzhiyun s.failed == 0))
5011*4882a593Smuzhiyun set_bit(STRIPE_INSYNC, &sh->state);
5012*4882a593Smuzhiyun }
5013*4882a593Smuzhiyun }
5014*4882a593Smuzhiyun if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
5015*4882a593Smuzhiyun s.dec_preread_active = 1;
5016*4882a593Smuzhiyun }
5017*4882a593Smuzhiyun
5018*4882a593Smuzhiyun /*
5019*4882a593Smuzhiyun * might be able to return some write requests if the parity blocks
5020*4882a593Smuzhiyun * are safe, or on a failed drive
5021*4882a593Smuzhiyun */
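/*
 * For this purpose a parity device counts as "failed" when it is one of
 * the failed_num entries; for RAID4/5 (level < 6) there is no Q block at
 * all, so q_failed is forced true and only the P-block conditions below
 * gate handle_stripe_clean_event().
 */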
5022*4882a593Smuzhiyun pdev = &sh->dev[sh->pd_idx];
5023*4882a593Smuzhiyun s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
5024*4882a593Smuzhiyun || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
5025*4882a593Smuzhiyun qdev = &sh->dev[sh->qd_idx];
5026*4882a593Smuzhiyun s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
5027*4882a593Smuzhiyun || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
5028*4882a593Smuzhiyun || conf->level < 6;
5029*4882a593Smuzhiyun
5030*4882a593Smuzhiyun if (s.written &&
5031*4882a593Smuzhiyun (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
5032*4882a593Smuzhiyun && !test_bit(R5_LOCKED, &pdev->flags)
5033*4882a593Smuzhiyun && (test_bit(R5_UPTODATE, &pdev->flags) ||
5034*4882a593Smuzhiyun test_bit(R5_Discard, &pdev->flags))))) &&
5035*4882a593Smuzhiyun (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
5036*4882a593Smuzhiyun && !test_bit(R5_LOCKED, &qdev->flags)
5037*4882a593Smuzhiyun && (test_bit(R5_UPTODATE, &qdev->flags) ||
5038*4882a593Smuzhiyun test_bit(R5_Discard, &qdev->flags))))))
5039*4882a593Smuzhiyun handle_stripe_clean_event(conf, sh, disks);
5040*4882a593Smuzhiyun
5041*4882a593Smuzhiyun if (s.just_cached)
5042*4882a593Smuzhiyun r5c_handle_cached_data_endio(conf, sh, disks);
5043*4882a593Smuzhiyun log_stripe_write_finished(sh);
5044*4882a593Smuzhiyun
5045*4882a593Smuzhiyun /* Now we might consider reading some blocks, either to check/generate
5046*4882a593Smuzhiyun * parity, or to satisfy requests
5047*4882a593Smuzhiyun * or to load a block that is being partially written.
5048*4882a593Smuzhiyun */
5049*4882a593Smuzhiyun if (s.to_read || s.non_overwrite
5050*4882a593Smuzhiyun || (s.to_write && s.failed)
5051*4882a593Smuzhiyun || (s.syncing && (s.uptodate + s.compute < disks))
5052*4882a593Smuzhiyun || s.replacing
5053*4882a593Smuzhiyun || s.expanding)
5054*4882a593Smuzhiyun handle_stripe_fill(sh, &s, disks);
5055*4882a593Smuzhiyun
5056*4882a593Smuzhiyun /*
5057*4882a593Smuzhiyun * When the stripe finishes a full journal write cycle (write to journal
5058*4882a593Smuzhiyun * and raid disk), this is the clean up procedure so it is ready for the
5059*4882a593Smuzhiyun * next operation.
5060*4882a593Smuzhiyun */
5061*4882a593Smuzhiyun r5c_finish_stripe_write_out(conf, sh, &s);
5062*4882a593Smuzhiyun
5063*4882a593Smuzhiyun /*
5064*4882a593Smuzhiyun * Now to consider new write requests, cache write back and what else,
5065*4882a593Smuzhiyun * if anything should be read. We do not handle new writes when:
5066*4882a593Smuzhiyun * 1/ A 'write' operation (copy+xor) is already in flight.
5067*4882a593Smuzhiyun * 2/ A 'check' operation is in flight, as it may clobber the parity
5068*4882a593Smuzhiyun * block.
5069*4882a593Smuzhiyun * 3/ A r5c cache log write is in flight.
5070*4882a593Smuzhiyun */
5071*4882a593Smuzhiyun
5072*4882a593Smuzhiyun if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) {
5073*4882a593Smuzhiyun if (!r5c_is_writeback(conf->log)) {
5074*4882a593Smuzhiyun if (s.to_write)
5075*4882a593Smuzhiyun handle_stripe_dirtying(conf, sh, &s, disks);
5076*4882a593Smuzhiyun } else { /* write back cache */
5077*4882a593Smuzhiyun int ret = 0;
5078*4882a593Smuzhiyun
5079*4882a593Smuzhiyun /* First, try handle writes in caching phase */
5080*4882a593Smuzhiyun if (s.to_write)
5081*4882a593Smuzhiyun ret = r5c_try_caching_write(conf, sh, &s,
5082*4882a593Smuzhiyun disks);
5083*4882a593Smuzhiyun /*
5084*4882a593Smuzhiyun * If caching phase failed: ret == -EAGAIN
5085*4882a593Smuzhiyun * OR
5086*4882a593Smuzhiyun * stripe under reclaim: !caching && injournal
5087*4882a593Smuzhiyun *
5088*4882a593Smuzhiyun * fall back to handle_stripe_dirtying()
5089*4882a593Smuzhiyun */
5090*4882a593Smuzhiyun if (ret == -EAGAIN ||
5091*4882a593Smuzhiyun /* stripe under reclaim: !caching && injournal */
5092*4882a593Smuzhiyun (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
5093*4882a593Smuzhiyun s.injournal > 0)) {
5094*4882a593Smuzhiyun ret = handle_stripe_dirtying(conf, sh, &s,
5095*4882a593Smuzhiyun disks);
5096*4882a593Smuzhiyun if (ret == -EAGAIN)
5097*4882a593Smuzhiyun goto finish;
5098*4882a593Smuzhiyun }
5099*4882a593Smuzhiyun }
5100*4882a593Smuzhiyun }
5101*4882a593Smuzhiyun
5102*4882a593Smuzhiyun /* maybe we need to check and possibly fix the parity for this stripe
5103*4882a593Smuzhiyun * Any reads will already have been scheduled, so we just see if enough
5104*4882a593Smuzhiyun * data is available. The parity check is held off while parity
5105*4882a593Smuzhiyun * dependent operations are in flight.
5106*4882a593Smuzhiyun */
5107*4882a593Smuzhiyun if (sh->check_state ||
5108*4882a593Smuzhiyun (s.syncing && s.locked == 0 &&
5109*4882a593Smuzhiyun !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
5110*4882a593Smuzhiyun !test_bit(STRIPE_INSYNC, &sh->state))) {
5111*4882a593Smuzhiyun if (conf->level == 6)
5112*4882a593Smuzhiyun handle_parity_checks6(conf, sh, &s, disks);
5113*4882a593Smuzhiyun else
5114*4882a593Smuzhiyun handle_parity_checks5(conf, sh, &s, disks);
5115*4882a593Smuzhiyun }
5116*4882a593Smuzhiyun
5117*4882a593Smuzhiyun if ((s.replacing || s.syncing) && s.locked == 0
5118*4882a593Smuzhiyun && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)
5119*4882a593Smuzhiyun && !test_bit(STRIPE_REPLACED, &sh->state)) {
5120*4882a593Smuzhiyun /* Write out to replacement devices where possible */
5121*4882a593Smuzhiyun for (i = 0; i < conf->raid_disks; i++)
5122*4882a593Smuzhiyun if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
5123*4882a593Smuzhiyun WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags));
5124*4882a593Smuzhiyun set_bit(R5_WantReplace, &sh->dev[i].flags);
5125*4882a593Smuzhiyun set_bit(R5_LOCKED, &sh->dev[i].flags);
5126*4882a593Smuzhiyun s.locked++;
5127*4882a593Smuzhiyun }
5128*4882a593Smuzhiyun if (s.replacing)
5129*4882a593Smuzhiyun set_bit(STRIPE_INSYNC, &sh->state);
5130*4882a593Smuzhiyun set_bit(STRIPE_REPLACED, &sh->state);
5131*4882a593Smuzhiyun }
5132*4882a593Smuzhiyun if ((s.syncing || s.replacing) && s.locked == 0 &&
5133*4882a593Smuzhiyun !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
5134*4882a593Smuzhiyun test_bit(STRIPE_INSYNC, &sh->state)) {
5135*4882a593Smuzhiyun md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1);
5136*4882a593Smuzhiyun clear_bit(STRIPE_SYNCING, &sh->state);
5137*4882a593Smuzhiyun if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
5138*4882a593Smuzhiyun wake_up(&conf->wait_for_overlap);
5139*4882a593Smuzhiyun }
5140*4882a593Smuzhiyun
5141*4882a593Smuzhiyun /* If the failed drives are just a ReadError, then we might need
5142*4882a593Smuzhiyun * to progress the repair/check process
5143*4882a593Smuzhiyun */
5144*4882a593Smuzhiyun if (s.failed <= conf->max_degraded && !conf->mddev->ro)
5145*4882a593Smuzhiyun for (i = 0; i < s.failed; i++) {
5146*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[s.failed_num[i]];
5147*4882a593Smuzhiyun if (test_bit(R5_ReadError, &dev->flags)
5148*4882a593Smuzhiyun && !test_bit(R5_LOCKED, &dev->flags)
5149*4882a593Smuzhiyun && test_bit(R5_UPTODATE, &dev->flags)
5150*4882a593Smuzhiyun ) {
5151*4882a593Smuzhiyun if (!test_bit(R5_ReWrite, &dev->flags)) {
5152*4882a593Smuzhiyun set_bit(R5_Wantwrite, &dev->flags);
5153*4882a593Smuzhiyun set_bit(R5_ReWrite, &dev->flags);
5154*4882a593Smuzhiyun } else
5155*4882a593Smuzhiyun /* let's read it back */
5156*4882a593Smuzhiyun set_bit(R5_Wantread, &dev->flags);
5157*4882a593Smuzhiyun set_bit(R5_LOCKED, &dev->flags);
5158*4882a593Smuzhiyun s.locked++;
5159*4882a593Smuzhiyun }
5160*4882a593Smuzhiyun }
5161*4882a593Smuzhiyun
5162*4882a593Smuzhiyun /* Finish reconstruct operations initiated by the expansion process */
5163*4882a593Smuzhiyun if (sh->reconstruct_state == reconstruct_state_result) {
5164*4882a593Smuzhiyun struct stripe_head *sh_src
5165*4882a593Smuzhiyun = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1);
5166*4882a593Smuzhiyun if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
5167*4882a593Smuzhiyun /* sh cannot be written until sh_src has been read,
5168*4882a593Smuzhiyun * so arrange for sh to be delayed a little
5169*4882a593Smuzhiyun */
5170*4882a593Smuzhiyun set_bit(STRIPE_DELAYED, &sh->state);
5171*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
5172*4882a593Smuzhiyun if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
5173*4882a593Smuzhiyun &sh_src->state))
5174*4882a593Smuzhiyun atomic_inc(&conf->preread_active_stripes);
5175*4882a593Smuzhiyun raid5_release_stripe(sh_src);
5176*4882a593Smuzhiyun goto finish;
5177*4882a593Smuzhiyun }
5178*4882a593Smuzhiyun if (sh_src)
5179*4882a593Smuzhiyun raid5_release_stripe(sh_src);
5180*4882a593Smuzhiyun
5181*4882a593Smuzhiyun sh->reconstruct_state = reconstruct_state_idle;
5182*4882a593Smuzhiyun clear_bit(STRIPE_EXPANDING, &sh->state);
5183*4882a593Smuzhiyun for (i = conf->raid_disks; i--; ) {
5184*4882a593Smuzhiyun set_bit(R5_Wantwrite, &sh->dev[i].flags);
5185*4882a593Smuzhiyun set_bit(R5_LOCKED, &sh->dev[i].flags);
5186*4882a593Smuzhiyun s.locked++;
5187*4882a593Smuzhiyun }
5188*4882a593Smuzhiyun }
5189*4882a593Smuzhiyun
5190*4882a593Smuzhiyun if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
5191*4882a593Smuzhiyun !sh->reconstruct_state) {
5192*4882a593Smuzhiyun /* Need to write out all blocks after computing parity */
5193*4882a593Smuzhiyun sh->disks = conf->raid_disks;
5194*4882a593Smuzhiyun stripe_set_idx(sh->sector, conf, 0, sh);
5195*4882a593Smuzhiyun schedule_reconstruction(sh, &s, 1, 1);
5196*4882a593Smuzhiyun } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
5197*4882a593Smuzhiyun clear_bit(STRIPE_EXPAND_READY, &sh->state);
5198*4882a593Smuzhiyun atomic_dec(&conf->reshape_stripes);
5199*4882a593Smuzhiyun wake_up(&conf->wait_for_overlap);
5200*4882a593Smuzhiyun md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1);
5201*4882a593Smuzhiyun }
5202*4882a593Smuzhiyun
5203*4882a593Smuzhiyun if (s.expanding && s.locked == 0 &&
5204*4882a593Smuzhiyun !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
5205*4882a593Smuzhiyun handle_stripe_expansion(conf, sh);
5206*4882a593Smuzhiyun
5207*4882a593Smuzhiyun finish:
5208*4882a593Smuzhiyun /* wait for this device to become unblocked */
5209*4882a593Smuzhiyun if (unlikely(s.blocked_rdev)) {
5210*4882a593Smuzhiyun if (conf->mddev->external)
5211*4882a593Smuzhiyun md_wait_for_blocked_rdev(s.blocked_rdev,
5212*4882a593Smuzhiyun conf->mddev);
5213*4882a593Smuzhiyun else
5214*4882a593Smuzhiyun /* Internal metadata will immediately
5215*4882a593Smuzhiyun * be written by raid5d, so we don't
5216*4882a593Smuzhiyun * need to wait here.
5217*4882a593Smuzhiyun */
5218*4882a593Smuzhiyun rdev_dec_pending(s.blocked_rdev,
5219*4882a593Smuzhiyun conf->mddev);
5220*4882a593Smuzhiyun }
5221*4882a593Smuzhiyun
5222*4882a593Smuzhiyun if (s.handle_bad_blocks)
5223*4882a593Smuzhiyun for (i = disks; i--; ) {
5224*4882a593Smuzhiyun struct md_rdev *rdev;
5225*4882a593Smuzhiyun struct r5dev *dev = &sh->dev[i];
5226*4882a593Smuzhiyun if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
5227*4882a593Smuzhiyun /* We own a safe reference to the rdev */
5228*4882a593Smuzhiyun rdev = conf->disks[i].rdev;
5229*4882a593Smuzhiyun if (!rdev_set_badblocks(rdev, sh->sector,
5230*4882a593Smuzhiyun RAID5_STRIPE_SECTORS(conf), 0))
5231*4882a593Smuzhiyun md_error(conf->mddev, rdev);
5232*4882a593Smuzhiyun rdev_dec_pending(rdev, conf->mddev);
5233*4882a593Smuzhiyun }
5234*4882a593Smuzhiyun if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
5235*4882a593Smuzhiyun rdev = conf->disks[i].rdev;
5236*4882a593Smuzhiyun rdev_clear_badblocks(rdev, sh->sector,
5237*4882a593Smuzhiyun RAID5_STRIPE_SECTORS(conf), 0);
5238*4882a593Smuzhiyun rdev_dec_pending(rdev, conf->mddev);
5239*4882a593Smuzhiyun }
5240*4882a593Smuzhiyun if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
5241*4882a593Smuzhiyun rdev = conf->disks[i].replacement;
5242*4882a593Smuzhiyun if (!rdev)
5243*4882a593Smuzhiyun /* rdev has been moved down */
5244*4882a593Smuzhiyun rdev = conf->disks[i].rdev;
5245*4882a593Smuzhiyun rdev_clear_badblocks(rdev, sh->sector,
5246*4882a593Smuzhiyun RAID5_STRIPE_SECTORS(conf), 0);
5247*4882a593Smuzhiyun rdev_dec_pending(rdev, conf->mddev);
5248*4882a593Smuzhiyun }
5249*4882a593Smuzhiyun }
5250*4882a593Smuzhiyun
5251*4882a593Smuzhiyun if (s.ops_request)
5252*4882a593Smuzhiyun raid_run_ops(sh, s.ops_request);
5253*4882a593Smuzhiyun
5254*4882a593Smuzhiyun ops_run_io(sh, &s);
5255*4882a593Smuzhiyun
5256*4882a593Smuzhiyun if (s.dec_preread_active) {
5257*4882a593Smuzhiyun /* We delay this until after ops_run_io so that if make_request
5258*4882a593Smuzhiyun * is waiting on a flush, it won't continue until the writes
5259*4882a593Smuzhiyun * have actually been submitted.
5260*4882a593Smuzhiyun */
5261*4882a593Smuzhiyun atomic_dec(&conf->preread_active_stripes);
5262*4882a593Smuzhiyun if (atomic_read(&conf->preread_active_stripes) <
5263*4882a593Smuzhiyun IO_THRESHOLD)
5264*4882a593Smuzhiyun md_wakeup_thread(conf->mddev->thread);
5265*4882a593Smuzhiyun }
5266*4882a593Smuzhiyun
5267*4882a593Smuzhiyun clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
5268*4882a593Smuzhiyun }
5269*4882a593Smuzhiyun
5270*4882a593Smuzhiyun static void raid5_activate_delayed(struct r5conf *conf)
5271*4882a593Smuzhiyun {
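/*
 * Move stripes whose preread was deferred off the delayed list and onto
 * the hold list once preread activity has dropped below IO_THRESHOLD;
 * each moved stripe is then kicked via raid5_wakeup_stripe_thread().
 */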
5272*4882a593Smuzhiyun if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
5273*4882a593Smuzhiyun while (!list_empty(&conf->delayed_list)) {
5274*4882a593Smuzhiyun struct list_head *l = conf->delayed_list.next;
5275*4882a593Smuzhiyun struct stripe_head *sh;
5276*4882a593Smuzhiyun sh = list_entry(l, struct stripe_head, lru);
5277*4882a593Smuzhiyun list_del_init(l);
5278*4882a593Smuzhiyun clear_bit(STRIPE_DELAYED, &sh->state);
5279*4882a593Smuzhiyun if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
5280*4882a593Smuzhiyun atomic_inc(&conf->preread_active_stripes);
5281*4882a593Smuzhiyun list_add_tail(&sh->lru, &conf->hold_list);
5282*4882a593Smuzhiyun raid5_wakeup_stripe_thread(sh);
5283*4882a593Smuzhiyun }
5284*4882a593Smuzhiyun }
5285*4882a593Smuzhiyun }
5286*4882a593Smuzhiyun
5287*4882a593Smuzhiyun static void activate_bit_delay(struct r5conf *conf,
5288*4882a593Smuzhiyun struct list_head *temp_inactive_list)
5289*4882a593Smuzhiyun {
5290*4882a593Smuzhiyun /* device_lock is held */
5291*4882a593Smuzhiyun struct list_head head;
5292*4882a593Smuzhiyun list_add(&head, &conf->bitmap_list);
5293*4882a593Smuzhiyun list_del_init(&conf->bitmap_list);
5294*4882a593Smuzhiyun while (!list_empty(&head)) {
5295*4882a593Smuzhiyun struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
5296*4882a593Smuzhiyun int hash;
5297*4882a593Smuzhiyun list_del_init(&sh->lru);
5298*4882a593Smuzhiyun atomic_inc(&sh->count);
5299*4882a593Smuzhiyun hash = sh->hash_lock_index;
5300*4882a593Smuzhiyun __release_stripe(conf, sh, &temp_inactive_list[hash]);
5301*4882a593Smuzhiyun }
5302*4882a593Smuzhiyun }
5303*4882a593Smuzhiyun
5304*4882a593Smuzhiyun static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
5305*4882a593Smuzhiyun {
5306*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
5307*4882a593Smuzhiyun sector_t sector = bio->bi_iter.bi_sector;
5308*4882a593Smuzhiyun unsigned int chunk_sectors;
5309*4882a593Smuzhiyun unsigned int bio_sectors = bio_sectors(bio);
5310*4882a593Smuzhiyun
5311*4882a593Smuzhiyun WARN_ON_ONCE(bio->bi_partno);
5312*4882a593Smuzhiyun
5313*4882a593Smuzhiyun chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
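/*
 * Worked example with illustrative numbers: for chunk_sectors == 128, a bio
 * starting at sector 1000 with 24 sectors gives (1000 & 127) + 24 == 128,
 * which is <= 128, so it fits entirely within one chunk; one sector more
 * and it would straddle the chunk boundary.
 */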
5314*4882a593Smuzhiyun return chunk_sectors >=
5315*4882a593Smuzhiyun ((sector & (chunk_sectors - 1)) + bio_sectors);
5316*4882a593Smuzhiyun }
5317*4882a593Smuzhiyun
5318*4882a593Smuzhiyun /*
5319*4882a593Smuzhiyun * add bio to the retry LIFO (in O(1), since we are in interrupt
5320*4882a593Smuzhiyun * context); it is later sampled by raid5d.
5321*4882a593Smuzhiyun */
5322*4882a593Smuzhiyun static void add_bio_to_retry(struct bio *bi, struct r5conf *conf)
5323*4882a593Smuzhiyun {
5324*4882a593Smuzhiyun unsigned long flags;
5325*4882a593Smuzhiyun
5326*4882a593Smuzhiyun spin_lock_irqsave(&conf->device_lock, flags);
5327*4882a593Smuzhiyun
5328*4882a593Smuzhiyun bi->bi_next = conf->retry_read_aligned_list;
5329*4882a593Smuzhiyun conf->retry_read_aligned_list = bi;
5330*4882a593Smuzhiyun
5331*4882a593Smuzhiyun spin_unlock_irqrestore(&conf->device_lock, flags);
5332*4882a593Smuzhiyun md_wakeup_thread(conf->mddev->thread);
5333*4882a593Smuzhiyun }
5334*4882a593Smuzhiyun
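/*
 * A bio stashed in retry_read_aligned resumes at retry_read_offset; bios
 * freshly pulled off the retry LIFO start again from offset 0.
 */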
5335*4882a593Smuzhiyun static struct bio *remove_bio_from_retry(struct r5conf *conf,
5336*4882a593Smuzhiyun unsigned int *offset)
5337*4882a593Smuzhiyun {
5338*4882a593Smuzhiyun struct bio *bi;
5339*4882a593Smuzhiyun
5340*4882a593Smuzhiyun bi = conf->retry_read_aligned;
5341*4882a593Smuzhiyun if (bi) {
5342*4882a593Smuzhiyun *offset = conf->retry_read_offset;
5343*4882a593Smuzhiyun conf->retry_read_aligned = NULL;
5344*4882a593Smuzhiyun return bi;
5345*4882a593Smuzhiyun }
5346*4882a593Smuzhiyun bi = conf->retry_read_aligned_list;
5347*4882a593Smuzhiyun if(bi) {
5348*4882a593Smuzhiyun conf->retry_read_aligned_list = bi->bi_next;
5349*4882a593Smuzhiyun bi->bi_next = NULL;
5350*4882a593Smuzhiyun *offset = 0;
5351*4882a593Smuzhiyun }
5352*4882a593Smuzhiyun
5353*4882a593Smuzhiyun return bi;
5354*4882a593Smuzhiyun }
5355*4882a593Smuzhiyun
5356*4882a593Smuzhiyun /*
5357*4882a593Smuzhiyun * raid5_align_endio checks whether the read succeeded and, if it
5358*4882a593Smuzhiyun * did, calls bio_endio on the original bio (having bio_put the new
5359*4882a593Smuzhiyun * bio first).
5360*4882a593Smuzhiyun * If the read failed, the original bio is queued for retry instead.
5361*4882a593Smuzhiyun */
5362*4882a593Smuzhiyun static void raid5_align_endio(struct bio *bi)
5363*4882a593Smuzhiyun {
5364*4882a593Smuzhiyun struct bio* raid_bi = bi->bi_private;
5365*4882a593Smuzhiyun struct mddev *mddev;
5366*4882a593Smuzhiyun struct r5conf *conf;
5367*4882a593Smuzhiyun struct md_rdev *rdev;
5368*4882a593Smuzhiyun blk_status_t error = bi->bi_status;
5369*4882a593Smuzhiyun
5370*4882a593Smuzhiyun bio_put(bi);
5371*4882a593Smuzhiyun
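/* raid5_read_one_chunk() stashed the rdev in bi_next; recover it here */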
5372*4882a593Smuzhiyun rdev = (void*)raid_bi->bi_next;
5373*4882a593Smuzhiyun raid_bi->bi_next = NULL;
5374*4882a593Smuzhiyun mddev = rdev->mddev;
5375*4882a593Smuzhiyun conf = mddev->private;
5376*4882a593Smuzhiyun
5377*4882a593Smuzhiyun rdev_dec_pending(rdev, conf->mddev);
5378*4882a593Smuzhiyun
5379*4882a593Smuzhiyun if (!error) {
5380*4882a593Smuzhiyun bio_endio(raid_bi);
5381*4882a593Smuzhiyun if (atomic_dec_and_test(&conf->active_aligned_reads))
5382*4882a593Smuzhiyun wake_up(&conf->wait_for_quiescent);
5383*4882a593Smuzhiyun return;
5384*4882a593Smuzhiyun }
5385*4882a593Smuzhiyun
5386*4882a593Smuzhiyun pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
5387*4882a593Smuzhiyun
5388*4882a593Smuzhiyun add_bio_to_retry(raid_bi, conf);
5389*4882a593Smuzhiyun }
5390*4882a593Smuzhiyun
5391*4882a593Smuzhiyun static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
5392*4882a593Smuzhiyun {
5393*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
5394*4882a593Smuzhiyun int dd_idx;
5395*4882a593Smuzhiyun struct bio* align_bi;
5396*4882a593Smuzhiyun struct md_rdev *rdev;
5397*4882a593Smuzhiyun sector_t end_sector;
5398*4882a593Smuzhiyun
5399*4882a593Smuzhiyun if (!in_chunk_boundary(mddev, raid_bio)) {
5400*4882a593Smuzhiyun pr_debug("%s: non aligned\n", __func__);
5401*4882a593Smuzhiyun return 0;
5402*4882a593Smuzhiyun }
5403*4882a593Smuzhiyun /*
5404*4882a593Smuzhiyun * use bio_clone_fast to make a copy of the bio
5405*4882a593Smuzhiyun */
5406*4882a593Smuzhiyun align_bi = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
5407*4882a593Smuzhiyun if (!align_bi)
5408*4882a593Smuzhiyun return 0;
5409*4882a593Smuzhiyun /*
5410*4882a593Smuzhiyun * set bi_end_io to a new function, and set bi_private to the
5411*4882a593Smuzhiyun * original bio.
5412*4882a593Smuzhiyun */
5413*4882a593Smuzhiyun align_bi->bi_end_io = raid5_align_endio;
5414*4882a593Smuzhiyun align_bi->bi_private = raid_bio;
5415*4882a593Smuzhiyun /*
5416*4882a593Smuzhiyun * compute position
5417*4882a593Smuzhiyun */
5418*4882a593Smuzhiyun align_bi->bi_iter.bi_sector =
5419*4882a593Smuzhiyun raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
5420*4882a593Smuzhiyun 0, &dd_idx, NULL);
5421*4882a593Smuzhiyun
5422*4882a593Smuzhiyun end_sector = bio_end_sector(align_bi);
5423*4882a593Smuzhiyun rcu_read_lock();
5424*4882a593Smuzhiyun rdev = rcu_dereference(conf->disks[dd_idx].replacement);
5425*4882a593Smuzhiyun if (!rdev || test_bit(Faulty, &rdev->flags) ||
5426*4882a593Smuzhiyun rdev->recovery_offset < end_sector) {
5427*4882a593Smuzhiyun rdev = rcu_dereference(conf->disks[dd_idx].rdev);
5428*4882a593Smuzhiyun if (rdev &&
5429*4882a593Smuzhiyun (test_bit(Faulty, &rdev->flags) ||
5430*4882a593Smuzhiyun !(test_bit(In_sync, &rdev->flags) ||
5431*4882a593Smuzhiyun rdev->recovery_offset >= end_sector)))
5432*4882a593Smuzhiyun rdev = NULL;
5433*4882a593Smuzhiyun }
5434*4882a593Smuzhiyun
5435*4882a593Smuzhiyun if (r5c_big_stripe_cached(conf, align_bi->bi_iter.bi_sector)) {
5436*4882a593Smuzhiyun rcu_read_unlock();
5437*4882a593Smuzhiyun bio_put(align_bi);
5438*4882a593Smuzhiyun return 0;
5439*4882a593Smuzhiyun }
5440*4882a593Smuzhiyun
5441*4882a593Smuzhiyun if (rdev) {
5442*4882a593Smuzhiyun sector_t first_bad;
5443*4882a593Smuzhiyun int bad_sectors;
5444*4882a593Smuzhiyun
5445*4882a593Smuzhiyun atomic_inc(&rdev->nr_pending);
5446*4882a593Smuzhiyun rcu_read_unlock();
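/* stash the rdev in bi_next so raid5_align_endio() can retrieve it */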
5447*4882a593Smuzhiyun raid_bio->bi_next = (void*)rdev;
5448*4882a593Smuzhiyun bio_set_dev(align_bi, rdev->bdev);
5449*4882a593Smuzhiyun
5450*4882a593Smuzhiyun if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
5451*4882a593Smuzhiyun bio_sectors(align_bi),
5452*4882a593Smuzhiyun &first_bad, &bad_sectors)) {
5453*4882a593Smuzhiyun bio_put(align_bi);
5454*4882a593Smuzhiyun rdev_dec_pending(rdev, mddev);
5455*4882a593Smuzhiyun return 0;
5456*4882a593Smuzhiyun }
5457*4882a593Smuzhiyun
5458*4882a593Smuzhiyun /* No reshape active, so we can trust rdev->data_offset */
5459*4882a593Smuzhiyun align_bi->bi_iter.bi_sector += rdev->data_offset;
5460*4882a593Smuzhiyun
5461*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
5462*4882a593Smuzhiyun wait_event_lock_irq(conf->wait_for_quiescent,
5463*4882a593Smuzhiyun conf->quiesce == 0,
5464*4882a593Smuzhiyun conf->device_lock);
5465*4882a593Smuzhiyun atomic_inc(&conf->active_aligned_reads);
5466*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
5467*4882a593Smuzhiyun
5468*4882a593Smuzhiyun if (mddev->gendisk)
5469*4882a593Smuzhiyun trace_block_bio_remap(align_bi->bi_disk->queue,
5470*4882a593Smuzhiyun align_bi, disk_devt(mddev->gendisk),
5471*4882a593Smuzhiyun raid_bio->bi_iter.bi_sector);
5472*4882a593Smuzhiyun submit_bio_noacct(align_bi);
5473*4882a593Smuzhiyun return 1;
5474*4882a593Smuzhiyun } else {
5475*4882a593Smuzhiyun rcu_read_unlock();
5476*4882a593Smuzhiyun bio_put(align_bi);
5477*4882a593Smuzhiyun return 0;
5478*4882a593Smuzhiyun }
5479*4882a593Smuzhiyun }
5480*4882a593Smuzhiyun
5481*4882a593Smuzhiyun static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
5482*4882a593Smuzhiyun {
5483*4882a593Smuzhiyun struct bio *split;
5484*4882a593Smuzhiyun sector_t sector = raid_bio->bi_iter.bi_sector;
5485*4882a593Smuzhiyun unsigned chunk_sects = mddev->chunk_sectors;
5486*4882a593Smuzhiyun unsigned sectors = chunk_sects - (sector & (chunk_sects-1));
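/*
 * sectors is how much of the bio fits inside the current chunk; e.g. with
 * chunk_sects == 128 and bi_sector == 1000, (1000 & 127) == 104, so only
 * the first 24 sectors can be read as an aligned chunk here and the
 * remainder is split off and resubmitted below.
 */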
5487*4882a593Smuzhiyun
5488*4882a593Smuzhiyun if (sectors < bio_sectors(raid_bio)) {
5489*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
5490*4882a593Smuzhiyun split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split);
5491*4882a593Smuzhiyun bio_chain(split, raid_bio);
5492*4882a593Smuzhiyun submit_bio_noacct(raid_bio);
5493*4882a593Smuzhiyun raid_bio = split;
5494*4882a593Smuzhiyun }
5495*4882a593Smuzhiyun
5496*4882a593Smuzhiyun if (!raid5_read_one_chunk(mddev, raid_bio))
5497*4882a593Smuzhiyun return raid_bio;
5498*4882a593Smuzhiyun
5499*4882a593Smuzhiyun return NULL;
5500*4882a593Smuzhiyun }
5501*4882a593Smuzhiyun
5502*4882a593Smuzhiyun /* __get_priority_stripe - get the next stripe to process
5503*4882a593Smuzhiyun *
5504*4882a593Smuzhiyun * Full stripe writes are allowed to pass preread active stripes up until
5505*4882a593Smuzhiyun * the bypass_threshold is exceeded. In general the bypass_count
5506*4882a593Smuzhiyun * increments when the handle_list is handled before the hold_list; however, it
5507*4882a593Smuzhiyun * will not be incremented when STRIPE_IO_STARTED is found to be set, signifying a
5508*4882a593Smuzhiyun * stripe with in-flight i/o. The bypass_count will be reset when the
5509*4882a593Smuzhiyun * head of the hold_list has changed, i.e. the head was promoted to the
5510*4882a593Smuzhiyun * handle_list.
5511*4882a593Smuzhiyun */
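/*
 * In terms of the code below: while handle_list has work it is always
 * preferred, and each pick made while the hold_list head stays put bumps
 * bypass_count (unless the picked stripe already has I/O started). Once
 * handle_list is empty, a stripe is taken from hold_list only if
 * bypass_count has exceeded bypass_threshold or no full-stripe writes are
 * pending, and that pick drops bypass_count back by bypass_threshold.
 */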
5512*4882a593Smuzhiyun static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
5513*4882a593Smuzhiyun {
5514*4882a593Smuzhiyun struct stripe_head *sh, *tmp;
5515*4882a593Smuzhiyun struct list_head *handle_list = NULL;
5516*4882a593Smuzhiyun struct r5worker_group *wg;
5517*4882a593Smuzhiyun bool second_try = !r5c_is_writeback(conf->log) &&
5518*4882a593Smuzhiyun !r5l_log_disk_error(conf);
5519*4882a593Smuzhiyun bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state) ||
5520*4882a593Smuzhiyun r5l_log_disk_error(conf);
5521*4882a593Smuzhiyun
5522*4882a593Smuzhiyun again:
5523*4882a593Smuzhiyun wg = NULL;
5524*4882a593Smuzhiyun sh = NULL;
5525*4882a593Smuzhiyun if (conf->worker_cnt_per_group == 0) {
5526*4882a593Smuzhiyun handle_list = try_loprio ? &conf->loprio_list :
5527*4882a593Smuzhiyun &conf->handle_list;
5528*4882a593Smuzhiyun } else if (group != ANY_GROUP) {
5529*4882a593Smuzhiyun handle_list = try_loprio ? &conf->worker_groups[group].loprio_list :
5530*4882a593Smuzhiyun &conf->worker_groups[group].handle_list;
5531*4882a593Smuzhiyun wg = &conf->worker_groups[group];
5532*4882a593Smuzhiyun } else {
5533*4882a593Smuzhiyun int i;
5534*4882a593Smuzhiyun for (i = 0; i < conf->group_cnt; i++) {
5535*4882a593Smuzhiyun handle_list = try_loprio ? &conf->worker_groups[i].loprio_list :
5536*4882a593Smuzhiyun &conf->worker_groups[i].handle_list;
5537*4882a593Smuzhiyun wg = &conf->worker_groups[i];
5538*4882a593Smuzhiyun if (!list_empty(handle_list))
5539*4882a593Smuzhiyun break;
5540*4882a593Smuzhiyun }
5541*4882a593Smuzhiyun }
5542*4882a593Smuzhiyun
5543*4882a593Smuzhiyun pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
5544*4882a593Smuzhiyun __func__,
5545*4882a593Smuzhiyun list_empty(handle_list) ? "empty" : "busy",
5546*4882a593Smuzhiyun list_empty(&conf->hold_list) ? "empty" : "busy",
5547*4882a593Smuzhiyun atomic_read(&conf->pending_full_writes), conf->bypass_count);
5548*4882a593Smuzhiyun
5549*4882a593Smuzhiyun if (!list_empty(handle_list)) {
5550*4882a593Smuzhiyun sh = list_entry(handle_list->next, typeof(*sh), lru);
5551*4882a593Smuzhiyun
5552*4882a593Smuzhiyun if (list_empty(&conf->hold_list))
5553*4882a593Smuzhiyun conf->bypass_count = 0;
5554*4882a593Smuzhiyun else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
5555*4882a593Smuzhiyun if (conf->hold_list.next == conf->last_hold)
5556*4882a593Smuzhiyun conf->bypass_count++;
5557*4882a593Smuzhiyun else {
5558*4882a593Smuzhiyun conf->last_hold = conf->hold_list.next;
5559*4882a593Smuzhiyun conf->bypass_count -= conf->bypass_threshold;
5560*4882a593Smuzhiyun if (conf->bypass_count < 0)
5561*4882a593Smuzhiyun conf->bypass_count = 0;
5562*4882a593Smuzhiyun }
5563*4882a593Smuzhiyun }
5564*4882a593Smuzhiyun } else if (!list_empty(&conf->hold_list) &&
5565*4882a593Smuzhiyun ((conf->bypass_threshold &&
5566*4882a593Smuzhiyun conf->bypass_count > conf->bypass_threshold) ||
5567*4882a593Smuzhiyun atomic_read(&conf->pending_full_writes) == 0)) {
5568*4882a593Smuzhiyun
5569*4882a593Smuzhiyun list_for_each_entry(tmp, &conf->hold_list, lru) {
5570*4882a593Smuzhiyun if (conf->worker_cnt_per_group == 0 ||
5571*4882a593Smuzhiyun group == ANY_GROUP ||
5572*4882a593Smuzhiyun !cpu_online(tmp->cpu) ||
5573*4882a593Smuzhiyun cpu_to_group(tmp->cpu) == group) {
5574*4882a593Smuzhiyun sh = tmp;
5575*4882a593Smuzhiyun break;
5576*4882a593Smuzhiyun }
5577*4882a593Smuzhiyun }
5578*4882a593Smuzhiyun
5579*4882a593Smuzhiyun if (sh) {
5580*4882a593Smuzhiyun conf->bypass_count -= conf->bypass_threshold;
5581*4882a593Smuzhiyun if (conf->bypass_count < 0)
5582*4882a593Smuzhiyun conf->bypass_count = 0;
5583*4882a593Smuzhiyun }
5584*4882a593Smuzhiyun wg = NULL;
5585*4882a593Smuzhiyun }
5586*4882a593Smuzhiyun
5587*4882a593Smuzhiyun if (!sh) {
5588*4882a593Smuzhiyun if (second_try)
5589*4882a593Smuzhiyun return NULL;
5590*4882a593Smuzhiyun second_try = true;
5591*4882a593Smuzhiyun try_loprio = !try_loprio;
5592*4882a593Smuzhiyun goto again;
5593*4882a593Smuzhiyun }
5594*4882a593Smuzhiyun
5595*4882a593Smuzhiyun if (wg) {
5596*4882a593Smuzhiyun wg->stripes_cnt--;
5597*4882a593Smuzhiyun sh->group = NULL;
5598*4882a593Smuzhiyun }
5599*4882a593Smuzhiyun list_del_init(&sh->lru);
5600*4882a593Smuzhiyun BUG_ON(atomic_inc_return(&sh->count) != 1);
5601*4882a593Smuzhiyun return sh;
5602*4882a593Smuzhiyun }
5603*4882a593Smuzhiyun
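/*
 * Per-plug context: stripes released while the submitter holds a
 * blk_plug are parked on ->list and moved to the per-hash
 * temp_inactive_list in one batch when raid5_unplug() runs.
 */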
5604*4882a593Smuzhiyun struct raid5_plug_cb {
5605*4882a593Smuzhiyun struct blk_plug_cb cb;
5606*4882a593Smuzhiyun struct list_head list;
5607*4882a593Smuzhiyun struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
5608*4882a593Smuzhiyun };
5609*4882a593Smuzhiyun
5610*4882a593Smuzhiyun static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
5611*4882a593Smuzhiyun {
5612*4882a593Smuzhiyun struct raid5_plug_cb *cb = container_of(
5613*4882a593Smuzhiyun blk_cb, struct raid5_plug_cb, cb);
5614*4882a593Smuzhiyun struct stripe_head *sh;
5615*4882a593Smuzhiyun struct mddev *mddev = cb->cb.data;
5616*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
5617*4882a593Smuzhiyun int cnt = 0;
5618*4882a593Smuzhiyun int hash;
5619*4882a593Smuzhiyun
5620*4882a593Smuzhiyun if (cb->list.next && !list_empty(&cb->list)) {
5621*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
5622*4882a593Smuzhiyun while (!list_empty(&cb->list)) {
5623*4882a593Smuzhiyun sh = list_first_entry(&cb->list, struct stripe_head, lru);
5624*4882a593Smuzhiyun list_del_init(&sh->lru);
5625*4882a593Smuzhiyun /*
5626*4882a593Smuzhiyun * avoid the race where release_stripe_plug() sees
5627*4882a593Smuzhiyun * STRIPE_ON_UNPLUG_LIST clear while the stripe
5628*4882a593Smuzhiyun * is still on our list
5629*4882a593Smuzhiyun */
5630*4882a593Smuzhiyun smp_mb__before_atomic();
5631*4882a593Smuzhiyun clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
5632*4882a593Smuzhiyun /*
5633*4882a593Smuzhiyun * STRIPE_ON_RELEASE_LIST could be set here. In that
5634*4882a593Smuzhiyun * case, the count is always > 1 here
5635*4882a593Smuzhiyun */
5636*4882a593Smuzhiyun hash = sh->hash_lock_index;
5637*4882a593Smuzhiyun __release_stripe(conf, sh, &cb->temp_inactive_list[hash]);
5638*4882a593Smuzhiyun cnt++;
5639*4882a593Smuzhiyun }
5640*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
5641*4882a593Smuzhiyun }
5642*4882a593Smuzhiyun release_inactive_stripe_list(conf, cb->temp_inactive_list,
5643*4882a593Smuzhiyun NR_STRIPE_HASH_LOCKS);
5644*4882a593Smuzhiyun if (mddev->queue)
5645*4882a593Smuzhiyun trace_block_unplug(mddev->queue, cnt, !from_schedule);
5646*4882a593Smuzhiyun kfree(cb);
5647*4882a593Smuzhiyun }
5648*4882a593Smuzhiyun
5649*4882a593Smuzhiyun static void release_stripe_plug(struct mddev *mddev,
5650*4882a593Smuzhiyun struct stripe_head *sh)
5651*4882a593Smuzhiyun {
5652*4882a593Smuzhiyun struct blk_plug_cb *blk_cb = blk_check_plugged(
5653*4882a593Smuzhiyun raid5_unplug, mddev,
5654*4882a593Smuzhiyun sizeof(struct raid5_plug_cb));
5655*4882a593Smuzhiyun struct raid5_plug_cb *cb;
5656*4882a593Smuzhiyun
5657*4882a593Smuzhiyun if (!blk_cb) {
5658*4882a593Smuzhiyun raid5_release_stripe(sh);
5659*4882a593Smuzhiyun return;
5660*4882a593Smuzhiyun }
5661*4882a593Smuzhiyun
5662*4882a593Smuzhiyun cb = container_of(blk_cb, struct raid5_plug_cb, cb);
5663*4882a593Smuzhiyun
5664*4882a593Smuzhiyun if (cb->list.next == NULL) {
5665*4882a593Smuzhiyun int i;
5666*4882a593Smuzhiyun INIT_LIST_HEAD(&cb->list);
5667*4882a593Smuzhiyun for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
5668*4882a593Smuzhiyun INIT_LIST_HEAD(cb->temp_inactive_list + i);
5669*4882a593Smuzhiyun }
5670*4882a593Smuzhiyun
5671*4882a593Smuzhiyun if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
5672*4882a593Smuzhiyun list_add_tail(&sh->lru, &cb->list);
5673*4882a593Smuzhiyun else
5674*4882a593Smuzhiyun raid5_release_stripe(sh);
5675*4882a593Smuzhiyun }
5676*4882a593Smuzhiyun
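/*
 * Handle a REQ_OP_DISCARD bio: the range is trimmed inward to whole
 * stripes, and on each affected stripe every data device (all but
 * pd_idx/qd_idx) gets ->towrite pointed at the bio with R5_OVERWRITE
 * set before the stripe is queued for handling.
 */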
5677*4882a593Smuzhiyun static void make_discard_request(struct mddev *mddev, struct bio *bi)
5678*4882a593Smuzhiyun {
5679*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
5680*4882a593Smuzhiyun sector_t logical_sector, last_sector;
5681*4882a593Smuzhiyun struct stripe_head *sh;
5682*4882a593Smuzhiyun int stripe_sectors;
5683*4882a593Smuzhiyun
5684*4882a593Smuzhiyun if (mddev->reshape_position != MaxSector)
5685*4882a593Smuzhiyun /* Skip discard while reshape is happening */
5686*4882a593Smuzhiyun return;
5687*4882a593Smuzhiyun
5688*4882a593Smuzhiyun logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
5689*4882a593Smuzhiyun last_sector = bio_end_sector(bi);
5690*4882a593Smuzhiyun
5691*4882a593Smuzhiyun bi->bi_next = NULL;
5692*4882a593Smuzhiyun
5693*4882a593Smuzhiyun stripe_sectors = conf->chunk_sectors *
5694*4882a593Smuzhiyun (conf->raid_disks - conf->max_degraded);
5695*4882a593Smuzhiyun logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector,
5696*4882a593Smuzhiyun stripe_sectors);
5697*4882a593Smuzhiyun sector_div(last_sector, stripe_sectors);
5698*4882a593Smuzhiyun
5699*4882a593Smuzhiyun logical_sector *= conf->chunk_sectors;
5700*4882a593Smuzhiyun last_sector *= conf->chunk_sectors;
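/*
 * Example (hypothetical geometry): with 128-sector chunks, 6 disks and
 * max_degraded == 1, stripe_sectors = 128 * 5 = 640.  A discard
 * starting at array sector 1000 is rounded up to full-stripe row 2,
 * whose per-device start is 2 * 128 = 256; the end is rounded down the
 * same way, so only fully covered stripes are discarded.
 */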
5701*4882a593Smuzhiyun
5702*4882a593Smuzhiyun for (; logical_sector < last_sector;
5703*4882a593Smuzhiyun logical_sector += RAID5_STRIPE_SECTORS(conf)) {
5704*4882a593Smuzhiyun DEFINE_WAIT(w);
5705*4882a593Smuzhiyun int d;
5706*4882a593Smuzhiyun again:
5707*4882a593Smuzhiyun sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0);
5708*4882a593Smuzhiyun prepare_to_wait(&conf->wait_for_overlap, &w,
5709*4882a593Smuzhiyun TASK_UNINTERRUPTIBLE);
5710*4882a593Smuzhiyun set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
5711*4882a593Smuzhiyun if (test_bit(STRIPE_SYNCING, &sh->state)) {
5712*4882a593Smuzhiyun raid5_release_stripe(sh);
5713*4882a593Smuzhiyun schedule();
5714*4882a593Smuzhiyun goto again;
5715*4882a593Smuzhiyun }
5716*4882a593Smuzhiyun clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
5717*4882a593Smuzhiyun spin_lock_irq(&sh->stripe_lock);
5718*4882a593Smuzhiyun for (d = 0; d < conf->raid_disks; d++) {
5719*4882a593Smuzhiyun if (d == sh->pd_idx || d == sh->qd_idx)
5720*4882a593Smuzhiyun continue;
5721*4882a593Smuzhiyun if (sh->dev[d].towrite || sh->dev[d].toread) {
5722*4882a593Smuzhiyun set_bit(R5_Overlap, &sh->dev[d].flags);
5723*4882a593Smuzhiyun spin_unlock_irq(&sh->stripe_lock);
5724*4882a593Smuzhiyun raid5_release_stripe(sh);
5725*4882a593Smuzhiyun schedule();
5726*4882a593Smuzhiyun goto again;
5727*4882a593Smuzhiyun }
5728*4882a593Smuzhiyun }
5729*4882a593Smuzhiyun set_bit(STRIPE_DISCARD, &sh->state);
5730*4882a593Smuzhiyun finish_wait(&conf->wait_for_overlap, &w);
5731*4882a593Smuzhiyun sh->overwrite_disks = 0;
5732*4882a593Smuzhiyun for (d = 0; d < conf->raid_disks; d++) {
5733*4882a593Smuzhiyun if (d == sh->pd_idx || d == sh->qd_idx)
5734*4882a593Smuzhiyun continue;
5735*4882a593Smuzhiyun sh->dev[d].towrite = bi;
5736*4882a593Smuzhiyun set_bit(R5_OVERWRITE, &sh->dev[d].flags);
5737*4882a593Smuzhiyun bio_inc_remaining(bi);
5738*4882a593Smuzhiyun md_write_inc(mddev, bi);
5739*4882a593Smuzhiyun sh->overwrite_disks++;
5740*4882a593Smuzhiyun }
5741*4882a593Smuzhiyun spin_unlock_irq(&sh->stripe_lock);
5742*4882a593Smuzhiyun if (conf->mddev->bitmap) {
5743*4882a593Smuzhiyun for (d = 0;
5744*4882a593Smuzhiyun d < conf->raid_disks - conf->max_degraded;
5745*4882a593Smuzhiyun d++)
5746*4882a593Smuzhiyun md_bitmap_startwrite(mddev->bitmap,
5747*4882a593Smuzhiyun sh->sector,
5748*4882a593Smuzhiyun RAID5_STRIPE_SECTORS(conf),
5749*4882a593Smuzhiyun 0);
5750*4882a593Smuzhiyun sh->bm_seq = conf->seq_flush + 1;
5751*4882a593Smuzhiyun set_bit(STRIPE_BIT_DELAY, &sh->state);
5752*4882a593Smuzhiyun }
5753*4882a593Smuzhiyun
5754*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
5755*4882a593Smuzhiyun clear_bit(STRIPE_DELAYED, &sh->state);
5756*4882a593Smuzhiyun if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
5757*4882a593Smuzhiyun atomic_inc(&conf->preread_active_stripes);
5758*4882a593Smuzhiyun release_stripe_plug(mddev, sh);
5759*4882a593Smuzhiyun }
5760*4882a593Smuzhiyun
5761*4882a593Smuzhiyun bio_endio(bi);
5762*4882a593Smuzhiyun }
5763*4882a593Smuzhiyun
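/*
 * Main bio entry point for raid4/5/6: flushes are routed through the
 * journal when present, chunk-aligned reads may bypass the stripe
 * cache, discards go to make_discard_request(), and everything else is
 * split into RAID5_STRIPE_SECTORS-sized pieces that are attached to
 * the matching stripe_heads.
 */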
5764*4882a593Smuzhiyun static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
5765*4882a593Smuzhiyun {
5766*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
5767*4882a593Smuzhiyun int dd_idx;
5768*4882a593Smuzhiyun sector_t new_sector;
5769*4882a593Smuzhiyun sector_t logical_sector, last_sector;
5770*4882a593Smuzhiyun struct stripe_head *sh;
5771*4882a593Smuzhiyun const int rw = bio_data_dir(bi);
5772*4882a593Smuzhiyun DEFINE_WAIT(w);
5773*4882a593Smuzhiyun bool do_prepare;
5774*4882a593Smuzhiyun bool do_flush = false;
5775*4882a593Smuzhiyun
5776*4882a593Smuzhiyun if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
5777*4882a593Smuzhiyun int ret = log_handle_flush_request(conf, bi);
5778*4882a593Smuzhiyun
5779*4882a593Smuzhiyun if (ret == 0)
5780*4882a593Smuzhiyun return true;
5781*4882a593Smuzhiyun if (ret == -ENODEV) {
5782*4882a593Smuzhiyun if (md_flush_request(mddev, bi))
5783*4882a593Smuzhiyun return true;
5784*4882a593Smuzhiyun }
5785*4882a593Smuzhiyun /* ret == -EAGAIN, fallback */
5786*4882a593Smuzhiyun /*
5787*4882a593Smuzhiyun * if r5l_handle_flush_request() didn't clear REQ_PREFLUSH,
5788*4882a593Smuzhiyun * we need to flush journal device
5789*4882a593Smuzhiyun */
5790*4882a593Smuzhiyun do_flush = bi->bi_opf & REQ_PREFLUSH;
5791*4882a593Smuzhiyun }
5792*4882a593Smuzhiyun
5793*4882a593Smuzhiyun if (!md_write_start(mddev, bi))
5794*4882a593Smuzhiyun return false;
5795*4882a593Smuzhiyun /*
5796*4882a593Smuzhiyun * If array is degraded, better not do chunk aligned read because
5797*4882a593Smuzhiyun * later we might have to read it again in order to reconstruct
5798*4882a593Smuzhiyun * data on failed drives.
5799*4882a593Smuzhiyun */
5800*4882a593Smuzhiyun if (rw == READ && mddev->degraded == 0 &&
5801*4882a593Smuzhiyun mddev->reshape_position == MaxSector) {
5802*4882a593Smuzhiyun bi = chunk_aligned_read(mddev, bi);
5803*4882a593Smuzhiyun if (!bi)
5804*4882a593Smuzhiyun return true;
5805*4882a593Smuzhiyun }
5806*4882a593Smuzhiyun
5807*4882a593Smuzhiyun if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) {
5808*4882a593Smuzhiyun make_discard_request(mddev, bi);
5809*4882a593Smuzhiyun md_write_end(mddev);
5810*4882a593Smuzhiyun return true;
5811*4882a593Smuzhiyun }
5812*4882a593Smuzhiyun
5813*4882a593Smuzhiyun logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
5814*4882a593Smuzhiyun last_sector = bio_end_sector(bi);
5815*4882a593Smuzhiyun bi->bi_next = NULL;
5816*4882a593Smuzhiyun
5817*4882a593Smuzhiyun prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
5818*4882a593Smuzhiyun for (; logical_sector < last_sector; logical_sector += RAID5_STRIPE_SECTORS(conf)) {
5819*4882a593Smuzhiyun int previous;
5820*4882a593Smuzhiyun int seq;
5821*4882a593Smuzhiyun
5822*4882a593Smuzhiyun do_prepare = false;
5823*4882a593Smuzhiyun retry:
5824*4882a593Smuzhiyun seq = read_seqcount_begin(&conf->gen_lock);
5825*4882a593Smuzhiyun previous = 0;
5826*4882a593Smuzhiyun if (do_prepare)
5827*4882a593Smuzhiyun prepare_to_wait(&conf->wait_for_overlap, &w,
5828*4882a593Smuzhiyun TASK_UNINTERRUPTIBLE);
5829*4882a593Smuzhiyun if (unlikely(conf->reshape_progress != MaxSector)) {
5830*4882a593Smuzhiyun /* spinlock is needed as reshape_progress may be
5831*4882a593Smuzhiyun * 64bit on a 32bit platform, and so it might be
5832*4882a593Smuzhiyun * possible to see a half-updated value.
5833*4882a593Smuzhiyun * Of course reshape_progress could change after
5834*4882a593Smuzhiyun * the lock is dropped, so once we get a reference
5835*4882a593Smuzhiyun * to the stripe that we think it is, we will have
5836*4882a593Smuzhiyun * to check again.
5837*4882a593Smuzhiyun */
5838*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
5839*4882a593Smuzhiyun if (mddev->reshape_backwards
5840*4882a593Smuzhiyun ? logical_sector < conf->reshape_progress
5841*4882a593Smuzhiyun : logical_sector >= conf->reshape_progress) {
5842*4882a593Smuzhiyun previous = 1;
5843*4882a593Smuzhiyun } else {
5844*4882a593Smuzhiyun if (mddev->reshape_backwards
5845*4882a593Smuzhiyun ? logical_sector < conf->reshape_safe
5846*4882a593Smuzhiyun : logical_sector >= conf->reshape_safe) {
5847*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
5848*4882a593Smuzhiyun schedule();
5849*4882a593Smuzhiyun do_prepare = true;
5850*4882a593Smuzhiyun goto retry;
5851*4882a593Smuzhiyun }
5852*4882a593Smuzhiyun }
5853*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
5854*4882a593Smuzhiyun }
5855*4882a593Smuzhiyun
5856*4882a593Smuzhiyun new_sector = raid5_compute_sector(conf, logical_sector,
5857*4882a593Smuzhiyun previous,
5858*4882a593Smuzhiyun &dd_idx, NULL);
5859*4882a593Smuzhiyun pr_debug("raid456: raid5_make_request, sector %llu logical %llu\n",
5860*4882a593Smuzhiyun (unsigned long long)new_sector,
5861*4882a593Smuzhiyun (unsigned long long)logical_sector);
5862*4882a593Smuzhiyun
5863*4882a593Smuzhiyun sh = raid5_get_active_stripe(conf, new_sector, previous,
5864*4882a593Smuzhiyun (bi->bi_opf & REQ_RAHEAD), 0);
5865*4882a593Smuzhiyun if (sh) {
5866*4882a593Smuzhiyun if (unlikely(previous)) {
5867*4882a593Smuzhiyun /* expansion might have moved on while waiting for a
5868*4882a593Smuzhiyun * stripe, so we must do the range check again.
5869*4882a593Smuzhiyun * Expansion could still move past after this
5870*4882a593Smuzhiyun * test, but as we are holding a reference to
5871*4882a593Smuzhiyun * 'sh', we know that if that happens,
5872*4882a593Smuzhiyun * STRIPE_EXPANDING will get set and the expansion
5873*4882a593Smuzhiyun * won't proceed until we finish with the stripe.
5874*4882a593Smuzhiyun */
5875*4882a593Smuzhiyun int must_retry = 0;
5876*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
5877*4882a593Smuzhiyun if (mddev->reshape_backwards
5878*4882a593Smuzhiyun ? logical_sector >= conf->reshape_progress
5879*4882a593Smuzhiyun : logical_sector < conf->reshape_progress)
5880*4882a593Smuzhiyun /* mismatch, need to try again */
5881*4882a593Smuzhiyun must_retry = 1;
5882*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
5883*4882a593Smuzhiyun if (must_retry) {
5884*4882a593Smuzhiyun raid5_release_stripe(sh);
5885*4882a593Smuzhiyun schedule();
5886*4882a593Smuzhiyun do_prepare = true;
5887*4882a593Smuzhiyun goto retry;
5888*4882a593Smuzhiyun }
5889*4882a593Smuzhiyun }
5890*4882a593Smuzhiyun if (read_seqcount_retry(&conf->gen_lock, seq)) {
5891*4882a593Smuzhiyun /* Might have got the wrong stripe_head
5892*4882a593Smuzhiyun * by accident
5893*4882a593Smuzhiyun */
5894*4882a593Smuzhiyun raid5_release_stripe(sh);
5895*4882a593Smuzhiyun goto retry;
5896*4882a593Smuzhiyun }
5897*4882a593Smuzhiyun
5898*4882a593Smuzhiyun if (test_bit(STRIPE_EXPANDING, &sh->state) ||
5899*4882a593Smuzhiyun !add_stripe_bio(sh, bi, dd_idx, rw, previous)) {
5900*4882a593Smuzhiyun /* Stripe is busy expanding or
5901*4882a593Smuzhiyun * add failed due to overlap. Flush everything
5902*4882a593Smuzhiyun * and wait a while
5903*4882a593Smuzhiyun */
5904*4882a593Smuzhiyun md_wakeup_thread(mddev->thread);
5905*4882a593Smuzhiyun raid5_release_stripe(sh);
5906*4882a593Smuzhiyun schedule();
5907*4882a593Smuzhiyun do_prepare = true;
5908*4882a593Smuzhiyun goto retry;
5909*4882a593Smuzhiyun }
5910*4882a593Smuzhiyun if (do_flush) {
5911*4882a593Smuzhiyun set_bit(STRIPE_R5C_PREFLUSH, &sh->state);
5912*4882a593Smuzhiyun /* we only need flush for one stripe */
5913*4882a593Smuzhiyun do_flush = false;
5914*4882a593Smuzhiyun }
5915*4882a593Smuzhiyun
5916*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
5917*4882a593Smuzhiyun clear_bit(STRIPE_DELAYED, &sh->state);
5918*4882a593Smuzhiyun if ((!sh->batch_head || sh == sh->batch_head) &&
5919*4882a593Smuzhiyun (bi->bi_opf & REQ_SYNC) &&
5920*4882a593Smuzhiyun !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
5921*4882a593Smuzhiyun atomic_inc(&conf->preread_active_stripes);
5922*4882a593Smuzhiyun release_stripe_plug(mddev, sh);
5923*4882a593Smuzhiyun } else {
5924*4882a593Smuzhiyun /* cannot get stripe for read-ahead, just give-up */
5925*4882a593Smuzhiyun bi->bi_status = BLK_STS_IOERR;
5926*4882a593Smuzhiyun break;
5927*4882a593Smuzhiyun }
5928*4882a593Smuzhiyun }
5929*4882a593Smuzhiyun finish_wait(&conf->wait_for_overlap, &w);
5930*4882a593Smuzhiyun
5931*4882a593Smuzhiyun if (rw == WRITE)
5932*4882a593Smuzhiyun md_write_end(mddev);
5933*4882a593Smuzhiyun bio_endio(bi);
5934*4882a593Smuzhiyun return true;
5935*4882a593Smuzhiyun }
5936*4882a593Smuzhiyun
5937*4882a593Smuzhiyun static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
5938*4882a593Smuzhiyun
5939*4882a593Smuzhiyun static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
5940*4882a593Smuzhiyun {
5941*4882a593Smuzhiyun /* reshaping is quite different to recovery/resync so it is
5942*4882a593Smuzhiyun * handled quite separately ... here.
5943*4882a593Smuzhiyun *
5944*4882a593Smuzhiyun * On each call to sync_request, we gather one chunk worth of
5945*4882a593Smuzhiyun * destination stripes and flag them as expanding.
5946*4882a593Smuzhiyun * Then we find all the source stripes and request reads.
5947*4882a593Smuzhiyun * As the reads complete, handle_stripe will copy the data
5948*4882a593Smuzhiyun * into the destination stripe and release that stripe.
5949*4882a593Smuzhiyun */
5950*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
5951*4882a593Smuzhiyun struct stripe_head *sh;
5952*4882a593Smuzhiyun struct md_rdev *rdev;
5953*4882a593Smuzhiyun sector_t first_sector, last_sector;
5954*4882a593Smuzhiyun int raid_disks = conf->previous_raid_disks;
5955*4882a593Smuzhiyun int data_disks = raid_disks - conf->max_degraded;
5956*4882a593Smuzhiyun int new_data_disks = conf->raid_disks - conf->max_degraded;
5957*4882a593Smuzhiyun int i;
5958*4882a593Smuzhiyun int dd_idx;
5959*4882a593Smuzhiyun sector_t writepos, readpos, safepos;
5960*4882a593Smuzhiyun sector_t stripe_addr;
5961*4882a593Smuzhiyun int reshape_sectors;
5962*4882a593Smuzhiyun struct list_head stripes;
5963*4882a593Smuzhiyun sector_t retn;
5964*4882a593Smuzhiyun
5965*4882a593Smuzhiyun if (sector_nr == 0) {
5966*4882a593Smuzhiyun /* If restarting in the middle, skip the initial sectors */
5967*4882a593Smuzhiyun if (mddev->reshape_backwards &&
5968*4882a593Smuzhiyun conf->reshape_progress < raid5_size(mddev, 0, 0)) {
5969*4882a593Smuzhiyun sector_nr = raid5_size(mddev, 0, 0)
5970*4882a593Smuzhiyun - conf->reshape_progress;
5971*4882a593Smuzhiyun } else if (mddev->reshape_backwards &&
5972*4882a593Smuzhiyun conf->reshape_progress == MaxSector) {
5973*4882a593Smuzhiyun /* shouldn't happen, but just in case, finish up.*/
5974*4882a593Smuzhiyun sector_nr = MaxSector;
5975*4882a593Smuzhiyun } else if (!mddev->reshape_backwards &&
5976*4882a593Smuzhiyun conf->reshape_progress > 0)
5977*4882a593Smuzhiyun sector_nr = conf->reshape_progress;
5978*4882a593Smuzhiyun sector_div(sector_nr, new_data_disks);
5979*4882a593Smuzhiyun if (sector_nr) {
5980*4882a593Smuzhiyun mddev->curr_resync_completed = sector_nr;
5981*4882a593Smuzhiyun sysfs_notify_dirent_safe(mddev->sysfs_completed);
5982*4882a593Smuzhiyun *skipped = 1;
5983*4882a593Smuzhiyun retn = sector_nr;
5984*4882a593Smuzhiyun goto finish;
5985*4882a593Smuzhiyun }
5986*4882a593Smuzhiyun }
5987*4882a593Smuzhiyun
5988*4882a593Smuzhiyun /* We need to process a full chunk at a time.
5989*4882a593Smuzhiyun * If old and new chunk sizes differ, we need to process the
5990*4882a593Smuzhiyun * largest of these
5991*4882a593Smuzhiyun */
5992*4882a593Smuzhiyun
5993*4882a593Smuzhiyun reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors);
5994*4882a593Smuzhiyun
5995*4882a593Smuzhiyun /* We update the metadata at least every 10 seconds, or when
5996*4882a593Smuzhiyun * the data about to be copied would over-write the source of
5997*4882a593Smuzhiyun * the data at the front of the range, i.e. when one new stripe
5998*4882a593Smuzhiyun * beyond reshape_progress, mapped with the new layout, lands after
5999*4882a593Smuzhiyun * the point that reshape_safe maps to with the old layout.
6000*4882a593Smuzhiyun */
6001*4882a593Smuzhiyun writepos = conf->reshape_progress;
6002*4882a593Smuzhiyun sector_div(writepos, new_data_disks);
6003*4882a593Smuzhiyun readpos = conf->reshape_progress;
6004*4882a593Smuzhiyun sector_div(readpos, data_disks);
6005*4882a593Smuzhiyun safepos = conf->reshape_safe;
6006*4882a593Smuzhiyun sector_div(safepos, data_disks);
6007*4882a593Smuzhiyun if (mddev->reshape_backwards) {
6008*4882a593Smuzhiyun BUG_ON(writepos < reshape_sectors);
6009*4882a593Smuzhiyun writepos -= reshape_sectors;
6010*4882a593Smuzhiyun readpos += reshape_sectors;
6011*4882a593Smuzhiyun safepos += reshape_sectors;
6012*4882a593Smuzhiyun } else {
6013*4882a593Smuzhiyun writepos += reshape_sectors;
6014*4882a593Smuzhiyun /* readpos and safepos are worst-case calculations.
6015*4882a593Smuzhiyun * A negative number is overly pessimistic, and causes
6016*4882a593Smuzhiyun * obvious problems for unsigned storage. So clip to 0.
6017*4882a593Smuzhiyun */
6018*4882a593Smuzhiyun readpos -= min_t(sector_t, reshape_sectors, readpos);
6019*4882a593Smuzhiyun safepos -= min_t(sector_t, reshape_sectors, safepos);
6020*4882a593Smuzhiyun }
6021*4882a593Smuzhiyun
6022*4882a593Smuzhiyun /* Having calculated the 'writepos' possibly use it
6023*4882a593Smuzhiyun * to set 'stripe_addr' which is where we will write to.
6024*4882a593Smuzhiyun */
6025*4882a593Smuzhiyun if (mddev->reshape_backwards) {
6026*4882a593Smuzhiyun BUG_ON(conf->reshape_progress == 0);
6027*4882a593Smuzhiyun stripe_addr = writepos;
6028*4882a593Smuzhiyun BUG_ON((mddev->dev_sectors &
6029*4882a593Smuzhiyun ~((sector_t)reshape_sectors - 1))
6030*4882a593Smuzhiyun - reshape_sectors - stripe_addr
6031*4882a593Smuzhiyun != sector_nr);
6032*4882a593Smuzhiyun } else {
6033*4882a593Smuzhiyun BUG_ON(writepos != sector_nr + reshape_sectors);
6034*4882a593Smuzhiyun stripe_addr = sector_nr;
6035*4882a593Smuzhiyun }
6036*4882a593Smuzhiyun
6037*4882a593Smuzhiyun /* 'writepos' is the most advanced device address we might write.
6038*4882a593Smuzhiyun * 'readpos' is the least advanced device address we might read.
6039*4882a593Smuzhiyun * 'safepos' is the least address recorded in the metadata as having
6040*4882a593Smuzhiyun * been reshaped.
6041*4882a593Smuzhiyun * If there is a min_offset_diff, these are adjusted either by
6042*4882a593Smuzhiyun * increasing the safepos/readpos if diff is negative, or
6043*4882a593Smuzhiyun * increasing writepos if diff is positive.
6044*4882a593Smuzhiyun * If 'readpos' is then behind 'writepos', there is no way that we can
6045*4882a593Smuzhiyun * ensure safety in the face of a crash - that must be done by userspace
6046*4882a593Smuzhiyun * making a backup of the data. So in that case there is no particular
6047*4882a593Smuzhiyun * rush to update metadata.
6048*4882a593Smuzhiyun * Otherwise if 'safepos' is behind 'writepos', then we really need to
6049*4882a593Smuzhiyun * update the metadata to advance 'safepos' to match 'readpos' so that
6050*4882a593Smuzhiyun * we can be safe in the event of a crash.
6051*4882a593Smuzhiyun * So we insist on updating metadata if safepos is behind writepos and
6052*4882a593Smuzhiyun * readpos is beyond writepos.
6053*4882a593Smuzhiyun * In any case, update the metadata every 10 seconds.
6054*4882a593Smuzhiyun * Maybe that number should be configurable, but I'm not sure it is
6055*4882a593Smuzhiyun * worth it.... maybe it could be a multiple of safemode_delay???
6056*4882a593Smuzhiyun */
6057*4882a593Smuzhiyun if (conf->min_offset_diff < 0) {
6058*4882a593Smuzhiyun safepos += -conf->min_offset_diff;
6059*4882a593Smuzhiyun readpos += -conf->min_offset_diff;
6060*4882a593Smuzhiyun } else
6061*4882a593Smuzhiyun writepos += conf->min_offset_diff;
6062*4882a593Smuzhiyun
6063*4882a593Smuzhiyun if ((mddev->reshape_backwards
6064*4882a593Smuzhiyun ? (safepos > writepos && readpos < writepos)
6065*4882a593Smuzhiyun : (safepos < writepos && readpos > writepos)) ||
6066*4882a593Smuzhiyun time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
6067*4882a593Smuzhiyun /* Cannot proceed until we've updated the superblock... */
6068*4882a593Smuzhiyun wait_event(conf->wait_for_overlap,
6069*4882a593Smuzhiyun atomic_read(&conf->reshape_stripes)==0
6070*4882a593Smuzhiyun || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
6071*4882a593Smuzhiyun if (atomic_read(&conf->reshape_stripes) != 0)
6072*4882a593Smuzhiyun return 0;
6073*4882a593Smuzhiyun mddev->reshape_position = conf->reshape_progress;
6074*4882a593Smuzhiyun mddev->curr_resync_completed = sector_nr;
6075*4882a593Smuzhiyun if (!mddev->reshape_backwards)
6076*4882a593Smuzhiyun /* Can update recovery_offset */
6077*4882a593Smuzhiyun rdev_for_each(rdev, mddev)
6078*4882a593Smuzhiyun if (rdev->raid_disk >= 0 &&
6079*4882a593Smuzhiyun !test_bit(Journal, &rdev->flags) &&
6080*4882a593Smuzhiyun !test_bit(In_sync, &rdev->flags) &&
6081*4882a593Smuzhiyun rdev->recovery_offset < sector_nr)
6082*4882a593Smuzhiyun rdev->recovery_offset = sector_nr;
6083*4882a593Smuzhiyun
6084*4882a593Smuzhiyun conf->reshape_checkpoint = jiffies;
6085*4882a593Smuzhiyun set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6086*4882a593Smuzhiyun md_wakeup_thread(mddev->thread);
6087*4882a593Smuzhiyun wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
6088*4882a593Smuzhiyun test_bit(MD_RECOVERY_INTR, &mddev->recovery));
6089*4882a593Smuzhiyun if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6090*4882a593Smuzhiyun return 0;
6091*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
6092*4882a593Smuzhiyun conf->reshape_safe = mddev->reshape_position;
6093*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
6094*4882a593Smuzhiyun wake_up(&conf->wait_for_overlap);
6095*4882a593Smuzhiyun sysfs_notify_dirent_safe(mddev->sysfs_completed);
6096*4882a593Smuzhiyun }
6097*4882a593Smuzhiyun
6098*4882a593Smuzhiyun INIT_LIST_HEAD(&stripes);
6099*4882a593Smuzhiyun for (i = 0; i < reshape_sectors; i += RAID5_STRIPE_SECTORS(conf)) {
6100*4882a593Smuzhiyun int j;
6101*4882a593Smuzhiyun int skipped_disk = 0;
6102*4882a593Smuzhiyun sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
6103*4882a593Smuzhiyun set_bit(STRIPE_EXPANDING, &sh->state);
6104*4882a593Smuzhiyun atomic_inc(&conf->reshape_stripes);
6105*4882a593Smuzhiyun /* If any of this stripe is beyond the end of the old
6106*4882a593Smuzhiyun * array, then we need to zero those blocks
6107*4882a593Smuzhiyun */
6108*4882a593Smuzhiyun for (j=sh->disks; j--;) {
6109*4882a593Smuzhiyun sector_t s;
6110*4882a593Smuzhiyun if (j == sh->pd_idx)
6111*4882a593Smuzhiyun continue;
6112*4882a593Smuzhiyun if (conf->level == 6 &&
6113*4882a593Smuzhiyun j == sh->qd_idx)
6114*4882a593Smuzhiyun continue;
6115*4882a593Smuzhiyun s = raid5_compute_blocknr(sh, j, 0);
6116*4882a593Smuzhiyun if (s < raid5_size(mddev, 0, 0)) {
6117*4882a593Smuzhiyun skipped_disk = 1;
6118*4882a593Smuzhiyun continue;
6119*4882a593Smuzhiyun }
6120*4882a593Smuzhiyun memset(page_address(sh->dev[j].page), 0, RAID5_STRIPE_SIZE(conf));
6121*4882a593Smuzhiyun set_bit(R5_Expanded, &sh->dev[j].flags);
6122*4882a593Smuzhiyun set_bit(R5_UPTODATE, &sh->dev[j].flags);
6123*4882a593Smuzhiyun }
6124*4882a593Smuzhiyun if (!skipped_disk) {
6125*4882a593Smuzhiyun set_bit(STRIPE_EXPAND_READY, &sh->state);
6126*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
6127*4882a593Smuzhiyun }
6128*4882a593Smuzhiyun list_add(&sh->lru, &stripes);
6129*4882a593Smuzhiyun }
6130*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
6131*4882a593Smuzhiyun if (mddev->reshape_backwards)
6132*4882a593Smuzhiyun conf->reshape_progress -= reshape_sectors * new_data_disks;
6133*4882a593Smuzhiyun else
6134*4882a593Smuzhiyun conf->reshape_progress += reshape_sectors * new_data_disks;
6135*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
6136*4882a593Smuzhiyun /* Ok, those stripes are ready. We can start scheduling
6137*4882a593Smuzhiyun * reads on the source stripes.
6138*4882a593Smuzhiyun * The source stripes are determined by mapping the first and last
6139*4882a593Smuzhiyun * block on the destination stripes.
6140*4882a593Smuzhiyun */
6141*4882a593Smuzhiyun first_sector =
6142*4882a593Smuzhiyun raid5_compute_sector(conf, stripe_addr*(new_data_disks),
6143*4882a593Smuzhiyun 1, &dd_idx, NULL);
6144*4882a593Smuzhiyun last_sector =
6145*4882a593Smuzhiyun raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
6146*4882a593Smuzhiyun * new_data_disks - 1),
6147*4882a593Smuzhiyun 1, &dd_idx, NULL);
6148*4882a593Smuzhiyun if (last_sector >= mddev->dev_sectors)
6149*4882a593Smuzhiyun last_sector = mddev->dev_sectors - 1;
6150*4882a593Smuzhiyun while (first_sector <= last_sector) {
6151*4882a593Smuzhiyun sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1);
6152*4882a593Smuzhiyun set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
6153*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
6154*4882a593Smuzhiyun raid5_release_stripe(sh);
6155*4882a593Smuzhiyun first_sector += RAID5_STRIPE_SECTORS(conf);
6156*4882a593Smuzhiyun }
6157*4882a593Smuzhiyun /* Now that the sources are clearly marked, we can release
6158*4882a593Smuzhiyun * the destination stripes
6159*4882a593Smuzhiyun */
6160*4882a593Smuzhiyun while (!list_empty(&stripes)) {
6161*4882a593Smuzhiyun sh = list_entry(stripes.next, struct stripe_head, lru);
6162*4882a593Smuzhiyun list_del_init(&sh->lru);
6163*4882a593Smuzhiyun raid5_release_stripe(sh);
6164*4882a593Smuzhiyun }
6165*4882a593Smuzhiyun /* If this takes us to the resync_max point where we have to pause,
6166*4882a593Smuzhiyun * then we need to write out the superblock.
6167*4882a593Smuzhiyun */
6168*4882a593Smuzhiyun sector_nr += reshape_sectors;
6169*4882a593Smuzhiyun retn = reshape_sectors;
6170*4882a593Smuzhiyun finish:
6171*4882a593Smuzhiyun if (mddev->curr_resync_completed > mddev->resync_max ||
6172*4882a593Smuzhiyun (sector_nr - mddev->curr_resync_completed) * 2
6173*4882a593Smuzhiyun >= mddev->resync_max - mddev->curr_resync_completed) {
6174*4882a593Smuzhiyun /* Cannot proceed until we've updated the superblock... */
6175*4882a593Smuzhiyun wait_event(conf->wait_for_overlap,
6176*4882a593Smuzhiyun atomic_read(&conf->reshape_stripes) == 0
6177*4882a593Smuzhiyun || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
6178*4882a593Smuzhiyun if (atomic_read(&conf->reshape_stripes) != 0)
6179*4882a593Smuzhiyun goto ret;
6180*4882a593Smuzhiyun mddev->reshape_position = conf->reshape_progress;
6181*4882a593Smuzhiyun mddev->curr_resync_completed = sector_nr;
6182*4882a593Smuzhiyun if (!mddev->reshape_backwards)
6183*4882a593Smuzhiyun /* Can update recovery_offset */
6184*4882a593Smuzhiyun rdev_for_each(rdev, mddev)
6185*4882a593Smuzhiyun if (rdev->raid_disk >= 0 &&
6186*4882a593Smuzhiyun !test_bit(Journal, &rdev->flags) &&
6187*4882a593Smuzhiyun !test_bit(In_sync, &rdev->flags) &&
6188*4882a593Smuzhiyun rdev->recovery_offset < sector_nr)
6189*4882a593Smuzhiyun rdev->recovery_offset = sector_nr;
6190*4882a593Smuzhiyun conf->reshape_checkpoint = jiffies;
6191*4882a593Smuzhiyun set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6192*4882a593Smuzhiyun md_wakeup_thread(mddev->thread);
6193*4882a593Smuzhiyun wait_event(mddev->sb_wait,
6194*4882a593Smuzhiyun !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)
6195*4882a593Smuzhiyun || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
6196*4882a593Smuzhiyun if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6197*4882a593Smuzhiyun goto ret;
6198*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
6199*4882a593Smuzhiyun conf->reshape_safe = mddev->reshape_position;
6200*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
6201*4882a593Smuzhiyun wake_up(&conf->wait_for_overlap);
6202*4882a593Smuzhiyun sysfs_notify_dirent_safe(mddev->sysfs_completed);
6203*4882a593Smuzhiyun }
6204*4882a593Smuzhiyun ret:
6205*4882a593Smuzhiyun return retn;
6206*4882a593Smuzhiyun }
6207*4882a593Smuzhiyun
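/*
 * Resync/recovery entry point: handles at most one stripe
 * (RAID5_STRIPE_SECTORS) per call, delegating to reshape_request()
 * when a reshape is in progress and using the bitmap to skip regions
 * already known to be in sync.
 */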
6208*4882a593Smuzhiyun static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
6209*4882a593Smuzhiyun int *skipped)
6210*4882a593Smuzhiyun {
6211*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
6212*4882a593Smuzhiyun struct stripe_head *sh;
6213*4882a593Smuzhiyun sector_t max_sector = mddev->dev_sectors;
6214*4882a593Smuzhiyun sector_t sync_blocks;
6215*4882a593Smuzhiyun int still_degraded = 0;
6216*4882a593Smuzhiyun int i;
6217*4882a593Smuzhiyun
6218*4882a593Smuzhiyun if (sector_nr >= max_sector) {
6219*4882a593Smuzhiyun /* just being told to finish up .. nothing much to do */
6220*4882a593Smuzhiyun
6221*4882a593Smuzhiyun if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
6222*4882a593Smuzhiyun end_reshape(conf);
6223*4882a593Smuzhiyun return 0;
6224*4882a593Smuzhiyun }
6225*4882a593Smuzhiyun
6226*4882a593Smuzhiyun if (mddev->curr_resync < max_sector) /* aborted */
6227*4882a593Smuzhiyun md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
6228*4882a593Smuzhiyun &sync_blocks, 1);
6229*4882a593Smuzhiyun else /* completed sync */
6230*4882a593Smuzhiyun conf->fullsync = 0;
6231*4882a593Smuzhiyun md_bitmap_close_sync(mddev->bitmap);
6232*4882a593Smuzhiyun
6233*4882a593Smuzhiyun return 0;
6234*4882a593Smuzhiyun }
6235*4882a593Smuzhiyun
6236*4882a593Smuzhiyun /* Allow raid5_quiesce to complete */
6237*4882a593Smuzhiyun wait_event(conf->wait_for_overlap, conf->quiesce != 2);
6238*4882a593Smuzhiyun
6239*4882a593Smuzhiyun if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6240*4882a593Smuzhiyun return reshape_request(mddev, sector_nr, skipped);
6241*4882a593Smuzhiyun
6242*4882a593Smuzhiyun /* No need to check resync_max as we never do more than one
6243*4882a593Smuzhiyun * stripe, and as resync_max will always be on a chunk boundary,
6244*4882a593Smuzhiyun * if the check in md_do_sync didn't fire, there is no chance
6245*4882a593Smuzhiyun * of overstepping resync_max here
6246*4882a593Smuzhiyun */
6247*4882a593Smuzhiyun
6248*4882a593Smuzhiyun /* if there are too many failed drives and we are trying
6249*4882a593Smuzhiyun * to resync, then assert that we are finished, because there is
6250*4882a593Smuzhiyun * nothing we can do.
6251*4882a593Smuzhiyun */
6252*4882a593Smuzhiyun if (mddev->degraded >= conf->max_degraded &&
6253*4882a593Smuzhiyun test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6254*4882a593Smuzhiyun sector_t rv = mddev->dev_sectors - sector_nr;
6255*4882a593Smuzhiyun *skipped = 1;
6256*4882a593Smuzhiyun return rv;
6257*4882a593Smuzhiyun }
6258*4882a593Smuzhiyun if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
6259*4882a593Smuzhiyun !conf->fullsync &&
6260*4882a593Smuzhiyun !md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
6261*4882a593Smuzhiyun sync_blocks >= RAID5_STRIPE_SECTORS(conf)) {
6262*4882a593Smuzhiyun /* we can skip this block, and probably more */
6263*4882a593Smuzhiyun do_div(sync_blocks, RAID5_STRIPE_SECTORS(conf));
6264*4882a593Smuzhiyun *skipped = 1;
6265*4882a593Smuzhiyun /* keep things rounded to whole stripes */
6266*4882a593Smuzhiyun return sync_blocks * RAID5_STRIPE_SECTORS(conf);
6267*4882a593Smuzhiyun }
6268*4882a593Smuzhiyun
6269*4882a593Smuzhiyun md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);
6270*4882a593Smuzhiyun
6271*4882a593Smuzhiyun sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0);
6272*4882a593Smuzhiyun if (sh == NULL) {
6273*4882a593Smuzhiyun sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0);
6274*4882a593Smuzhiyun /* make sure we don't swamp the stripe cache if someone else
6275*4882a593Smuzhiyun * is trying to get access
6276*4882a593Smuzhiyun */
6277*4882a593Smuzhiyun schedule_timeout_uninterruptible(1);
6278*4882a593Smuzhiyun }
6279*4882a593Smuzhiyun /* Need to check if array will still be degraded after recovery/resync
6280*4882a593Smuzhiyun * Note in case of > 1 drive failures it's possible we're rebuilding
6281*4882a593Smuzhiyun * one drive while leaving another faulty drive in array.
6282*4882a593Smuzhiyun */
6283*4882a593Smuzhiyun rcu_read_lock();
6284*4882a593Smuzhiyun for (i = 0; i < conf->raid_disks; i++) {
6285*4882a593Smuzhiyun struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);
6286*4882a593Smuzhiyun
6287*4882a593Smuzhiyun if (rdev == NULL || test_bit(Faulty, &rdev->flags))
6288*4882a593Smuzhiyun still_degraded = 1;
6289*4882a593Smuzhiyun }
6290*4882a593Smuzhiyun rcu_read_unlock();
6291*4882a593Smuzhiyun
6292*4882a593Smuzhiyun md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
6293*4882a593Smuzhiyun
6294*4882a593Smuzhiyun set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
6295*4882a593Smuzhiyun set_bit(STRIPE_HANDLE, &sh->state);
6296*4882a593Smuzhiyun
6297*4882a593Smuzhiyun raid5_release_stripe(sh);
6298*4882a593Smuzhiyun
6299*4882a593Smuzhiyun return RAID5_STRIPE_SECTORS(conf);
6300*4882a593Smuzhiyun }
6301*4882a593Smuzhiyun
6302*4882a593Smuzhiyun static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio,
6303*4882a593Smuzhiyun unsigned int offset)
6304*4882a593Smuzhiyun {
6305*4882a593Smuzhiyun /* We may not be able to submit a whole bio at once as there
6306*4882a593Smuzhiyun * may not be enough stripe_heads available.
6307*4882a593Smuzhiyun * We cannot pre-allocate enough stripe_heads as we may need
6308*4882a593Smuzhiyun * more than exist in the cache (if we allow ever larger chunks).
6309*4882a593Smuzhiyun * So we do one stripe head at a time and record how many have
6310*4882a593Smuzhiyun * been done in conf->retry_read_offset so the read can be resumed.
6311*4882a593Smuzhiyun *
6312*4882a593Smuzhiyun * We *know* that this entire raid_bio is in one chunk, so
6313*4882a593Smuzhiyun * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
6314*4882a593Smuzhiyun */
6315*4882a593Smuzhiyun struct stripe_head *sh;
6316*4882a593Smuzhiyun int dd_idx;
6317*4882a593Smuzhiyun sector_t sector, logical_sector, last_sector;
6318*4882a593Smuzhiyun int scnt = 0;
6319*4882a593Smuzhiyun int handled = 0;
6320*4882a593Smuzhiyun
6321*4882a593Smuzhiyun logical_sector = raid_bio->bi_iter.bi_sector &
6322*4882a593Smuzhiyun ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
6323*4882a593Smuzhiyun sector = raid5_compute_sector(conf, logical_sector,
6324*4882a593Smuzhiyun 0, &dd_idx, NULL);
6325*4882a593Smuzhiyun last_sector = bio_end_sector(raid_bio);
6326*4882a593Smuzhiyun
6327*4882a593Smuzhiyun for (; logical_sector < last_sector;
6328*4882a593Smuzhiyun logical_sector += RAID5_STRIPE_SECTORS(conf),
6329*4882a593Smuzhiyun sector += RAID5_STRIPE_SECTORS(conf),
6330*4882a593Smuzhiyun scnt++) {
6331*4882a593Smuzhiyun
6332*4882a593Smuzhiyun if (scnt < offset)
6333*4882a593Smuzhiyun /* already done this stripe */
6334*4882a593Smuzhiyun continue;
6335*4882a593Smuzhiyun
6336*4882a593Smuzhiyun sh = raid5_get_active_stripe(conf, sector, 0, 1, 1);
6337*4882a593Smuzhiyun
6338*4882a593Smuzhiyun if (!sh) {
6339*4882a593Smuzhiyun /* failed to get a stripe - must wait */
6340*4882a593Smuzhiyun conf->retry_read_aligned = raid_bio;
6341*4882a593Smuzhiyun conf->retry_read_offset = scnt;
6342*4882a593Smuzhiyun return handled;
6343*4882a593Smuzhiyun }
6344*4882a593Smuzhiyun
6345*4882a593Smuzhiyun if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {
6346*4882a593Smuzhiyun raid5_release_stripe(sh);
6347*4882a593Smuzhiyun conf->retry_read_aligned = raid_bio;
6348*4882a593Smuzhiyun conf->retry_read_offset = scnt;
6349*4882a593Smuzhiyun return handled;
6350*4882a593Smuzhiyun }
6351*4882a593Smuzhiyun
6352*4882a593Smuzhiyun set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
6353*4882a593Smuzhiyun handle_stripe(sh);
6354*4882a593Smuzhiyun raid5_release_stripe(sh);
6355*4882a593Smuzhiyun handled++;
6356*4882a593Smuzhiyun }
6357*4882a593Smuzhiyun
6358*4882a593Smuzhiyun bio_endio(raid_bio);
6359*4882a593Smuzhiyun
6360*4882a593Smuzhiyun if (atomic_dec_and_test(&conf->active_aligned_reads))
6361*4882a593Smuzhiyun wake_up(&conf->wait_for_quiescent);
6362*4882a593Smuzhiyun return handled;
6363*4882a593Smuzhiyun }
6364*4882a593Smuzhiyun
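/*
 * Pull up to MAX_STRIPE_BATCH stripes off the priority lists and
 * handle them.  Called with conf->device_lock held; the lock is
 * dropped around the actual stripe handling and re-taken before the
 * batch size is returned.
 */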
6365*4882a593Smuzhiyun static int handle_active_stripes(struct r5conf *conf, int group,
6366*4882a593Smuzhiyun struct r5worker *worker,
6367*4882a593Smuzhiyun struct list_head *temp_inactive_list)
6368*4882a593Smuzhiyun __releases(&conf->device_lock)
6369*4882a593Smuzhiyun __acquires(&conf->device_lock)
6370*4882a593Smuzhiyun {
6371*4882a593Smuzhiyun struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
6372*4882a593Smuzhiyun int i, batch_size = 0, hash;
6373*4882a593Smuzhiyun bool release_inactive = false;
6374*4882a593Smuzhiyun
6375*4882a593Smuzhiyun while (batch_size < MAX_STRIPE_BATCH &&
6376*4882a593Smuzhiyun (sh = __get_priority_stripe(conf, group)) != NULL)
6377*4882a593Smuzhiyun batch[batch_size++] = sh;
6378*4882a593Smuzhiyun
6379*4882a593Smuzhiyun if (batch_size == 0) {
6380*4882a593Smuzhiyun for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
6381*4882a593Smuzhiyun if (!list_empty(temp_inactive_list + i))
6382*4882a593Smuzhiyun break;
6383*4882a593Smuzhiyun if (i == NR_STRIPE_HASH_LOCKS) {
6384*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
6385*4882a593Smuzhiyun log_flush_stripe_to_raid(conf);
6386*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
6387*4882a593Smuzhiyun return batch_size;
6388*4882a593Smuzhiyun }
6389*4882a593Smuzhiyun release_inactive = true;
6390*4882a593Smuzhiyun }
6391*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
6392*4882a593Smuzhiyun
6393*4882a593Smuzhiyun release_inactive_stripe_list(conf, temp_inactive_list,
6394*4882a593Smuzhiyun NR_STRIPE_HASH_LOCKS);
6395*4882a593Smuzhiyun
6396*4882a593Smuzhiyun r5l_flush_stripe_to_raid(conf->log);
6397*4882a593Smuzhiyun if (release_inactive) {
6398*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
6399*4882a593Smuzhiyun return 0;
6400*4882a593Smuzhiyun }
6401*4882a593Smuzhiyun
6402*4882a593Smuzhiyun for (i = 0; i < batch_size; i++)
6403*4882a593Smuzhiyun handle_stripe(batch[i]);
6404*4882a593Smuzhiyun log_write_stripe_run(conf);
6405*4882a593Smuzhiyun
6406*4882a593Smuzhiyun cond_resched();
6407*4882a593Smuzhiyun
6408*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
6409*4882a593Smuzhiyun for (i = 0; i < batch_size; i++) {
6410*4882a593Smuzhiyun hash = batch[i]->hash_lock_index;
6411*4882a593Smuzhiyun __release_stripe(conf, batch[i], &temp_inactive_list[hash]);
6412*4882a593Smuzhiyun }
6413*4882a593Smuzhiyun return batch_size;
6414*4882a593Smuzhiyun }
6415*4882a593Smuzhiyun
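/*
 * Work function run by each raid5 worker thread: repeatedly drains the
 * group's released-stripe list and handles batches of active stripes
 * until both come up empty.
 */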
6416*4882a593Smuzhiyun static void raid5_do_work(struct work_struct *work)
6417*4882a593Smuzhiyun {
6418*4882a593Smuzhiyun struct r5worker *worker = container_of(work, struct r5worker, work);
6419*4882a593Smuzhiyun struct r5worker_group *group = worker->group;
6420*4882a593Smuzhiyun struct r5conf *conf = group->conf;
6421*4882a593Smuzhiyun struct mddev *mddev = conf->mddev;
6422*4882a593Smuzhiyun int group_id = group - conf->worker_groups;
6423*4882a593Smuzhiyun int handled;
6424*4882a593Smuzhiyun struct blk_plug plug;
6425*4882a593Smuzhiyun
6426*4882a593Smuzhiyun pr_debug("+++ raid5worker active\n");
6427*4882a593Smuzhiyun
6428*4882a593Smuzhiyun blk_start_plug(&plug);
6429*4882a593Smuzhiyun handled = 0;
6430*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
6431*4882a593Smuzhiyun while (1) {
6432*4882a593Smuzhiyun int batch_size, released;
6433*4882a593Smuzhiyun
6434*4882a593Smuzhiyun released = release_stripe_list(conf, worker->temp_inactive_list);
6435*4882a593Smuzhiyun
6436*4882a593Smuzhiyun batch_size = handle_active_stripes(conf, group_id, worker,
6437*4882a593Smuzhiyun worker->temp_inactive_list);
6438*4882a593Smuzhiyun worker->working = false;
6439*4882a593Smuzhiyun if (!batch_size && !released)
6440*4882a593Smuzhiyun break;
6441*4882a593Smuzhiyun handled += batch_size;
6442*4882a593Smuzhiyun wait_event_lock_irq(mddev->sb_wait,
6443*4882a593Smuzhiyun !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
6444*4882a593Smuzhiyun conf->device_lock);
6445*4882a593Smuzhiyun }
6446*4882a593Smuzhiyun pr_debug("%d stripes handled\n", handled);
6447*4882a593Smuzhiyun
6448*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
6449*4882a593Smuzhiyun
6450*4882a593Smuzhiyun flush_deferred_bios(conf);
6451*4882a593Smuzhiyun
6452*4882a593Smuzhiyun r5l_flush_stripe_to_raid(conf->log);
6453*4882a593Smuzhiyun
6454*4882a593Smuzhiyun async_tx_issue_pending_all();
6455*4882a593Smuzhiyun blk_finish_plug(&plug);
6456*4882a593Smuzhiyun
6457*4882a593Smuzhiyun pr_debug("--- raid5worker inactive\n");
6458*4882a593Smuzhiyun }
6459*4882a593Smuzhiyun
6460*4882a593Smuzhiyun /*
6461*4882a593Smuzhiyun * This is our raid5 kernel thread.
6462*4882a593Smuzhiyun *
6463*4882a593Smuzhiyun * We scan the hash table for stripes which can be handled now.
6464*4882a593Smuzhiyun * During the scan, completed stripes are saved for us by the interrupt
6465*4882a593Smuzhiyun * handler, so that they will not have to wait for our next wakeup.
6466*4882a593Smuzhiyun */
6467*4882a593Smuzhiyun static void raid5d(struct md_thread *thread)
6468*4882a593Smuzhiyun {
6469*4882a593Smuzhiyun struct mddev *mddev = thread->mddev;
6470*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
6471*4882a593Smuzhiyun int handled;
6472*4882a593Smuzhiyun struct blk_plug plug;
6473*4882a593Smuzhiyun
6474*4882a593Smuzhiyun pr_debug("+++ raid5d active\n");
6475*4882a593Smuzhiyun
6476*4882a593Smuzhiyun md_check_recovery(mddev);
6477*4882a593Smuzhiyun
6478*4882a593Smuzhiyun blk_start_plug(&plug);
6479*4882a593Smuzhiyun handled = 0;
6480*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
6481*4882a593Smuzhiyun while (1) {
6482*4882a593Smuzhiyun struct bio *bio;
6483*4882a593Smuzhiyun int batch_size, released;
6484*4882a593Smuzhiyun unsigned int offset;
6485*4882a593Smuzhiyun
6486*4882a593Smuzhiyun released = release_stripe_list(conf, conf->temp_inactive_list);
6487*4882a593Smuzhiyun if (released)
6488*4882a593Smuzhiyun clear_bit(R5_DID_ALLOC, &conf->cache_state);
6489*4882a593Smuzhiyun
6490*4882a593Smuzhiyun if (!list_empty(&conf->bitmap_list)) {
6492*4882a593Smuzhiyun /* Now is a good time to flush some bitmap updates */
6493*4882a593Smuzhiyun conf->seq_flush++;
6494*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
6495*4882a593Smuzhiyun md_bitmap_unplug(mddev->bitmap);
6496*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
6497*4882a593Smuzhiyun conf->seq_write = conf->seq_flush;
6498*4882a593Smuzhiyun activate_bit_delay(conf, conf->temp_inactive_list);
6499*4882a593Smuzhiyun }
6500*4882a593Smuzhiyun raid5_activate_delayed(conf);
6501*4882a593Smuzhiyun
6502*4882a593Smuzhiyun while ((bio = remove_bio_from_retry(conf, &offset))) {
6503*4882a593Smuzhiyun int ok;
6504*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
6505*4882a593Smuzhiyun ok = retry_aligned_read(conf, bio, offset);
6506*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
6507*4882a593Smuzhiyun if (!ok)
6508*4882a593Smuzhiyun break;
6509*4882a593Smuzhiyun handled++;
6510*4882a593Smuzhiyun }
6511*4882a593Smuzhiyun
6512*4882a593Smuzhiyun batch_size = handle_active_stripes(conf, ANY_GROUP, NULL,
6513*4882a593Smuzhiyun conf->temp_inactive_list);
6514*4882a593Smuzhiyun if (!batch_size && !released)
6515*4882a593Smuzhiyun break;
6516*4882a593Smuzhiyun handled += batch_size;
6517*4882a593Smuzhiyun
6518*4882a593Smuzhiyun if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) {
6519*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
6520*4882a593Smuzhiyun md_check_recovery(mddev);
6521*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
6522*4882a593Smuzhiyun
6523*4882a593Smuzhiyun /*
6524*4882a593Smuzhiyun * Waiting on MD_SB_CHANGE_PENDING below may deadlock,
6525*4882a593Smuzhiyun * since md_check_recovery() is needed to clear
6526*4882a593Smuzhiyun * the flag when using mdmon.
6527*4882a593Smuzhiyun */
6528*4882a593Smuzhiyun continue;
6529*4882a593Smuzhiyun }
6530*4882a593Smuzhiyun
6531*4882a593Smuzhiyun wait_event_lock_irq(mddev->sb_wait,
6532*4882a593Smuzhiyun !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
6533*4882a593Smuzhiyun conf->device_lock);
6534*4882a593Smuzhiyun }
6535*4882a593Smuzhiyun pr_debug("%d stripes handled\n", handled);
6536*4882a593Smuzhiyun
6537*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
6538*4882a593Smuzhiyun if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
6539*4882a593Smuzhiyun mutex_trylock(&conf->cache_size_mutex)) {
6540*4882a593Smuzhiyun grow_one_stripe(conf, __GFP_NOWARN);
6541*4882a593Smuzhiyun /* Set flag even if allocation failed. This helps
6542*4882a593Smuzhiyun * slow down allocation requests when mem is short
6543*4882a593Smuzhiyun */
6544*4882a593Smuzhiyun set_bit(R5_DID_ALLOC, &conf->cache_state);
6545*4882a593Smuzhiyun mutex_unlock(&conf->cache_size_mutex);
6546*4882a593Smuzhiyun }
6547*4882a593Smuzhiyun
6548*4882a593Smuzhiyun flush_deferred_bios(conf);
6549*4882a593Smuzhiyun
6550*4882a593Smuzhiyun r5l_flush_stripe_to_raid(conf->log);
6551*4882a593Smuzhiyun
6552*4882a593Smuzhiyun async_tx_issue_pending_all();
6553*4882a593Smuzhiyun blk_finish_plug(&plug);
6554*4882a593Smuzhiyun
6555*4882a593Smuzhiyun pr_debug("--- raid5d inactive\n");
6556*4882a593Smuzhiyun }
6557*4882a593Smuzhiyun
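/*
 * sysfs: stripe_cache_size -- reports conf->min_nr_stripes and, on
 * write, resizes the stripe cache via raid5_set_cache_size().
 */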
6558*4882a593Smuzhiyun static ssize_t
6559*4882a593Smuzhiyun raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
6560*4882a593Smuzhiyun {
6561*4882a593Smuzhiyun struct r5conf *conf;
6562*4882a593Smuzhiyun int ret = 0;
6563*4882a593Smuzhiyun spin_lock(&mddev->lock);
6564*4882a593Smuzhiyun conf = mddev->private;
6565*4882a593Smuzhiyun if (conf)
6566*4882a593Smuzhiyun ret = sprintf(page, "%d\n", conf->min_nr_stripes);
6567*4882a593Smuzhiyun spin_unlock(&mddev->lock);
6568*4882a593Smuzhiyun return ret;
6569*4882a593Smuzhiyun }
6570*4882a593Smuzhiyun
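/*
 * Resize the stripe cache: values outside 17..32768 are rejected;
 * surplus stripes are dropped first, then the cache is grown one
 * stripe at a time until it reaches the requested size (or -ENOMEM).
 */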
6571*4882a593Smuzhiyun int
6572*4882a593Smuzhiyun raid5_set_cache_size(struct mddev *mddev, int size)
6573*4882a593Smuzhiyun {
6574*4882a593Smuzhiyun int result = 0;
6575*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
6576*4882a593Smuzhiyun
6577*4882a593Smuzhiyun if (size <= 16 || size > 32768)
6578*4882a593Smuzhiyun return -EINVAL;
6579*4882a593Smuzhiyun
6580*4882a593Smuzhiyun conf->min_nr_stripes = size;
6581*4882a593Smuzhiyun mutex_lock(&conf->cache_size_mutex);
6582*4882a593Smuzhiyun while (size < conf->max_nr_stripes &&
6583*4882a593Smuzhiyun drop_one_stripe(conf))
6584*4882a593Smuzhiyun ;
6585*4882a593Smuzhiyun mutex_unlock(&conf->cache_size_mutex);
6586*4882a593Smuzhiyun
6587*4882a593Smuzhiyun md_allow_write(mddev);
6588*4882a593Smuzhiyun
6589*4882a593Smuzhiyun mutex_lock(&conf->cache_size_mutex);
6590*4882a593Smuzhiyun while (size > conf->max_nr_stripes)
6591*4882a593Smuzhiyun if (!grow_one_stripe(conf, GFP_KERNEL)) {
6592*4882a593Smuzhiyun conf->min_nr_stripes = conf->max_nr_stripes;
6593*4882a593Smuzhiyun result = -ENOMEM;
6594*4882a593Smuzhiyun break;
6595*4882a593Smuzhiyun }
6596*4882a593Smuzhiyun mutex_unlock(&conf->cache_size_mutex);
6597*4882a593Smuzhiyun
6598*4882a593Smuzhiyun return result;
6599*4882a593Smuzhiyun }
6600*4882a593Smuzhiyun EXPORT_SYMBOL(raid5_set_cache_size);
6601*4882a593Smuzhiyun
6602*4882a593Smuzhiyun static ssize_t
6603*4882a593Smuzhiyun raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
6604*4882a593Smuzhiyun {
6605*4882a593Smuzhiyun struct r5conf *conf;
6606*4882a593Smuzhiyun unsigned long new;
6607*4882a593Smuzhiyun int err;
6608*4882a593Smuzhiyun
6609*4882a593Smuzhiyun if (len >= PAGE_SIZE)
6610*4882a593Smuzhiyun return -EINVAL;
6611*4882a593Smuzhiyun if (kstrtoul(page, 10, &new))
6612*4882a593Smuzhiyun return -EINVAL;
6613*4882a593Smuzhiyun err = mddev_lock(mddev);
6614*4882a593Smuzhiyun if (err)
6615*4882a593Smuzhiyun return err;
6616*4882a593Smuzhiyun conf = mddev->private;
6617*4882a593Smuzhiyun if (!conf)
6618*4882a593Smuzhiyun err = -ENODEV;
6619*4882a593Smuzhiyun else
6620*4882a593Smuzhiyun err = raid5_set_cache_size(mddev, new);
6621*4882a593Smuzhiyun mddev_unlock(mddev);
6622*4882a593Smuzhiyun
6623*4882a593Smuzhiyun return err ?: len;
6624*4882a593Smuzhiyun }
6625*4882a593Smuzhiyun
6626*4882a593Smuzhiyun static struct md_sysfs_entry
6627*4882a593Smuzhiyun raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
6628*4882a593Smuzhiyun raid5_show_stripe_cache_size,
6629*4882a593Smuzhiyun raid5_store_stripe_cache_size);
6630*4882a593Smuzhiyun
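/*
 * sysfs: rmw_level -- selects whether read-modify-write updates are
 * disabled, enabled or preferred over reconstruct-write; anything
 * other than disable additionally requires xor_syndrome support in
 * the active raid6 algorithm.
 */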
6631*4882a593Smuzhiyun static ssize_t
6632*4882a593Smuzhiyun raid5_show_rmw_level(struct mddev *mddev, char *page)
6633*4882a593Smuzhiyun {
6634*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
6635*4882a593Smuzhiyun if (conf)
6636*4882a593Smuzhiyun return sprintf(page, "%d\n", conf->rmw_level);
6637*4882a593Smuzhiyun else
6638*4882a593Smuzhiyun return 0;
6639*4882a593Smuzhiyun }
6640*4882a593Smuzhiyun
6641*4882a593Smuzhiyun static ssize_t
6642*4882a593Smuzhiyun raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len)
6643*4882a593Smuzhiyun {
6644*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
6645*4882a593Smuzhiyun unsigned long new;
6646*4882a593Smuzhiyun
6647*4882a593Smuzhiyun if (!conf)
6648*4882a593Smuzhiyun return -ENODEV;
6649*4882a593Smuzhiyun
6650*4882a593Smuzhiyun if (len >= PAGE_SIZE)
6651*4882a593Smuzhiyun return -EINVAL;
6652*4882a593Smuzhiyun
6653*4882a593Smuzhiyun if (kstrtoul(page, 10, &new))
6654*4882a593Smuzhiyun return -EINVAL;
6655*4882a593Smuzhiyun
6656*4882a593Smuzhiyun if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome)
6657*4882a593Smuzhiyun return -EINVAL;
6658*4882a593Smuzhiyun
6659*4882a593Smuzhiyun if (new != PARITY_DISABLE_RMW &&
6660*4882a593Smuzhiyun new != PARITY_ENABLE_RMW &&
6661*4882a593Smuzhiyun new != PARITY_PREFER_RMW)
6662*4882a593Smuzhiyun return -EINVAL;
6663*4882a593Smuzhiyun
6664*4882a593Smuzhiyun conf->rmw_level = new;
6665*4882a593Smuzhiyun return len;
6666*4882a593Smuzhiyun }
6667*4882a593Smuzhiyun
6668*4882a593Smuzhiyun static struct md_sysfs_entry
6669*4882a593Smuzhiyun raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR,
6670*4882a593Smuzhiyun raid5_show_rmw_level,
6671*4882a593Smuzhiyun raid5_store_rmw_level);
6672*4882a593Smuzhiyun
6673*4882a593Smuzhiyun static ssize_t
6674*4882a593Smuzhiyun raid5_show_stripe_size(struct mddev *mddev, char *page)
6675*4882a593Smuzhiyun {
6676*4882a593Smuzhiyun struct r5conf *conf;
6677*4882a593Smuzhiyun int ret = 0;
6678*4882a593Smuzhiyun
6679*4882a593Smuzhiyun spin_lock(&mddev->lock);
6680*4882a593Smuzhiyun conf = mddev->private;
6681*4882a593Smuzhiyun if (conf)
6682*4882a593Smuzhiyun ret = sprintf(page, "%lu\n", RAID5_STRIPE_SIZE(conf));
6683*4882a593Smuzhiyun spin_unlock(&mddev->lock);
6684*4882a593Smuzhiyun return ret;
6685*4882a593Smuzhiyun }
6686*4882a593Smuzhiyun
6687*4882a593Smuzhiyun #if PAGE_SIZE != DEFAULT_STRIPE_SIZE
6688*4882a593Smuzhiyun static ssize_t
6689*4882a593Smuzhiyun raid5_store_stripe_size(struct mddev *mddev, const char *page, size_t len)
6690*4882a593Smuzhiyun {
6691*4882a593Smuzhiyun struct r5conf *conf;
6692*4882a593Smuzhiyun unsigned long new;
6693*4882a593Smuzhiyun int err;
6694*4882a593Smuzhiyun int size;
6695*4882a593Smuzhiyun
6696*4882a593Smuzhiyun if (len >= PAGE_SIZE)
6697*4882a593Smuzhiyun return -EINVAL;
6698*4882a593Smuzhiyun if (kstrtoul(page, 10, &new))
6699*4882a593Smuzhiyun return -EINVAL;
6700*4882a593Smuzhiyun
6701*4882a593Smuzhiyun /*
6702*4882a593Smuzhiyun * The value must not be bigger than PAGE_SIZE. It must be a
6703*4882a593Smuzhiyun * multiple of DEFAULT_STRIPE_SIZE and a power of two.
6705*4882a593Smuzhiyun */
6706*4882a593Smuzhiyun if (new % DEFAULT_STRIPE_SIZE != 0 ||
6707*4882a593Smuzhiyun new > PAGE_SIZE || new == 0 ||
6708*4882a593Smuzhiyun new != roundup_pow_of_two(new))
6709*4882a593Smuzhiyun return -EINVAL;
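/*
 * e.g. (assuming 64KiB pages and DEFAULT_STRIPE_SIZE == 4096) the
 * accepted values are 4096, 8192, 16384, 32768 and 65536.
 */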
6710*4882a593Smuzhiyun
6711*4882a593Smuzhiyun err = mddev_lock(mddev);
6712*4882a593Smuzhiyun if (err)
6713*4882a593Smuzhiyun return err;
6714*4882a593Smuzhiyun
6715*4882a593Smuzhiyun conf = mddev->private;
6716*4882a593Smuzhiyun if (!conf) {
6717*4882a593Smuzhiyun err = -ENODEV;
6718*4882a593Smuzhiyun goto out_unlock;
6719*4882a593Smuzhiyun }
6720*4882a593Smuzhiyun
6721*4882a593Smuzhiyun if (new == conf->stripe_size)
6722*4882a593Smuzhiyun goto out_unlock;
6723*4882a593Smuzhiyun
6724*4882a593Smuzhiyun pr_debug("md/raid: change stripe_size from %lu to %lu\n",
6725*4882a593Smuzhiyun conf->stripe_size, new);
6726*4882a593Smuzhiyun
6727*4882a593Smuzhiyun if (mddev->sync_thread ||
6728*4882a593Smuzhiyun test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6729*4882a593Smuzhiyun mddev->reshape_position != MaxSector ||
6730*4882a593Smuzhiyun mddev->sysfs_active) {
6731*4882a593Smuzhiyun err = -EBUSY;
6732*4882a593Smuzhiyun goto out_unlock;
6733*4882a593Smuzhiyun }
6734*4882a593Smuzhiyun
6735*4882a593Smuzhiyun mddev_suspend(mddev);
6736*4882a593Smuzhiyun mutex_lock(&conf->cache_size_mutex);
6737*4882a593Smuzhiyun size = conf->max_nr_stripes;
6738*4882a593Smuzhiyun
6739*4882a593Smuzhiyun shrink_stripes(conf);
6740*4882a593Smuzhiyun
6741*4882a593Smuzhiyun conf->stripe_size = new;
6742*4882a593Smuzhiyun conf->stripe_shift = ilog2(new) - 9;
6743*4882a593Smuzhiyun conf->stripe_sectors = new >> 9;
6744*4882a593Smuzhiyun if (grow_stripes(conf, size)) {
6745*4882a593Smuzhiyun pr_warn("md/raid:%s: couldn't allocate buffers\n",
6746*4882a593Smuzhiyun mdname(mddev));
6747*4882a593Smuzhiyun err = -ENOMEM;
6748*4882a593Smuzhiyun }
6749*4882a593Smuzhiyun mutex_unlock(&conf->cache_size_mutex);
6750*4882a593Smuzhiyun mddev_resume(mddev);
6751*4882a593Smuzhiyun
6752*4882a593Smuzhiyun out_unlock:
6753*4882a593Smuzhiyun mddev_unlock(mddev);
6754*4882a593Smuzhiyun return err ?: len;
6755*4882a593Smuzhiyun }
6756*4882a593Smuzhiyun
6757*4882a593Smuzhiyun static struct md_sysfs_entry
6758*4882a593Smuzhiyun raid5_stripe_size = __ATTR(stripe_size, 0644,
6759*4882a593Smuzhiyun raid5_show_stripe_size,
6760*4882a593Smuzhiyun raid5_store_stripe_size);
6761*4882a593Smuzhiyun #else
6762*4882a593Smuzhiyun static struct md_sysfs_entry
6763*4882a593Smuzhiyun raid5_stripe_size = __ATTR(stripe_size, 0444,
6764*4882a593Smuzhiyun raid5_show_stripe_size,
6765*4882a593Smuzhiyun NULL);
6766*4882a593Smuzhiyun #endif
6767*4882a593Smuzhiyun
6768*4882a593Smuzhiyun static ssize_t
6769*4882a593Smuzhiyun raid5_show_preread_threshold(struct mddev *mddev, char *page)
6770*4882a593Smuzhiyun {
6771*4882a593Smuzhiyun struct r5conf *conf;
6772*4882a593Smuzhiyun int ret = 0;
6773*4882a593Smuzhiyun spin_lock(&mddev->lock);
6774*4882a593Smuzhiyun conf = mddev->private;
6775*4882a593Smuzhiyun if (conf)
6776*4882a593Smuzhiyun ret = sprintf(page, "%d\n", conf->bypass_threshold);
6777*4882a593Smuzhiyun spin_unlock(&mddev->lock);
6778*4882a593Smuzhiyun return ret;
6779*4882a593Smuzhiyun }
6780*4882a593Smuzhiyun
6781*4882a593Smuzhiyun static ssize_t
6782*4882a593Smuzhiyun raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
6783*4882a593Smuzhiyun {
6784*4882a593Smuzhiyun struct r5conf *conf;
6785*4882a593Smuzhiyun unsigned long new;
6786*4882a593Smuzhiyun int err;
6787*4882a593Smuzhiyun
6788*4882a593Smuzhiyun if (len >= PAGE_SIZE)
6789*4882a593Smuzhiyun return -EINVAL;
6790*4882a593Smuzhiyun if (kstrtoul(page, 10, &new))
6791*4882a593Smuzhiyun return -EINVAL;
6792*4882a593Smuzhiyun
6793*4882a593Smuzhiyun err = mddev_lock(mddev);
6794*4882a593Smuzhiyun if (err)
6795*4882a593Smuzhiyun return err;
6796*4882a593Smuzhiyun conf = mddev->private;
6797*4882a593Smuzhiyun if (!conf)
6798*4882a593Smuzhiyun err = -ENODEV;
6799*4882a593Smuzhiyun else if (new > conf->min_nr_stripes)
6800*4882a593Smuzhiyun err = -EINVAL;
6801*4882a593Smuzhiyun else
6802*4882a593Smuzhiyun conf->bypass_threshold = new;
6803*4882a593Smuzhiyun mddev_unlock(mddev);
6804*4882a593Smuzhiyun return err ?: len;
6805*4882a593Smuzhiyun }
6806*4882a593Smuzhiyun
6807*4882a593Smuzhiyun static struct md_sysfs_entry
6808*4882a593Smuzhiyun raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
6809*4882a593Smuzhiyun S_IRUGO | S_IWUSR,
6810*4882a593Smuzhiyun raid5_show_preread_threshold,
6811*4882a593Smuzhiyun raid5_store_preread_threshold);
6812*4882a593Smuzhiyun
6813*4882a593Smuzhiyun static ssize_t
6814*4882a593Smuzhiyun raid5_show_skip_copy(struct mddev *mddev, char *page)
6815*4882a593Smuzhiyun {
6816*4882a593Smuzhiyun struct r5conf *conf;
6817*4882a593Smuzhiyun int ret = 0;
6818*4882a593Smuzhiyun spin_lock(&mddev->lock);
6819*4882a593Smuzhiyun conf = mddev->private;
6820*4882a593Smuzhiyun if (conf)
6821*4882a593Smuzhiyun ret = sprintf(page, "%d\n", conf->skip_copy);
6822*4882a593Smuzhiyun spin_unlock(&mddev->lock);
6823*4882a593Smuzhiyun return ret;
6824*4882a593Smuzhiyun }
6825*4882a593Smuzhiyun
6826*4882a593Smuzhiyun static ssize_t
6827*4882a593Smuzhiyun raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
6828*4882a593Smuzhiyun {
6829*4882a593Smuzhiyun struct r5conf *conf;
6830*4882a593Smuzhiyun unsigned long new;
6831*4882a593Smuzhiyun int err;
6832*4882a593Smuzhiyun
6833*4882a593Smuzhiyun if (len >= PAGE_SIZE)
6834*4882a593Smuzhiyun return -EINVAL;
6835*4882a593Smuzhiyun if (kstrtoul(page, 10, &new))
6836*4882a593Smuzhiyun return -EINVAL;
6837*4882a593Smuzhiyun new = !!new;
6838*4882a593Smuzhiyun
6839*4882a593Smuzhiyun err = mddev_lock(mddev);
6840*4882a593Smuzhiyun if (err)
6841*4882a593Smuzhiyun return err;
6842*4882a593Smuzhiyun conf = mddev->private;
6843*4882a593Smuzhiyun if (!conf)
6844*4882a593Smuzhiyun err = -ENODEV;
6845*4882a593Smuzhiyun else if (new != conf->skip_copy) {
6846*4882a593Smuzhiyun struct request_queue *q = mddev->queue;
6847*4882a593Smuzhiyun
6848*4882a593Smuzhiyun mddev_suspend(mddev);
6849*4882a593Smuzhiyun conf->skip_copy = new;
6850*4882a593Smuzhiyun if (new)
6851*4882a593Smuzhiyun blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
6852*4882a593Smuzhiyun else
6853*4882a593Smuzhiyun blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
6854*4882a593Smuzhiyun mddev_resume(mddev);
6855*4882a593Smuzhiyun }
6856*4882a593Smuzhiyun mddev_unlock(mddev);
6857*4882a593Smuzhiyun return err ?: len;
6858*4882a593Smuzhiyun }
6859*4882a593Smuzhiyun
6860*4882a593Smuzhiyun static struct md_sysfs_entry
6861*4882a593Smuzhiyun raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR,
6862*4882a593Smuzhiyun raid5_show_skip_copy,
6863*4882a593Smuzhiyun raid5_store_skip_copy);
6864*4882a593Smuzhiyun
6865*4882a593Smuzhiyun static ssize_t
6866*4882a593Smuzhiyun stripe_cache_active_show(struct mddev *mddev, char *page)
6867*4882a593Smuzhiyun {
6868*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
6869*4882a593Smuzhiyun if (conf)
6870*4882a593Smuzhiyun return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
6871*4882a593Smuzhiyun else
6872*4882a593Smuzhiyun return 0;
6873*4882a593Smuzhiyun }
6874*4882a593Smuzhiyun
6875*4882a593Smuzhiyun static struct md_sysfs_entry
6876*4882a593Smuzhiyun raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
6877*4882a593Smuzhiyun
6878*4882a593Smuzhiyun static ssize_t
6879*4882a593Smuzhiyun raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
6880*4882a593Smuzhiyun {
6881*4882a593Smuzhiyun struct r5conf *conf;
6882*4882a593Smuzhiyun int ret = 0;
6883*4882a593Smuzhiyun spin_lock(&mddev->lock);
6884*4882a593Smuzhiyun conf = mddev->private;
6885*4882a593Smuzhiyun if (conf)
6886*4882a593Smuzhiyun ret = sprintf(page, "%d\n", conf->worker_cnt_per_group);
6887*4882a593Smuzhiyun spin_unlock(&mddev->lock);
6888*4882a593Smuzhiyun return ret;
6889*4882a593Smuzhiyun }
6890*4882a593Smuzhiyun
6891*4882a593Smuzhiyun static int alloc_thread_groups(struct r5conf *conf, int cnt,
6892*4882a593Smuzhiyun int *group_cnt,
6893*4882a593Smuzhiyun struct r5worker_group **worker_groups);
6894*4882a593Smuzhiyun static ssize_t
6895*4882a593Smuzhiyun raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
6896*4882a593Smuzhiyun {
6897*4882a593Smuzhiyun struct r5conf *conf;
6898*4882a593Smuzhiyun unsigned int new;
6899*4882a593Smuzhiyun int err;
6900*4882a593Smuzhiyun struct r5worker_group *new_groups, *old_groups;
6901*4882a593Smuzhiyun int group_cnt;
6902*4882a593Smuzhiyun
6903*4882a593Smuzhiyun if (len >= PAGE_SIZE)
6904*4882a593Smuzhiyun return -EINVAL;
6905*4882a593Smuzhiyun if (kstrtouint(page, 10, &new))
6906*4882a593Smuzhiyun return -EINVAL;
6907*4882a593Smuzhiyun /* 8192 should be big enough */
6908*4882a593Smuzhiyun if (new > 8192)
6909*4882a593Smuzhiyun return -EINVAL;
6910*4882a593Smuzhiyun
6911*4882a593Smuzhiyun err = mddev_lock(mddev);
6912*4882a593Smuzhiyun if (err)
6913*4882a593Smuzhiyun return err;
6914*4882a593Smuzhiyun conf = mddev->private;
6915*4882a593Smuzhiyun if (!conf)
6916*4882a593Smuzhiyun err = -ENODEV;
6917*4882a593Smuzhiyun else if (new != conf->worker_cnt_per_group) {
6918*4882a593Smuzhiyun mddev_suspend(mddev);
6919*4882a593Smuzhiyun
6920*4882a593Smuzhiyun old_groups = conf->worker_groups;
6921*4882a593Smuzhiyun if (old_groups)
6922*4882a593Smuzhiyun flush_workqueue(raid5_wq);
6923*4882a593Smuzhiyun
6924*4882a593Smuzhiyun err = alloc_thread_groups(conf, new, &group_cnt, &new_groups);
6925*4882a593Smuzhiyun if (!err) {
6926*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
6927*4882a593Smuzhiyun conf->group_cnt = group_cnt;
6928*4882a593Smuzhiyun conf->worker_cnt_per_group = new;
6929*4882a593Smuzhiyun conf->worker_groups = new_groups;
6930*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
6931*4882a593Smuzhiyun
6932*4882a593Smuzhiyun if (old_groups)
6933*4882a593Smuzhiyun kfree(old_groups[0].workers);
6934*4882a593Smuzhiyun kfree(old_groups);
6935*4882a593Smuzhiyun }
6936*4882a593Smuzhiyun mddev_resume(mddev);
6937*4882a593Smuzhiyun }
6938*4882a593Smuzhiyun mddev_unlock(mddev);
6939*4882a593Smuzhiyun
6940*4882a593Smuzhiyun return err ?: len;
6941*4882a593Smuzhiyun }
6942*4882a593Smuzhiyun
6943*4882a593Smuzhiyun static struct md_sysfs_entry
6944*4882a593Smuzhiyun raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR,
6945*4882a593Smuzhiyun raid5_show_group_thread_cnt,
6946*4882a593Smuzhiyun raid5_store_group_thread_cnt);
6947*4882a593Smuzhiyun
6948*4882a593Smuzhiyun static struct attribute *raid5_attrs[] = {
6949*4882a593Smuzhiyun &raid5_stripecache_size.attr,
6950*4882a593Smuzhiyun &raid5_stripecache_active.attr,
6951*4882a593Smuzhiyun &raid5_preread_bypass_threshold.attr,
6952*4882a593Smuzhiyun &raid5_group_thread_cnt.attr,
6953*4882a593Smuzhiyun &raid5_skip_copy.attr,
6954*4882a593Smuzhiyun &raid5_rmw_level.attr,
6955*4882a593Smuzhiyun &raid5_stripe_size.attr,
6956*4882a593Smuzhiyun &r5c_journal_mode.attr,
6957*4882a593Smuzhiyun &ppl_write_hint.attr,
6958*4882a593Smuzhiyun NULL,
6959*4882a593Smuzhiyun };
6960*4882a593Smuzhiyun static struct attribute_group raid5_attrs_group = {
6961*4882a593Smuzhiyun .name = NULL,
6962*4882a593Smuzhiyun .attrs = raid5_attrs,
6963*4882a593Smuzhiyun };
6964*4882a593Smuzhiyun
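/*
 * Allocate 'cnt' workers for each of num_possible_nodes() groups.  The
 * workers are carved out of one flat array: group i uses the slice
 * starting at workers + i * cnt, as set up in the loop below.  When
 * cnt is 0, multi-threading stays disabled and no groups are allocated.
 */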
6965*4882a593Smuzhiyun static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt,
6966*4882a593Smuzhiyun struct r5worker_group **worker_groups)
6967*4882a593Smuzhiyun {
6968*4882a593Smuzhiyun int i, j, k;
6969*4882a593Smuzhiyun ssize_t size;
6970*4882a593Smuzhiyun struct r5worker *workers;
6971*4882a593Smuzhiyun
6972*4882a593Smuzhiyun if (cnt == 0) {
6973*4882a593Smuzhiyun *group_cnt = 0;
6974*4882a593Smuzhiyun *worker_groups = NULL;
6975*4882a593Smuzhiyun return 0;
6976*4882a593Smuzhiyun }
6977*4882a593Smuzhiyun *group_cnt = num_possible_nodes();
6978*4882a593Smuzhiyun size = sizeof(struct r5worker) * cnt;
6979*4882a593Smuzhiyun workers = kcalloc(size, *group_cnt, GFP_NOIO);
6980*4882a593Smuzhiyun *worker_groups = kcalloc(*group_cnt, sizeof(struct r5worker_group),
6981*4882a593Smuzhiyun GFP_NOIO);
6982*4882a593Smuzhiyun if (!*worker_groups || !workers) {
6983*4882a593Smuzhiyun kfree(workers);
6984*4882a593Smuzhiyun kfree(*worker_groups);
6985*4882a593Smuzhiyun return -ENOMEM;
6986*4882a593Smuzhiyun }
6987*4882a593Smuzhiyun
6988*4882a593Smuzhiyun for (i = 0; i < *group_cnt; i++) {
6989*4882a593Smuzhiyun struct r5worker_group *group;
6990*4882a593Smuzhiyun
6991*4882a593Smuzhiyun group = &(*worker_groups)[i];
6992*4882a593Smuzhiyun INIT_LIST_HEAD(&group->handle_list);
6993*4882a593Smuzhiyun INIT_LIST_HEAD(&group->loprio_list);
6994*4882a593Smuzhiyun group->conf = conf;
6995*4882a593Smuzhiyun group->workers = workers + i * cnt;
6996*4882a593Smuzhiyun
6997*4882a593Smuzhiyun for (j = 0; j < cnt; j++) {
6998*4882a593Smuzhiyun struct r5worker *worker = group->workers + j;
6999*4882a593Smuzhiyun worker->group = group;
7000*4882a593Smuzhiyun INIT_WORK(&worker->work, raid5_do_work);
7001*4882a593Smuzhiyun
7002*4882a593Smuzhiyun for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++)
7003*4882a593Smuzhiyun INIT_LIST_HEAD(worker->temp_inactive_list + k);
7004*4882a593Smuzhiyun }
7005*4882a593Smuzhiyun }
7006*4882a593Smuzhiyun
7007*4882a593Smuzhiyun return 0;
7008*4882a593Smuzhiyun }
7009*4882a593Smuzhiyun
7010*4882a593Smuzhiyun static void free_thread_groups(struct r5conf *conf)
7011*4882a593Smuzhiyun {
7012*4882a593Smuzhiyun if (conf->worker_groups)
7013*4882a593Smuzhiyun kfree(conf->worker_groups[0].workers);
7014*4882a593Smuzhiyun kfree(conf->worker_groups);
7015*4882a593Smuzhiyun conf->worker_groups = NULL;
7016*4882a593Smuzhiyun }
7017*4882a593Smuzhiyun
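/*
 * Array capacity in sectors: the per-device size is rounded down to
 * both the current and the previous chunk boundary, then multiplied by
 * the number of data disks (raid_disks - max_degraded).
 */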
7018*4882a593Smuzhiyun static sector_t
7019*4882a593Smuzhiyun raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
7020*4882a593Smuzhiyun {
7021*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
7022*4882a593Smuzhiyun
7023*4882a593Smuzhiyun if (!sectors)
7024*4882a593Smuzhiyun sectors = mddev->dev_sectors;
7025*4882a593Smuzhiyun if (!raid_disks)
7026*4882a593Smuzhiyun /* size is defined by the smallest of previous and new size */
7027*4882a593Smuzhiyun raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
7028*4882a593Smuzhiyun
7029*4882a593Smuzhiyun sectors &= ~((sector_t)conf->chunk_sectors - 1);
7030*4882a593Smuzhiyun sectors &= ~((sector_t)conf->prev_chunk_sectors - 1);
7031*4882a593Smuzhiyun return sectors * (raid_disks - conf->max_degraded);
7032*4882a593Smuzhiyun }
7033*4882a593Smuzhiyun
7034*4882a593Smuzhiyun static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
7035*4882a593Smuzhiyun {
7036*4882a593Smuzhiyun safe_put_page(percpu->spare_page);
7037*4882a593Smuzhiyun percpu->spare_page = NULL;
7038*4882a593Smuzhiyun kvfree(percpu->scribble);
7039*4882a593Smuzhiyun percpu->scribble = NULL;
7040*4882a593Smuzhiyun }
7041*4882a593Smuzhiyun
7042*4882a593Smuzhiyun static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
7043*4882a593Smuzhiyun {
7044*4882a593Smuzhiyun if (conf->level == 6 && !percpu->spare_page) {
7045*4882a593Smuzhiyun percpu->spare_page = alloc_page(GFP_KERNEL);
7046*4882a593Smuzhiyun if (!percpu->spare_page)
7047*4882a593Smuzhiyun return -ENOMEM;
7048*4882a593Smuzhiyun }
7049*4882a593Smuzhiyun
7050*4882a593Smuzhiyun if (scribble_alloc(percpu,
7051*4882a593Smuzhiyun max(conf->raid_disks,
7052*4882a593Smuzhiyun conf->previous_raid_disks),
7053*4882a593Smuzhiyun max(conf->chunk_sectors,
7054*4882a593Smuzhiyun conf->prev_chunk_sectors)
7055*4882a593Smuzhiyun / RAID5_STRIPE_SECTORS(conf))) {
7056*4882a593Smuzhiyun free_scratch_buffer(conf, percpu);
7057*4882a593Smuzhiyun return -ENOMEM;
7058*4882a593Smuzhiyun }
7059*4882a593Smuzhiyun
7060*4882a593Smuzhiyun return 0;
7061*4882a593Smuzhiyun }
7062*4882a593Smuzhiyun
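/*
 * CPU hotplug teardown callback: release the per-cpu spare page and
 * scribble buffer of the departing CPU.  raid456_cpu_up_prepare()
 * below re-allocates them when a CPU comes (back) online.
 */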
7063*4882a593Smuzhiyun static int raid456_cpu_dead(unsigned int cpu, struct hlist_node *node)
7064*4882a593Smuzhiyun {
7065*4882a593Smuzhiyun struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
7066*4882a593Smuzhiyun
7067*4882a593Smuzhiyun free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
7068*4882a593Smuzhiyun return 0;
7069*4882a593Smuzhiyun }
7070*4882a593Smuzhiyun
7071*4882a593Smuzhiyun static void raid5_free_percpu(struct r5conf *conf)
7072*4882a593Smuzhiyun {
7073*4882a593Smuzhiyun if (!conf->percpu)
7074*4882a593Smuzhiyun return;
7075*4882a593Smuzhiyun
7076*4882a593Smuzhiyun cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
7077*4882a593Smuzhiyun free_percpu(conf->percpu);
7078*4882a593Smuzhiyun }
7079*4882a593Smuzhiyun
7080*4882a593Smuzhiyun static void free_conf(struct r5conf *conf)
7081*4882a593Smuzhiyun {
7082*4882a593Smuzhiyun int i;
7083*4882a593Smuzhiyun
7084*4882a593Smuzhiyun log_exit(conf);
7085*4882a593Smuzhiyun
7086*4882a593Smuzhiyun unregister_shrinker(&conf->shrinker);
7087*4882a593Smuzhiyun free_thread_groups(conf);
7088*4882a593Smuzhiyun shrink_stripes(conf);
7089*4882a593Smuzhiyun raid5_free_percpu(conf);
7090*4882a593Smuzhiyun for (i = 0; i < conf->pool_size; i++)
7091*4882a593Smuzhiyun if (conf->disks[i].extra_page)
7092*4882a593Smuzhiyun put_page(conf->disks[i].extra_page);
7093*4882a593Smuzhiyun kfree(conf->disks);
7094*4882a593Smuzhiyun bioset_exit(&conf->bio_split);
7095*4882a593Smuzhiyun kfree(conf->stripe_hashtbl);
7096*4882a593Smuzhiyun kfree(conf->pending_data);
7097*4882a593Smuzhiyun kfree(conf);
7098*4882a593Smuzhiyun }
7099*4882a593Smuzhiyun
7100*4882a593Smuzhiyun static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
7101*4882a593Smuzhiyun {
7102*4882a593Smuzhiyun struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
7103*4882a593Smuzhiyun struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
7104*4882a593Smuzhiyun
7105*4882a593Smuzhiyun if (alloc_scratch_buffer(conf, percpu)) {
7106*4882a593Smuzhiyun pr_warn("%s: failed memory allocation for cpu%u\n",
7107*4882a593Smuzhiyun __func__, cpu);
7108*4882a593Smuzhiyun return -ENOMEM;
7109*4882a593Smuzhiyun }
7110*4882a593Smuzhiyun return 0;
7111*4882a593Smuzhiyun }
7112*4882a593Smuzhiyun
7113*4882a593Smuzhiyun static int raid5_alloc_percpu(struct r5conf *conf)
7114*4882a593Smuzhiyun {
7115*4882a593Smuzhiyun int err = 0;
7116*4882a593Smuzhiyun
7117*4882a593Smuzhiyun conf->percpu = alloc_percpu(struct raid5_percpu);
7118*4882a593Smuzhiyun if (!conf->percpu)
7119*4882a593Smuzhiyun return -ENOMEM;
7120*4882a593Smuzhiyun
7121*4882a593Smuzhiyun err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
7122*4882a593Smuzhiyun if (!err) {
7123*4882a593Smuzhiyun conf->scribble_disks = max(conf->raid_disks,
7124*4882a593Smuzhiyun conf->previous_raid_disks);
7125*4882a593Smuzhiyun conf->scribble_sectors = max(conf->chunk_sectors,
7126*4882a593Smuzhiyun conf->prev_chunk_sectors);
7127*4882a593Smuzhiyun }
7128*4882a593Smuzhiyun return err;
7129*4882a593Smuzhiyun }
7130*4882a593Smuzhiyun
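/*
 * Stripe-cache shrinker callbacks.  raid5_cache_scan() drops stripes
 * one at a time while the cache is above min_nr_stripes, returning
 * SHRINK_STOP when it cannot make progress (cache_size_mutex contended
 * or drop_one_stripe() fails); raid5_cache_count() reports how many
 * stripes could currently be reclaimed.
 */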
7131*4882a593Smuzhiyun static unsigned long raid5_cache_scan(struct shrinker *shrink,
7132*4882a593Smuzhiyun struct shrink_control *sc)
7133*4882a593Smuzhiyun {
7134*4882a593Smuzhiyun struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
7135*4882a593Smuzhiyun unsigned long ret = SHRINK_STOP;
7136*4882a593Smuzhiyun
7137*4882a593Smuzhiyun if (mutex_trylock(&conf->cache_size_mutex)) {
7138*4882a593Smuzhiyun ret = 0;
7139*4882a593Smuzhiyun while (ret < sc->nr_to_scan &&
7140*4882a593Smuzhiyun conf->max_nr_stripes > conf->min_nr_stripes) {
7141*4882a593Smuzhiyun if (drop_one_stripe(conf) == 0) {
7142*4882a593Smuzhiyun ret = SHRINK_STOP;
7143*4882a593Smuzhiyun break;
7144*4882a593Smuzhiyun }
7145*4882a593Smuzhiyun ret++;
7146*4882a593Smuzhiyun }
7147*4882a593Smuzhiyun mutex_unlock(&conf->cache_size_mutex);
7148*4882a593Smuzhiyun }
7149*4882a593Smuzhiyun return ret;
7150*4882a593Smuzhiyun }
7151*4882a593Smuzhiyun
7152*4882a593Smuzhiyun static unsigned long raid5_cache_count(struct shrinker *shrink,
7153*4882a593Smuzhiyun struct shrink_control *sc)
7154*4882a593Smuzhiyun {
7155*4882a593Smuzhiyun struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
7156*4882a593Smuzhiyun
7157*4882a593Smuzhiyun if (conf->max_nr_stripes < conf->min_nr_stripes)
7158*4882a593Smuzhiyun /* unlikely, but not impossible */
7159*4882a593Smuzhiyun return 0;
7160*4882a593Smuzhiyun return conf->max_nr_stripes - conf->min_nr_stripes;
7161*4882a593Smuzhiyun }
7162*4882a593Smuzhiyun
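/*
 * Build the r5conf for this array: validate level, layout and chunk
 * size, allocate per-disk and per-cpu state, grow the initial stripe
 * cache, register the cache shrinker and start the raid5d thread.
 * Returns an ERR_PTR() on failure.
 */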
7163*4882a593Smuzhiyun static struct r5conf *setup_conf(struct mddev *mddev)
7164*4882a593Smuzhiyun {
7165*4882a593Smuzhiyun struct r5conf *conf;
7166*4882a593Smuzhiyun int raid_disk, memory, max_disks;
7167*4882a593Smuzhiyun struct md_rdev *rdev;
7168*4882a593Smuzhiyun struct disk_info *disk;
7169*4882a593Smuzhiyun char pers_name[6];
7170*4882a593Smuzhiyun int i;
7171*4882a593Smuzhiyun int group_cnt;
7172*4882a593Smuzhiyun struct r5worker_group *new_group;
7173*4882a593Smuzhiyun int ret;
7174*4882a593Smuzhiyun
7175*4882a593Smuzhiyun if (mddev->new_level != 5
7176*4882a593Smuzhiyun && mddev->new_level != 4
7177*4882a593Smuzhiyun && mddev->new_level != 6) {
7178*4882a593Smuzhiyun pr_warn("md/raid:%s: raid level not set to 4/5/6 (%d)\n",
7179*4882a593Smuzhiyun mdname(mddev), mddev->new_level);
7180*4882a593Smuzhiyun return ERR_PTR(-EIO);
7181*4882a593Smuzhiyun }
7182*4882a593Smuzhiyun if ((mddev->new_level == 5
7183*4882a593Smuzhiyun && !algorithm_valid_raid5(mddev->new_layout)) ||
7184*4882a593Smuzhiyun (mddev->new_level == 6
7185*4882a593Smuzhiyun && !algorithm_valid_raid6(mddev->new_layout))) {
7186*4882a593Smuzhiyun pr_warn("md/raid:%s: layout %d not supported\n",
7187*4882a593Smuzhiyun mdname(mddev), mddev->new_layout);
7188*4882a593Smuzhiyun return ERR_PTR(-EIO);
7189*4882a593Smuzhiyun }
7190*4882a593Smuzhiyun if (mddev->new_level == 6 && mddev->raid_disks < 4) {
7191*4882a593Smuzhiyun pr_warn("md/raid:%s: not enough configured devices (%d, minimum 4)\n",
7192*4882a593Smuzhiyun mdname(mddev), mddev->raid_disks);
7193*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
7194*4882a593Smuzhiyun }
7195*4882a593Smuzhiyun
7196*4882a593Smuzhiyun if (!mddev->new_chunk_sectors ||
7197*4882a593Smuzhiyun (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
7198*4882a593Smuzhiyun !is_power_of_2(mddev->new_chunk_sectors)) {
7199*4882a593Smuzhiyun pr_warn("md/raid:%s: invalid chunk size %d\n",
7200*4882a593Smuzhiyun mdname(mddev), mddev->new_chunk_sectors << 9);
7201*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
7202*4882a593Smuzhiyun }
7203*4882a593Smuzhiyun
7204*4882a593Smuzhiyun conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
7205*4882a593Smuzhiyun if (conf == NULL)
7206*4882a593Smuzhiyun goto abort;
7207*4882a593Smuzhiyun
7208*4882a593Smuzhiyun #if PAGE_SIZE != DEFAULT_STRIPE_SIZE
7209*4882a593Smuzhiyun conf->stripe_size = DEFAULT_STRIPE_SIZE;
7210*4882a593Smuzhiyun conf->stripe_shift = ilog2(DEFAULT_STRIPE_SIZE) - 9;
7211*4882a593Smuzhiyun conf->stripe_sectors = DEFAULT_STRIPE_SIZE >> 9;
7212*4882a593Smuzhiyun #endif
7213*4882a593Smuzhiyun INIT_LIST_HEAD(&conf->free_list);
7214*4882a593Smuzhiyun INIT_LIST_HEAD(&conf->pending_list);
7215*4882a593Smuzhiyun conf->pending_data = kcalloc(PENDING_IO_MAX,
7216*4882a593Smuzhiyun sizeof(struct r5pending_data),
7217*4882a593Smuzhiyun GFP_KERNEL);
7218*4882a593Smuzhiyun if (!conf->pending_data)
7219*4882a593Smuzhiyun goto abort;
7220*4882a593Smuzhiyun for (i = 0; i < PENDING_IO_MAX; i++)
7221*4882a593Smuzhiyun list_add(&conf->pending_data[i].sibling, &conf->free_list);
7222*4882a593Smuzhiyun /* Don't enable multi-threading by default */
7223*4882a593Smuzhiyun if (!alloc_thread_groups(conf, 0, &group_cnt, &new_group)) {
7224*4882a593Smuzhiyun conf->group_cnt = group_cnt;
7225*4882a593Smuzhiyun conf->worker_cnt_per_group = 0;
7226*4882a593Smuzhiyun conf->worker_groups = new_group;
7227*4882a593Smuzhiyun } else
7228*4882a593Smuzhiyun goto abort;
7229*4882a593Smuzhiyun spin_lock_init(&conf->device_lock);
7230*4882a593Smuzhiyun seqcount_spinlock_init(&conf->gen_lock, &conf->device_lock);
7231*4882a593Smuzhiyun mutex_init(&conf->cache_size_mutex);
7232*4882a593Smuzhiyun init_waitqueue_head(&conf->wait_for_quiescent);
7233*4882a593Smuzhiyun init_waitqueue_head(&conf->wait_for_stripe);
7234*4882a593Smuzhiyun init_waitqueue_head(&conf->wait_for_overlap);
7235*4882a593Smuzhiyun INIT_LIST_HEAD(&conf->handle_list);
7236*4882a593Smuzhiyun INIT_LIST_HEAD(&conf->loprio_list);
7237*4882a593Smuzhiyun INIT_LIST_HEAD(&conf->hold_list);
7238*4882a593Smuzhiyun INIT_LIST_HEAD(&conf->delayed_list);
7239*4882a593Smuzhiyun INIT_LIST_HEAD(&conf->bitmap_list);
7240*4882a593Smuzhiyun init_llist_head(&conf->released_stripes);
7241*4882a593Smuzhiyun atomic_set(&conf->active_stripes, 0);
7242*4882a593Smuzhiyun atomic_set(&conf->preread_active_stripes, 0);
7243*4882a593Smuzhiyun atomic_set(&conf->active_aligned_reads, 0);
7244*4882a593Smuzhiyun spin_lock_init(&conf->pending_bios_lock);
7245*4882a593Smuzhiyun conf->batch_bio_dispatch = true;
7246*4882a593Smuzhiyun rdev_for_each(rdev, mddev) {
7247*4882a593Smuzhiyun if (test_bit(Journal, &rdev->flags))
7248*4882a593Smuzhiyun continue;
7249*4882a593Smuzhiyun if (blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
7250*4882a593Smuzhiyun conf->batch_bio_dispatch = false;
7251*4882a593Smuzhiyun break;
7252*4882a593Smuzhiyun }
7253*4882a593Smuzhiyun }
7254*4882a593Smuzhiyun
7255*4882a593Smuzhiyun conf->bypass_threshold = BYPASS_THRESHOLD;
7256*4882a593Smuzhiyun conf->recovery_disabled = mddev->recovery_disabled - 1;
7257*4882a593Smuzhiyun
7258*4882a593Smuzhiyun conf->raid_disks = mddev->raid_disks;
7259*4882a593Smuzhiyun if (mddev->reshape_position == MaxSector)
7260*4882a593Smuzhiyun conf->previous_raid_disks = mddev->raid_disks;
7261*4882a593Smuzhiyun else
7262*4882a593Smuzhiyun conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
7263*4882a593Smuzhiyun max_disks = max(conf->raid_disks, conf->previous_raid_disks);
7264*4882a593Smuzhiyun
7265*4882a593Smuzhiyun conf->disks = kcalloc(max_disks, sizeof(struct disk_info),
7266*4882a593Smuzhiyun GFP_KERNEL);
7267*4882a593Smuzhiyun
7268*4882a593Smuzhiyun if (!conf->disks)
7269*4882a593Smuzhiyun goto abort;
7270*4882a593Smuzhiyun
7271*4882a593Smuzhiyun for (i = 0; i < max_disks; i++) {
7272*4882a593Smuzhiyun conf->disks[i].extra_page = alloc_page(GFP_KERNEL);
7273*4882a593Smuzhiyun if (!conf->disks[i].extra_page)
7274*4882a593Smuzhiyun goto abort;
7275*4882a593Smuzhiyun }
7276*4882a593Smuzhiyun
7277*4882a593Smuzhiyun ret = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
7278*4882a593Smuzhiyun if (ret)
7279*4882a593Smuzhiyun goto abort;
7280*4882a593Smuzhiyun conf->mddev = mddev;
7281*4882a593Smuzhiyun
7282*4882a593Smuzhiyun if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
7283*4882a593Smuzhiyun goto abort;
7284*4882a593Smuzhiyun
7285*4882a593Smuzhiyun /* We init hash_locks[0] separately so that it can be used
7286*4882a593Smuzhiyun * as the reference lock in the spin_lock_nest_lock() call
7287*4882a593Smuzhiyun * in lock_all_device_hash_locks_irq in order to convince
7288*4882a593Smuzhiyun * lockdep that we know what we are doing.
7289*4882a593Smuzhiyun */
7290*4882a593Smuzhiyun spin_lock_init(conf->hash_locks);
7291*4882a593Smuzhiyun for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
7292*4882a593Smuzhiyun spin_lock_init(conf->hash_locks + i);
7293*4882a593Smuzhiyun
7294*4882a593Smuzhiyun for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
7295*4882a593Smuzhiyun INIT_LIST_HEAD(conf->inactive_list + i);
7296*4882a593Smuzhiyun
7297*4882a593Smuzhiyun for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
7298*4882a593Smuzhiyun INIT_LIST_HEAD(conf->temp_inactive_list + i);
7299*4882a593Smuzhiyun
7300*4882a593Smuzhiyun atomic_set(&conf->r5c_cached_full_stripes, 0);
7301*4882a593Smuzhiyun INIT_LIST_HEAD(&conf->r5c_full_stripe_list);
7302*4882a593Smuzhiyun atomic_set(&conf->r5c_cached_partial_stripes, 0);
7303*4882a593Smuzhiyun INIT_LIST_HEAD(&conf->r5c_partial_stripe_list);
7304*4882a593Smuzhiyun atomic_set(&conf->r5c_flushing_full_stripes, 0);
7305*4882a593Smuzhiyun atomic_set(&conf->r5c_flushing_partial_stripes, 0);
7306*4882a593Smuzhiyun
7307*4882a593Smuzhiyun conf->level = mddev->new_level;
7308*4882a593Smuzhiyun conf->chunk_sectors = mddev->new_chunk_sectors;
7309*4882a593Smuzhiyun if (raid5_alloc_percpu(conf) != 0)
7310*4882a593Smuzhiyun goto abort;
7311*4882a593Smuzhiyun
7312*4882a593Smuzhiyun pr_debug("raid456: run(%s) called.\n", mdname(mddev));
7313*4882a593Smuzhiyun
7314*4882a593Smuzhiyun rdev_for_each(rdev, mddev) {
7315*4882a593Smuzhiyun raid_disk = rdev->raid_disk;
7316*4882a593Smuzhiyun if (raid_disk >= max_disks
7317*4882a593Smuzhiyun || raid_disk < 0 || test_bit(Journal, &rdev->flags))
7318*4882a593Smuzhiyun continue;
7319*4882a593Smuzhiyun disk = conf->disks + raid_disk;
7320*4882a593Smuzhiyun
7321*4882a593Smuzhiyun if (test_bit(Replacement, &rdev->flags)) {
7322*4882a593Smuzhiyun if (disk->replacement)
7323*4882a593Smuzhiyun goto abort;
7324*4882a593Smuzhiyun disk->replacement = rdev;
7325*4882a593Smuzhiyun } else {
7326*4882a593Smuzhiyun if (disk->rdev)
7327*4882a593Smuzhiyun goto abort;
7328*4882a593Smuzhiyun disk->rdev = rdev;
7329*4882a593Smuzhiyun }
7330*4882a593Smuzhiyun
7331*4882a593Smuzhiyun if (test_bit(In_sync, &rdev->flags)) {
7332*4882a593Smuzhiyun char b[BDEVNAME_SIZE];
7333*4882a593Smuzhiyun pr_info("md/raid:%s: device %s operational as raid disk %d\n",
7334*4882a593Smuzhiyun mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
7335*4882a593Smuzhiyun } else if (rdev->saved_raid_disk != raid_disk)
7336*4882a593Smuzhiyun /* Cannot rely on bitmap to complete recovery */
7337*4882a593Smuzhiyun conf->fullsync = 1;
7338*4882a593Smuzhiyun }
7339*4882a593Smuzhiyun
7340*4882a593Smuzhiyun conf->level = mddev->new_level;
7341*4882a593Smuzhiyun if (conf->level == 6) {
7342*4882a593Smuzhiyun conf->max_degraded = 2;
7343*4882a593Smuzhiyun if (raid6_call.xor_syndrome)
7344*4882a593Smuzhiyun conf->rmw_level = PARITY_ENABLE_RMW;
7345*4882a593Smuzhiyun else
7346*4882a593Smuzhiyun conf->rmw_level = PARITY_DISABLE_RMW;
7347*4882a593Smuzhiyun } else {
7348*4882a593Smuzhiyun conf->max_degraded = 1;
7349*4882a593Smuzhiyun conf->rmw_level = PARITY_ENABLE_RMW;
7350*4882a593Smuzhiyun }
7351*4882a593Smuzhiyun conf->algorithm = mddev->new_layout;
7352*4882a593Smuzhiyun conf->reshape_progress = mddev->reshape_position;
7353*4882a593Smuzhiyun if (conf->reshape_progress != MaxSector) {
7354*4882a593Smuzhiyun conf->prev_chunk_sectors = mddev->chunk_sectors;
7355*4882a593Smuzhiyun conf->prev_algo = mddev->layout;
7356*4882a593Smuzhiyun } else {
7357*4882a593Smuzhiyun conf->prev_chunk_sectors = conf->chunk_sectors;
7358*4882a593Smuzhiyun conf->prev_algo = conf->algorithm;
7359*4882a593Smuzhiyun }
7360*4882a593Smuzhiyun
7361*4882a593Smuzhiyun conf->min_nr_stripes = NR_STRIPES;
7362*4882a593Smuzhiyun if (mddev->reshape_position != MaxSector) {
7363*4882a593Smuzhiyun int stripes = max_t(int,
7364*4882a593Smuzhiyun ((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4,
7365*4882a593Smuzhiyun ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4);
7366*4882a593Smuzhiyun conf->min_nr_stripes = max(NR_STRIPES, stripes);
7367*4882a593Smuzhiyun if (conf->min_nr_stripes != NR_STRIPES)
7368*4882a593Smuzhiyun pr_info("md/raid:%s: force stripe size %d for reshape\n",
7369*4882a593Smuzhiyun mdname(mddev), conf->min_nr_stripes);
7370*4882a593Smuzhiyun }
7371*4882a593Smuzhiyun memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
7372*4882a593Smuzhiyun max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
7373*4882a593Smuzhiyun atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
7374*4882a593Smuzhiyun if (grow_stripes(conf, conf->min_nr_stripes)) {
7375*4882a593Smuzhiyun pr_warn("md/raid:%s: couldn't allocate %dkB for buffers\n",
7376*4882a593Smuzhiyun mdname(mddev), memory);
7377*4882a593Smuzhiyun goto abort;
7378*4882a593Smuzhiyun } else
7379*4882a593Smuzhiyun pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory);
7380*4882a593Smuzhiyun /*
7381*4882a593Smuzhiyun * Losing a stripe head costs more than the time to refill it,
7382*4882a593Smuzhiyun * it reduces the queue depth and so can hurt throughput.
7383*4882a593Smuzhiyun * So set it rather large, scaled by number of devices.
7384*4882a593Smuzhiyun */
7385*4882a593Smuzhiyun conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4;
7386*4882a593Smuzhiyun conf->shrinker.scan_objects = raid5_cache_scan;
7387*4882a593Smuzhiyun conf->shrinker.count_objects = raid5_cache_count;
7388*4882a593Smuzhiyun conf->shrinker.batch = 128;
7389*4882a593Smuzhiyun conf->shrinker.flags = 0;
7390*4882a593Smuzhiyun if (register_shrinker(&conf->shrinker)) {
7391*4882a593Smuzhiyun pr_warn("md/raid:%s: couldn't register shrinker.\n",
7392*4882a593Smuzhiyun mdname(mddev));
7393*4882a593Smuzhiyun goto abort;
7394*4882a593Smuzhiyun }
7395*4882a593Smuzhiyun
7396*4882a593Smuzhiyun sprintf(pers_name, "raid%d", mddev->new_level);
7397*4882a593Smuzhiyun conf->thread = md_register_thread(raid5d, mddev, pers_name);
7398*4882a593Smuzhiyun if (!conf->thread) {
7399*4882a593Smuzhiyun pr_warn("md/raid:%s: couldn't allocate thread.\n",
7400*4882a593Smuzhiyun mdname(mddev));
7401*4882a593Smuzhiyun goto abort;
7402*4882a593Smuzhiyun }
7403*4882a593Smuzhiyun
7404*4882a593Smuzhiyun return conf;
7405*4882a593Smuzhiyun
7406*4882a593Smuzhiyun abort:
7407*4882a593Smuzhiyun if (conf) {
7408*4882a593Smuzhiyun free_conf(conf);
7409*4882a593Smuzhiyun return ERR_PTR(-EIO);
7410*4882a593Smuzhiyun } else
7411*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
7412*4882a593Smuzhiyun }
7413*4882a593Smuzhiyun
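/*
 * Return 1 if, under layout 'algo', the device at 'raid_disk' only
 * ever holds parity blocks.  raid5_run() uses this when deciding
 * whether a dirty, degraded array may still be started: a not-in-sync
 * parity-only device does not put data at risk.
 */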
7414*4882a593Smuzhiyun static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
7415*4882a593Smuzhiyun {
7416*4882a593Smuzhiyun switch (algo) {
7417*4882a593Smuzhiyun case ALGORITHM_PARITY_0:
7418*4882a593Smuzhiyun if (raid_disk < max_degraded)
7419*4882a593Smuzhiyun return 1;
7420*4882a593Smuzhiyun break;
7421*4882a593Smuzhiyun case ALGORITHM_PARITY_N:
7422*4882a593Smuzhiyun if (raid_disk >= raid_disks - max_degraded)
7423*4882a593Smuzhiyun return 1;
7424*4882a593Smuzhiyun break;
7425*4882a593Smuzhiyun case ALGORITHM_PARITY_0_6:
7426*4882a593Smuzhiyun if (raid_disk == 0 ||
7427*4882a593Smuzhiyun raid_disk == raid_disks - 1)
7428*4882a593Smuzhiyun return 1;
7429*4882a593Smuzhiyun break;
7430*4882a593Smuzhiyun case ALGORITHM_LEFT_ASYMMETRIC_6:
7431*4882a593Smuzhiyun case ALGORITHM_RIGHT_ASYMMETRIC_6:
7432*4882a593Smuzhiyun case ALGORITHM_LEFT_SYMMETRIC_6:
7433*4882a593Smuzhiyun case ALGORITHM_RIGHT_SYMMETRIC_6:
7434*4882a593Smuzhiyun if (raid_disk == raid_disks - 1)
7435*4882a593Smuzhiyun return 1;
7436*4882a593Smuzhiyun }
7437*4882a593Smuzhiyun return 0;
7438*4882a593Smuzhiyun }
7439*4882a593Smuzhiyun
7440*4882a593Smuzhiyun static void raid5_set_io_opt(struct r5conf *conf)
7441*4882a593Smuzhiyun {
7442*4882a593Smuzhiyun blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) *
7443*4882a593Smuzhiyun (conf->raid_disks - conf->max_degraded));
7444*4882a593Smuzhiyun }
7445*4882a593Smuzhiyun
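/*
 * Personality 'run' hook: validate any pending reshape, build (or
 * reuse) the r5conf, check that enough in-sync devices are present,
 * then set up the request queue limits (io_min/io_opt, discard
 * granularity) and the optional journal/PPL before the array goes
 * live.
 */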
7446*4882a593Smuzhiyun static int raid5_run(struct mddev *mddev)
7447*4882a593Smuzhiyun {
7448*4882a593Smuzhiyun struct r5conf *conf;
7449*4882a593Smuzhiyun int working_disks = 0;
7450*4882a593Smuzhiyun int dirty_parity_disks = 0;
7451*4882a593Smuzhiyun struct md_rdev *rdev;
7452*4882a593Smuzhiyun struct md_rdev *journal_dev = NULL;
7453*4882a593Smuzhiyun sector_t reshape_offset = 0;
7454*4882a593Smuzhiyun int i;
7455*4882a593Smuzhiyun long long min_offset_diff = 0;
7456*4882a593Smuzhiyun int first = 1;
7457*4882a593Smuzhiyun
7458*4882a593Smuzhiyun if (mddev_init_writes_pending(mddev) < 0)
7459*4882a593Smuzhiyun return -ENOMEM;
7460*4882a593Smuzhiyun
7461*4882a593Smuzhiyun if (mddev->recovery_cp != MaxSector)
7462*4882a593Smuzhiyun pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
7463*4882a593Smuzhiyun mdname(mddev));
7464*4882a593Smuzhiyun
7465*4882a593Smuzhiyun rdev_for_each(rdev, mddev) {
7466*4882a593Smuzhiyun long long diff;
7467*4882a593Smuzhiyun
7468*4882a593Smuzhiyun if (test_bit(Journal, &rdev->flags)) {
7469*4882a593Smuzhiyun journal_dev = rdev;
7470*4882a593Smuzhiyun continue;
7471*4882a593Smuzhiyun }
7472*4882a593Smuzhiyun if (rdev->raid_disk < 0)
7473*4882a593Smuzhiyun continue;
7474*4882a593Smuzhiyun diff = (rdev->new_data_offset - rdev->data_offset);
7475*4882a593Smuzhiyun if (first) {
7476*4882a593Smuzhiyun min_offset_diff = diff;
7477*4882a593Smuzhiyun first = 0;
7478*4882a593Smuzhiyun } else if (mddev->reshape_backwards &&
7479*4882a593Smuzhiyun diff < min_offset_diff)
7480*4882a593Smuzhiyun min_offset_diff = diff;
7481*4882a593Smuzhiyun else if (!mddev->reshape_backwards &&
7482*4882a593Smuzhiyun diff > min_offset_diff)
7483*4882a593Smuzhiyun min_offset_diff = diff;
7484*4882a593Smuzhiyun }
7485*4882a593Smuzhiyun
7486*4882a593Smuzhiyun if ((test_bit(MD_HAS_JOURNAL, &mddev->flags) || journal_dev) &&
7487*4882a593Smuzhiyun (mddev->bitmap_info.offset || mddev->bitmap_info.file)) {
7488*4882a593Smuzhiyun pr_notice("md/raid:%s: array cannot have both journal and bitmap\n",
7489*4882a593Smuzhiyun mdname(mddev));
7490*4882a593Smuzhiyun return -EINVAL;
7491*4882a593Smuzhiyun }
7492*4882a593Smuzhiyun
7493*4882a593Smuzhiyun if (mddev->reshape_position != MaxSector) {
7494*4882a593Smuzhiyun /* Check that we can continue the reshape.
7495*4882a593Smuzhiyun * Difficulties arise if the stripe we would write to
7496*4882a593Smuzhiyun * next is at or after the stripe we would read from next.
7497*4882a593Smuzhiyun * For a reshape that changes the number of devices, this
7498*4882a593Smuzhiyun * is only possible for a very short time, and mdadm makes
7499*4882a593Smuzhiyun * sure that time appears to have passed before assembling
7500*4882a593Smuzhiyun * the array. So we fail if that time hasn't passed.
7501*4882a593Smuzhiyun * For a reshape that keeps the number of devices the same
7502*4882a593Smuzhiyun * mdadm must be monitoring the reshape and keeping the
7503*4882a593Smuzhiyun * critical areas read-only and backed up. It will start
7504*4882a593Smuzhiyun * the array in read-only mode, so we check for that.
7505*4882a593Smuzhiyun */
7506*4882a593Smuzhiyun sector_t here_new, here_old;
7507*4882a593Smuzhiyun int old_disks;
7508*4882a593Smuzhiyun int max_degraded = (mddev->level == 6 ? 2 : 1);
7509*4882a593Smuzhiyun int chunk_sectors;
7510*4882a593Smuzhiyun int new_data_disks;
7511*4882a593Smuzhiyun
7512*4882a593Smuzhiyun if (journal_dev) {
7513*4882a593Smuzhiyun pr_warn("md/raid:%s: don't support reshape with journal - aborting.\n",
7514*4882a593Smuzhiyun mdname(mddev));
7515*4882a593Smuzhiyun return -EINVAL;
7516*4882a593Smuzhiyun }
7517*4882a593Smuzhiyun
7518*4882a593Smuzhiyun if (mddev->new_level != mddev->level) {
7519*4882a593Smuzhiyun pr_warn("md/raid:%s: unsupported reshape required - aborting.\n",
7520*4882a593Smuzhiyun mdname(mddev));
7521*4882a593Smuzhiyun return -EINVAL;
7522*4882a593Smuzhiyun }
7523*4882a593Smuzhiyun old_disks = mddev->raid_disks - mddev->delta_disks;
7524*4882a593Smuzhiyun /* reshape_position must be on a new-stripe boundary, and one
7525*4882a593Smuzhiyun * further up in new geometry must map after here in old
7526*4882a593Smuzhiyun * geometry.
7527*4882a593Smuzhiyun * If the chunk sizes are different, then as we perform reshape
7528*4882a593Smuzhiyun * in units of the largest of the two, reshape_position needs
7529*4882a593Smuzhiyun * be a multiple of the largest chunk size times new data disks.
7530*4882a593Smuzhiyun */
7531*4882a593Smuzhiyun here_new = mddev->reshape_position;
7532*4882a593Smuzhiyun chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors);
7533*4882a593Smuzhiyun new_data_disks = mddev->raid_disks - max_degraded;
7534*4882a593Smuzhiyun if (sector_div(here_new, chunk_sectors * new_data_disks)) {
7535*4882a593Smuzhiyun pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n",
7536*4882a593Smuzhiyun mdname(mddev));
7537*4882a593Smuzhiyun return -EINVAL;
7538*4882a593Smuzhiyun }
7539*4882a593Smuzhiyun reshape_offset = here_new * chunk_sectors;
7540*4882a593Smuzhiyun /* here_new is the stripe we will write to */
7541*4882a593Smuzhiyun here_old = mddev->reshape_position;
7542*4882a593Smuzhiyun sector_div(here_old, chunk_sectors * (old_disks-max_degraded));
7543*4882a593Smuzhiyun /* here_old is the first stripe that we might need to read
7544*4882a593Smuzhiyun * from */
7545*4882a593Smuzhiyun if (mddev->delta_disks == 0) {
7546*4882a593Smuzhiyun /* We cannot be sure it is safe to start an in-place
7547*4882a593Smuzhiyun * reshape. It is only safe if user-space is monitoring
7548*4882a593Smuzhiyun * and taking constant backups.
7549*4882a593Smuzhiyun * mdadm always starts a situation like this in
7550*4882a593Smuzhiyun * readonly mode so it can take control before
7551*4882a593Smuzhiyun * allowing any writes. So just check for that.
7552*4882a593Smuzhiyun */
7553*4882a593Smuzhiyun if (abs(min_offset_diff) >= mddev->chunk_sectors &&
7554*4882a593Smuzhiyun abs(min_offset_diff) >= mddev->new_chunk_sectors)
7555*4882a593Smuzhiyun /* not really in-place - so OK */;
7556*4882a593Smuzhiyun else if (mddev->ro == 0) {
7557*4882a593Smuzhiyun pr_warn("md/raid:%s: in-place reshape must be started in read-only mode - aborting\n",
7558*4882a593Smuzhiyun mdname(mddev));
7559*4882a593Smuzhiyun return -EINVAL;
7560*4882a593Smuzhiyun }
7561*4882a593Smuzhiyun } else if (mddev->reshape_backwards
7562*4882a593Smuzhiyun ? (here_new * chunk_sectors + min_offset_diff <=
7563*4882a593Smuzhiyun here_old * chunk_sectors)
7564*4882a593Smuzhiyun : (here_new * chunk_sectors >=
7565*4882a593Smuzhiyun here_old * chunk_sectors + (-min_offset_diff))) {
7566*4882a593Smuzhiyun /* Reading from the same stripe as writing to - bad */
7567*4882a593Smuzhiyun pr_warn("md/raid:%s: reshape_position too early for auto-recovery - aborting.\n",
7568*4882a593Smuzhiyun mdname(mddev));
7569*4882a593Smuzhiyun return -EINVAL;
7570*4882a593Smuzhiyun }
7571*4882a593Smuzhiyun pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev));
7572*4882a593Smuzhiyun /* OK, we should be able to continue; */
7573*4882a593Smuzhiyun } else {
7574*4882a593Smuzhiyun BUG_ON(mddev->level != mddev->new_level);
7575*4882a593Smuzhiyun BUG_ON(mddev->layout != mddev->new_layout);
7576*4882a593Smuzhiyun BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
7577*4882a593Smuzhiyun BUG_ON(mddev->delta_disks != 0);
7578*4882a593Smuzhiyun }
7579*4882a593Smuzhiyun
7580*4882a593Smuzhiyun if (test_bit(MD_HAS_JOURNAL, &mddev->flags) &&
7581*4882a593Smuzhiyun test_bit(MD_HAS_PPL, &mddev->flags)) {
7582*4882a593Smuzhiyun pr_warn("md/raid:%s: using journal device and PPL not allowed - disabling PPL\n",
7583*4882a593Smuzhiyun mdname(mddev));
7584*4882a593Smuzhiyun clear_bit(MD_HAS_PPL, &mddev->flags);
7585*4882a593Smuzhiyun clear_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags);
7586*4882a593Smuzhiyun }
7587*4882a593Smuzhiyun
7588*4882a593Smuzhiyun if (mddev->private == NULL)
7589*4882a593Smuzhiyun conf = setup_conf(mddev);
7590*4882a593Smuzhiyun else
7591*4882a593Smuzhiyun conf = mddev->private;
7592*4882a593Smuzhiyun
7593*4882a593Smuzhiyun if (IS_ERR(conf))
7594*4882a593Smuzhiyun return PTR_ERR(conf);
7595*4882a593Smuzhiyun
7596*4882a593Smuzhiyun if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
7597*4882a593Smuzhiyun if (!journal_dev) {
7598*4882a593Smuzhiyun pr_warn("md/raid:%s: journal disk is missing, force array readonly\n",
7599*4882a593Smuzhiyun mdname(mddev));
7600*4882a593Smuzhiyun mddev->ro = 1;
7601*4882a593Smuzhiyun set_disk_ro(mddev->gendisk, 1);
7602*4882a593Smuzhiyun } else if (mddev->recovery_cp == MaxSector)
7603*4882a593Smuzhiyun set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
7604*4882a593Smuzhiyun }
7605*4882a593Smuzhiyun
7606*4882a593Smuzhiyun conf->min_offset_diff = min_offset_diff;
7607*4882a593Smuzhiyun mddev->thread = conf->thread;
7608*4882a593Smuzhiyun conf->thread = NULL;
7609*4882a593Smuzhiyun mddev->private = conf;
7610*4882a593Smuzhiyun
7611*4882a593Smuzhiyun for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
7612*4882a593Smuzhiyun i++) {
7613*4882a593Smuzhiyun rdev = conf->disks[i].rdev;
7614*4882a593Smuzhiyun if (!rdev && conf->disks[i].replacement) {
7615*4882a593Smuzhiyun /* The replacement is all we have yet */
7616*4882a593Smuzhiyun rdev = conf->disks[i].replacement;
7617*4882a593Smuzhiyun conf->disks[i].replacement = NULL;
7618*4882a593Smuzhiyun clear_bit(Replacement, &rdev->flags);
7619*4882a593Smuzhiyun conf->disks[i].rdev = rdev;
7620*4882a593Smuzhiyun }
7621*4882a593Smuzhiyun if (!rdev)
7622*4882a593Smuzhiyun continue;
7623*4882a593Smuzhiyun if (conf->disks[i].replacement &&
7624*4882a593Smuzhiyun conf->reshape_progress != MaxSector) {
7625*4882a593Smuzhiyun /* replacements and reshape simply do not mix. */
7626*4882a593Smuzhiyun pr_warn("md: cannot handle concurrent replacement and reshape.\n");
7627*4882a593Smuzhiyun goto abort;
7628*4882a593Smuzhiyun }
7629*4882a593Smuzhiyun if (test_bit(In_sync, &rdev->flags)) {
7630*4882a593Smuzhiyun working_disks++;
7631*4882a593Smuzhiyun continue;
7632*4882a593Smuzhiyun }
7633*4882a593Smuzhiyun /* This disc is not fully in-sync. However if it
7634*4882a593Smuzhiyun * just stored parity (beyond the recovery_offset),
7635*4882a593Smuzhiyun * then we don't need to be concerned about the
7636*4882a593Smuzhiyun * array being dirty.
7637*4882a593Smuzhiyun * When reshape goes 'backwards', we never have
7638*4882a593Smuzhiyun * partially completed devices, so we only need
7639*4882a593Smuzhiyun * to worry about reshape going forwards.
7640*4882a593Smuzhiyun */
7641*4882a593Smuzhiyun /* Hack because v0.91 doesn't store recovery_offset properly. */
7642*4882a593Smuzhiyun if (mddev->major_version == 0 &&
7643*4882a593Smuzhiyun mddev->minor_version > 90)
7644*4882a593Smuzhiyun rdev->recovery_offset = reshape_offset;
7645*4882a593Smuzhiyun
7646*4882a593Smuzhiyun if (rdev->recovery_offset < reshape_offset) {
7647*4882a593Smuzhiyun /* We need to check old and new layout */
7648*4882a593Smuzhiyun if (!only_parity(rdev->raid_disk,
7649*4882a593Smuzhiyun conf->algorithm,
7650*4882a593Smuzhiyun conf->raid_disks,
7651*4882a593Smuzhiyun conf->max_degraded))
7652*4882a593Smuzhiyun continue;
7653*4882a593Smuzhiyun }
7654*4882a593Smuzhiyun if (!only_parity(rdev->raid_disk,
7655*4882a593Smuzhiyun conf->prev_algo,
7656*4882a593Smuzhiyun conf->previous_raid_disks,
7657*4882a593Smuzhiyun conf->max_degraded))
7658*4882a593Smuzhiyun continue;
7659*4882a593Smuzhiyun dirty_parity_disks++;
7660*4882a593Smuzhiyun }
7661*4882a593Smuzhiyun
7662*4882a593Smuzhiyun /*
7663*4882a593Smuzhiyun * 0 for a fully functional array, 1 or 2 for a degraded array.
7664*4882a593Smuzhiyun */
7665*4882a593Smuzhiyun mddev->degraded = raid5_calc_degraded(conf);
7666*4882a593Smuzhiyun
7667*4882a593Smuzhiyun if (has_failed(conf)) {
7668*4882a593Smuzhiyun pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n",
7669*4882a593Smuzhiyun mdname(mddev), mddev->degraded, conf->raid_disks);
7670*4882a593Smuzhiyun goto abort;
7671*4882a593Smuzhiyun }
7672*4882a593Smuzhiyun
7673*4882a593Smuzhiyun /* device size must be a multiple of chunk size */
7674*4882a593Smuzhiyun mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
7675*4882a593Smuzhiyun mddev->resync_max_sectors = mddev->dev_sectors;
7676*4882a593Smuzhiyun
7677*4882a593Smuzhiyun if (mddev->degraded > dirty_parity_disks &&
7678*4882a593Smuzhiyun mddev->recovery_cp != MaxSector) {
7679*4882a593Smuzhiyun if (test_bit(MD_HAS_PPL, &mddev->flags))
7680*4882a593Smuzhiyun pr_crit("md/raid:%s: starting dirty degraded array with PPL.\n",
7681*4882a593Smuzhiyun mdname(mddev));
7682*4882a593Smuzhiyun else if (mddev->ok_start_degraded)
7683*4882a593Smuzhiyun pr_crit("md/raid:%s: starting dirty degraded array - data corruption possible.\n",
7684*4882a593Smuzhiyun mdname(mddev));
7685*4882a593Smuzhiyun else {
7686*4882a593Smuzhiyun pr_crit("md/raid:%s: cannot start dirty degraded array.\n",
7687*4882a593Smuzhiyun mdname(mddev));
7688*4882a593Smuzhiyun goto abort;
7689*4882a593Smuzhiyun }
7690*4882a593Smuzhiyun }
7691*4882a593Smuzhiyun
7692*4882a593Smuzhiyun pr_info("md/raid:%s: raid level %d active with %d out of %d devices, algorithm %d\n",
7693*4882a593Smuzhiyun mdname(mddev), conf->level,
7694*4882a593Smuzhiyun mddev->raid_disks-mddev->degraded, mddev->raid_disks,
7695*4882a593Smuzhiyun mddev->new_layout);
7696*4882a593Smuzhiyun
7697*4882a593Smuzhiyun print_raid5_conf(conf);
7698*4882a593Smuzhiyun
7699*4882a593Smuzhiyun if (conf->reshape_progress != MaxSector) {
7700*4882a593Smuzhiyun conf->reshape_safe = conf->reshape_progress;
7701*4882a593Smuzhiyun atomic_set(&conf->reshape_stripes, 0);
7702*4882a593Smuzhiyun clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7703*4882a593Smuzhiyun clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7704*4882a593Smuzhiyun set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
7705*4882a593Smuzhiyun set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7706*4882a593Smuzhiyun mddev->sync_thread = md_register_thread(md_do_sync, mddev,
7707*4882a593Smuzhiyun "reshape");
7708*4882a593Smuzhiyun if (!mddev->sync_thread)
7709*4882a593Smuzhiyun goto abort;
7710*4882a593Smuzhiyun }
7711*4882a593Smuzhiyun
7712*4882a593Smuzhiyun /* Ok, everything is just fine now */
7713*4882a593Smuzhiyun if (mddev->to_remove == &raid5_attrs_group)
7714*4882a593Smuzhiyun mddev->to_remove = NULL;
7715*4882a593Smuzhiyun else if (mddev->kobj.sd &&
7716*4882a593Smuzhiyun sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
7717*4882a593Smuzhiyun pr_warn("raid5: failed to create sysfs attributes for %s\n",
7718*4882a593Smuzhiyun mdname(mddev));
7719*4882a593Smuzhiyun md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
7720*4882a593Smuzhiyun
7721*4882a593Smuzhiyun if (mddev->queue) {
7722*4882a593Smuzhiyun int chunk_size;
7723*4882a593Smuzhiyun /* read-ahead size must cover two whole stripes, which
7724*4882a593Smuzhiyun * is 2 * (datadisks) * chunksize, where datadisks is the
7725*4882a593Smuzhiyun * number of raid devices minus max_degraded
7726*4882a593Smuzhiyun */
7727*4882a593Smuzhiyun int data_disks = conf->previous_raid_disks - conf->max_degraded;
7728*4882a593Smuzhiyun int stripe = data_disks *
7729*4882a593Smuzhiyun ((mddev->chunk_sectors << 9) / PAGE_SIZE);
7730*4882a593Smuzhiyun
7731*4882a593Smuzhiyun chunk_size = mddev->chunk_sectors << 9;
7732*4882a593Smuzhiyun blk_queue_io_min(mddev->queue, chunk_size);
7733*4882a593Smuzhiyun raid5_set_io_opt(conf);
7734*4882a593Smuzhiyun mddev->queue->limits.raid_partial_stripes_expensive = 1;
7735*4882a593Smuzhiyun /*
7736*4882a593Smuzhiyun * We can only discard a whole stripe. It doesn't make sense to
7737*4882a593Smuzhiyun * discard a data disk but write the parity disk
7738*4882a593Smuzhiyun */
7739*4882a593Smuzhiyun stripe = stripe * PAGE_SIZE;
7740*4882a593Smuzhiyun /* Round up to power of 2, as discard handling
7741*4882a593Smuzhiyun * currently assumes that */
7742*4882a593Smuzhiyun while ((stripe-1) & stripe)
7743*4882a593Smuzhiyun stripe = (stripe | (stripe-1)) + 1;
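 /*
  * The loop above is a branch-free round-up: while 'stripe' is not a
  * power of two, (stripe | (stripe - 1)) + 1 clears the low bits and
  * advances to the next power of two, e.g. 0x30000 becomes 0x40000.
  */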
7744*4882a593Smuzhiyun mddev->queue->limits.discard_alignment = stripe;
7745*4882a593Smuzhiyun mddev->queue->limits.discard_granularity = stripe;
7746*4882a593Smuzhiyun
7747*4882a593Smuzhiyun blk_queue_max_write_same_sectors(mddev->queue, 0);
7748*4882a593Smuzhiyun blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
7749*4882a593Smuzhiyun
7750*4882a593Smuzhiyun rdev_for_each(rdev, mddev) {
7751*4882a593Smuzhiyun disk_stack_limits(mddev->gendisk, rdev->bdev,
7752*4882a593Smuzhiyun rdev->data_offset << 9);
7753*4882a593Smuzhiyun disk_stack_limits(mddev->gendisk, rdev->bdev,
7754*4882a593Smuzhiyun rdev->new_data_offset << 9);
7755*4882a593Smuzhiyun }
7756*4882a593Smuzhiyun
7757*4882a593Smuzhiyun /*
7758*4882a593Smuzhiyun * zeroing is required, otherwise data
7759*4882a593Smuzhiyun * could be lost. Consider a scenario: discard a stripe
7760*4882a593Smuzhiyun * (the stripe could be inconsistent if
7761*4882a593Smuzhiyun * discard_zeroes_data is 0); write one disk of the
7762*4882a593Smuzhiyun * stripe (the stripe could be inconsistent again
7763*4882a593Smuzhiyun * depending on which disks are used to calculate
7764*4882a593Smuzhiyun * parity); the disk is broken; the stripe data of this
7765*4882a593Smuzhiyun * disk is lost.
7766*4882a593Smuzhiyun *
7767*4882a593Smuzhiyun * We only allow DISCARD if the sysadmin has confirmed that
7768*4882a593Smuzhiyun * only safe devices are in use by setting a module parameter.
7769*4882a593Smuzhiyun * A better idea might be to turn DISCARD into WRITE_ZEROES
7770*4882a593Smuzhiyun * requests, as that is required to be safe.
7771*4882a593Smuzhiyun */
7772*4882a593Smuzhiyun if (devices_handle_discard_safely &&
7773*4882a593Smuzhiyun mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
7774*4882a593Smuzhiyun mddev->queue->limits.discard_granularity >= stripe)
7775*4882a593Smuzhiyun blk_queue_flag_set(QUEUE_FLAG_DISCARD,
7776*4882a593Smuzhiyun mddev->queue);
7777*4882a593Smuzhiyun else
7778*4882a593Smuzhiyun blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
7779*4882a593Smuzhiyun mddev->queue);
7780*4882a593Smuzhiyun
7781*4882a593Smuzhiyun blk_queue_max_hw_sectors(mddev->queue, UINT_MAX);
7782*4882a593Smuzhiyun }
7783*4882a593Smuzhiyun
7784*4882a593Smuzhiyun if (log_init(conf, journal_dev, raid5_has_ppl(conf)))
7785*4882a593Smuzhiyun goto abort;
7786*4882a593Smuzhiyun
7787*4882a593Smuzhiyun return 0;
7788*4882a593Smuzhiyun abort:
7789*4882a593Smuzhiyun md_unregister_thread(&mddev->thread);
7790*4882a593Smuzhiyun print_raid5_conf(conf);
7791*4882a593Smuzhiyun free_conf(conf);
7792*4882a593Smuzhiyun mddev->private = NULL;
7793*4882a593Smuzhiyun pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev));
7794*4882a593Smuzhiyun return -EIO;
7795*4882a593Smuzhiyun }
7796*4882a593Smuzhiyun
7797*4882a593Smuzhiyun static void raid5_free(struct mddev *mddev, void *priv)
7798*4882a593Smuzhiyun {
7799*4882a593Smuzhiyun struct r5conf *conf = priv;
7800*4882a593Smuzhiyun
7801*4882a593Smuzhiyun free_conf(conf);
7802*4882a593Smuzhiyun mddev->to_remove = &raid5_attrs_group;
7803*4882a593Smuzhiyun }
7804*4882a593Smuzhiyun
7805*4882a593Smuzhiyun static void raid5_status(struct seq_file *seq, struct mddev *mddev)
7806*4882a593Smuzhiyun {
7807*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
7808*4882a593Smuzhiyun int i;
7809*4882a593Smuzhiyun
7810*4882a593Smuzhiyun seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
7811*4882a593Smuzhiyun conf->chunk_sectors / 2, mddev->layout);
7812*4882a593Smuzhiyun seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
7813*4882a593Smuzhiyun rcu_read_lock();
7814*4882a593Smuzhiyun for (i = 0; i < conf->raid_disks; i++) {
7815*4882a593Smuzhiyun struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
7816*4882a593Smuzhiyun seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
7817*4882a593Smuzhiyun }
7818*4882a593Smuzhiyun rcu_read_unlock();
7819*4882a593Smuzhiyun seq_printf (seq, "]");
7820*4882a593Smuzhiyun }
7821*4882a593Smuzhiyun
7822*4882a593Smuzhiyun static void print_raid5_conf (struct r5conf *conf)
7823*4882a593Smuzhiyun {
7824*4882a593Smuzhiyun int i;
7825*4882a593Smuzhiyun struct disk_info *tmp;
7826*4882a593Smuzhiyun
7827*4882a593Smuzhiyun pr_debug("RAID conf printout:\n");
7828*4882a593Smuzhiyun if (!conf) {
7829*4882a593Smuzhiyun pr_debug("(conf==NULL)\n");
7830*4882a593Smuzhiyun return;
7831*4882a593Smuzhiyun }
7832*4882a593Smuzhiyun pr_debug(" --- level:%d rd:%d wd:%d\n", conf->level,
7833*4882a593Smuzhiyun conf->raid_disks,
7834*4882a593Smuzhiyun conf->raid_disks - conf->mddev->degraded);
7835*4882a593Smuzhiyun
7836*4882a593Smuzhiyun for (i = 0; i < conf->raid_disks; i++) {
7837*4882a593Smuzhiyun char b[BDEVNAME_SIZE];
7838*4882a593Smuzhiyun tmp = conf->disks + i;
7839*4882a593Smuzhiyun if (tmp->rdev)
7840*4882a593Smuzhiyun pr_debug(" disk %d, o:%d, dev:%s\n",
7841*4882a593Smuzhiyun i, !test_bit(Faulty, &tmp->rdev->flags),
7842*4882a593Smuzhiyun bdevname(tmp->rdev->bdev, b));
7843*4882a593Smuzhiyun }
7844*4882a593Smuzhiyun }
7845*4882a593Smuzhiyun
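/*
 * Scan all slots for devices that have completed recovery and mark them
 * In_sync: a fully recovered replacement supersedes the device it was
 * replacing (which is marked Faulty so it gets removed and never
 * re-added), and recovered spares become active members. Returns the
 * number of devices newly marked In_sync.
 */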
7846*4882a593Smuzhiyun static int raid5_spare_active(struct mddev *mddev)
7847*4882a593Smuzhiyun {
7848*4882a593Smuzhiyun int i;
7849*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
7850*4882a593Smuzhiyun struct disk_info *tmp;
7851*4882a593Smuzhiyun int count = 0;
7852*4882a593Smuzhiyun unsigned long flags;
7853*4882a593Smuzhiyun
7854*4882a593Smuzhiyun for (i = 0; i < conf->raid_disks; i++) {
7855*4882a593Smuzhiyun tmp = conf->disks + i;
7856*4882a593Smuzhiyun if (tmp->replacement
7857*4882a593Smuzhiyun && tmp->replacement->recovery_offset == MaxSector
7858*4882a593Smuzhiyun && !test_bit(Faulty, &tmp->replacement->flags)
7859*4882a593Smuzhiyun && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
7860*4882a593Smuzhiyun /* Replacement has just become active. */
7861*4882a593Smuzhiyun if (!tmp->rdev
7862*4882a593Smuzhiyun || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
7863*4882a593Smuzhiyun count++;
7864*4882a593Smuzhiyun if (tmp->rdev) {
7865*4882a593Smuzhiyun /* Replaced device not technically faulty,
7866*4882a593Smuzhiyun * but we need to be sure it gets removed
7867*4882a593Smuzhiyun * and never re-added.
7868*4882a593Smuzhiyun */
7869*4882a593Smuzhiyun set_bit(Faulty, &tmp->rdev->flags);
7870*4882a593Smuzhiyun sysfs_notify_dirent_safe(
7871*4882a593Smuzhiyun tmp->rdev->sysfs_state);
7872*4882a593Smuzhiyun }
7873*4882a593Smuzhiyun sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
7874*4882a593Smuzhiyun } else if (tmp->rdev
7875*4882a593Smuzhiyun && tmp->rdev->recovery_offset == MaxSector
7876*4882a593Smuzhiyun && !test_bit(Faulty, &tmp->rdev->flags)
7877*4882a593Smuzhiyun && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
7878*4882a593Smuzhiyun count++;
7879*4882a593Smuzhiyun sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
7880*4882a593Smuzhiyun }
7881*4882a593Smuzhiyun }
7882*4882a593Smuzhiyun spin_lock_irqsave(&conf->device_lock, flags);
7883*4882a593Smuzhiyun mddev->degraded = raid5_calc_degraded(conf);
7884*4882a593Smuzhiyun spin_unlock_irqrestore(&conf->device_lock, flags);
7885*4882a593Smuzhiyun print_raid5_conf(conf);
7886*4882a593Smuzhiyun return count;
7887*4882a593Smuzhiyun }
7888*4882a593Smuzhiyun
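/*
 * Hot-remove @rdev. A journal device is only released once no stripes
 * still reference the log; an ordinary member is refused (-EBUSY) while
 * it is In_sync, has I/O pending, or is still needed for recovery.
 */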
7889*4882a593Smuzhiyun static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
7890*4882a593Smuzhiyun {
7891*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
7892*4882a593Smuzhiyun int err = 0;
7893*4882a593Smuzhiyun int number = rdev->raid_disk;
7894*4882a593Smuzhiyun struct md_rdev **rdevp;
7895*4882a593Smuzhiyun struct disk_info *p = conf->disks + number;
7896*4882a593Smuzhiyun
7897*4882a593Smuzhiyun print_raid5_conf(conf);
7898*4882a593Smuzhiyun if (test_bit(Journal, &rdev->flags) && conf->log) {
7899*4882a593Smuzhiyun /*
7900*4882a593Smuzhiyun * we can't wait for pending writes here, as this is called in
7901*4882a593Smuzhiyun * raid5d; waiting would deadlock.
7902*4882a593Smuzhiyun * neilb: there is no locking about new writes here,
7903*4882a593Smuzhiyun * so this cannot be safe.
7904*4882a593Smuzhiyun */
7905*4882a593Smuzhiyun if (atomic_read(&conf->active_stripes) ||
7906*4882a593Smuzhiyun atomic_read(&conf->r5c_cached_full_stripes) ||
7907*4882a593Smuzhiyun atomic_read(&conf->r5c_cached_partial_stripes)) {
7908*4882a593Smuzhiyun return -EBUSY;
7909*4882a593Smuzhiyun }
7910*4882a593Smuzhiyun log_exit(conf);
7911*4882a593Smuzhiyun return 0;
7912*4882a593Smuzhiyun }
7913*4882a593Smuzhiyun if (rdev == p->rdev)
7914*4882a593Smuzhiyun rdevp = &p->rdev;
7915*4882a593Smuzhiyun else if (rdev == p->replacement)
7916*4882a593Smuzhiyun rdevp = &p->replacement;
7917*4882a593Smuzhiyun else
7918*4882a593Smuzhiyun return 0;
7919*4882a593Smuzhiyun
7920*4882a593Smuzhiyun if (number >= conf->raid_disks &&
7921*4882a593Smuzhiyun conf->reshape_progress == MaxSector)
7922*4882a593Smuzhiyun clear_bit(In_sync, &rdev->flags);
7923*4882a593Smuzhiyun
7924*4882a593Smuzhiyun if (test_bit(In_sync, &rdev->flags) ||
7925*4882a593Smuzhiyun atomic_read(&rdev->nr_pending)) {
7926*4882a593Smuzhiyun err = -EBUSY;
7927*4882a593Smuzhiyun goto abort;
7928*4882a593Smuzhiyun }
7929*4882a593Smuzhiyun /* Only remove non-faulty devices if recovery
7930*4882a593Smuzhiyun * isn't possible.
7931*4882a593Smuzhiyun */
7932*4882a593Smuzhiyun if (!test_bit(Faulty, &rdev->flags) &&
7933*4882a593Smuzhiyun mddev->recovery_disabled != conf->recovery_disabled &&
7934*4882a593Smuzhiyun !has_failed(conf) &&
7935*4882a593Smuzhiyun (!p->replacement || p->replacement == rdev) &&
7936*4882a593Smuzhiyun number < conf->raid_disks) {
7937*4882a593Smuzhiyun err = -EBUSY;
7938*4882a593Smuzhiyun goto abort;
7939*4882a593Smuzhiyun }
7940*4882a593Smuzhiyun *rdevp = NULL;
7941*4882a593Smuzhiyun if (!test_bit(RemoveSynchronized, &rdev->flags)) {
7942*4882a593Smuzhiyun synchronize_rcu();
7943*4882a593Smuzhiyun if (atomic_read(&rdev->nr_pending)) {
7944*4882a593Smuzhiyun /* lost the race, try later */
7945*4882a593Smuzhiyun err = -EBUSY;
7946*4882a593Smuzhiyun *rdevp = rdev;
7947*4882a593Smuzhiyun }
7948*4882a593Smuzhiyun }
7949*4882a593Smuzhiyun if (!err) {
7950*4882a593Smuzhiyun err = log_modify(conf, rdev, false);
7951*4882a593Smuzhiyun if (err)
7952*4882a593Smuzhiyun goto abort;
7953*4882a593Smuzhiyun }
7954*4882a593Smuzhiyun if (p->replacement) {
7955*4882a593Smuzhiyun /* We must have just cleared 'rdev' */
7956*4882a593Smuzhiyun p->rdev = p->replacement;
7957*4882a593Smuzhiyun clear_bit(Replacement, &p->replacement->flags);
7958*4882a593Smuzhiyun smp_mb(); /* Make sure other CPUs may see both as identical
7959*4882a593Smuzhiyun * but will never see neither - if they are careful
7960*4882a593Smuzhiyun */
7961*4882a593Smuzhiyun p->replacement = NULL;
7962*4882a593Smuzhiyun
7963*4882a593Smuzhiyun if (!err)
7964*4882a593Smuzhiyun err = log_modify(conf, p->rdev, true);
7965*4882a593Smuzhiyun }
7966*4882a593Smuzhiyun
7967*4882a593Smuzhiyun clear_bit(WantReplacement, &rdev->flags);
7968*4882a593Smuzhiyun abort:
7969*4882a593Smuzhiyun
7970*4882a593Smuzhiyun print_raid5_conf(conf);
7971*4882a593Smuzhiyun return err;
7972*4882a593Smuzhiyun }
7973*4882a593Smuzhiyun
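/*
 * Hot-add @rdev. A journal device (re)initialises and starts the log;
 * otherwise the device is placed in a free slot, preferring
 * rdev->saved_raid_disk, or failing that attached as a replacement for
 * a member that has WantReplacement set.
 */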
7974*4882a593Smuzhiyun static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
7975*4882a593Smuzhiyun {
7976*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
7977*4882a593Smuzhiyun int ret, err = -EEXIST;
7978*4882a593Smuzhiyun int disk;
7979*4882a593Smuzhiyun struct disk_info *p;
7980*4882a593Smuzhiyun int first = 0;
7981*4882a593Smuzhiyun int last = conf->raid_disks - 1;
7982*4882a593Smuzhiyun
7983*4882a593Smuzhiyun if (test_bit(Journal, &rdev->flags)) {
7984*4882a593Smuzhiyun if (conf->log)
7985*4882a593Smuzhiyun return -EBUSY;
7986*4882a593Smuzhiyun
7987*4882a593Smuzhiyun rdev->raid_disk = 0;
7988*4882a593Smuzhiyun /*
7989*4882a593Smuzhiyun * The array is in readonly mode if journal is missing, so no
7990*4882a593Smuzhiyun * write requests are running. We should be safe.
7991*4882a593Smuzhiyun */
7992*4882a593Smuzhiyun ret = log_init(conf, rdev, false);
7993*4882a593Smuzhiyun if (ret)
7994*4882a593Smuzhiyun return ret;
7995*4882a593Smuzhiyun
7996*4882a593Smuzhiyun ret = r5l_start(conf->log);
7997*4882a593Smuzhiyun if (ret)
7998*4882a593Smuzhiyun return ret;
7999*4882a593Smuzhiyun
8000*4882a593Smuzhiyun return 0;
8001*4882a593Smuzhiyun }
8002*4882a593Smuzhiyun if (mddev->recovery_disabled == conf->recovery_disabled)
8003*4882a593Smuzhiyun return -EBUSY;
8004*4882a593Smuzhiyun
8005*4882a593Smuzhiyun if (rdev->saved_raid_disk < 0 && has_failed(conf))
8006*4882a593Smuzhiyun /* no point adding a device */
8007*4882a593Smuzhiyun return -EINVAL;
8008*4882a593Smuzhiyun
8009*4882a593Smuzhiyun if (rdev->raid_disk >= 0)
8010*4882a593Smuzhiyun first = last = rdev->raid_disk;
8011*4882a593Smuzhiyun
8012*4882a593Smuzhiyun /*
8013*4882a593Smuzhiyun * find the disk ... but prefer rdev->saved_raid_disk
8014*4882a593Smuzhiyun * if possible.
8015*4882a593Smuzhiyun */
8016*4882a593Smuzhiyun if (rdev->saved_raid_disk >= 0 &&
8017*4882a593Smuzhiyun rdev->saved_raid_disk >= first &&
8018*4882a593Smuzhiyun rdev->saved_raid_disk <= last &&
8019*4882a593Smuzhiyun conf->disks[rdev->saved_raid_disk].rdev == NULL)
8020*4882a593Smuzhiyun first = rdev->saved_raid_disk;
8021*4882a593Smuzhiyun
8022*4882a593Smuzhiyun for (disk = first; disk <= last; disk++) {
8023*4882a593Smuzhiyun p = conf->disks + disk;
8024*4882a593Smuzhiyun if (p->rdev == NULL) {
8025*4882a593Smuzhiyun clear_bit(In_sync, &rdev->flags);
8026*4882a593Smuzhiyun rdev->raid_disk = disk;
8027*4882a593Smuzhiyun if (rdev->saved_raid_disk != disk)
8028*4882a593Smuzhiyun conf->fullsync = 1;
8029*4882a593Smuzhiyun rcu_assign_pointer(p->rdev, rdev);
8030*4882a593Smuzhiyun
8031*4882a593Smuzhiyun err = log_modify(conf, rdev, true);
8032*4882a593Smuzhiyun
8033*4882a593Smuzhiyun goto out;
8034*4882a593Smuzhiyun }
8035*4882a593Smuzhiyun }
8036*4882a593Smuzhiyun for (disk = first; disk <= last; disk++) {
8037*4882a593Smuzhiyun p = conf->disks + disk;
8038*4882a593Smuzhiyun if (test_bit(WantReplacement, &p->rdev->flags) &&
8039*4882a593Smuzhiyun p->replacement == NULL) {
8040*4882a593Smuzhiyun clear_bit(In_sync, &rdev->flags);
8041*4882a593Smuzhiyun set_bit(Replacement, &rdev->flags);
8042*4882a593Smuzhiyun rdev->raid_disk = disk;
8043*4882a593Smuzhiyun err = 0;
8044*4882a593Smuzhiyun conf->fullsync = 1;
8045*4882a593Smuzhiyun rcu_assign_pointer(p->replacement, rdev);
8046*4882a593Smuzhiyun break;
8047*4882a593Smuzhiyun }
8048*4882a593Smuzhiyun }
8049*4882a593Smuzhiyun out:
8050*4882a593Smuzhiyun print_raid5_conf(conf);
8051*4882a593Smuzhiyun return err;
8052*4882a593Smuzhiyun }
8053*4882a593Smuzhiyun
8054*4882a593Smuzhiyun static int raid5_resize(struct mddev *mddev, sector_t sectors)
8055*4882a593Smuzhiyun {
8056*4882a593Smuzhiyun /* no resync is happening, and there is enough space
8057*4882a593Smuzhiyun * on all devices, so we can resize.
8058*4882a593Smuzhiyun * We need to make sure resync covers any new space.
8059*4882a593Smuzhiyun * If the array is shrinking we should possibly wait until
8060*4882a593Smuzhiyun * any io in the removed space completes, but it hardly seems
8061*4882a593Smuzhiyun * worth it.
8062*4882a593Smuzhiyun */
8063*4882a593Smuzhiyun sector_t newsize;
8064*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
8065*4882a593Smuzhiyun
8066*4882a593Smuzhiyun if (raid5_has_log(conf) || raid5_has_ppl(conf))
8067*4882a593Smuzhiyun return -EINVAL;
8068*4882a593Smuzhiyun sectors &= ~((sector_t)conf->chunk_sectors - 1);
8069*4882a593Smuzhiyun newsize = raid5_size(mddev, sectors, mddev->raid_disks);
8070*4882a593Smuzhiyun if (mddev->external_size &&
8071*4882a593Smuzhiyun mddev->array_sectors > newsize)
8072*4882a593Smuzhiyun return -EINVAL;
8073*4882a593Smuzhiyun if (mddev->bitmap) {
8074*4882a593Smuzhiyun int ret = md_bitmap_resize(mddev->bitmap, sectors, 0, 0);
8075*4882a593Smuzhiyun if (ret)
8076*4882a593Smuzhiyun return ret;
8077*4882a593Smuzhiyun }
8078*4882a593Smuzhiyun md_set_array_sectors(mddev, newsize);
8079*4882a593Smuzhiyun if (sectors > mddev->dev_sectors &&
8080*4882a593Smuzhiyun mddev->recovery_cp > mddev->dev_sectors) {
8081*4882a593Smuzhiyun mddev->recovery_cp = mddev->dev_sectors;
8082*4882a593Smuzhiyun set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8083*4882a593Smuzhiyun }
8084*4882a593Smuzhiyun mddev->dev_sectors = sectors;
8085*4882a593Smuzhiyun mddev->resync_max_sectors = sectors;
8086*4882a593Smuzhiyun return 0;
8087*4882a593Smuzhiyun }
8088*4882a593Smuzhiyun
8089*4882a593Smuzhiyun static int check_stripe_cache(struct mddev *mddev)
8090*4882a593Smuzhiyun {
8091*4882a593Smuzhiyun /* Can only proceed if there are plenty of stripe_heads.
8092*4882a593Smuzhiyun * We need a minimum of one full stripe, and for sensible progress
8093*4882a593Smuzhiyun * it is best to have about 4 times that.
8094*4882a593Smuzhiyun * If we require 4 times, then the default 256 4K stripe_heads will
8095*4882a593Smuzhiyun * allow for chunk sizes up to 256K, which is probably OK.
8096*4882a593Smuzhiyun * If the chunk size is greater, user-space should request more
8097*4882a593Smuzhiyun * stripe_heads first.
8098*4882a593Smuzhiyun */
8099*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
8100*4882a593Smuzhiyun if (((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4
8101*4882a593Smuzhiyun > conf->min_nr_stripes ||
8102*4882a593Smuzhiyun ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4
8103*4882a593Smuzhiyun > conf->min_nr_stripes) {
8104*4882a593Smuzhiyun pr_warn("md/raid:%s: reshape: not enough stripes. Needed %lu\n",
8105*4882a593Smuzhiyun mdname(mddev),
8106*4882a593Smuzhiyun ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
8107*4882a593Smuzhiyun / RAID5_STRIPE_SIZE(conf))*4);
8108*4882a593Smuzhiyun return 0;
8109*4882a593Smuzhiyun }
8110*4882a593Smuzhiyun return 1;
8111*4882a593Smuzhiyun }
8112*4882a593Smuzhiyun
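/*
 * Validate a requested geometry change (delta_disks, layout, chunk size)
 * and pre-allocate what the larger of the old and new geometries needs:
 * bigger per-cpu scribble buffers and, when disks are added, a larger
 * stripe cache.
 */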
8113*4882a593Smuzhiyun static int check_reshape(struct mddev *mddev)
8114*4882a593Smuzhiyun {
8115*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
8116*4882a593Smuzhiyun
8117*4882a593Smuzhiyun if (raid5_has_log(conf) || raid5_has_ppl(conf))
8118*4882a593Smuzhiyun return -EINVAL;
8119*4882a593Smuzhiyun if (mddev->delta_disks == 0 &&
8120*4882a593Smuzhiyun mddev->new_layout == mddev->layout &&
8121*4882a593Smuzhiyun mddev->new_chunk_sectors == mddev->chunk_sectors)
8122*4882a593Smuzhiyun return 0; /* nothing to do */
8123*4882a593Smuzhiyun if (has_failed(conf))
8124*4882a593Smuzhiyun return -EINVAL;
8125*4882a593Smuzhiyun if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) {
8126*4882a593Smuzhiyun /* We might be able to shrink, but the devices must
8127*4882a593Smuzhiyun * be made bigger first.
8128*4882a593Smuzhiyun * For raid6, 4 is the minimum size.
8129*4882a593Smuzhiyun * Otherwise 2 is the minimum.
8130*4882a593Smuzhiyun */
8131*4882a593Smuzhiyun int min = 2;
8132*4882a593Smuzhiyun if (mddev->level == 6)
8133*4882a593Smuzhiyun min = 4;
8134*4882a593Smuzhiyun if (mddev->raid_disks + mddev->delta_disks < min)
8135*4882a593Smuzhiyun return -EINVAL;
8136*4882a593Smuzhiyun }
8137*4882a593Smuzhiyun
8138*4882a593Smuzhiyun if (!check_stripe_cache(mddev))
8139*4882a593Smuzhiyun return -ENOSPC;
8140*4882a593Smuzhiyun
8141*4882a593Smuzhiyun if (mddev->new_chunk_sectors > mddev->chunk_sectors ||
8142*4882a593Smuzhiyun mddev->delta_disks > 0)
8143*4882a593Smuzhiyun if (resize_chunks(conf,
8144*4882a593Smuzhiyun conf->previous_raid_disks
8145*4882a593Smuzhiyun + max(0, mddev->delta_disks),
8146*4882a593Smuzhiyun max(mddev->new_chunk_sectors,
8147*4882a593Smuzhiyun mddev->chunk_sectors)
8148*4882a593Smuzhiyun ) < 0)
8149*4882a593Smuzhiyun return -ENOMEM;
8150*4882a593Smuzhiyun
8151*4882a593Smuzhiyun if (conf->previous_raid_disks + mddev->delta_disks <= conf->pool_size)
8152*4882a593Smuzhiyun return 0; /* never bother to shrink */
8153*4882a593Smuzhiyun return resize_stripes(conf, (conf->previous_raid_disks
8154*4882a593Smuzhiyun + mddev->delta_disks));
8155*4882a593Smuzhiyun }
8156*4882a593Smuzhiyun
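/*
 * Commit a validated reshape: publish the new geometry under gen_lock,
 * add whatever spares are available, and start the "reshape" sync
 * thread. If the thread cannot be started, the old geometry is restored
 * and -EAGAIN returned.
 */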
8157*4882a593Smuzhiyun static int raid5_start_reshape(struct mddev *mddev)
8158*4882a593Smuzhiyun {
8159*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
8160*4882a593Smuzhiyun struct md_rdev *rdev;
8161*4882a593Smuzhiyun int spares = 0;
8162*4882a593Smuzhiyun unsigned long flags;
8163*4882a593Smuzhiyun
8164*4882a593Smuzhiyun if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
8165*4882a593Smuzhiyun return -EBUSY;
8166*4882a593Smuzhiyun
8167*4882a593Smuzhiyun if (!check_stripe_cache(mddev))
8168*4882a593Smuzhiyun return -ENOSPC;
8169*4882a593Smuzhiyun
8170*4882a593Smuzhiyun if (has_failed(conf))
8171*4882a593Smuzhiyun return -EINVAL;
8172*4882a593Smuzhiyun
8173*4882a593Smuzhiyun rdev_for_each(rdev, mddev) {
8174*4882a593Smuzhiyun if (!test_bit(In_sync, &rdev->flags)
8175*4882a593Smuzhiyun && !test_bit(Faulty, &rdev->flags))
8176*4882a593Smuzhiyun spares++;
8177*4882a593Smuzhiyun }
8178*4882a593Smuzhiyun
8179*4882a593Smuzhiyun if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
8180*4882a593Smuzhiyun /* Not enough devices even to make a degraded array
8181*4882a593Smuzhiyun * of that size
8182*4882a593Smuzhiyun */
8183*4882a593Smuzhiyun return -EINVAL;
8184*4882a593Smuzhiyun
8185*4882a593Smuzhiyun /* Refuse to reduce size of the array. Any reductions in
8186*4882a593Smuzhiyun * array size must be through explicit setting of array_size
8187*4882a593Smuzhiyun * attribute.
8188*4882a593Smuzhiyun */
8189*4882a593Smuzhiyun if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
8190*4882a593Smuzhiyun < mddev->array_sectors) {
8191*4882a593Smuzhiyun pr_warn("md/raid:%s: array size must be reduced before number of disks\n",
8192*4882a593Smuzhiyun mdname(mddev));
8193*4882a593Smuzhiyun return -EINVAL;
8194*4882a593Smuzhiyun }
8195*4882a593Smuzhiyun
8196*4882a593Smuzhiyun atomic_set(&conf->reshape_stripes, 0);
8197*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
8198*4882a593Smuzhiyun write_seqcount_begin(&conf->gen_lock);
8199*4882a593Smuzhiyun conf->previous_raid_disks = conf->raid_disks;
8200*4882a593Smuzhiyun conf->raid_disks += mddev->delta_disks;
8201*4882a593Smuzhiyun conf->prev_chunk_sectors = conf->chunk_sectors;
8202*4882a593Smuzhiyun conf->chunk_sectors = mddev->new_chunk_sectors;
8203*4882a593Smuzhiyun conf->prev_algo = conf->algorithm;
8204*4882a593Smuzhiyun conf->algorithm = mddev->new_layout;
8205*4882a593Smuzhiyun conf->generation++;
8206*4882a593Smuzhiyun /* Code that selects data_offset needs to see the generation update
8207*4882a593Smuzhiyun * if reshape_progress has been set - so a memory barrier needed.
8208*4882a593Smuzhiyun * if reshape_progress has been set - so a memory barrier is needed.
8209*4882a593Smuzhiyun smp_mb();
8210*4882a593Smuzhiyun if (mddev->reshape_backwards)
8211*4882a593Smuzhiyun conf->reshape_progress = raid5_size(mddev, 0, 0);
8212*4882a593Smuzhiyun else
8213*4882a593Smuzhiyun conf->reshape_progress = 0;
8214*4882a593Smuzhiyun conf->reshape_safe = conf->reshape_progress;
8215*4882a593Smuzhiyun write_seqcount_end(&conf->gen_lock);
8216*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
8217*4882a593Smuzhiyun
8218*4882a593Smuzhiyun /* Now make sure any requests that proceeded on the assumption
8219*4882a593Smuzhiyun * the reshape wasn't running - like Discard or Read - have
8220*4882a593Smuzhiyun * completed.
8221*4882a593Smuzhiyun */
8222*4882a593Smuzhiyun mddev_suspend(mddev);
8223*4882a593Smuzhiyun mddev_resume(mddev);
8224*4882a593Smuzhiyun
8225*4882a593Smuzhiyun /* Add some new drives, as many as will fit.
8226*4882a593Smuzhiyun * We know there are enough to make the newly sized array work.
8227*4882a593Smuzhiyun * Don't add devices if we are reducing the number of
8228*4882a593Smuzhiyun * devices in the array. This is because it is not possible
8229*4882a593Smuzhiyun * to correctly record the "partially reconstructed" state of
8230*4882a593Smuzhiyun * such devices during the reshape and confusion could result.
8231*4882a593Smuzhiyun */
8232*4882a593Smuzhiyun if (mddev->delta_disks >= 0) {
8233*4882a593Smuzhiyun rdev_for_each(rdev, mddev)
8234*4882a593Smuzhiyun if (rdev->raid_disk < 0 &&
8235*4882a593Smuzhiyun !test_bit(Faulty, &rdev->flags)) {
8236*4882a593Smuzhiyun if (raid5_add_disk(mddev, rdev) == 0) {
8237*4882a593Smuzhiyun if (rdev->raid_disk
8238*4882a593Smuzhiyun >= conf->previous_raid_disks)
8239*4882a593Smuzhiyun set_bit(In_sync, &rdev->flags);
8240*4882a593Smuzhiyun else
8241*4882a593Smuzhiyun rdev->recovery_offset = 0;
8242*4882a593Smuzhiyun
8243*4882a593Smuzhiyun /* Failure here is OK */
8244*4882a593Smuzhiyun sysfs_link_rdev(mddev, rdev);
8245*4882a593Smuzhiyun }
8246*4882a593Smuzhiyun } else if (rdev->raid_disk >= conf->previous_raid_disks
8247*4882a593Smuzhiyun && !test_bit(Faulty, &rdev->flags)) {
8248*4882a593Smuzhiyun /* This is a spare that was manually added */
8249*4882a593Smuzhiyun set_bit(In_sync, &rdev->flags);
8250*4882a593Smuzhiyun }
8251*4882a593Smuzhiyun
8252*4882a593Smuzhiyun /* When a reshape changes the number of devices,
8253*4882a593Smuzhiyun * ->degraded is measured against the larger of the
8254*4882a593Smuzhiyun * pre and post number of devices.
8255*4882a593Smuzhiyun */
8256*4882a593Smuzhiyun spin_lock_irqsave(&conf->device_lock, flags);
8257*4882a593Smuzhiyun mddev->degraded = raid5_calc_degraded(conf);
8258*4882a593Smuzhiyun spin_unlock_irqrestore(&conf->device_lock, flags);
8259*4882a593Smuzhiyun }
8260*4882a593Smuzhiyun mddev->raid_disks = conf->raid_disks;
8261*4882a593Smuzhiyun mddev->reshape_position = conf->reshape_progress;
8262*4882a593Smuzhiyun set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
8263*4882a593Smuzhiyun
8264*4882a593Smuzhiyun clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8265*4882a593Smuzhiyun clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8266*4882a593Smuzhiyun clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8267*4882a593Smuzhiyun set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8268*4882a593Smuzhiyun set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8269*4882a593Smuzhiyun mddev->sync_thread = md_register_thread(md_do_sync, mddev,
8270*4882a593Smuzhiyun "reshape");
8271*4882a593Smuzhiyun if (!mddev->sync_thread) {
8272*4882a593Smuzhiyun mddev->recovery = 0;
8273*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
8274*4882a593Smuzhiyun write_seqcount_begin(&conf->gen_lock);
8275*4882a593Smuzhiyun mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
8276*4882a593Smuzhiyun mddev->new_chunk_sectors =
8277*4882a593Smuzhiyun conf->chunk_sectors = conf->prev_chunk_sectors;
8278*4882a593Smuzhiyun mddev->new_layout = conf->algorithm = conf->prev_algo;
8279*4882a593Smuzhiyun rdev_for_each(rdev, mddev)
8280*4882a593Smuzhiyun rdev->new_data_offset = rdev->data_offset;
8281*4882a593Smuzhiyun smp_wmb();
8282*4882a593Smuzhiyun conf->generation--;
8283*4882a593Smuzhiyun conf->reshape_progress = MaxSector;
8284*4882a593Smuzhiyun mddev->reshape_position = MaxSector;
8285*4882a593Smuzhiyun write_seqcount_end(&conf->gen_lock);
8286*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
8287*4882a593Smuzhiyun return -EAGAIN;
8288*4882a593Smuzhiyun }
8289*4882a593Smuzhiyun conf->reshape_checkpoint = jiffies;
8290*4882a593Smuzhiyun md_wakeup_thread(mddev->sync_thread);
8291*4882a593Smuzhiyun md_new_event(mddev);
8292*4882a593Smuzhiyun return 0;
8293*4882a593Smuzhiyun }
8294*4882a593Smuzhiyun
8295*4882a593Smuzhiyun /* This is called from the reshape thread and should make any
8296*4882a593Smuzhiyun * changes needed in 'conf'
8297*4882a593Smuzhiyun */
8298*4882a593Smuzhiyun static void end_reshape(struct r5conf *conf)
8299*4882a593Smuzhiyun {
8300*4882a593Smuzhiyun
8301*4882a593Smuzhiyun if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
8302*4882a593Smuzhiyun struct md_rdev *rdev;
8303*4882a593Smuzhiyun
8304*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
8305*4882a593Smuzhiyun conf->previous_raid_disks = conf->raid_disks;
8306*4882a593Smuzhiyun md_finish_reshape(conf->mddev);
8307*4882a593Smuzhiyun smp_wmb();
8308*4882a593Smuzhiyun conf->reshape_progress = MaxSector;
8309*4882a593Smuzhiyun conf->mddev->reshape_position = MaxSector;
8310*4882a593Smuzhiyun rdev_for_each(rdev, conf->mddev)
8311*4882a593Smuzhiyun if (rdev->raid_disk >= 0 &&
8312*4882a593Smuzhiyun !test_bit(Journal, &rdev->flags) &&
8313*4882a593Smuzhiyun !test_bit(In_sync, &rdev->flags))
8314*4882a593Smuzhiyun rdev->recovery_offset = MaxSector;
8315*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
8316*4882a593Smuzhiyun wake_up(&conf->wait_for_overlap);
8317*4882a593Smuzhiyun
8318*4882a593Smuzhiyun if (conf->mddev->queue)
8319*4882a593Smuzhiyun raid5_set_io_opt(conf);
8320*4882a593Smuzhiyun }
8321*4882a593Smuzhiyun }
8322*4882a593Smuzhiyun
8323*4882a593Smuzhiyun /* This is called from the raid5d thread with mddev_lock held.
8324*4882a593Smuzhiyun * It makes config changes to the device.
8325*4882a593Smuzhiyun */
8326*4882a593Smuzhiyun static void raid5_finish_reshape(struct mddev *mddev)
8327*4882a593Smuzhiyun {
8328*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
8329*4882a593Smuzhiyun
8330*4882a593Smuzhiyun if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8331*4882a593Smuzhiyun
8332*4882a593Smuzhiyun if (mddev->delta_disks <= 0) {
8333*4882a593Smuzhiyun int d;
8334*4882a593Smuzhiyun spin_lock_irq(&conf->device_lock);
8335*4882a593Smuzhiyun mddev->degraded = raid5_calc_degraded(conf);
8336*4882a593Smuzhiyun spin_unlock_irq(&conf->device_lock);
8337*4882a593Smuzhiyun for (d = conf->raid_disks ;
8338*4882a593Smuzhiyun d < conf->raid_disks - mddev->delta_disks;
8339*4882a593Smuzhiyun d++) {
8340*4882a593Smuzhiyun struct md_rdev *rdev = conf->disks[d].rdev;
8341*4882a593Smuzhiyun if (rdev)
8342*4882a593Smuzhiyun clear_bit(In_sync, &rdev->flags);
8343*4882a593Smuzhiyun rdev = conf->disks[d].replacement;
8344*4882a593Smuzhiyun if (rdev)
8345*4882a593Smuzhiyun clear_bit(In_sync, &rdev->flags);
8346*4882a593Smuzhiyun }
8347*4882a593Smuzhiyun }
8348*4882a593Smuzhiyun mddev->layout = conf->algorithm;
8349*4882a593Smuzhiyun mddev->chunk_sectors = conf->chunk_sectors;
8350*4882a593Smuzhiyun mddev->reshape_position = MaxSector;
8351*4882a593Smuzhiyun mddev->delta_disks = 0;
8352*4882a593Smuzhiyun mddev->reshape_backwards = 0;
8353*4882a593Smuzhiyun }
8354*4882a593Smuzhiyun }
8355*4882a593Smuzhiyun
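/*
 * quiesce != 0: flush the r5c cache, block new writes and wait for all
 * active stripes and aligned reads to drain (conf->quiesce goes 2 -> 1);
 * quiesce == 0: re-enable writes and wake any waiters. The log/PPL code
 * is notified in both directions via log_quiesce().
 */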
8356*4882a593Smuzhiyun static void raid5_quiesce(struct mddev *mddev, int quiesce)
8357*4882a593Smuzhiyun {
8358*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
8359*4882a593Smuzhiyun
8360*4882a593Smuzhiyun if (quiesce) {
8361*4882a593Smuzhiyun /* stop all writes */
8362*4882a593Smuzhiyun lock_all_device_hash_locks_irq(conf);
8363*4882a593Smuzhiyun /* '2' tells resync/reshape to pause so that all
8364*4882a593Smuzhiyun * active stripes can drain
8365*4882a593Smuzhiyun */
8366*4882a593Smuzhiyun r5c_flush_cache(conf, INT_MAX);
8367*4882a593Smuzhiyun conf->quiesce = 2;
8368*4882a593Smuzhiyun wait_event_cmd(conf->wait_for_quiescent,
8369*4882a593Smuzhiyun atomic_read(&conf->active_stripes) == 0 &&
8370*4882a593Smuzhiyun atomic_read(&conf->active_aligned_reads) == 0,
8371*4882a593Smuzhiyun unlock_all_device_hash_locks_irq(conf),
8372*4882a593Smuzhiyun lock_all_device_hash_locks_irq(conf));
8373*4882a593Smuzhiyun conf->quiesce = 1;
8374*4882a593Smuzhiyun unlock_all_device_hash_locks_irq(conf);
8375*4882a593Smuzhiyun /* allow reshape to continue */
8376*4882a593Smuzhiyun wake_up(&conf->wait_for_overlap);
8377*4882a593Smuzhiyun } else {
8378*4882a593Smuzhiyun /* re-enable writes */
8379*4882a593Smuzhiyun lock_all_device_hash_locks_irq(conf);
8380*4882a593Smuzhiyun conf->quiesce = 0;
8381*4882a593Smuzhiyun wake_up(&conf->wait_for_quiescent);
8382*4882a593Smuzhiyun wake_up(&conf->wait_for_overlap);
8383*4882a593Smuzhiyun unlock_all_device_hash_locks_irq(conf);
8384*4882a593Smuzhiyun }
8385*4882a593Smuzhiyun log_quiesce(conf, quiesce);
8386*4882a593Smuzhiyun }
8387*4882a593Smuzhiyun
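/*
 * Convert a single-zone raid0 array into a degraded raid4/5 array using
 * the PARITY_N layout: one extra (missing) parity device is appended and
 * the array is marked clean so no initial resync is attempted.
 */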
8388*4882a593Smuzhiyun static void *raid45_takeover_raid0(struct mddev *mddev, int level)
8389*4882a593Smuzhiyun {
8390*4882a593Smuzhiyun struct r0conf *raid0_conf = mddev->private;
8391*4882a593Smuzhiyun sector_t sectors;
8392*4882a593Smuzhiyun
8393*4882a593Smuzhiyun /* for raid0 takeover only one zone is supported */
8394*4882a593Smuzhiyun if (raid0_conf->nr_strip_zones > 1) {
8395*4882a593Smuzhiyun pr_warn("md/raid:%s: cannot takeover raid0 with more than one zone.\n",
8396*4882a593Smuzhiyun mdname(mddev));
8397*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
8398*4882a593Smuzhiyun }
8399*4882a593Smuzhiyun
8400*4882a593Smuzhiyun sectors = raid0_conf->strip_zone[0].zone_end;
8401*4882a593Smuzhiyun sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
8402*4882a593Smuzhiyun mddev->dev_sectors = sectors;
8403*4882a593Smuzhiyun mddev->new_level = level;
8404*4882a593Smuzhiyun mddev->new_layout = ALGORITHM_PARITY_N;
8405*4882a593Smuzhiyun mddev->new_chunk_sectors = mddev->chunk_sectors;
8406*4882a593Smuzhiyun mddev->raid_disks += 1;
8407*4882a593Smuzhiyun mddev->delta_disks = 1;
8408*4882a593Smuzhiyun /* make sure it will not be marked as dirty */
8409*4882a593Smuzhiyun mddev->recovery_cp = MaxSector;
8410*4882a593Smuzhiyun
8411*4882a593Smuzhiyun return setup_conf(mddev);
8412*4882a593Smuzhiyun }
8413*4882a593Smuzhiyun
8414*4882a593Smuzhiyun static void *raid5_takeover_raid1(struct mddev *mddev)
8415*4882a593Smuzhiyun {
8416*4882a593Smuzhiyun int chunksect;
8417*4882a593Smuzhiyun void *ret;
8418*4882a593Smuzhiyun
8419*4882a593Smuzhiyun if (mddev->raid_disks != 2 ||
8420*4882a593Smuzhiyun mddev->degraded > 1)
8421*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
8422*4882a593Smuzhiyun
8423*4882a593Smuzhiyun /* Should check if there are write-behind devices? */
8424*4882a593Smuzhiyun
8425*4882a593Smuzhiyun chunksect = 64*2; /* 64K by default */
8426*4882a593Smuzhiyun
8427*4882a593Smuzhiyun /* The array must be an exact multiple of chunksize */
8428*4882a593Smuzhiyun while (chunksect && (mddev->array_sectors & (chunksect-1)))
8429*4882a593Smuzhiyun chunksect >>= 1;
8430*4882a593Smuzhiyun
8431*4882a593Smuzhiyun if ((chunksect<<9) < RAID5_STRIPE_SIZE((struct r5conf *)mddev->private))
8432*4882a593Smuzhiyun /* array size does not allow a suitable chunk size */
8433*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
8434*4882a593Smuzhiyun
8435*4882a593Smuzhiyun mddev->new_level = 5;
8436*4882a593Smuzhiyun mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
8437*4882a593Smuzhiyun mddev->new_chunk_sectors = chunksect;
8438*4882a593Smuzhiyun
8439*4882a593Smuzhiyun ret = setup_conf(mddev);
8440*4882a593Smuzhiyun if (!IS_ERR(ret))
8441*4882a593Smuzhiyun mddev_clear_unsupported_flags(mddev,
8442*4882a593Smuzhiyun UNSUPPORTED_MDDEV_FLAGS);
8443*4882a593Smuzhiyun return ret;
8444*4882a593Smuzhiyun }
8445*4882a593Smuzhiyun
8446*4882a593Smuzhiyun static void *raid5_takeover_raid6(struct mddev *mddev)
8447*4882a593Smuzhiyun {
8448*4882a593Smuzhiyun int new_layout;
8449*4882a593Smuzhiyun
8450*4882a593Smuzhiyun switch (mddev->layout) {
8451*4882a593Smuzhiyun case ALGORITHM_LEFT_ASYMMETRIC_6:
8452*4882a593Smuzhiyun new_layout = ALGORITHM_LEFT_ASYMMETRIC;
8453*4882a593Smuzhiyun break;
8454*4882a593Smuzhiyun case ALGORITHM_RIGHT_ASYMMETRIC_6:
8455*4882a593Smuzhiyun new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
8456*4882a593Smuzhiyun break;
8457*4882a593Smuzhiyun case ALGORITHM_LEFT_SYMMETRIC_6:
8458*4882a593Smuzhiyun new_layout = ALGORITHM_LEFT_SYMMETRIC;
8459*4882a593Smuzhiyun break;
8460*4882a593Smuzhiyun case ALGORITHM_RIGHT_SYMMETRIC_6:
8461*4882a593Smuzhiyun new_layout = ALGORITHM_RIGHT_SYMMETRIC;
8462*4882a593Smuzhiyun break;
8463*4882a593Smuzhiyun case ALGORITHM_PARITY_0_6:
8464*4882a593Smuzhiyun new_layout = ALGORITHM_PARITY_0;
8465*4882a593Smuzhiyun break;
8466*4882a593Smuzhiyun case ALGORITHM_PARITY_N:
8467*4882a593Smuzhiyun new_layout = ALGORITHM_PARITY_N;
8468*4882a593Smuzhiyun break;
8469*4882a593Smuzhiyun default:
8470*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
8471*4882a593Smuzhiyun }
8472*4882a593Smuzhiyun mddev->new_level = 5;
8473*4882a593Smuzhiyun mddev->new_layout = new_layout;
8474*4882a593Smuzhiyun mddev->delta_disks = -1;
8475*4882a593Smuzhiyun mddev->raid_disks -= 1;
8476*4882a593Smuzhiyun return setup_conf(mddev);
8477*4882a593Smuzhiyun }
8478*4882a593Smuzhiyun
8479*4882a593Smuzhiyun static int raid5_check_reshape(struct mddev *mddev)
8480*4882a593Smuzhiyun {
8481*4882a593Smuzhiyun /* For a 2-drive array, the layout and chunk size can be changed
8482*4882a593Smuzhiyun * immediately, as no restriping is needed.
8483*4882a593Smuzhiyun * For larger arrays we record the new value - after validation
8484*4882a593Smuzhiyun * to be used by a reshape pass.
8485*4882a593Smuzhiyun */
8486*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
8487*4882a593Smuzhiyun int new_chunk = mddev->new_chunk_sectors;
8488*4882a593Smuzhiyun
8489*4882a593Smuzhiyun if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
8490*4882a593Smuzhiyun return -EINVAL;
8491*4882a593Smuzhiyun if (new_chunk > 0) {
8492*4882a593Smuzhiyun if (!is_power_of_2(new_chunk))
8493*4882a593Smuzhiyun return -EINVAL;
8494*4882a593Smuzhiyun if (new_chunk < (PAGE_SIZE>>9))
8495*4882a593Smuzhiyun return -EINVAL;
8496*4882a593Smuzhiyun if (mddev->array_sectors & (new_chunk-1))
8497*4882a593Smuzhiyun /* not factor of array size */
8498*4882a593Smuzhiyun return -EINVAL;
8499*4882a593Smuzhiyun }
8500*4882a593Smuzhiyun
8501*4882a593Smuzhiyun /* They look valid */
8502*4882a593Smuzhiyun
8503*4882a593Smuzhiyun if (mddev->raid_disks == 2) {
8504*4882a593Smuzhiyun /* can make the change immediately */
8505*4882a593Smuzhiyun if (mddev->new_layout >= 0) {
8506*4882a593Smuzhiyun conf->algorithm = mddev->new_layout;
8507*4882a593Smuzhiyun mddev->layout = mddev->new_layout;
8508*4882a593Smuzhiyun }
8509*4882a593Smuzhiyun if (new_chunk > 0) {
8510*4882a593Smuzhiyun conf->chunk_sectors = new_chunk;
8511*4882a593Smuzhiyun mddev->chunk_sectors = new_chunk;
8512*4882a593Smuzhiyun }
8513*4882a593Smuzhiyun set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
8514*4882a593Smuzhiyun md_wakeup_thread(mddev->thread);
8515*4882a593Smuzhiyun }
8516*4882a593Smuzhiyun return check_reshape(mddev);
8517*4882a593Smuzhiyun }
8518*4882a593Smuzhiyun
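/* As raid5_check_reshape(), but without the 2-drive immediate-change shortcut. */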
8519*4882a593Smuzhiyun static int raid6_check_reshape(struct mddev *mddev)
8520*4882a593Smuzhiyun {
8521*4882a593Smuzhiyun int new_chunk = mddev->new_chunk_sectors;
8522*4882a593Smuzhiyun
8523*4882a593Smuzhiyun if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
8524*4882a593Smuzhiyun return -EINVAL;
8525*4882a593Smuzhiyun if (new_chunk > 0) {
8526*4882a593Smuzhiyun if (!is_power_of_2(new_chunk))
8527*4882a593Smuzhiyun return -EINVAL;
8528*4882a593Smuzhiyun if (new_chunk < (PAGE_SIZE >> 9))
8529*4882a593Smuzhiyun return -EINVAL;
8530*4882a593Smuzhiyun if (mddev->array_sectors & (new_chunk-1))
8531*4882a593Smuzhiyun /* not factor of array size */
8532*4882a593Smuzhiyun return -EINVAL;
8533*4882a593Smuzhiyun }
8534*4882a593Smuzhiyun
8535*4882a593Smuzhiyun /* They look valid */
8536*4882a593Smuzhiyun return check_reshape(mddev);
8537*4882a593Smuzhiyun }
8538*4882a593Smuzhiyun
8539*4882a593Smuzhiyun static void *raid5_takeover(struct mddev *mddev)
8540*4882a593Smuzhiyun {
8541*4882a593Smuzhiyun /* raid5 can take over:
8542*4882a593Smuzhiyun * raid0 - if there is only one strip zone - make it a raid4 layout
8543*4882a593Smuzhiyun * raid1 - if there are two drives. We need to know the chunk size
8544*4882a593Smuzhiyun * raid4 - trivial - just use a raid4 layout.
8545*4882a593Smuzhiyun * raid6 - Providing it is a *_6 layout
8546*4882a593Smuzhiyun */
8547*4882a593Smuzhiyun if (mddev->level == 0)
8548*4882a593Smuzhiyun return raid45_takeover_raid0(mddev, 5);
8549*4882a593Smuzhiyun if (mddev->level == 1)
8550*4882a593Smuzhiyun return raid5_takeover_raid1(mddev);
8551*4882a593Smuzhiyun if (mddev->level == 4) {
8552*4882a593Smuzhiyun mddev->new_layout = ALGORITHM_PARITY_N;
8553*4882a593Smuzhiyun mddev->new_level = 5;
8554*4882a593Smuzhiyun return setup_conf(mddev);
8555*4882a593Smuzhiyun }
8556*4882a593Smuzhiyun if (mddev->level == 6)
8557*4882a593Smuzhiyun return raid5_takeover_raid6(mddev);
8558*4882a593Smuzhiyun
8559*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
8560*4882a593Smuzhiyun }
8561*4882a593Smuzhiyun
8562*4882a593Smuzhiyun static void *raid4_takeover(struct mddev *mddev)
8563*4882a593Smuzhiyun {
8564*4882a593Smuzhiyun /* raid4 can take over:
8565*4882a593Smuzhiyun * raid0 - if there is only one strip zone
8566*4882a593Smuzhiyun * raid5 - if layout is right
8567*4882a593Smuzhiyun */
8568*4882a593Smuzhiyun if (mddev->level == 0)
8569*4882a593Smuzhiyun return raid45_takeover_raid0(mddev, 4);
8570*4882a593Smuzhiyun if (mddev->level == 5 &&
8571*4882a593Smuzhiyun mddev->layout == ALGORITHM_PARITY_N) {
8572*4882a593Smuzhiyun mddev->new_layout = 0;
8573*4882a593Smuzhiyun mddev->new_level = 4;
8574*4882a593Smuzhiyun return setup_conf(mddev);
8575*4882a593Smuzhiyun }
8576*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
8577*4882a593Smuzhiyun }
8578*4882a593Smuzhiyun
8579*4882a593Smuzhiyun static struct md_personality raid5_personality;
8580*4882a593Smuzhiyun
8581*4882a593Smuzhiyun static void *raid6_takeover(struct mddev *mddev)
8582*4882a593Smuzhiyun {
8583*4882a593Smuzhiyun /* Currently can only take over a raid5. We map the
8584*4882a593Smuzhiyun * personality to an equivalent raid6 personality
8585*4882a593Smuzhiyun * with the Q block at the end.
8586*4882a593Smuzhiyun */
8587*4882a593Smuzhiyun int new_layout;
8588*4882a593Smuzhiyun
8589*4882a593Smuzhiyun if (mddev->pers != &raid5_personality)
8590*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
8591*4882a593Smuzhiyun if (mddev->degraded > 1)
8592*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
8593*4882a593Smuzhiyun if (mddev->raid_disks > 253)
8594*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
8595*4882a593Smuzhiyun if (mddev->raid_disks < 3)
8596*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
8597*4882a593Smuzhiyun
8598*4882a593Smuzhiyun switch (mddev->layout) {
8599*4882a593Smuzhiyun case ALGORITHM_LEFT_ASYMMETRIC:
8600*4882a593Smuzhiyun new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
8601*4882a593Smuzhiyun break;
8602*4882a593Smuzhiyun case ALGORITHM_RIGHT_ASYMMETRIC:
8603*4882a593Smuzhiyun new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
8604*4882a593Smuzhiyun break;
8605*4882a593Smuzhiyun case ALGORITHM_LEFT_SYMMETRIC:
8606*4882a593Smuzhiyun new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
8607*4882a593Smuzhiyun break;
8608*4882a593Smuzhiyun case ALGORITHM_RIGHT_SYMMETRIC:
8609*4882a593Smuzhiyun new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
8610*4882a593Smuzhiyun break;
8611*4882a593Smuzhiyun case ALGORITHM_PARITY_0:
8612*4882a593Smuzhiyun new_layout = ALGORITHM_PARITY_0_6;
8613*4882a593Smuzhiyun break;
8614*4882a593Smuzhiyun case ALGORITHM_PARITY_N:
8615*4882a593Smuzhiyun new_layout = ALGORITHM_PARITY_N;
8616*4882a593Smuzhiyun break;
8617*4882a593Smuzhiyun default:
8618*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
8619*4882a593Smuzhiyun }
8620*4882a593Smuzhiyun mddev->new_level = 6;
8621*4882a593Smuzhiyun mddev->new_layout = new_layout;
8622*4882a593Smuzhiyun mddev->delta_disks = 1;
8623*4882a593Smuzhiyun mddev->raid_disks += 1;
8624*4882a593Smuzhiyun return setup_conf(mddev);
8625*4882a593Smuzhiyun }
8626*4882a593Smuzhiyun
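/*
 * Handle writes to the consistency_policy sysfs attribute: "ppl" enables
 * the partial parity log (raid5 only), "resync" drops PPL or a failed
 * journal. The superblock is updated when the switch succeeds.
 */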
8627*4882a593Smuzhiyun static int raid5_change_consistency_policy(struct mddev *mddev, const char *buf)
8628*4882a593Smuzhiyun {
8629*4882a593Smuzhiyun struct r5conf *conf;
8630*4882a593Smuzhiyun int err;
8631*4882a593Smuzhiyun
8632*4882a593Smuzhiyun err = mddev_lock(mddev);
8633*4882a593Smuzhiyun if (err)
8634*4882a593Smuzhiyun return err;
8635*4882a593Smuzhiyun conf = mddev->private;
8636*4882a593Smuzhiyun if (!conf) {
8637*4882a593Smuzhiyun mddev_unlock(mddev);
8638*4882a593Smuzhiyun return -ENODEV;
8639*4882a593Smuzhiyun }
8640*4882a593Smuzhiyun
8641*4882a593Smuzhiyun if (strncmp(buf, "ppl", 3) == 0) {
8642*4882a593Smuzhiyun /* ppl only works with RAID 5 */
8643*4882a593Smuzhiyun if (!raid5_has_ppl(conf) && conf->level == 5) {
8644*4882a593Smuzhiyun err = log_init(conf, NULL, true);
8645*4882a593Smuzhiyun if (!err) {
8646*4882a593Smuzhiyun err = resize_stripes(conf, conf->pool_size);
8647*4882a593Smuzhiyun if (err)
8648*4882a593Smuzhiyun log_exit(conf);
8649*4882a593Smuzhiyun }
8650*4882a593Smuzhiyun } else
8651*4882a593Smuzhiyun err = -EINVAL;
8652*4882a593Smuzhiyun } else if (strncmp(buf, "resync", 6) == 0) {
8653*4882a593Smuzhiyun if (raid5_has_ppl(conf)) {
8654*4882a593Smuzhiyun mddev_suspend(mddev);
8655*4882a593Smuzhiyun log_exit(conf);
8656*4882a593Smuzhiyun mddev_resume(mddev);
8657*4882a593Smuzhiyun err = resize_stripes(conf, conf->pool_size);
8658*4882a593Smuzhiyun } else if (test_bit(MD_HAS_JOURNAL, &conf->mddev->flags) &&
8659*4882a593Smuzhiyun r5l_log_disk_error(conf)) {
8660*4882a593Smuzhiyun bool journal_dev_exists = false;
8661*4882a593Smuzhiyun struct md_rdev *rdev;
8662*4882a593Smuzhiyun
8663*4882a593Smuzhiyun rdev_for_each(rdev, mddev)
8664*4882a593Smuzhiyun if (test_bit(Journal, &rdev->flags)) {
8665*4882a593Smuzhiyun journal_dev_exists = true;
8666*4882a593Smuzhiyun break;
8667*4882a593Smuzhiyun }
8668*4882a593Smuzhiyun
8669*4882a593Smuzhiyun if (!journal_dev_exists) {
8670*4882a593Smuzhiyun mddev_suspend(mddev);
8671*4882a593Smuzhiyun clear_bit(MD_HAS_JOURNAL, &mddev->flags);
8672*4882a593Smuzhiyun mddev_resume(mddev);
8673*4882a593Smuzhiyun } else /* need to remove journal device first */
8674*4882a593Smuzhiyun err = -EBUSY;
8675*4882a593Smuzhiyun } else
8676*4882a593Smuzhiyun err = -EINVAL;
8677*4882a593Smuzhiyun } else {
8678*4882a593Smuzhiyun err = -EINVAL;
8679*4882a593Smuzhiyun }
8680*4882a593Smuzhiyun
8681*4882a593Smuzhiyun if (!err)
8682*4882a593Smuzhiyun md_update_sb(mddev, 1);
8683*4882a593Smuzhiyun
8684*4882a593Smuzhiyun mddev_unlock(mddev);
8685*4882a593Smuzhiyun
8686*4882a593Smuzhiyun return err;
8687*4882a593Smuzhiyun }
8688*4882a593Smuzhiyun
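/* .start hook: start the r5l write journal, if one is configured. */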
8689*4882a593Smuzhiyun static int raid5_start(struct mddev *mddev)
8690*4882a593Smuzhiyun {
8691*4882a593Smuzhiyun struct r5conf *conf = mddev->private;
8692*4882a593Smuzhiyun
8693*4882a593Smuzhiyun return r5l_start(conf->log);
8694*4882a593Smuzhiyun }
8695*4882a593Smuzhiyun
8696*4882a593Smuzhiyun static struct md_personality raid6_personality =
8697*4882a593Smuzhiyun {
8698*4882a593Smuzhiyun .name = "raid6",
8699*4882a593Smuzhiyun .level = 6,
8700*4882a593Smuzhiyun .owner = THIS_MODULE,
8701*4882a593Smuzhiyun .make_request = raid5_make_request,
8702*4882a593Smuzhiyun .run = raid5_run,
8703*4882a593Smuzhiyun .start = raid5_start,
8704*4882a593Smuzhiyun .free = raid5_free,
8705*4882a593Smuzhiyun .status = raid5_status,
8706*4882a593Smuzhiyun .error_handler = raid5_error,
8707*4882a593Smuzhiyun .hot_add_disk = raid5_add_disk,
8708*4882a593Smuzhiyun .hot_remove_disk= raid5_remove_disk,
8709*4882a593Smuzhiyun .spare_active = raid5_spare_active,
8710*4882a593Smuzhiyun .sync_request = raid5_sync_request,
8711*4882a593Smuzhiyun .resize = raid5_resize,
8712*4882a593Smuzhiyun .size = raid5_size,
8713*4882a593Smuzhiyun .check_reshape = raid6_check_reshape,
8714*4882a593Smuzhiyun .start_reshape = raid5_start_reshape,
8715*4882a593Smuzhiyun .finish_reshape = raid5_finish_reshape,
8716*4882a593Smuzhiyun .quiesce = raid5_quiesce,
8717*4882a593Smuzhiyun .takeover = raid6_takeover,
8718*4882a593Smuzhiyun .change_consistency_policy = raid5_change_consistency_policy,
8719*4882a593Smuzhiyun };
8720*4882a593Smuzhiyun static struct md_personality raid5_personality =
8721*4882a593Smuzhiyun {
8722*4882a593Smuzhiyun .name = "raid5",
8723*4882a593Smuzhiyun .level = 5,
8724*4882a593Smuzhiyun .owner = THIS_MODULE,
8725*4882a593Smuzhiyun .make_request = raid5_make_request,
8726*4882a593Smuzhiyun .run = raid5_run,
8727*4882a593Smuzhiyun .start = raid5_start,
8728*4882a593Smuzhiyun .free = raid5_free,
8729*4882a593Smuzhiyun .status = raid5_status,
8730*4882a593Smuzhiyun .error_handler = raid5_error,
8731*4882a593Smuzhiyun .hot_add_disk = raid5_add_disk,
8732*4882a593Smuzhiyun .hot_remove_disk= raid5_remove_disk,
8733*4882a593Smuzhiyun .spare_active = raid5_spare_active,
8734*4882a593Smuzhiyun .sync_request = raid5_sync_request,
8735*4882a593Smuzhiyun .resize = raid5_resize,
8736*4882a593Smuzhiyun .size = raid5_size,
8737*4882a593Smuzhiyun .check_reshape = raid5_check_reshape,
8738*4882a593Smuzhiyun .start_reshape = raid5_start_reshape,
8739*4882a593Smuzhiyun .finish_reshape = raid5_finish_reshape,
8740*4882a593Smuzhiyun .quiesce = raid5_quiesce,
8741*4882a593Smuzhiyun .takeover = raid5_takeover,
8742*4882a593Smuzhiyun .change_consistency_policy = raid5_change_consistency_policy,
8743*4882a593Smuzhiyun };
8744*4882a593Smuzhiyun
8745*4882a593Smuzhiyun static struct md_personality raid4_personality =
8746*4882a593Smuzhiyun {
8747*4882a593Smuzhiyun .name = "raid4",
8748*4882a593Smuzhiyun .level = 4,
8749*4882a593Smuzhiyun .owner = THIS_MODULE,
8750*4882a593Smuzhiyun .make_request = raid5_make_request,
8751*4882a593Smuzhiyun .run = raid5_run,
8752*4882a593Smuzhiyun .start = raid5_start,
8753*4882a593Smuzhiyun .free = raid5_free,
8754*4882a593Smuzhiyun .status = raid5_status,
8755*4882a593Smuzhiyun .error_handler = raid5_error,
8756*4882a593Smuzhiyun .hot_add_disk = raid5_add_disk,
8757*4882a593Smuzhiyun .hot_remove_disk= raid5_remove_disk,
8758*4882a593Smuzhiyun .spare_active = raid5_spare_active,
8759*4882a593Smuzhiyun .sync_request = raid5_sync_request,
8760*4882a593Smuzhiyun .resize = raid5_resize,
8761*4882a593Smuzhiyun .size = raid5_size,
8762*4882a593Smuzhiyun .check_reshape = raid5_check_reshape,
8763*4882a593Smuzhiyun .start_reshape = raid5_start_reshape,
8764*4882a593Smuzhiyun .finish_reshape = raid5_finish_reshape,
8765*4882a593Smuzhiyun .quiesce = raid5_quiesce,
8766*4882a593Smuzhiyun .takeover = raid4_takeover,
8767*4882a593Smuzhiyun .change_consistency_policy = raid5_change_consistency_policy,
8768*4882a593Smuzhiyun };
8769*4882a593Smuzhiyun
8770*4882a593Smuzhiyun static int __init raid5_init(void)
8771*4882a593Smuzhiyun {
8772*4882a593Smuzhiyun int ret;
8773*4882a593Smuzhiyun
8774*4882a593Smuzhiyun raid5_wq = alloc_workqueue("raid5wq",
8775*4882a593Smuzhiyun WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
8776*4882a593Smuzhiyun if (!raid5_wq)
8777*4882a593Smuzhiyun return -ENOMEM;
8778*4882a593Smuzhiyun
8779*4882a593Smuzhiyun ret = cpuhp_setup_state_multi(CPUHP_MD_RAID5_PREPARE,
8780*4882a593Smuzhiyun "md/raid5:prepare",
8781*4882a593Smuzhiyun raid456_cpu_up_prepare,
8782*4882a593Smuzhiyun raid456_cpu_dead);
8783*4882a593Smuzhiyun if (ret) {
8784*4882a593Smuzhiyun destroy_workqueue(raid5_wq);
8785*4882a593Smuzhiyun return ret;
8786*4882a593Smuzhiyun }
8787*4882a593Smuzhiyun register_md_personality(&raid6_personality);
8788*4882a593Smuzhiyun register_md_personality(&raid5_personality);
8789*4882a593Smuzhiyun register_md_personality(&raid4_personality);
8790*4882a593Smuzhiyun return 0;
8791*4882a593Smuzhiyun }
8792*4882a593Smuzhiyun
8793*4882a593Smuzhiyun static void raid5_exit(void)
8794*4882a593Smuzhiyun {
8795*4882a593Smuzhiyun unregister_md_personality(&raid6_personality);
8796*4882a593Smuzhiyun unregister_md_personality(&raid5_personality);
8797*4882a593Smuzhiyun unregister_md_personality(&raid4_personality);
8798*4882a593Smuzhiyun cpuhp_remove_multi_state(CPUHP_MD_RAID5_PREPARE);
8799*4882a593Smuzhiyun destroy_workqueue(raid5_wq);
8800*4882a593Smuzhiyun }
8801*4882a593Smuzhiyun
8802*4882a593Smuzhiyun module_init(raid5_init);
8803*4882a593Smuzhiyun module_exit(raid5_exit);
8804*4882a593Smuzhiyun MODULE_LICENSE("GPL");
8805*4882a593Smuzhiyun MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
8806*4882a593Smuzhiyun MODULE_ALIAS("md-personality-4"); /* RAID5 */
8807*4882a593Smuzhiyun MODULE_ALIAS("md-raid5");
8808*4882a593Smuzhiyun MODULE_ALIAS("md-raid4");
8809*4882a593Smuzhiyun MODULE_ALIAS("md-level-5");
8810*4882a593Smuzhiyun MODULE_ALIAS("md-level-4");
8811*4882a593Smuzhiyun MODULE_ALIAS("md-personality-8"); /* RAID6 */
8812*4882a593Smuzhiyun MODULE_ALIAS("md-raid6");
8813*4882a593Smuzhiyun MODULE_ALIAS("md-level-6");
8814*4882a593Smuzhiyun
8815*4882a593Smuzhiyun /* This used to be two separate modules, they were: */
8816*4882a593Smuzhiyun MODULE_ALIAS("raid5");
8817*4882a593Smuzhiyun MODULE_ALIAS("raid6");