/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID5_H
#define _RAID5_H

#include <linux/raid/xor.h>
#include <linux/dmaengine.h>

/*
 *
 * Each stripe contains one buffer per device.  Each buffer can be in
 * one of a number of states stored in "flags".  Changes between
 * these states happen *almost* exclusively under the protection of the
 * STRIPE_ACTIVE flag.  Some very specific changes can happen in bi_end_io, and
 * these are not protected by STRIPE_ACTIVE.
 *
 * The flag bits that are used to represent these states are:
 *   R5_UPTODATE and R5_LOCKED
 *
 * State Empty == !UPTODATE, !LOCK
 *        We have no data, and there is no active request
 * State Want == !UPTODATE, LOCK
 *        A read request is being submitted for this block
 * State Dirty == UPTODATE, LOCK
 *        Some new data is in this buffer, and it is being written out
 * State Clean == UPTODATE, !LOCK
 *        We have valid data which is the same as on disc
 *
 * The possible state transitions are:
 *
 *  Empty -> Want   - on read or write to get old data for parity calc
 *  Empty -> Dirty  - on compute_parity to satisfy write/sync request.
 *  Empty -> Clean  - on compute_block when computing a block for failed drive
 *  Want  -> Empty  - on failed read
 *  Want  -> Clean  - on successful completion of read request
 *  Dirty -> Clean  - on successful completion of write request
 *  Dirty -> Clean  - on failed write
 *  Clean -> Dirty  - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
 *
 * The Want->Empty, Want->Clean and Dirty->Clean transitions
 * all happen in b_end_io at interrupt time.
 * Each sets the Uptodate bit before releasing the Lock bit.
 * This leaves one multi-stage transition:
 *    Want->Dirty->Clean
 * This is safe because thinking that a Clean buffer is actually dirty
 * will at worst delay some action, and the stripe will be scheduled
 * for attention after the transition is complete.
 *
 * There is one possibility that is not covered by these states.  That
 * is if one drive has failed and there is a spare being rebuilt.  We
 * can't distinguish between a clean block that has been generated
 * from parity calculations, and a clean block that has been
 * successfully written to the spare (or to parity when resyncing).
 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
 * is set whenever a write is scheduled to the spare, or to the parity
 * disc if there is no spare.  A sync request clears this bit, and
 * when we find it set with no buffers locked, we know the sync is
 * complete.
 *
 * Buffers for the md device that arrive via make_request are attached
 * to the appropriate stripe in one of two lists linked on b_reqnext.
 * One list (bh_read) is for read requests, one (bh_write) for writes.
 * There should never be more than one buffer on the two lists
 * together, but we are not guaranteed of that so we allow for more.
 *
 * If a buffer is on the read list when the associated cache buffer is
 * Uptodate, the data is copied into the read buffer and its b_end_io
 * routine is called.  This may happen in the end_request routine only
 * if the buffer has just successfully been read.  end_request should
 * remove the buffers from the list and then set the Uptodate bit on
 * the buffer.  Other threads may do this only if they first check
 * that the Uptodate bit is set.  Once they have checked that they may
 * take buffers off the read queue.
 *
 * When a buffer on the write list is committed for write it is copied
 * into the cache buffer, which is then marked dirty, and moved onto a
 * third list, the written list (bh_written).  Once both the parity
 * block and the cached buffer are successfully written, any buffer on
 * a written list can be returned with b_end_io.
 *
 * The write list and read list both act as fifos.  The read list,
 * write list and written list are protected by the device_lock.
 * The device_lock is only for list manipulations and will only be
 * held for a very short time.  It can be claimed from interrupts.
 *
 *
 * Stripes in the stripe cache can be on one of two lists (or on
 * neither).  The "inactive_list" contains stripes which are not
 * currently being used for any request.  They can freely be reused
 * for another stripe.  The "handle_list" contains stripes that need
 * to be handled in some way.  Both of these are fifo queues.  Each
 * stripe is also (potentially) linked to a hash bucket in the hash
 * table so that it can be found by sector number.  Stripes that are
 * not hashed must be on the inactive_list, and will normally be at
 * the front.  All stripes start life this way.
 *
 * The inactive_list, handle_list and hash bucket lists are all protected by the
 * device_lock.
 *  - stripes have a reference counter. If count==0, they are on a list.
 *  - If a stripe might need handling, STRIPE_HANDLE is set.
 *  - When refcount reaches zero, then if STRIPE_HANDLE it is put on
 *    handle_list else inactive_list
 *
 * This, combined with the fact that STRIPE_HANDLE is only ever
 * cleared while a stripe has a non-zero count, means that if the
 * refcount is 0 and STRIPE_HANDLE is set, then the stripe is on the
 * handle_list, and if the refcount is 0 and STRIPE_HANDLE is not set,
 * then the stripe is on the inactive_list.
 *
 * The possible transitions are:
 *  activate an unhashed/inactive stripe (get_active_stripe())
 *     lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
 *  activate a hashed, possibly active stripe (get_active_stripe())
 *     lockdev check-hash if(!cnt++)unlink-stripe unlockdev
 *  attach a request to an active stripe (add_stripe_bh())
 *     lockdev attach-buffer unlockdev
 *  handle a stripe (handle_stripe())
 *     setSTRIPE_ACTIVE, clrSTRIPE_HANDLE ...
 *		(lockdev check-buffers unlockdev) ..
 *		change-state ..
 *		record io/ops needed clearSTRIPE_ACTIVE schedule io/ops
 *  release an active stripe (release_stripe())
 *     lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
 *
 * The refcount counts each thread that has activated the stripe,
 * plus raid5d if it is handling it, plus one for each active request
 * on a cached buffer, and plus one if the stripe is undergoing stripe
 * operations.
 *
 * The stripe operations are:
 * -copying data between the stripe cache and user application buffers
 * -computing blocks to save a disk access, or to recover a missing block
 * -updating the parity on a write operation (reconstruct write and
 *  read-modify-write)
 * -checking parity correctness
 * -running i/o to disk
 * These operations are carried out by raid5_run_ops which uses the async_tx
 * api to (optionally) offload operations to dedicated hardware engines.
 * When requesting an operation handle_stripe sets the pending bit for the
 * operation and increments the count.  raid5_run_ops is then run whenever
 * the count is non-zero.
 * There are some critical dependencies between the operations that prevent some
 * from being requested while another is in flight.
 * 1/ Parity check operations destroy the in-cache version of the parity block,
 *    so we prevent parity dependent operations like writes and compute_blocks
 *    from starting while a check is in progress.  Some dma engines can perform
 *    the check without damaging the parity block; in these cases the parity
 *    block is re-marked up to date (assuming the check was successful) and is
 *    not re-read from disk.
 * 2/ When a write operation is requested we immediately lock the affected
 *    blocks, and mark them as not up to date.  This causes new read requests
 *    to be held off, as well as parity checks and compute block operations.
 * 3/ Once a compute block operation has been requested handle_stripe treats
 *    that block as if it is up to date.  raid5_run_ops guarantees that any
 *    operation that is dependent on the compute block result is initiated after
 *    the compute block completes.
 */

/*
 * Operations state - intermediate states that are visible outside of
 *   STRIPE_ACTIVE.
 * In general _idle indicates nothing is running, _run indicates a data
 * processing operation is active, and _result means the data processing result
 * is stable and can be acted upon.  For simple operations like biofill and
 * compute that only have an _idle and _run state they are indicated with
 * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN)
 */
/**
 * enum check_states - handles syncing / repairing a stripe
 * @check_state_idle - check operations are quiesced
 * @check_state_run - xor parity check is running
 * @check_state_run_q - q-parity check is running
 * @check_state_run_pq - pq dual parity check is running
 * @check_state_check_result - set outside lock when check result is valid
 * @check_state_compute_run - check failed and we are repairing
 * @check_state_compute_result - set outside lock when compute result is valid
 */
enum check_states {
	check_state_idle = 0,
	check_state_run, /* xor parity check */
	check_state_run_q, /* q-parity check */
	check_state_run_pq, /* pq dual parity check */
	check_state_check_result,
	check_state_compute_run, /* parity repair */
	check_state_compute_result,
};

/**
 * enum reconstruct_states - handles writing or expanding a stripe
 */
enum reconstruct_states {
	reconstruct_state_idle = 0,
	reconstruct_state_prexor_drain_run,	/* prexor-write */
	reconstruct_state_drain_run,		/* write */
	reconstruct_state_run,			/* expand */
	reconstruct_state_prexor_drain_result,
	reconstruct_state_drain_result,
	reconstruct_state_result,
};

#define DEFAULT_STRIPE_SIZE	4096
struct stripe_head {
	struct hlist_node	hash;
	struct list_head	lru;	      /* inactive_list or handle_list */
	struct llist_node	release_list;
	struct r5conf		*raid_conf;
	short			generation;	/* increments with every
						 * reshape */
	sector_t		sector;		/* sector of this row */
	short			pd_idx;		/* parity disk index */
	short			qd_idx;		/* 'Q' disk index for raid6 */
	short			ddf_layout; /* use DDF ordering to calculate Q */
	short			hash_lock_index;
	unsigned long		state;		/* state flags */
	atomic_t		count;	      /* nr of active thread/requests */
	int			bm_seq;	/* sequence number for bitmap flushes */
	int			disks;		/* disks in stripe */
	int			overwrite_disks; /* total overwrite disks in stripe,
						  * this is only checked when stripe
						  * has STRIPE_BATCH_READY
						  */
	enum check_states	check_state;
	enum reconstruct_states reconstruct_state;
	spinlock_t		stripe_lock;
	int			cpu;
	struct r5worker_group	*group;

	struct stripe_head	*batch_head; /* protected by stripe lock */
	spinlock_t		batch_lock; /* only header's lock is useful */
	struct list_head	batch_list; /* protected by head's batch lock */

	union {
		struct r5l_io_unit	*log_io;
		struct ppl_io_unit	*ppl_io;
	};

	struct list_head	log_list;
	sector_t		log_start; /* first meta block on the journal */
	struct list_head	r5c; /* for r5c_cache->stripe_in_journal */

	struct page		*ppl_page; /* partial parity of this stripe */
	/**
	 * struct stripe_operations
	 * @target - STRIPE_OP_COMPUTE_BLK target
	 * @target2 - 2nd compute target in the raid6 case
	 * @zero_sum_result - P and Q verification flags
	 */
	struct stripe_operations {
		int 		     target, target2;
		enum sum_check_flags zero_sum_result;
	} ops;

#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
	/* These pages will be used by bios in dev[i] */
	struct page	**pages;
	int	nr_pages;	/* page array size */
	int	stripes_per_page;
#endif
	struct r5dev {
		/* rreq and rvec are used for the replacement device when
		 * writing data to both devices.
		 */
		struct bio	req, rreq;
		struct bio_vec	vec, rvec;
		struct page	*page, *orig_page;
		unsigned int    offset;     /* offset of the page */
		struct bio	*toread, *read, *towrite, *written;
		sector_t	sector;			/* sector of this page */
		unsigned long	flags;
		u32		log_checksum;
		unsigned short	write_hint;
	} dev[1]; /* allocated with extra space depending on RAID geometry */
};

/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
 *     for handle_stripe.
 */
struct stripe_head_state {
	/* 'syncing' means that we need to read all devices, either
	 * to check/correct parity, or to reconstruct a missing device.
	 * 'replacing' means we are replacing one or more drives and
	 * the source is valid at this point so we don't need to
	 * read all devices, just the replacement targets.
	 */
	int syncing, expanding, expanded, replacing;
	int locked, uptodate, to_read, to_write, failed, written;
	int to_fill, compute, req_compute, non_overwrite;
	int injournal, just_cached;
	int failed_num[2];
	int p_failed, q_failed;
	int dec_preread_active;
	unsigned long ops_request;

	struct md_rdev *blocked_rdev;
	int handle_bad_blocks;
	int log_failed;
	int waiting_extra_page;
};

/* Flags for struct r5dev.flags */
enum r5dev_flags {
	R5_UPTODATE,	/* page contains current data */
	R5_LOCKED,	/* IO has been submitted on "req" */
	R5_DOUBLE_LOCKED,/* Cannot clear R5_LOCKED until 2 writes complete */
	R5_OVERWRITE,	/* towrite covers whole page */
/* and some that are internal to handle_stripe */
	R5_Insync,	/* rdev && rdev->in_sync at start */
	R5_Wantread,	/* want to schedule a read */
	R5_Wantwrite,
	R5_Overlap,	/* There is a pending overlapping request
			 * on this block */
	R5_ReadNoMerge, /* prevent bio from merging in block-layer */
	R5_ReadError,	/* seen a read error here recently */
	R5_ReWrite,	/* have tried to over-write the read error */

	R5_Expanded,	/* This block now has post-expand data */
	R5_Wantcompute,	/* compute_block in progress, treat as
			 * uptodate
			 */
	R5_Wantfill,	/* dev->toread contains a bio that needs
			 * filling
			 */
	R5_Wantdrain,	/* dev->towrite needs to be drained */
	R5_WantFUA,	/* Write should be FUA */
	R5_SyncIO,	/* The IO is sync */
	R5_WriteError,	/* got a write error - need to record it */
	R5_MadeGood,	/* A bad block has been fixed by writing to it */
	R5_ReadRepl,	/* Will/did read from replacement rather than orig */
	R5_MadeGoodRepl,/* A bad block on the replacement device has been
			 * fixed by writing to it */
	R5_NeedReplace,	/* This device has a replacement which is not
			 * up-to-date at this stripe. */
	R5_WantReplace, /* We need to update the replacement, we have read
			 * data in, and now is a good time to write it out.
			 */
	R5_Discard,	/* Discard the stripe */
	R5_SkipCopy,	/* Don't copy data from bio to stripe cache */
	R5_InJournal,	/* data being written is in the journal device.
			 * if R5_InJournal is set for parity pd_idx, all the
			 * data and parity being written are in the journal
			 * device
			 */
	R5_OrigPageUPTDODATE,	/* with write back cache, we read old data into
				 * dev->orig_page for prexor. When this flag is
				 * set, orig_page contains latest data in the
				 * raid disk.
				 */
};
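
/*
 * Editor's illustrative sketch, not part of the driver: the Empty/Want/
 * Dirty/Clean buffer states described at the top of this file are encoded
 * in just two of the bits above.  A hypothetical helper decoding a copy of
 * dev->flags might look like this (the helper name is invented):
 */
static inline const char *r5dev_state_name_example(unsigned long flags)
{
	if (test_bit(R5_UPTODATE, &flags))
		return test_bit(R5_LOCKED, &flags) ? "Dirty" : "Clean";
	return test_bit(R5_LOCKED, &flags) ? "Want" : "Empty";
}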

/*
 * Stripe state
 */
enum {
	STRIPE_ACTIVE,
	STRIPE_HANDLE,
	STRIPE_SYNC_REQUESTED,
	STRIPE_SYNCING,
	STRIPE_INSYNC,
	STRIPE_REPLACED,
	STRIPE_PREREAD_ACTIVE,
	STRIPE_DELAYED,
	STRIPE_DEGRADED,
	STRIPE_BIT_DELAY,
	STRIPE_EXPANDING,
	STRIPE_EXPAND_SOURCE,
	STRIPE_EXPAND_READY,
	STRIPE_IO_STARTED,	/* do not count towards 'bypass_count' */
	STRIPE_FULL_WRITE,	/* all blocks are set to be overwritten */
	STRIPE_BIOFILL_RUN,
	STRIPE_COMPUTE_RUN,
	STRIPE_ON_UNPLUG_LIST,
	STRIPE_DISCARD,
	STRIPE_ON_RELEASE_LIST,
	STRIPE_BATCH_READY,
	STRIPE_BATCH_ERR,
	STRIPE_BITMAP_PENDING,	/* Being added to bitmap, don't add
				 * to batch yet.
				 */
	STRIPE_LOG_TRAPPED,	/* trapped into log (see raid5-cache.c)
				 * this bit is used in two scenarios:
				 *
				 * 1. write-out phase
				 *  set in first entry of r5l_write_stripe
				 *  clear in second entry of r5l_write_stripe
				 *  used to bypass logic in handle_stripe
				 *
				 * 2. caching phase
				 *  set in r5c_try_caching_write()
				 *  clear when journal write is done
				 *  used to initiate r5c_cache_data()
				 *  also used to bypass logic in handle_stripe
				 */
	STRIPE_R5C_CACHING,	/* the stripe is in caching phase;
				 * see more detail in raid5-cache.c
				 */
	STRIPE_R5C_PARTIAL_STRIPE,	/* in r5c cache (to-be/being handled or
					 * in conf->r5c_partial_stripe_list)
					 */
	STRIPE_R5C_FULL_STRIPE,	/* in r5c cache (to-be/being handled or
				 * in conf->r5c_full_stripe_list)
				 */
	STRIPE_R5C_PREFLUSH,	/* need to flush journal device */
};

#define STRIPE_EXPAND_SYNC_FLAGS \
	((1 << STRIPE_EXPAND_SOURCE) |\
	(1 << STRIPE_EXPAND_READY) |\
	(1 << STRIPE_EXPANDING) |\
	(1 << STRIPE_SYNC_REQUESTED))
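
/*
 * Editor's illustrative sketch, not part of the driver: the mask above is
 * tested against sh->state as a group, e.g. when deciding whether a stripe
 * is in an expand/sync phase that excludes batching (the helper name is
 * invented):
 */
static inline bool r5_expand_sync_pending_example(struct stripe_head *sh)
{
	return sh->state & STRIPE_EXPAND_SYNC_FLAGS;
}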
/*
 * Operation request flags
 */
enum {
	STRIPE_OP_BIOFILL,
	STRIPE_OP_COMPUTE_BLK,
	STRIPE_OP_PREXOR,
	STRIPE_OP_BIODRAIN,
	STRIPE_OP_RECONSTRUCT,
	STRIPE_OP_CHECK,
	STRIPE_OP_PARTIAL_PARITY,
};
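
/*
 * Editor's illustrative sketch, not part of the driver: as described in the
 * comment at the top of this file, handle_stripe() requests work by setting
 * one of the bits above in stripe_head_state.ops_request for raid5_run_ops()
 * to act on (the helper name is invented):
 */
static inline void r5_request_op_example(struct stripe_head_state *s, int op)
{
	set_bit(op, &s->ops_request);	/* e.g. op == STRIPE_OP_BIODRAIN */
}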

/*
 * RAID parity calculation preferences
 */
enum {
	PARITY_DISABLE_RMW = 0,
	PARITY_ENABLE_RMW,
	PARITY_PREFER_RMW,
};

/*
 * Pages requested from set_syndrome_sources()
 */
enum {
	SYNDROME_SRC_ALL,
	SYNDROME_SRC_WANT_DRAIN,
	SYNDROME_SRC_WRITTEN,
};
/*
 * Plugging:
 *
 * To improve write throughput, we need to delay the handling of some
 * stripes until there has been a chance for several write requests
 * for the one stripe to all be collected.
 * In particular, any write request that would require pre-reading
 * is put on a "delayed" queue until there are no stripes currently
 * in a pre-read phase.  Further, if the "delayed" queue is empty when
 * a stripe is put on it then we "plug" the queue and do not process it
 * until an unplug call is made (i.e. the unplug_io_fn() is called).
 *
 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
 * it to the count of prereading stripes.
 * When write is initiated, or the stripe refcnt == 0 (just in case), we
 * clear the PREREAD_ACTIVE flag and decrement the count.
 * Whenever the 'handle' queue is empty and the device is not plugged, we
 * move any stripes from delayed to handle, clear the DELAYED flag and set
 * PREREAD_ACTIVE.
 * In handle_stripe, if we find pre-reading is necessary, we do it if
 * PREREAD_ACTIVE is set, else we set DELAYED which will send the stripe
 * to the delayed queue.
 * HANDLE gets cleared if handle_stripe leaves nothing locked.
 */

/* Note: disk_info.rdev can be set to NULL asynchronously by raid5_remove_disk.
 * There are three safe ways to access disk_info.rdev.
 * 1/ when holding mddev->reconfig_mutex
 * 2/ when resync/recovery/reshape is known to be happening - i.e. in code that
 *    is called as part of performing resync/recovery/reshape.
 * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
 *    and if it is non-NULL, increment rdev->nr_pending before dropping the RCU
 *    lock.
 * When .rdev is set to NULL, the nr_pending count is checked again and if
 * it has been incremented, the pointer is put back in .rdev.
 */

struct disk_info {
	struct md_rdev	*rdev, *replacement;
	struct page	*extra_page; /* extra page to use in prexor */
};
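
/*
 * Editor's illustrative sketch, not part of the driver, of access rule 3/
 * above: take rcu_read_lock(), rcu_dereference() the rdev pointer and pin
 * it via nr_pending before dropping the lock.  The helper name is invented;
 * the caller would later drop the reference with rdev_dec_pending().
 */
static inline struct md_rdev *r5_pin_rdev_example(struct disk_info *di)
{
	struct md_rdev *rdev;

	rcu_read_lock();
	rdev = rcu_dereference(di->rdev);
	if (rdev)
		atomic_inc(&rdev->nr_pending);
	rcu_read_unlock();
	return rdev;
}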

/*
 * Stripe cache
 */

#define NR_STRIPES		256

#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#endif

#define	IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)
#define MAX_STRIPE_BATCH	8

/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
 * This is because we sometimes take all the spinlocks
 * and creating that much locking depth can cause
 * problems.
 */
#define NR_STRIPE_HASH_LOCKS 8
#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)
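
/*
 * Editor's illustrative sketch, not part of this header: raid5.c hashes a
 * stripe into stripe_hashtbl by sector, and the low bits of that hash also
 * select one of the NR_STRIPE_HASH_LOCKS spinlocks, roughly:
 */
#define r5_hash_locks_hash_example(conf, sect) \
	((int)(((sect) >> RAID5_STRIPE_SHIFT(conf)) & STRIPE_HASH_LOCKS_MASK))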

struct r5worker {
	struct work_struct work;
	struct r5worker_group *group;
	struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	bool working;
};

struct r5worker_group {
	struct list_head handle_list;
	struct list_head loprio_list;
	struct r5conf *conf;
	struct r5worker *workers;
	int stripes_cnt;
};

/*
 * r5c journal modes of the array: write-back or write-through.
 * write-through mode has identical behavior to the existing log-only
 * implementation.
 */
enum r5c_journal_mode {
	R5C_JOURNAL_MODE_WRITE_THROUGH = 0,
	R5C_JOURNAL_MODE_WRITE_BACK = 1,
};

enum r5_cache_state {
	R5_INACTIVE_BLOCKED,	/* release of inactive stripes blocked,
				 * waiting for 25% to be free
				 */
	R5_ALLOC_MORE,		/* It might help to allocate another
				 * stripe.
				 */
	R5_DID_ALLOC,		/* A stripe was allocated, don't allocate
				 * more until at least one has been
				 * released.  This avoids flooding
				 * the cache.
				 */
	R5C_LOG_TIGHT,		/* log device space tight, need to
				 * prioritize stripes at last_checkpoint
				 */
	R5C_LOG_CRITICAL,	/* log device is running out of space,
				 * only process stripes that are already
				 * occupying the log
				 */
	R5C_EXTRA_PAGE_IN_USE,	/* a stripe is using disk_info.extra_page
				 * for prexor
				 */
};

#define PENDING_IO_MAX 512
#define PENDING_IO_ONE_FLUSH 128
struct r5pending_data {
	struct list_head sibling;
	sector_t sector; /* stripe sector */
	struct bio_list bios;
};

struct r5conf {
	struct hlist_head	*stripe_hashtbl;
	/* only protect corresponding hash list and inactive_list */
	spinlock_t		hash_locks[NR_STRIPE_HASH_LOCKS];
	struct mddev		*mddev;
	int			chunk_sectors;
	int			level, algorithm, rmw_level;
	int			max_degraded;
	int			raid_disks;
	int			max_nr_stripes;
	int			min_nr_stripes;
#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
	unsigned long	stripe_size;
	unsigned int	stripe_shift;
	unsigned long	stripe_sectors;
#endif

	/* reshape_progress is the leading edge of a 'reshape'.
	 * It has value MaxSector when no reshape is happening.
	 * If delta_disks < 0, it is the last sector we started work on,
	 * else it is the next sector to work on.
	 */
	sector_t		reshape_progress;
	/* reshape_safe is the trailing edge of a reshape.  We know that
	 * before (or after) this address, all reshape has completed.
	 */
	sector_t		reshape_safe;
	int			previous_raid_disks;
	int			prev_chunk_sectors;
	int			prev_algo;
	short			generation; /* increments with every reshape */
	seqcount_spinlock_t	gen_lock;	/* lock against generation changes */
	unsigned long		reshape_checkpoint; /* Time we last updated
						     * metadata */
	long long		min_offset_diff; /* minimum difference between
						  * data_offset and
						  * new_data_offset across all
						  * devices.  May be negative,
						  * but is closest to zero.
						  */

	struct list_head	handle_list; /* stripes needing handling */
	struct list_head	loprio_list; /* low priority stripes */
	struct list_head	hold_list; /* preread ready stripes */
	struct list_head	delayed_list; /* stripes that have plugged requests */
	struct list_head	bitmap_list; /* stripes delayed awaiting bitmap update */
	struct bio		*retry_read_aligned; /* currently retrying aligned bios */
	unsigned int		retry_read_offset; /* sector offset into retry_read_aligned */
	struct bio		*retry_read_aligned_list; /* aligned bios retry list */
	atomic_t		preread_active_stripes; /* stripes with scheduled io */
	atomic_t		active_aligned_reads;
	atomic_t		pending_full_writes; /* full write backlog */
	int			bypass_count; /* bypassed prereads */
	int			bypass_threshold; /* preread nice */
	int			skip_copy; /* Don't copy data from bio to stripe cache */
	struct list_head	*last_hold; /* detect hold_list promotions */

	atomic_t		reshape_stripes; /* stripes with pending writes for reshape */
	/* unfortunately we need two cache names as we temporarily have
	 * two caches.
	 */
	int			active_name;
	char			cache_name[2][32];
	struct kmem_cache	*slab_cache; /* for allocating stripes */
	struct mutex		cache_size_mutex; /* Protect changes to cache size */

	int			seq_flush, seq_write;
	int			quiesce;

	int			fullsync;  /* set to 1 if a full sync is needed,
					    * (fresh device added).
					    * Cleared when a sync completes.
					    */
	int			recovery_disabled;
	/* per cpu variables */
	struct raid5_percpu {
		struct page	*spare_page; /* Used when checking P/Q in raid6 */
		void		*scribble;  /* space for constructing buffer
					     * lists and performing address
					     * conversions
					     */
		int scribble_obj_size;
	} __percpu *percpu;
	int scribble_disks;
	int scribble_sectors;
	struct hlist_node node;

	/*
	 * Free stripes pool
	 */
	atomic_t		active_stripes;
	struct list_head	inactive_list[NR_STRIPE_HASH_LOCKS];

	atomic_t		r5c_cached_full_stripes;
	struct list_head	r5c_full_stripe_list;
	atomic_t		r5c_cached_partial_stripes;
	struct list_head	r5c_partial_stripe_list;
	atomic_t		r5c_flushing_full_stripes;
	atomic_t		r5c_flushing_partial_stripes;

	atomic_t		empty_inactive_list_nr;
	struct llist_head	released_stripes;
	wait_queue_head_t	wait_for_quiescent;
	wait_queue_head_t	wait_for_stripe;
	wait_queue_head_t	wait_for_overlap;
	unsigned long		cache_state;
	struct shrinker		shrinker;
	int			pool_size; /* number of disks in stripeheads in pool */
	spinlock_t		device_lock;
	struct disk_info	*disks;
	struct bio_set		bio_split;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;
	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	struct r5worker_group	*worker_groups;
	int			group_cnt;
	int			worker_cnt_per_group;
	struct r5l_log		*log;
	void			*log_private;

	spinlock_t		pending_bios_lock;
	bool			batch_bio_dispatch;
	struct r5pending_data	*pending_data;
	struct list_head	free_list;
	struct list_head	pending_list;
	int			pending_data_cnt;
	struct r5pending_data	*next_pending_data;
};
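
/*
 * Editor's illustrative sketch, not part of the driver, of the release rule
 * described near the top of this file: when the last reference drops,
 * STRIPE_HANDLE decides which list the stripe joins.  Simplified; the real
 * raid5_release_stripe() also deals with batches, the per-bucket hash locks
 * and the lockless released_stripes list.  The helper name is invented.
 */
static inline void r5_release_rule_example(struct r5conf *conf,
					   struct stripe_head *sh)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	if (atomic_dec_and_test(&sh->count)) {
		if (test_bit(STRIPE_HANDLE, &sh->state))
			list_add_tail(&sh->lru, &conf->handle_list);
		else
			list_add_tail(&sh->lru,
				      &conf->inactive_list[sh->hash_lock_index]);
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
}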

#if PAGE_SIZE == DEFAULT_STRIPE_SIZE
#define RAID5_STRIPE_SIZE(conf)		STRIPE_SIZE
#define RAID5_STRIPE_SHIFT(conf)	STRIPE_SHIFT
#define RAID5_STRIPE_SECTORS(conf)	STRIPE_SECTORS
#else
#define RAID5_STRIPE_SIZE(conf)		((conf)->stripe_size)
#define RAID5_STRIPE_SHIFT(conf)	((conf)->stripe_shift)
#define RAID5_STRIPE_SECTORS(conf)	((conf)->stripe_sectors)
#endif

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This function is used to determine the 'next' bio in the list, given the
 * sector of the current stripe+device
 */
static inline struct bio *r5_next_bio(struct r5conf *conf, struct bio *bio, sector_t sector)
{
	if (bio_end_sector(bio) < sector + RAID5_STRIPE_SECTORS(conf))
		return bio->bi_next;
	else
		return NULL;
}
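
/*
 * Editor's illustrative usage sketch, not part of the driver: walking every
 * bio attached to one stripe+device, e.g. dev->towrite, with the helper
 * above.  The function name is invented; real callers also hold
 * sh->stripe_lock while the list can change.
 */
static inline int r5_count_dev_bios_example(struct r5conf *conf,
					    struct stripe_head *sh, int i)
{
	struct bio *bi;
	int cnt = 0;

	for (bi = sh->dev[i].towrite; bi;
	     bi = r5_next_bio(conf, bi, sh->dev[i].sector))
		cnt++;
	return cnt;
}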

/*
 * Our supported algorithms
 */
#define ALGORITHM_LEFT_ASYMMETRIC	0 /* Rotating Parity N with Data Restart */
#define ALGORITHM_RIGHT_ASYMMETRIC	1 /* Rotating Parity 0 with Data Restart */
#define ALGORITHM_LEFT_SYMMETRIC	2 /* Rotating Parity N with Data Continuation */
#define ALGORITHM_RIGHT_SYMMETRIC	3 /* Rotating Parity 0 with Data Continuation */

/* Define non-rotating (raid4) algorithms.  These allow
 * conversion of raid4 to raid5.
 */
#define ALGORITHM_PARITY_0		4 /* P or P,Q are initial devices */
#define ALGORITHM_PARITY_N		5 /* P or P,Q are final devices. */

/* DDF RAID6 layouts differ from md/raid6 layouts in two ways.
 * Firstly, the exact positioning of the parity block is slightly
 * different between the 'LEFT_*' modes of md and the "_N_*" modes
 * of DDF.
 * Secondly, the order of data blocks over which the Q syndrome is computed
 * is different.
 * Consequently we have different layouts for DDF/raid6 than md/raid6.
 * These layouts are from the DDFv1.2 spec.
 * Interestingly DDFv1.2-Errata-A does not specify N_CONTINUE but
 * leaves RLQ=3 as 'Vendor Specific'
 */

#define ALGORITHM_ROTATING_ZERO_RESTART	8 /* DDF PRL=6 RLQ=1 */
#define ALGORITHM_ROTATING_N_RESTART	9 /* DDF PRL=6 RLQ=2 */
#define ALGORITHM_ROTATING_N_CONTINUE	10 /* DDF PRL=6 RLQ=3 */

/* For every RAID5 algorithm we define a RAID6 algorithm
 * with exactly the same layout for data and parity, and
 * with the Q block always on the last device (N-1).
 * This allows trivial conversion from RAID5 to RAID6
 */
#define ALGORITHM_LEFT_ASYMMETRIC_6	16
#define ALGORITHM_RIGHT_ASYMMETRIC_6	17
#define ALGORITHM_LEFT_SYMMETRIC_6	18
#define ALGORITHM_RIGHT_SYMMETRIC_6	19
#define ALGORITHM_PARITY_0_6		20
#define ALGORITHM_PARITY_N_6		ALGORITHM_PARITY_N

static inline int algorithm_valid_raid5(int layout)
{
	return (layout >= 0) &&
		(layout <= 5);
}
static inline int algorithm_valid_raid6(int layout)
{
	return (layout >= 0 && layout <= 5)
		||
		(layout >= 8 && layout <= 10)
		||
		(layout >= 16 && layout <= 20);
}

static inline int algorithm_is_DDF(int layout)
{
	return layout >= 8 && layout <= 10;
}

#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
/*
 * Return offset of the corresponding page for r5dev.
 */
static inline int raid5_get_page_offset(struct stripe_head *sh, int disk_idx)
{
	return (disk_idx % sh->stripes_per_page) * RAID5_STRIPE_SIZE(sh->raid_conf);
}

/*
 * Return corresponding page address for r5dev.
 */
static inline struct page *
raid5_get_dev_page(struct stripe_head *sh, int disk_idx)
{
	return sh->pages[disk_idx / sh->stripes_per_page];
}
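
/*
 * Editor's illustrative sketch, not part of the driver: combined, the two
 * helpers above locate the stripe-sized region backing sh->dev[i] inside a
 * shared page.  The helper name is invented and highmem mapping is ignored.
 */
static inline void *raid5_dev_data_example(struct stripe_head *sh, int i)
{
	return page_address(raid5_get_dev_page(sh, i)) +
	       raid5_get_page_offset(sh, i);
}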
#endif

extern void md_raid5_kick_device(struct r5conf *conf);
extern int raid5_set_cache_size(struct mddev *mddev, int size);
extern sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
extern void raid5_release_stripe(struct stripe_head *sh);
extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
				     int previous, int *dd_idx,
				     struct stripe_head *sh);
extern struct stripe_head *
raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
			int previous, int noblock, int noquiesce);
extern int raid5_calc_degraded(struct r5conf *conf);
extern int r5c_journal_mode_set(struct mddev *mddev, int journal_mode);
#endif