xref: /OK3568_Linux_fs/kernel/fs/btrfs/raid56.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024

#define BTRFS_STRIPE_HASH_TABLE_BITS				11

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
	struct list_head hash_list;
	spinlock_t lock;
};

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash_table {
	struct list_head stripe_cache;
	spinlock_t cache_lock;
	int cache_size;
	struct btrfs_stripe_hash table[];
};

enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};

struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;

	/* while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/* also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged.  The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

	int real_stripes;

	int stripe_npages;
	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

	int scrubp;
	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list.  This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

	int generic_bio_cnt;

	refcount_t refs;

	atomic_t stripes_pending;

	atomic_t error;
	/*
	 * These are arrays of pointers.  We allocate the rbio big
	 * enough to hold them all and set up their locations when
	 * the rbio is allocated.
	 */

	/* pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list.  Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;

	/*
	 * bitmap to record which horizontal stripe has data
	 */
	unsigned long *dbitmap;

	/* allocated with real_stripes-many pointers for finish_*() calls */
	void **finish_pointers;

	/* allocated with stripe_npages-many bits for finish_*() calls */
	unsigned long *finish_pbitmap;
};

static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void scrub_parity_work(struct btrfs_work *work);

static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
{
	btrfs_init_work(&rbio->work, work_func, NULL, NULL);
	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	if (x)
		kvfree(x);
	return 0;
}
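
/*
 * Note on the cmpxchg() above (explanatory, not from the original
 * source): two concurrent callers can both allocate a table, but
 * cmpxchg() publishes exactly one of them into info->stripe_hash_table.
 * The loser sees the already-published (non-NULL) pointer returned and
 * simply frees its own copy, so no extra lock is needed on this path.
 */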

/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		copy_page(d, s);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bbio->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
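
/*
 * Worked example for the shift above (values are illustrative): a full
 * stripe starting at logical byte 0x1_0000_0000 hashes on
 * num >> 16 == 0x10000.  Feeding the raw byte address to hash_64()
 * would leave the low bits permanently zero for every stripe and
 * collapse most of them into a handful of the
 * 1 << BTRFS_STRIPE_HASH_TABLE_BITS == 2048 buckets.
 */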

/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s))
			continue;

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}

/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->bio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	/* Also inherit the bitmaps from @victim. */
	bitmap_or(dest->dbitmap, victim->dbitmap, dest->dbitmap,
		  dest->stripe_npages);
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}

/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}

/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}

/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				  struct btrfs_raid_bio,
				  stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}
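
/*
 * Illustrative walk-through of the loop above (not from the original
 * source): with src_cnt == 5 and MAX_XOR_BLOCKS == 4, the first
 * xor_blocks() call folds pages[0..3] into dest (pages[5]), and the
 * second call folds the remaining pages[4].  Because XOR is
 * associative, the two passes produce the same parity as a single pass
 * over all five sources would.
 */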

/*
 * Returns true if the bio list inside this rbio covers an entire stripe (no
 * rmw required).
 */
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;
	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}
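
/*
 * Example (illustrative values): a 4-disk RAID5 array has nr_data == 3,
 * so with stripe_len == 64K a full stripe holds 3 * 64K == 192K of
 * data.  Only when the queued bios cover exactly those 192K can parity
 * be computed from the bio pages alone and the read half of the
 * read/modify/write cycle be skipped.
 */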

/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] !=
	    cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * A parity scrub has to read the full stripe from the drive,
	 * then check and repair the parity and write out the new
	 * results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
		int fa = last->faila;
		int fb = last->failb;
		int cur_fa = cur->faila;
		int cur_fb = cur->failb;

		if (last->faila >= last->failb) {
			fa = last->failb;
			fb = last->faila;
		}

		if (cur->faila >= cur->failb) {
			cur_fa = cur->failb;
			cur_fb = cur->faila;
		}

		if (fa != cur_fa || fb != cur_fb)
			return 0;
	}
	return 1;
}

static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
				  int index)
{
	return stripe * rbio->stripe_npages + index;
}
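
/*
 * Example of the index math above (illustrative values): with
 * stripe_len == 64K and 4K pages, stripe_npages == 16, so page 3 of
 * stripe 2 lives at slot 2 * 16 + 3 == 35 of the flat stripe_pages
 * array, which holds all stripes (data, then P, then Q) back to back.
 */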

/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
				     int index)
{
	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
}

/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	return rbio_stripe_page(rbio, rbio->nr_data, index);
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
}

/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned.
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash *h;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	h = rbio->fs_info->stripe_hash_table->table + rbio_bucket(rbio);

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0])
			continue;

		spin_lock(&cur->bio_list_lock);

		/* Can we steal this cached rbio's pages? */
		if (bio_list_empty(&cur->bio_list) &&
		    list_empty(&cur->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
			list_del_init(&cur->hash_list);
			refcount_dec(&cur->refs);

			steal_rbio(cur, rbio);
			cache_drop = cur;
			spin_unlock(&cur->bio_list_lock);

			goto lockit;
		}

		/* Can we merge into the lock owner? */
		if (rbio_can_merge(cur, rbio)) {
			merge_rbio(cur, rbio);
			spin_unlock(&cur->bio_list_lock);
			freeit = rbio;
			ret = 1;
			goto out;
		}

		/*
		 * We couldn't merge with the running rbio, see if we can merge
		 * with the pending ones.  We don't have to check for rmw_locked
		 * because there is no way they are inside finish_rmw right now
		 */
		list_for_each_entry(pending, &cur->plug_list, plug_list) {
			if (rbio_can_merge(pending, rbio)) {
				merge_rbio(pending, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}
		}

		/*
		 * No merging, put us on the tail of the plug list, our rbio
		 * will be started when the currently running rbio unlocks
		 */
		list_add_tail(&rbio->plug_list, &cur->plug_list);
		spin_unlock(&cur->bio_list_lock);
		ret = 1;
		goto out;
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}
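
/*
 * Illustrative caller pattern (a sketch; the real callers appear later
 * in this file):
 *
 *	if (lock_stripe_add(rbio) == 0)
 *		start_async_work(rbio, rmw_work);  // we own the stripe lock
 *
 * On a return of 1, the caller does nothing further: the rbio was
 * merged into another rbio or queued on the plug list and may already
 * have been freed.
 */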

/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				start_async_work(next, read_rebuild_work);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				start_async_work(next, read_rebuild_work);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				start_async_work(next, rmw_work);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				start_async_work(next, scrub_parity_work);
			}

			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bbio(rbio->bbio);
	kfree(rbio);
}

static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
	/*
	 * Clear the data bitmap, as the rbio may be cached for later usage.
	 * Do this before unlock_stripe() so no new bio can be added for
	 * this rbio.
	 */
	bitmap_clear(rbio->dbitmap, 0, rbio->stripe_npages);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
	 * more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	__free_raid_bio(rbio);

	rbio_endio_bio_list(cur, err);
	if (extra)
		rbio_endio_bio_list(extra, err);
}

/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = BLK_STS_OK;

	/* OK, we have written all the stripes we need to. */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bbio->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = BLK_STS_IOERR;

	rbio_orig_end_io(rbio, err);
}

/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else.  This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}

/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}
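
/*
 * Example (illustrative values): stripe_len == 64K with PAGE_SIZE == 4K
 * gives 16 pages per stripe, so a 4-disk RAID6 full stripe (2 data
 * stripes plus P and Q) needs 16 * 4 == 64 page slots.
 */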

/*
 * allocation and initial setup for the btrfs_raid_bio.  Note that
 * this does not allocate any pages for rbio->stripe_pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_bio *bbio,
					 u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

	rbio = kzalloc(sizeof(*rbio) +
		       sizeof(*rbio->stripe_pages) * num_pages +
		       sizeof(*rbio->bio_pages) * num_pages +
		       sizeof(*rbio->finish_pointers) * real_stripes +
		       sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
		       sizeof(*rbio->finish_pbitmap) *
				BITS_TO_LONGS(stripe_npages),
		       GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages, bio_pages, etc arrays point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
#define CONSUME_ALLOC(ptr, count)	do {				\
		ptr = p;						\
		p = (unsigned char *)p + sizeof(*(ptr)) * (count);	\
	} while (0)
	CONSUME_ALLOC(rbio->stripe_pages, num_pages);
	CONSUME_ALLOC(rbio->bio_pages, num_pages);
	CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
	CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
	CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
#undef  CONSUME_ALLOC

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}
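
/*
 * Sketch of the resulting single allocation (the layout follows from
 * the CONSUME_ALLOC() order above):
 *
 *	[struct btrfs_raid_bio]
 *	[stripe_pages:    num_pages    x struct page *]
 *	[bio_pages:       num_pages    x struct page *]
 *	[finish_pointers: real_stripes x void *]
 *	[dbitmap:         BITS_TO_LONGS(stripe_npages) x unsigned long]
 *	[finish_pbitmap:  BITS_TO_LONGS(stripe_npages) x unsigned long]
 *
 * One kzalloc() covers the struct and every trailing array, so the
 * single kfree() in __free_raid_bio() releases them all.
 */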

/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		u64 last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && !last->bi_status &&
		    last->bi_disk == stripe->dev->bdev->bd_disk &&
		    last->bi_partno == stripe->dev->bdev->bd_partno) {
			ret = bio_add_page(last, page, PAGE_SIZE, 0);
			if (ret == PAGE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
	btrfs_io_bio(bio)->device = stripe->dev;
	bio->bi_iter.bi_size = 0;
	bio_set_dev(bio, stripe->dev->bdev);
	bio->bi_iter.bi_sector = disk_start >> 9;

	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}
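
/*
 * A note on the sector arithmetic above: bi_sector counts 512-byte
 * sectors, so "<< 9" and ">> 9" convert between sectors and bytes.
 * Example (illustrative values): a stripe at physical byte 0x100000
 * with page_index == 2 and 4K pages gives disk_start == 0x102000,
 * i.e. bi_sector == 0x810.
 */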
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun /*
1138*4882a593Smuzhiyun  * while we're doing the read/modify/write cycle, we could
1139*4882a593Smuzhiyun  * have errors in reading pages off the disk.  This checks
1140*4882a593Smuzhiyun  * for errors and if we're not able to read the page it'll
1141*4882a593Smuzhiyun  * trigger parity reconstruction.  The rmw will be finished
1142*4882a593Smuzhiyun  * after we've reconstructed the failed stripes
1143*4882a593Smuzhiyun  */
validate_rbio_for_rmw(struct btrfs_raid_bio * rbio)1144*4882a593Smuzhiyun static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1145*4882a593Smuzhiyun {
1146*4882a593Smuzhiyun 	if (rbio->faila >= 0 || rbio->failb >= 0) {
1147*4882a593Smuzhiyun 		BUG_ON(rbio->faila == rbio->real_stripes - 1);
1148*4882a593Smuzhiyun 		__raid56_parity_recover(rbio);
1149*4882a593Smuzhiyun 	} else {
1150*4882a593Smuzhiyun 		finish_rmw(rbio);
1151*4882a593Smuzhiyun 	}
1152*4882a593Smuzhiyun }
1153*4882a593Smuzhiyun 
1154*4882a593Smuzhiyun /*
1155*4882a593Smuzhiyun  * helper function to walk our bio list and populate the bio_pages array with
1156*4882a593Smuzhiyun  * the result.  This seems expensive, but it is faster than constantly
1157*4882a593Smuzhiyun  * searching through the bio list as we setup the IO in finish_rmw or stripe
1158*4882a593Smuzhiyun  * reconstruction.
1159*4882a593Smuzhiyun  *
1160*4882a593Smuzhiyun  * This must be called before you trust the answers from page_in_rbio
1161*4882a593Smuzhiyun  */
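/*
 * Example (illustrative, assuming 4K pages): a bio whose logical start is
 * 8K past rbio->bbio->raid_map[0] gets page_index 2, so its pages land in
 * bio_pages[2], bio_pages[3], and so on.
 */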
1162*4882a593Smuzhiyun static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1163*4882a593Smuzhiyun {
1164*4882a593Smuzhiyun 	struct bio *bio;
1165*4882a593Smuzhiyun 	u64 start;
1166*4882a593Smuzhiyun 	unsigned long stripe_offset;
1167*4882a593Smuzhiyun 	unsigned long page_index;
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 	spin_lock_irq(&rbio->bio_list_lock);
1170*4882a593Smuzhiyun 	bio_list_for_each(bio, &rbio->bio_list) {
1171*4882a593Smuzhiyun 		struct bio_vec bvec;
1172*4882a593Smuzhiyun 		struct bvec_iter iter;
1173*4882a593Smuzhiyun 		int i = 0;
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun 		start = (u64)bio->bi_iter.bi_sector << 9;
1176*4882a593Smuzhiyun 		stripe_offset = start - rbio->bbio->raid_map[0];
1177*4882a593Smuzhiyun 		page_index = stripe_offset >> PAGE_SHIFT;
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun 		if (bio_flagged(bio, BIO_CLONED))
1180*4882a593Smuzhiyun 			bio->bi_iter = btrfs_io_bio(bio)->iter;
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun 		bio_for_each_segment(bvec, bio, iter) {
1183*4882a593Smuzhiyun 			rbio->bio_pages[page_index + i] = bvec.bv_page;
1184*4882a593Smuzhiyun 			i++;
1185*4882a593Smuzhiyun 		}
1186*4882a593Smuzhiyun 	}
1187*4882a593Smuzhiyun 	spin_unlock_irq(&rbio->bio_list_lock);
1188*4882a593Smuzhiyun }
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun /*
1191*4882a593Smuzhiyun  * this is called from one of two situations.  We either
1192*4882a593Smuzhiyun  * have a full stripe from the higher layers, or we've read all
1193*4882a593Smuzhiyun  * the missing bits off disk.
1194*4882a593Smuzhiyun  *
1195*4882a593Smuzhiyun  * This will calculate the parity and then send down any
1196*4882a593Smuzhiyun  * changed blocks.
1197*4882a593Smuzhiyun  */
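/*
 * Parity math sketch (for orientation only): for raid5 the parity page is
 * P = D0 ^ D1 ^ ... ^ Dn-1, computed below via copy_page() + run_xor().
 * For raid6 the additional q page is the Reed-Solomon syndrome produced by
 * raid6_call.gen_syndrome() over the same data pages.
 */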
1198*4882a593Smuzhiyun static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1199*4882a593Smuzhiyun {
1200*4882a593Smuzhiyun 	struct btrfs_bio *bbio = rbio->bbio;
1201*4882a593Smuzhiyun 	void **pointers = rbio->finish_pointers;
1202*4882a593Smuzhiyun 	int nr_data = rbio->nr_data;
1203*4882a593Smuzhiyun 	int stripe;
1204*4882a593Smuzhiyun 	int pagenr;
1205*4882a593Smuzhiyun 	bool has_qstripe;
1206*4882a593Smuzhiyun 	struct bio_list bio_list;
1207*4882a593Smuzhiyun 	struct bio *bio;
1208*4882a593Smuzhiyun 	int ret;
1209*4882a593Smuzhiyun 
1210*4882a593Smuzhiyun 	bio_list_init(&bio_list);
1211*4882a593Smuzhiyun 
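	/* real_stripes - nr_data is the parity count: 1 for raid5, 2 for raid6 */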
1212*4882a593Smuzhiyun 	if (rbio->real_stripes - rbio->nr_data == 1)
1213*4882a593Smuzhiyun 		has_qstripe = false;
1214*4882a593Smuzhiyun 	else if (rbio->real_stripes - rbio->nr_data == 2)
1215*4882a593Smuzhiyun 		has_qstripe = true;
1216*4882a593Smuzhiyun 	else
1217*4882a593Smuzhiyun 		BUG();
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun 	/* We should have at least one data sector. */
1220*4882a593Smuzhiyun 	ASSERT(bitmap_weight(rbio->dbitmap, rbio->stripe_npages));
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 	/* at this point we either have a full stripe,
1223*4882a593Smuzhiyun 	 * or we've read the full stripe from the drive.
1224*4882a593Smuzhiyun 	 * Recalculate the parity and write the new results.
1225*4882a593Smuzhiyun 	 *
1226*4882a593Smuzhiyun 	 * We're not allowed to add any new bios to the
1227*4882a593Smuzhiyun 	 * bio list here, anyone else that wants to
1228*4882a593Smuzhiyun 	 * change this stripe needs to do their own rmw.
1229*4882a593Smuzhiyun 	 */
1230*4882a593Smuzhiyun 	spin_lock_irq(&rbio->bio_list_lock);
1231*4882a593Smuzhiyun 	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1232*4882a593Smuzhiyun 	spin_unlock_irq(&rbio->bio_list_lock);
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun 	atomic_set(&rbio->error, 0);
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	/*
1237*4882a593Smuzhiyun 	 * now that we've set rmw_locked, run through the
1238*4882a593Smuzhiyun 	 * bio list one last time and map the page pointers
1239*4882a593Smuzhiyun 	 *
1240*4882a593Smuzhiyun 	 * We don't cache full rbios because we're assuming
1241*4882a593Smuzhiyun 	 * the higher layers are unlikely to use this area of
1242*4882a593Smuzhiyun 	 * the disk again soon.  If they do use it again,
1243*4882a593Smuzhiyun 	 * hopefully they will send another full bio.
1244*4882a593Smuzhiyun 	 */
1245*4882a593Smuzhiyun 	index_rbio_pages(rbio);
1246*4882a593Smuzhiyun 	if (!rbio_is_full(rbio))
1247*4882a593Smuzhiyun 		cache_rbio_pages(rbio);
1248*4882a593Smuzhiyun 	else
1249*4882a593Smuzhiyun 		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1250*4882a593Smuzhiyun 
1251*4882a593Smuzhiyun 	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1252*4882a593Smuzhiyun 		struct page *p;
1253*4882a593Smuzhiyun 		/* first collect one page from each data stripe */
1254*4882a593Smuzhiyun 		for (stripe = 0; stripe < nr_data; stripe++) {
1255*4882a593Smuzhiyun 			p = page_in_rbio(rbio, stripe, pagenr, 0);
1256*4882a593Smuzhiyun 			pointers[stripe] = kmap(p);
1257*4882a593Smuzhiyun 		}
1258*4882a593Smuzhiyun 
1259*4882a593Smuzhiyun 		/* then add the parity stripe */
1260*4882a593Smuzhiyun 		p = rbio_pstripe_page(rbio, pagenr);
1261*4882a593Smuzhiyun 		SetPageUptodate(p);
1262*4882a593Smuzhiyun 		pointers[stripe++] = kmap(p);
1263*4882a593Smuzhiyun 
1264*4882a593Smuzhiyun 		if (has_qstripe) {
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun 			/*
1267*4882a593Smuzhiyun 			 * raid6, add the qstripe and call the
1268*4882a593Smuzhiyun 			 * library function to fill in our p/q
1269*4882a593Smuzhiyun 			 */
1270*4882a593Smuzhiyun 			p = rbio_qstripe_page(rbio, pagenr);
1271*4882a593Smuzhiyun 			SetPageUptodate(p);
1272*4882a593Smuzhiyun 			pointers[stripe++] = kmap(p);
1273*4882a593Smuzhiyun 
1274*4882a593Smuzhiyun 			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1275*4882a593Smuzhiyun 						pointers);
1276*4882a593Smuzhiyun 		} else {
1277*4882a593Smuzhiyun 			/* raid5 */
1278*4882a593Smuzhiyun 			copy_page(pointers[nr_data], pointers[0]);
1279*4882a593Smuzhiyun 			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
1280*4882a593Smuzhiyun 		}
1281*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun 		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
1284*4882a593Smuzhiyun 			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1285*4882a593Smuzhiyun 	}
1286*4882a593Smuzhiyun 
1287*4882a593Smuzhiyun 	/*
1288*4882a593Smuzhiyun 	 * time to start writing.  Make bios for everything from the
1289*4882a593Smuzhiyun 	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
1290*4882a593Smuzhiyun 	 * everything else.
1291*4882a593Smuzhiyun 	 */
1292*4882a593Smuzhiyun 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1293*4882a593Smuzhiyun 		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1294*4882a593Smuzhiyun 			struct page *page;
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun 			/* This vertical stripe has no data, skip it. */
1297*4882a593Smuzhiyun 			if (!test_bit(pagenr, rbio->dbitmap))
1298*4882a593Smuzhiyun 				continue;
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun 			if (stripe < rbio->nr_data) {
1301*4882a593Smuzhiyun 				page = page_in_rbio(rbio, stripe, pagenr, 1);
1302*4882a593Smuzhiyun 				if (!page)
1303*4882a593Smuzhiyun 					continue;
1304*4882a593Smuzhiyun 			} else {
1305*4882a593Smuzhiyun 				page = rbio_stripe_page(rbio, stripe, pagenr);
1306*4882a593Smuzhiyun 			}
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 			ret = rbio_add_io_page(rbio, &bio_list,
1309*4882a593Smuzhiyun 				       page, stripe, pagenr, rbio->stripe_len);
1310*4882a593Smuzhiyun 			if (ret)
1311*4882a593Smuzhiyun 				goto cleanup;
1312*4882a593Smuzhiyun 		}
1313*4882a593Smuzhiyun 	}
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun 	if (likely(!bbio->num_tgtdevs))
1316*4882a593Smuzhiyun 		goto write_data;
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1319*4882a593Smuzhiyun 		if (!bbio->tgtdev_map[stripe])
1320*4882a593Smuzhiyun 			continue;
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun 		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1323*4882a593Smuzhiyun 			struct page *page;
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun 			/* This vertical stripe has no data, skip it. */
1326*4882a593Smuzhiyun 			if (!test_bit(pagenr, rbio->dbitmap))
1327*4882a593Smuzhiyun 				continue;
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun 			if (stripe < rbio->nr_data) {
1330*4882a593Smuzhiyun 				page = page_in_rbio(rbio, stripe, pagenr, 1);
1331*4882a593Smuzhiyun 				if (!page)
1332*4882a593Smuzhiyun 					continue;
1333*4882a593Smuzhiyun 			} else {
1334*4882a593Smuzhiyun 				page = rbio_stripe_page(rbio, stripe, pagenr);
1335*4882a593Smuzhiyun 			}
1336*4882a593Smuzhiyun 
1337*4882a593Smuzhiyun 			ret = rbio_add_io_page(rbio, &bio_list, page,
1338*4882a593Smuzhiyun 					       rbio->bbio->tgtdev_map[stripe],
1339*4882a593Smuzhiyun 					       pagenr, rbio->stripe_len);
1340*4882a593Smuzhiyun 			if (ret)
1341*4882a593Smuzhiyun 				goto cleanup;
1342*4882a593Smuzhiyun 		}
1343*4882a593Smuzhiyun 	}
1344*4882a593Smuzhiyun 
1345*4882a593Smuzhiyun write_data:
1346*4882a593Smuzhiyun 	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1347*4882a593Smuzhiyun 	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun 	while ((bio = bio_list_pop(&bio_list))) {
1350*4882a593Smuzhiyun 		bio->bi_private = rbio;
1351*4882a593Smuzhiyun 		bio->bi_end_io = raid_write_end_io;
1352*4882a593Smuzhiyun 		bio->bi_opf = REQ_OP_WRITE;
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun 		submit_bio(bio);
1355*4882a593Smuzhiyun 	}
1356*4882a593Smuzhiyun 	return;
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun cleanup:
1359*4882a593Smuzhiyun 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1360*4882a593Smuzhiyun 
1361*4882a593Smuzhiyun 	while ((bio = bio_list_pop(&bio_list)))
1362*4882a593Smuzhiyun 		bio_put(bio);
1363*4882a593Smuzhiyun }
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun /*
1366*4882a593Smuzhiyun  * helper to find the stripe number for a given bio.  Used to figure out which
1367*4882a593Smuzhiyun  * stripe has failed.  This expects the bio to correspond to a physical disk,
1368*4882a593Smuzhiyun  * so it looks up based on physical sector numbers.
1369*4882a593Smuzhiyun  */
1370*4882a593Smuzhiyun static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1371*4882a593Smuzhiyun 			   struct bio *bio)
1372*4882a593Smuzhiyun {
1373*4882a593Smuzhiyun 	u64 physical = bio->bi_iter.bi_sector;
1374*4882a593Smuzhiyun 	int i;
1375*4882a593Smuzhiyun 	struct btrfs_bio_stripe *stripe;
1376*4882a593Smuzhiyun 
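	/* bi_sector counts 512-byte sectors; shift by 9 to get a byte address */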
1377*4882a593Smuzhiyun 	physical <<= 9;
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun 	for (i = 0; i < rbio->bbio->num_stripes; i++) {
1380*4882a593Smuzhiyun 		stripe = &rbio->bbio->stripes[i];
1381*4882a593Smuzhiyun 		if (in_range(physical, stripe->physical, rbio->stripe_len) &&
1382*4882a593Smuzhiyun 		    stripe->dev->bdev &&
1383*4882a593Smuzhiyun 		    bio->bi_disk == stripe->dev->bdev->bd_disk &&
1384*4882a593Smuzhiyun 		    bio->bi_partno == stripe->dev->bdev->bd_partno) {
1385*4882a593Smuzhiyun 			return i;
1386*4882a593Smuzhiyun 		}
1387*4882a593Smuzhiyun 	}
1388*4882a593Smuzhiyun 	return -1;
1389*4882a593Smuzhiyun }
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun /*
1392*4882a593Smuzhiyun  * helper to find the stripe number for a given
1393*4882a593Smuzhiyun  * bio (before mapping).  Used to figure out which stripe has
1394*4882a593Smuzhiyun  * failed.  This looks up based on logical block numbers.
1395*4882a593Smuzhiyun  */
1396*4882a593Smuzhiyun static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1397*4882a593Smuzhiyun 				   struct bio *bio)
1398*4882a593Smuzhiyun {
1399*4882a593Smuzhiyun 	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
1400*4882a593Smuzhiyun 	int i;
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	for (i = 0; i < rbio->nr_data; i++) {
1403*4882a593Smuzhiyun 		u64 stripe_start = rbio->bbio->raid_map[i];
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun 		if (in_range(logical, stripe_start, rbio->stripe_len))
1406*4882a593Smuzhiyun 			return i;
1407*4882a593Smuzhiyun 	}
1408*4882a593Smuzhiyun 	return -1;
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun 
1411*4882a593Smuzhiyun /*
1412*4882a593Smuzhiyun  * returns -EIO if we had too many failures
1413*4882a593Smuzhiyun  */
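/*
 * Only two failure slots (faila/failb) exist because raid6 tolerates at
 * most two failed stripes; a third concurrent failure is unrecoverable.
 */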
1414*4882a593Smuzhiyun static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1415*4882a593Smuzhiyun {
1416*4882a593Smuzhiyun 	unsigned long flags;
1417*4882a593Smuzhiyun 	int ret = 0;
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun 	spin_lock_irqsave(&rbio->bio_list_lock, flags);
1420*4882a593Smuzhiyun 
1421*4882a593Smuzhiyun 	/* we already know this stripe is bad, move on */
1422*4882a593Smuzhiyun 	if (rbio->faila == failed || rbio->failb == failed)
1423*4882a593Smuzhiyun 		goto out;
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun 	if (rbio->faila == -1) {
1426*4882a593Smuzhiyun 		/* first failure on this rbio */
1427*4882a593Smuzhiyun 		rbio->faila = failed;
1428*4882a593Smuzhiyun 		atomic_inc(&rbio->error);
1429*4882a593Smuzhiyun 	} else if (rbio->failb == -1) {
1430*4882a593Smuzhiyun 		/* second failure on this rbio */
1431*4882a593Smuzhiyun 		rbio->failb = failed;
1432*4882a593Smuzhiyun 		atomic_inc(&rbio->error);
1433*4882a593Smuzhiyun 	} else {
1434*4882a593Smuzhiyun 		ret = -EIO;
1435*4882a593Smuzhiyun 	}
1436*4882a593Smuzhiyun out:
1437*4882a593Smuzhiyun 	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun 	return ret;
1440*4882a593Smuzhiyun }
1441*4882a593Smuzhiyun 
1442*4882a593Smuzhiyun /*
1443*4882a593Smuzhiyun  * helper to fail a stripe based on a physical disk
1444*4882a593Smuzhiyun  * bio.
1445*4882a593Smuzhiyun  */
1446*4882a593Smuzhiyun static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1447*4882a593Smuzhiyun 			   struct bio *bio)
1448*4882a593Smuzhiyun {
1449*4882a593Smuzhiyun 	int failed = find_bio_stripe(rbio, bio);
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun 	if (failed < 0)
1452*4882a593Smuzhiyun 		return -EIO;
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun 	return fail_rbio_index(rbio, failed);
1455*4882a593Smuzhiyun }
1456*4882a593Smuzhiyun 
1457*4882a593Smuzhiyun /*
1458*4882a593Smuzhiyun  * this sets each page in the bio uptodate.  It should only be used on private
1459*4882a593Smuzhiyun  * rbio pages, nothing that comes in from the higher layers
1460*4882a593Smuzhiyun  */
1461*4882a593Smuzhiyun static void set_bio_pages_uptodate(struct bio *bio)
1462*4882a593Smuzhiyun {
1463*4882a593Smuzhiyun 	struct bio_vec *bvec;
1464*4882a593Smuzhiyun 	struct bvec_iter_all iter_all;
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 	ASSERT(!bio_flagged(bio, BIO_CLONED));
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun 	bio_for_each_segment_all(bvec, bio, iter_all)
1469*4882a593Smuzhiyun 		SetPageUptodate(bvec->bv_page);
1470*4882a593Smuzhiyun }
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun /*
1473*4882a593Smuzhiyun  * end io for the read phase of the rmw cycle.  All the bios here are physical
1474*4882a593Smuzhiyun  * stripe bios we've read from the disk so we can recalculate the parity of the
1475*4882a593Smuzhiyun  * stripe.
1476*4882a593Smuzhiyun  *
1477*4882a593Smuzhiyun  * This will usually kick off finish_rmw once all the bios are read in, but it
1478*4882a593Smuzhiyun  * may trigger parity reconstruction if we had any errors along the way
1479*4882a593Smuzhiyun  */
1480*4882a593Smuzhiyun static void raid_rmw_end_io(struct bio *bio)
1481*4882a593Smuzhiyun {
1482*4882a593Smuzhiyun 	struct btrfs_raid_bio *rbio = bio->bi_private;
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun 	if (bio->bi_status)
1485*4882a593Smuzhiyun 		fail_bio_stripe(rbio, bio);
1486*4882a593Smuzhiyun 	else
1487*4882a593Smuzhiyun 		set_bio_pages_uptodate(bio);
1488*4882a593Smuzhiyun 
1489*4882a593Smuzhiyun 	bio_put(bio);
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun 	if (!atomic_dec_and_test(&rbio->stripes_pending))
1492*4882a593Smuzhiyun 		return;
1493*4882a593Smuzhiyun 
1494*4882a593Smuzhiyun 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1495*4882a593Smuzhiyun 		goto cleanup;
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun 	/*
1498*4882a593Smuzhiyun 	 * this will normally call finish_rmw to start our write
1499*4882a593Smuzhiyun 	 * but if there are any failed stripes we'll reconstruct
1500*4882a593Smuzhiyun 	 * from parity first
1501*4882a593Smuzhiyun 	 */
1502*4882a593Smuzhiyun 	validate_rbio_for_rmw(rbio);
1503*4882a593Smuzhiyun 	return;
1504*4882a593Smuzhiyun 
1505*4882a593Smuzhiyun cleanup:
1506*4882a593Smuzhiyun 
1507*4882a593Smuzhiyun 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1508*4882a593Smuzhiyun }
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun /*
1511*4882a593Smuzhiyun  * the stripe must be locked by the caller.  It will
1512*4882a593Smuzhiyun  * unlock after all the writes are done
1513*4882a593Smuzhiyun  */
1514*4882a593Smuzhiyun static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1515*4882a593Smuzhiyun {
1516*4882a593Smuzhiyun 	int bios_to_read = 0;
1517*4882a593Smuzhiyun 	struct bio_list bio_list;
1518*4882a593Smuzhiyun 	int ret;
1519*4882a593Smuzhiyun 	int pagenr;
1520*4882a593Smuzhiyun 	int stripe;
1521*4882a593Smuzhiyun 	struct bio *bio;
1522*4882a593Smuzhiyun 
1523*4882a593Smuzhiyun 	bio_list_init(&bio_list);
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun 	ret = alloc_rbio_pages(rbio);
1526*4882a593Smuzhiyun 	if (ret)
1527*4882a593Smuzhiyun 		goto cleanup;
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun 	index_rbio_pages(rbio);
1530*4882a593Smuzhiyun 
1531*4882a593Smuzhiyun 	atomic_set(&rbio->error, 0);
1532*4882a593Smuzhiyun 	/*
1533*4882a593Smuzhiyun 	 * build a list of bios to read all the missing parts of this
1534*4882a593Smuzhiyun 	 * stripe
1535*4882a593Smuzhiyun 	 */
1536*4882a593Smuzhiyun 	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1537*4882a593Smuzhiyun 		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1538*4882a593Smuzhiyun 			struct page *page;
1539*4882a593Smuzhiyun 			/*
1540*4882a593Smuzhiyun 			 * we want to find all the pages missing from
1541*4882a593Smuzhiyun 			 * the rbio and read them from the disk.  If
1542*4882a593Smuzhiyun 			 * page_in_rbio finds a page in the bio list
1543*4882a593Smuzhiyun 			 * we don't need to read it off the stripe.
1544*4882a593Smuzhiyun 			 */
1545*4882a593Smuzhiyun 			page = page_in_rbio(rbio, stripe, pagenr, 1);
1546*4882a593Smuzhiyun 			if (page)
1547*4882a593Smuzhiyun 				continue;
1548*4882a593Smuzhiyun 
1549*4882a593Smuzhiyun 			page = rbio_stripe_page(rbio, stripe, pagenr);
1550*4882a593Smuzhiyun 			/*
1551*4882a593Smuzhiyun 			 * the bio cache may have handed us an uptodate
1552*4882a593Smuzhiyun 			 * page.  If so, be happy and use it
1553*4882a593Smuzhiyun 			 */
1554*4882a593Smuzhiyun 			if (PageUptodate(page))
1555*4882a593Smuzhiyun 				continue;
1556*4882a593Smuzhiyun 
1557*4882a593Smuzhiyun 			ret = rbio_add_io_page(rbio, &bio_list, page,
1558*4882a593Smuzhiyun 				       stripe, pagenr, rbio->stripe_len);
1559*4882a593Smuzhiyun 			if (ret)
1560*4882a593Smuzhiyun 				goto cleanup;
1561*4882a593Smuzhiyun 		}
1562*4882a593Smuzhiyun 	}
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun 	bios_to_read = bio_list_size(&bio_list);
1565*4882a593Smuzhiyun 	if (!bios_to_read) {
1566*4882a593Smuzhiyun 		/*
1567*4882a593Smuzhiyun 		 * this can happen if others have merged with
1568*4882a593Smuzhiyun 		 * us; it means there is nothing left to read.
1569*4882a593Smuzhiyun 		 * But if there are missing devices it may not be
1570*4882a593Smuzhiyun 		 * safe to do the full stripe write yet.
1571*4882a593Smuzhiyun 		 */
1572*4882a593Smuzhiyun 		goto finish;
1573*4882a593Smuzhiyun 	}
1574*4882a593Smuzhiyun 
1575*4882a593Smuzhiyun 	/*
1576*4882a593Smuzhiyun 	 * the bbio may be freed once we submit the last bio.  Make sure
1577*4882a593Smuzhiyun 	 * not to touch it after that
1578*4882a593Smuzhiyun 	 */
1579*4882a593Smuzhiyun 	atomic_set(&rbio->stripes_pending, bios_to_read);
1580*4882a593Smuzhiyun 	while ((bio = bio_list_pop(&bio_list))) {
1581*4882a593Smuzhiyun 		bio->bi_private = rbio;
1582*4882a593Smuzhiyun 		bio->bi_end_io = raid_rmw_end_io;
1583*4882a593Smuzhiyun 		bio->bi_opf = REQ_OP_READ;
1584*4882a593Smuzhiyun 
1585*4882a593Smuzhiyun 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun 		submit_bio(bio);
1588*4882a593Smuzhiyun 	}
1589*4882a593Smuzhiyun 	/* the actual write will happen once the reads are done */
1590*4882a593Smuzhiyun 	return 0;
1591*4882a593Smuzhiyun 
1592*4882a593Smuzhiyun cleanup:
1593*4882a593Smuzhiyun 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun 	while ((bio = bio_list_pop(&bio_list)))
1596*4882a593Smuzhiyun 		bio_put(bio);
1597*4882a593Smuzhiyun 
1598*4882a593Smuzhiyun 	return -EIO;
1599*4882a593Smuzhiyun 
1600*4882a593Smuzhiyun finish:
1601*4882a593Smuzhiyun 	validate_rbio_for_rmw(rbio);
1602*4882a593Smuzhiyun 	return 0;
1603*4882a593Smuzhiyun }
1604*4882a593Smuzhiyun 
1605*4882a593Smuzhiyun /*
1606*4882a593Smuzhiyun  * if the upper layers pass in a full stripe, we thank them by only allocating
1607*4882a593Smuzhiyun  * enough pages to hold the parity, and sending it all down quickly.
1608*4882a593Smuzhiyun  */
1609*4882a593Smuzhiyun static int full_stripe_write(struct btrfs_raid_bio *rbio)
1610*4882a593Smuzhiyun {
1611*4882a593Smuzhiyun 	int ret;
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun 	ret = alloc_rbio_parity_pages(rbio);
1614*4882a593Smuzhiyun 	if (ret) {
1615*4882a593Smuzhiyun 		__free_raid_bio(rbio);
1616*4882a593Smuzhiyun 		return ret;
1617*4882a593Smuzhiyun 	}
1618*4882a593Smuzhiyun 
1619*4882a593Smuzhiyun 	ret = lock_stripe_add(rbio);
1620*4882a593Smuzhiyun 	if (ret == 0)
1621*4882a593Smuzhiyun 		finish_rmw(rbio);
1622*4882a593Smuzhiyun 	return 0;
1623*4882a593Smuzhiyun }
1624*4882a593Smuzhiyun 
1625*4882a593Smuzhiyun /*
1626*4882a593Smuzhiyun  * partial stripe writes get handed over to async helpers.
1627*4882a593Smuzhiyun  * We're really hoping to merge a few more writes into this
1628*4882a593Smuzhiyun  * rbio before calculating new parity
1629*4882a593Smuzhiyun  */
1630*4882a593Smuzhiyun static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1631*4882a593Smuzhiyun {
1632*4882a593Smuzhiyun 	int ret;
1633*4882a593Smuzhiyun 
1634*4882a593Smuzhiyun 	ret = lock_stripe_add(rbio);
1635*4882a593Smuzhiyun 	if (ret == 0)
1636*4882a593Smuzhiyun 		start_async_work(rbio, rmw_work);
1637*4882a593Smuzhiyun 	return 0;
1638*4882a593Smuzhiyun }
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun /*
1641*4882a593Smuzhiyun  * sometimes while we were reading from the drive to
1642*4882a593Smuzhiyun  * recalculate parity, enough new bios come in to create
1643*4882a593Smuzhiyun  * a full stripe.  So we do a check here to see if we can
1644*4882a593Smuzhiyun  * go directly to finish_rmw
1645*4882a593Smuzhiyun  */
1646*4882a593Smuzhiyun static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1647*4882a593Smuzhiyun {
1648*4882a593Smuzhiyun 	/* head off into rmw land if we don't have a full stripe */
1649*4882a593Smuzhiyun 	if (!rbio_is_full(rbio))
1650*4882a593Smuzhiyun 		return partial_stripe_write(rbio);
1651*4882a593Smuzhiyun 	return full_stripe_write(rbio);
1652*4882a593Smuzhiyun }
1653*4882a593Smuzhiyun 
1654*4882a593Smuzhiyun /*
1655*4882a593Smuzhiyun  * We use plugging call backs to collect full stripes.
1656*4882a593Smuzhiyun  * Any time we get a partial stripe write while plugged
1657*4882a593Smuzhiyun  * we collect it into a list.  When the unplug comes down,
1658*4882a593Smuzhiyun  * we sort the list by logical block number and merge
1659*4882a593Smuzhiyun  * everything we can into the same rbios
1660*4882a593Smuzhiyun  */
1661*4882a593Smuzhiyun struct btrfs_plug_cb {
1662*4882a593Smuzhiyun 	struct blk_plug_cb cb;
1663*4882a593Smuzhiyun 	struct btrfs_fs_info *info;
1664*4882a593Smuzhiyun 	struct list_head rbio_list;
1665*4882a593Smuzhiyun 	struct btrfs_work work;
1666*4882a593Smuzhiyun };
1667*4882a593Smuzhiyun 
1668*4882a593Smuzhiyun /*
1669*4882a593Smuzhiyun  * rbios on the plug list are sorted for easier merging.
1670*4882a593Smuzhiyun  */
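/*
 * Sorting by the first bio's starting sector places rbios that belong to
 * the same or neighbouring full stripes next to each other, so run_plug()
 * can merge them in a single linear pass.
 */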
1671*4882a593Smuzhiyun static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1672*4882a593Smuzhiyun {
1673*4882a593Smuzhiyun 	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1674*4882a593Smuzhiyun 						 plug_list);
1675*4882a593Smuzhiyun 	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1676*4882a593Smuzhiyun 						 plug_list);
1677*4882a593Smuzhiyun 	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1678*4882a593Smuzhiyun 	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1679*4882a593Smuzhiyun 
1680*4882a593Smuzhiyun 	if (a_sector < b_sector)
1681*4882a593Smuzhiyun 		return -1;
1682*4882a593Smuzhiyun 	if (a_sector > b_sector)
1683*4882a593Smuzhiyun 		return 1;
1684*4882a593Smuzhiyun 	return 0;
1685*4882a593Smuzhiyun }
1686*4882a593Smuzhiyun 
1687*4882a593Smuzhiyun static void run_plug(struct btrfs_plug_cb *plug)
1688*4882a593Smuzhiyun {
1689*4882a593Smuzhiyun 	struct btrfs_raid_bio *cur;
1690*4882a593Smuzhiyun 	struct btrfs_raid_bio *last = NULL;
1691*4882a593Smuzhiyun 
1692*4882a593Smuzhiyun 	/*
1693*4882a593Smuzhiyun 	 * sort our plug list then try to merge
1694*4882a593Smuzhiyun 	 * everything we can in hopes of creating full
1695*4882a593Smuzhiyun 	 * stripes.
1696*4882a593Smuzhiyun 	 */
1697*4882a593Smuzhiyun 	list_sort(NULL, &plug->rbio_list, plug_cmp);
1698*4882a593Smuzhiyun 	while (!list_empty(&plug->rbio_list)) {
1699*4882a593Smuzhiyun 		cur = list_entry(plug->rbio_list.next,
1700*4882a593Smuzhiyun 				 struct btrfs_raid_bio, plug_list);
1701*4882a593Smuzhiyun 		list_del_init(&cur->plug_list);
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun 		if (rbio_is_full(cur)) {
1704*4882a593Smuzhiyun 			int ret;
1705*4882a593Smuzhiyun 
1706*4882a593Smuzhiyun 			/* we have a full stripe, send it down */
1707*4882a593Smuzhiyun 			ret = full_stripe_write(cur);
1708*4882a593Smuzhiyun 			BUG_ON(ret);
1709*4882a593Smuzhiyun 			continue;
1710*4882a593Smuzhiyun 		}
1711*4882a593Smuzhiyun 		if (last) {
1712*4882a593Smuzhiyun 			if (rbio_can_merge(last, cur)) {
1713*4882a593Smuzhiyun 				merge_rbio(last, cur);
1714*4882a593Smuzhiyun 				__free_raid_bio(cur);
1715*4882a593Smuzhiyun 				continue;
1716*4882a593Smuzhiyun 
1717*4882a593Smuzhiyun 			}
1718*4882a593Smuzhiyun 			__raid56_parity_write(last);
1719*4882a593Smuzhiyun 		}
1720*4882a593Smuzhiyun 		last = cur;
1721*4882a593Smuzhiyun 	}
1722*4882a593Smuzhiyun 	if (last) {
1723*4882a593Smuzhiyun 		__raid56_parity_write(last);
1724*4882a593Smuzhiyun 	}
1725*4882a593Smuzhiyun 	kfree(plug);
1726*4882a593Smuzhiyun }
1727*4882a593Smuzhiyun 
1728*4882a593Smuzhiyun /*
1729*4882a593Smuzhiyun  * if the unplug comes from schedule, we have to push the
1730*4882a593Smuzhiyun  * work off to a helper thread
1731*4882a593Smuzhiyun  */
1732*4882a593Smuzhiyun static void unplug_work(struct btrfs_work *work)
1733*4882a593Smuzhiyun {
1734*4882a593Smuzhiyun 	struct btrfs_plug_cb *plug;
1735*4882a593Smuzhiyun 	plug = container_of(work, struct btrfs_plug_cb, work);
1736*4882a593Smuzhiyun 	run_plug(plug);
1737*4882a593Smuzhiyun }
1738*4882a593Smuzhiyun 
1739*4882a593Smuzhiyun static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1740*4882a593Smuzhiyun {
1741*4882a593Smuzhiyun 	struct btrfs_plug_cb *plug;
1742*4882a593Smuzhiyun 	plug = container_of(cb, struct btrfs_plug_cb, cb);
1743*4882a593Smuzhiyun 
1744*4882a593Smuzhiyun 	if (from_schedule) {
1745*4882a593Smuzhiyun 		btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
1746*4882a593Smuzhiyun 		btrfs_queue_work(plug->info->rmw_workers,
1747*4882a593Smuzhiyun 				 &plug->work);
1748*4882a593Smuzhiyun 		return;
1749*4882a593Smuzhiyun 	}
1750*4882a593Smuzhiyun 	run_plug(plug);
1751*4882a593Smuzhiyun }
1752*4882a593Smuzhiyun 
1753*4882a593Smuzhiyun /* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
1754*4882a593Smuzhiyun static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
1755*4882a593Smuzhiyun {
1756*4882a593Smuzhiyun 	const struct btrfs_fs_info *fs_info = rbio->fs_info;
1757*4882a593Smuzhiyun 	const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
1758*4882a593Smuzhiyun 	const u64 full_stripe_start = rbio->bbio->raid_map[0];
1759*4882a593Smuzhiyun 	const u32 orig_len = orig_bio->bi_iter.bi_size;
1760*4882a593Smuzhiyun 	const u32 sectorsize = fs_info->sectorsize;
1761*4882a593Smuzhiyun 	u64 cur_logical;
1762*4882a593Smuzhiyun 
1763*4882a593Smuzhiyun 	ASSERT(orig_logical >= full_stripe_start &&
1764*4882a593Smuzhiyun 	       orig_logical + orig_len <= full_stripe_start +
1765*4882a593Smuzhiyun 	       rbio->nr_data * rbio->stripe_len);
1766*4882a593Smuzhiyun 
1767*4882a593Smuzhiyun 	bio_list_add(&rbio->bio_list, orig_bio);
1768*4882a593Smuzhiyun 	rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;
1769*4882a593Smuzhiyun 
1770*4882a593Smuzhiyun 	/* Update the dbitmap. */
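	/*
	 * Illustrative example (assuming 4K pages and 64K stripe_len, i.e.
	 * stripe_npages == 16): a sector 68K past full_stripe_start sets
	 * bit (68K >> PAGE_SHIFT) % 16 == 1.
	 */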
1771*4882a593Smuzhiyun 	for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
1772*4882a593Smuzhiyun 	     cur_logical += sectorsize) {
1773*4882a593Smuzhiyun 		int bit = ((u32)(cur_logical - full_stripe_start) >>
1774*4882a593Smuzhiyun 			   PAGE_SHIFT) % rbio->stripe_npages;
1775*4882a593Smuzhiyun 
1776*4882a593Smuzhiyun 		set_bit(bit, rbio->dbitmap);
1777*4882a593Smuzhiyun 	}
1778*4882a593Smuzhiyun }
1779*4882a593Smuzhiyun 
1780*4882a593Smuzhiyun /*
1781*4882a593Smuzhiyun  * our main entry point for writes from the rest of the FS.
1782*4882a593Smuzhiyun  */
1783*4882a593Smuzhiyun int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
1784*4882a593Smuzhiyun 			struct btrfs_bio *bbio, u64 stripe_len)
1785*4882a593Smuzhiyun {
1786*4882a593Smuzhiyun 	struct btrfs_raid_bio *rbio;
1787*4882a593Smuzhiyun 	struct btrfs_plug_cb *plug = NULL;
1788*4882a593Smuzhiyun 	struct blk_plug_cb *cb;
1789*4882a593Smuzhiyun 	int ret;
1790*4882a593Smuzhiyun 
1791*4882a593Smuzhiyun 	rbio = alloc_rbio(fs_info, bbio, stripe_len);
1792*4882a593Smuzhiyun 	if (IS_ERR(rbio)) {
1793*4882a593Smuzhiyun 		btrfs_put_bbio(bbio);
1794*4882a593Smuzhiyun 		return PTR_ERR(rbio);
1795*4882a593Smuzhiyun 	}
1796*4882a593Smuzhiyun 	rbio->operation = BTRFS_RBIO_WRITE;
1797*4882a593Smuzhiyun 	rbio_add_bio(rbio, bio);
1798*4882a593Smuzhiyun 
1799*4882a593Smuzhiyun 	btrfs_bio_counter_inc_noblocked(fs_info);
1800*4882a593Smuzhiyun 	rbio->generic_bio_cnt = 1;
1801*4882a593Smuzhiyun 
1802*4882a593Smuzhiyun 	/*
1803*4882a593Smuzhiyun 	 * don't plug on full rbios, just get them out the door
1804*4882a593Smuzhiyun 	 * as quickly as we can
1805*4882a593Smuzhiyun 	 */
1806*4882a593Smuzhiyun 	if (rbio_is_full(rbio)) {
1807*4882a593Smuzhiyun 		ret = full_stripe_write(rbio);
1808*4882a593Smuzhiyun 		if (ret)
1809*4882a593Smuzhiyun 			btrfs_bio_counter_dec(fs_info);
1810*4882a593Smuzhiyun 		return ret;
1811*4882a593Smuzhiyun 	}
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun 	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
1814*4882a593Smuzhiyun 	if (cb) {
1815*4882a593Smuzhiyun 		plug = container_of(cb, struct btrfs_plug_cb, cb);
1816*4882a593Smuzhiyun 		if (!plug->info) {
1817*4882a593Smuzhiyun 			plug->info = fs_info;
1818*4882a593Smuzhiyun 			INIT_LIST_HEAD(&plug->rbio_list);
1819*4882a593Smuzhiyun 		}
1820*4882a593Smuzhiyun 		list_add_tail(&rbio->plug_list, &plug->rbio_list);
1821*4882a593Smuzhiyun 		ret = 0;
1822*4882a593Smuzhiyun 	} else {
1823*4882a593Smuzhiyun 		ret = __raid56_parity_write(rbio);
1824*4882a593Smuzhiyun 		if (ret)
1825*4882a593Smuzhiyun 			btrfs_bio_counter_dec(fs_info);
1826*4882a593Smuzhiyun 	}
1827*4882a593Smuzhiyun 	return ret;
1828*4882a593Smuzhiyun }
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun /*
1831*4882a593Smuzhiyun  * all parity reconstruction happens here.  We've read in everything
1832*4882a593Smuzhiyun  * we can find from the drives and this does the heavy lifting of
1833*4882a593Smuzhiyun  * sorting the good from the bad.
1834*4882a593Smuzhiyun  */
1835*4882a593Smuzhiyun static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1836*4882a593Smuzhiyun {
1837*4882a593Smuzhiyun 	int pagenr, stripe;
1838*4882a593Smuzhiyun 	void **pointers;
1839*4882a593Smuzhiyun 	int faila = -1, failb = -1;
1840*4882a593Smuzhiyun 	struct page *page;
1841*4882a593Smuzhiyun 	blk_status_t err;
1842*4882a593Smuzhiyun 	int i;
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun 	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1845*4882a593Smuzhiyun 	if (!pointers) {
1846*4882a593Smuzhiyun 		err = BLK_STS_RESOURCE;
1847*4882a593Smuzhiyun 		goto cleanup_io;
1848*4882a593Smuzhiyun 	}
1849*4882a593Smuzhiyun 
1850*4882a593Smuzhiyun 	faila = rbio->faila;
1851*4882a593Smuzhiyun 	failb = rbio->failb;
1852*4882a593Smuzhiyun 
1853*4882a593Smuzhiyun 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1854*4882a593Smuzhiyun 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1855*4882a593Smuzhiyun 		spin_lock_irq(&rbio->bio_list_lock);
1856*4882a593Smuzhiyun 		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1857*4882a593Smuzhiyun 		spin_unlock_irq(&rbio->bio_list_lock);
1858*4882a593Smuzhiyun 	}
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun 	index_rbio_pages(rbio);
1861*4882a593Smuzhiyun 
1862*4882a593Smuzhiyun 	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1863*4882a593Smuzhiyun 		/*
1864*4882a593Smuzhiyun 		 * Now we just use the bitmap to mark the horizontal stripes
1865*4882a593Smuzhiyun 		 * in which we have data when doing parity scrub.
1866*4882a593Smuzhiyun 		 */
1867*4882a593Smuzhiyun 		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1868*4882a593Smuzhiyun 		    !test_bit(pagenr, rbio->dbitmap))
1869*4882a593Smuzhiyun 			continue;
1870*4882a593Smuzhiyun 
1871*4882a593Smuzhiyun 		/* setup our array of pointers with pages
1872*4882a593Smuzhiyun 		 * from each stripe
1873*4882a593Smuzhiyun 		 */
1874*4882a593Smuzhiyun 		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1875*4882a593Smuzhiyun 			/*
1876*4882a593Smuzhiyun 			 * if we're rebuilding a read, we have to use
1877*4882a593Smuzhiyun 			 * pages from the bio list
1878*4882a593Smuzhiyun 			 */
1879*4882a593Smuzhiyun 			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1880*4882a593Smuzhiyun 			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1881*4882a593Smuzhiyun 			    (stripe == faila || stripe == failb)) {
1882*4882a593Smuzhiyun 				page = page_in_rbio(rbio, stripe, pagenr, 0);
1883*4882a593Smuzhiyun 			} else {
1884*4882a593Smuzhiyun 				page = rbio_stripe_page(rbio, stripe, pagenr);
1885*4882a593Smuzhiyun 			}
1886*4882a593Smuzhiyun 			pointers[stripe] = kmap(page);
1887*4882a593Smuzhiyun 		}
1888*4882a593Smuzhiyun 
1889*4882a593Smuzhiyun 		/* all raid6 handling here */
1890*4882a593Smuzhiyun 		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1891*4882a593Smuzhiyun 			/*
1892*4882a593Smuzhiyun 			 * single failure, rebuild from parity raid5
1893*4882a593Smuzhiyun 			 * style
1894*4882a593Smuzhiyun 			 */
1895*4882a593Smuzhiyun 			if (failb < 0) {
1896*4882a593Smuzhiyun 				if (faila == rbio->nr_data) {
1897*4882a593Smuzhiyun 					/*
1898*4882a593Smuzhiyun 					 * Just the P stripe has failed, without
1899*4882a593Smuzhiyun 					 * a bad data or Q stripe.
1900*4882a593Smuzhiyun 					 * TODO, we should redo the xor here.
1901*4882a593Smuzhiyun 					 */
1902*4882a593Smuzhiyun 					err = BLK_STS_IOERR;
1903*4882a593Smuzhiyun 					goto cleanup;
1904*4882a593Smuzhiyun 				}
1905*4882a593Smuzhiyun 				/*
1906*4882a593Smuzhiyun 				 * a single failure in raid6 is rebuilt
1907*4882a593Smuzhiyun 				 * in the pstripe code below
1908*4882a593Smuzhiyun 				 */
1909*4882a593Smuzhiyun 				goto pstripe;
1910*4882a593Smuzhiyun 			}
1911*4882a593Smuzhiyun 
1912*4882a593Smuzhiyun 			/* make sure our ps and qs are in order */
1913*4882a593Smuzhiyun 			if (faila > failb)
1914*4882a593Smuzhiyun 				swap(faila, failb);
1915*4882a593Smuzhiyun 
1916*4882a593Smuzhiyun 			/* if the q stripe has failed, do a pstripe reconstruction
1917*4882a593Smuzhiyun 			 * from the xors.
1918*4882a593Smuzhiyun 			 * If both the q stripe and the P stripe have failed, we're
1919*4882a593Smuzhiyun 			 * here due to a crc mismatch and we can't give them the
1920*4882a593Smuzhiyun 			 * data they want
1921*4882a593Smuzhiyun 			 */
1922*4882a593Smuzhiyun 			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1923*4882a593Smuzhiyun 				if (rbio->bbio->raid_map[faila] ==
1924*4882a593Smuzhiyun 				    RAID5_P_STRIPE) {
1925*4882a593Smuzhiyun 					err = BLK_STS_IOERR;
1926*4882a593Smuzhiyun 					goto cleanup;
1927*4882a593Smuzhiyun 				}
1928*4882a593Smuzhiyun 				/*
1929*4882a593Smuzhiyun 				 * otherwise we have one bad data stripe and
1930*4882a593Smuzhiyun 				 * a good P stripe.  raid5!
1931*4882a593Smuzhiyun 				 */
1932*4882a593Smuzhiyun 				goto pstripe;
1933*4882a593Smuzhiyun 			}
1934*4882a593Smuzhiyun 
1935*4882a593Smuzhiyun 			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1936*4882a593Smuzhiyun 				raid6_datap_recov(rbio->real_stripes,
1937*4882a593Smuzhiyun 						  PAGE_SIZE, faila, pointers);
1938*4882a593Smuzhiyun 			} else {
1939*4882a593Smuzhiyun 				raid6_2data_recov(rbio->real_stripes,
1940*4882a593Smuzhiyun 						  PAGE_SIZE, faila, failb,
1941*4882a593Smuzhiyun 						  pointers);
1942*4882a593Smuzhiyun 			}
1943*4882a593Smuzhiyun 		} else {
1944*4882a593Smuzhiyun 			void *p;
1945*4882a593Smuzhiyun 
1946*4882a593Smuzhiyun 			/* rebuild from P stripe here (raid5 or raid6) */
1947*4882a593Smuzhiyun 			BUG_ON(failb != -1);
1948*4882a593Smuzhiyun pstripe:
1949*4882a593Smuzhiyun 			/* Copy parity block into failed block to start with */
1950*4882a593Smuzhiyun 			copy_page(pointers[faila], pointers[rbio->nr_data]);
1951*4882a593Smuzhiyun 
1952*4882a593Smuzhiyun 			/* rearrange the pointer array */
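			/*
			 * Example: with nr_data == 4 and faila == 1, the
			 * rotation below turns [D0, P-copy, D2, D3] into
			 * [D0, D2, D3, P-copy]; xoring the first
			 * nr_data - 1 entries into the last one then
			 * regenerates the missing D1 in place.
			 */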
1953*4882a593Smuzhiyun 			p = pointers[faila];
1954*4882a593Smuzhiyun 			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1955*4882a593Smuzhiyun 				pointers[stripe] = pointers[stripe + 1];
1956*4882a593Smuzhiyun 			pointers[rbio->nr_data - 1] = p;
1957*4882a593Smuzhiyun 
1958*4882a593Smuzhiyun 			/* xor in the rest */
1959*4882a593Smuzhiyun 			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1960*4882a593Smuzhiyun 		}
1961*4882a593Smuzhiyun 		/* if we're doing this rebuild as part of an rmw, go through
1962*4882a593Smuzhiyun 		 * and set all of our private rbio pages in the
1963*4882a593Smuzhiyun 		 * failed stripes as uptodate.  This way finish_rmw will
1964*4882a593Smuzhiyun 		 * know they can be trusted.  If this was a read reconstruction,
1965*4882a593Smuzhiyun 		 * other endio functions will fiddle the uptodate bits
1966*4882a593Smuzhiyun 		 */
1967*4882a593Smuzhiyun 		if (rbio->operation == BTRFS_RBIO_WRITE) {
1968*4882a593Smuzhiyun 			for (i = 0;  i < rbio->stripe_npages; i++) {
1969*4882a593Smuzhiyun 				if (faila != -1) {
1970*4882a593Smuzhiyun 					page = rbio_stripe_page(rbio, faila, i);
1971*4882a593Smuzhiyun 					SetPageUptodate(page);
1972*4882a593Smuzhiyun 				}
1973*4882a593Smuzhiyun 				if (failb != -1) {
1974*4882a593Smuzhiyun 					page = rbio_stripe_page(rbio, failb, i);
1975*4882a593Smuzhiyun 					SetPageUptodate(page);
1976*4882a593Smuzhiyun 				}
1977*4882a593Smuzhiyun 			}
1978*4882a593Smuzhiyun 		}
1979*4882a593Smuzhiyun 		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1980*4882a593Smuzhiyun 			/*
1981*4882a593Smuzhiyun 			 * if we're rebuilding a read, we have to use
1982*4882a593Smuzhiyun 			 * pages from the bio list
1983*4882a593Smuzhiyun 			 */
1984*4882a593Smuzhiyun 			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1985*4882a593Smuzhiyun 			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1986*4882a593Smuzhiyun 			    (stripe == faila || stripe == failb)) {
1987*4882a593Smuzhiyun 				page = page_in_rbio(rbio, stripe, pagenr, 0);
1988*4882a593Smuzhiyun 			} else {
1989*4882a593Smuzhiyun 				page = rbio_stripe_page(rbio, stripe, pagenr);
1990*4882a593Smuzhiyun 			}
1991*4882a593Smuzhiyun 			kunmap(page);
1992*4882a593Smuzhiyun 		}
1993*4882a593Smuzhiyun 	}
1994*4882a593Smuzhiyun 
1995*4882a593Smuzhiyun 	err = BLK_STS_OK;
1996*4882a593Smuzhiyun cleanup:
1997*4882a593Smuzhiyun 	kfree(pointers);
1998*4882a593Smuzhiyun 
1999*4882a593Smuzhiyun cleanup_io:
2000*4882a593Smuzhiyun 	/*
2001*4882a593Smuzhiyun 	 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
2002*4882a593Smuzhiyun 	 * valid rbio which is consistent with ondisk content, thus such a
2003*4882a593Smuzhiyun 	 * valid rbio can be cached to avoid further disk reads.
2004*4882a593Smuzhiyun 	 */
2005*4882a593Smuzhiyun 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2006*4882a593Smuzhiyun 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
2007*4882a593Smuzhiyun 		/*
2008*4882a593Smuzhiyun 		 * - In case of two failures, where rbio->failb != -1:
2009*4882a593Smuzhiyun 		 *
2010*4882a593Smuzhiyun 		 *   Do not cache this rbio since the above read reconstruction
2011*4882a593Smuzhiyun 		 *   (raid6_datap_recov() or raid6_2data_recov()) may have
2012*4882a593Smuzhiyun 		 *   changed some content of stripes which are not identical to
2013*4882a593Smuzhiyun 		 *   on-disk content any more, otherwise, a later write/recover
2014*4882a593Smuzhiyun 		 *   may steal stripe_pages from this rbio and end up with
2015*4882a593Smuzhiyun 		 *   corruptions or rebuild failures.
2016*4882a593Smuzhiyun 		 *
2017*4882a593Smuzhiyun 		 * - In case of single failure, where rbio->failb == -1:
2018*4882a593Smuzhiyun 		 *
2019*4882a593Smuzhiyun 		 *   Cache this rbio iff the above read reconstruction is
2020*4882a593Smuzhiyun 		 *   executed without problems.
2021*4882a593Smuzhiyun 		 */
2022*4882a593Smuzhiyun 		if (err == BLK_STS_OK && rbio->failb < 0)
2023*4882a593Smuzhiyun 			cache_rbio_pages(rbio);
2024*4882a593Smuzhiyun 		else
2025*4882a593Smuzhiyun 			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2026*4882a593Smuzhiyun 
2027*4882a593Smuzhiyun 		rbio_orig_end_io(rbio, err);
2028*4882a593Smuzhiyun 	} else if (err == BLK_STS_OK) {
2029*4882a593Smuzhiyun 		rbio->faila = -1;
2030*4882a593Smuzhiyun 		rbio->failb = -1;
2031*4882a593Smuzhiyun 
2032*4882a593Smuzhiyun 		if (rbio->operation == BTRFS_RBIO_WRITE)
2033*4882a593Smuzhiyun 			finish_rmw(rbio);
2034*4882a593Smuzhiyun 		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
2035*4882a593Smuzhiyun 			finish_parity_scrub(rbio, 0);
2036*4882a593Smuzhiyun 		else
2037*4882a593Smuzhiyun 			BUG();
2038*4882a593Smuzhiyun 	} else {
2039*4882a593Smuzhiyun 		rbio_orig_end_io(rbio, err);
2040*4882a593Smuzhiyun 	}
2041*4882a593Smuzhiyun }
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun /*
2044*4882a593Smuzhiyun  * This is called only for stripes we've read from disk to
2045*4882a593Smuzhiyun  * reconstruct the parity.
2046*4882a593Smuzhiyun  */
2047*4882a593Smuzhiyun static void raid_recover_end_io(struct bio *bio)
2048*4882a593Smuzhiyun {
2049*4882a593Smuzhiyun 	struct btrfs_raid_bio *rbio = bio->bi_private;
2050*4882a593Smuzhiyun 
2051*4882a593Smuzhiyun 	/*
2052*4882a593Smuzhiyun 	 * we only read stripe pages off the disk, set them
2053*4882a593Smuzhiyun 	 * up to date if there were no errors
2054*4882a593Smuzhiyun 	 */
2055*4882a593Smuzhiyun 	if (bio->bi_status)
2056*4882a593Smuzhiyun 		fail_bio_stripe(rbio, bio);
2057*4882a593Smuzhiyun 	else
2058*4882a593Smuzhiyun 		set_bio_pages_uptodate(bio);
2059*4882a593Smuzhiyun 	bio_put(bio);
2060*4882a593Smuzhiyun 
2061*4882a593Smuzhiyun 	if (!atomic_dec_and_test(&rbio->stripes_pending))
2062*4882a593Smuzhiyun 		return;
2063*4882a593Smuzhiyun 
2064*4882a593Smuzhiyun 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2065*4882a593Smuzhiyun 		rbio_orig_end_io(rbio, BLK_STS_IOERR);
2066*4882a593Smuzhiyun 	else
2067*4882a593Smuzhiyun 		__raid_recover_end_io(rbio);
2068*4882a593Smuzhiyun }
2069*4882a593Smuzhiyun 
2070*4882a593Smuzhiyun /*
2071*4882a593Smuzhiyun  * reads everything we need off the disk to reconstruct
2072*4882a593Smuzhiyun  * the parity. endio handlers trigger final reconstruction
2073*4882a593Smuzhiyun  * when the IO is done.
2074*4882a593Smuzhiyun  *
2075*4882a593Smuzhiyun  * This is used both for reads from the higher layers and for
2076*4882a593Smuzhiyun  * parity construction required to finish a rmw cycle.
2077*4882a593Smuzhiyun  */
2078*4882a593Smuzhiyun static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2079*4882a593Smuzhiyun {
2080*4882a593Smuzhiyun 	int bios_to_read = 0;
2081*4882a593Smuzhiyun 	struct bio_list bio_list;
2082*4882a593Smuzhiyun 	int ret;
2083*4882a593Smuzhiyun 	int pagenr;
2084*4882a593Smuzhiyun 	int stripe;
2085*4882a593Smuzhiyun 	struct bio *bio;
2086*4882a593Smuzhiyun 
2087*4882a593Smuzhiyun 	bio_list_init(&bio_list);
2088*4882a593Smuzhiyun 
2089*4882a593Smuzhiyun 	ret = alloc_rbio_pages(rbio);
2090*4882a593Smuzhiyun 	if (ret)
2091*4882a593Smuzhiyun 		goto cleanup;
2092*4882a593Smuzhiyun 
2093*4882a593Smuzhiyun 	atomic_set(&rbio->error, 0);
2094*4882a593Smuzhiyun 
2095*4882a593Smuzhiyun 	/*
2096*4882a593Smuzhiyun 	 * Read everything that hasn't failed.  However, this time we
2097*4882a593Smuzhiyun 	 * must not trust any cached sector: the cache may hold stale
2098*4882a593Smuzhiyun 	 * data in a part of the stripe the higher layer is not reading,
2099*4882a593Smuzhiyun 	 * and reconstruction needs every sector to be correct.
2100*4882a593Smuzhiyun 	 *
2101*4882a593Smuzhiyun 	 * So we always re-read everything in the recovery path.
2102*4882a593Smuzhiyun 	 */
2103*4882a593Smuzhiyun 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2104*4882a593Smuzhiyun 		if (rbio->faila == stripe || rbio->failb == stripe) {
2105*4882a593Smuzhiyun 			atomic_inc(&rbio->error);
2106*4882a593Smuzhiyun 			continue;
2107*4882a593Smuzhiyun 		}
2108*4882a593Smuzhiyun 
2109*4882a593Smuzhiyun 		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2110*4882a593Smuzhiyun 			ret = rbio_add_io_page(rbio, &bio_list,
2111*4882a593Smuzhiyun 				       rbio_stripe_page(rbio, stripe, pagenr),
2112*4882a593Smuzhiyun 				       stripe, pagenr, rbio->stripe_len);
2113*4882a593Smuzhiyun 			if (ret < 0)
2114*4882a593Smuzhiyun 				goto cleanup;
2115*4882a593Smuzhiyun 		}
2116*4882a593Smuzhiyun 	}
2117*4882a593Smuzhiyun 
2118*4882a593Smuzhiyun 	bios_to_read = bio_list_size(&bio_list);
2119*4882a593Smuzhiyun 	if (!bios_to_read) {
2120*4882a593Smuzhiyun 		/*
2121*4882a593Smuzhiyun 		 * we might have no bios to read just because the pages
2122*4882a593Smuzhiyun 		 * were up to date, or we might have no bios to read because
2123*4882a593Smuzhiyun 		 * the devices were gone.
2124*4882a593Smuzhiyun 		 */
2125*4882a593Smuzhiyun 		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2126*4882a593Smuzhiyun 			__raid_recover_end_io(rbio);
2127*4882a593Smuzhiyun 			return 0;
2128*4882a593Smuzhiyun 		} else {
2129*4882a593Smuzhiyun 			goto cleanup;
2130*4882a593Smuzhiyun 		}
2131*4882a593Smuzhiyun 	}
2132*4882a593Smuzhiyun 
2133*4882a593Smuzhiyun 	/*
2134*4882a593Smuzhiyun 	 * the bbio may be freed once we submit the last bio.  Make sure
2135*4882a593Smuzhiyun 	 * not to touch it after that
2136*4882a593Smuzhiyun 	 */
2137*4882a593Smuzhiyun 	atomic_set(&rbio->stripes_pending, bios_to_read);
2138*4882a593Smuzhiyun 	while ((bio = bio_list_pop(&bio_list))) {
2139*4882a593Smuzhiyun 		bio->bi_private = rbio;
2140*4882a593Smuzhiyun 		bio->bi_end_io = raid_recover_end_io;
2141*4882a593Smuzhiyun 		bio->bi_opf = REQ_OP_READ;
2142*4882a593Smuzhiyun 
2143*4882a593Smuzhiyun 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2144*4882a593Smuzhiyun 
2145*4882a593Smuzhiyun 		submit_bio(bio);
2146*4882a593Smuzhiyun 	}
2147*4882a593Smuzhiyun 
2148*4882a593Smuzhiyun 	return 0;
2149*4882a593Smuzhiyun 
2150*4882a593Smuzhiyun cleanup:
2151*4882a593Smuzhiyun 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2152*4882a593Smuzhiyun 	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
2153*4882a593Smuzhiyun 		rbio_orig_end_io(rbio, BLK_STS_IOERR);
2154*4882a593Smuzhiyun 
2155*4882a593Smuzhiyun 	while ((bio = bio_list_pop(&bio_list)))
2156*4882a593Smuzhiyun 		bio_put(bio);
2157*4882a593Smuzhiyun 
2158*4882a593Smuzhiyun 	return -EIO;
2159*4882a593Smuzhiyun }
2160*4882a593Smuzhiyun 
2161*4882a593Smuzhiyun /*
2162*4882a593Smuzhiyun  * the main entry point for reads from the higher layers.  This
2163*4882a593Smuzhiyun  * is really only called when the normal read path had a failure,
2164*4882a593Smuzhiyun  * so we assume the bio they send down corresponds to a failed part
2165*4882a593Smuzhiyun  * of the drive.
2166*4882a593Smuzhiyun  */
2167*4882a593Smuzhiyun int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
2168*4882a593Smuzhiyun 			  struct btrfs_bio *bbio, u64 stripe_len,
2169*4882a593Smuzhiyun 			  int mirror_num, int generic_io)
2170*4882a593Smuzhiyun {
2171*4882a593Smuzhiyun 	struct btrfs_raid_bio *rbio;
2172*4882a593Smuzhiyun 	int ret;
2173*4882a593Smuzhiyun 
2174*4882a593Smuzhiyun 	if (generic_io) {
2175*4882a593Smuzhiyun 		ASSERT(bbio->mirror_num == mirror_num);
2176*4882a593Smuzhiyun 		btrfs_io_bio(bio)->mirror_num = mirror_num;
2177*4882a593Smuzhiyun 	}
2178*4882a593Smuzhiyun 
2179*4882a593Smuzhiyun 	rbio = alloc_rbio(fs_info, bbio, stripe_len);
2180*4882a593Smuzhiyun 	if (IS_ERR(rbio)) {
2181*4882a593Smuzhiyun 		if (generic_io)
2182*4882a593Smuzhiyun 			btrfs_put_bbio(bbio);
2183*4882a593Smuzhiyun 		return PTR_ERR(rbio);
2184*4882a593Smuzhiyun 	}
2185*4882a593Smuzhiyun 
2186*4882a593Smuzhiyun 	rbio->operation = BTRFS_RBIO_READ_REBUILD;
2187*4882a593Smuzhiyun 	rbio_add_bio(rbio, bio);
2188*4882a593Smuzhiyun 
2189*4882a593Smuzhiyun 	rbio->faila = find_logical_bio_stripe(rbio, bio);
2190*4882a593Smuzhiyun 	if (rbio->faila == -1) {
2191*4882a593Smuzhiyun 		btrfs_warn(fs_info,
2192*4882a593Smuzhiyun 	"%s could not find the bad stripe in raid56, so it cannot be recovered (bio has logical %llu len %llu, bbio has map_type %llu)",
2193*4882a593Smuzhiyun 			   __func__, (u64)bio->bi_iter.bi_sector << 9,
2194*4882a593Smuzhiyun 			   (u64)bio->bi_iter.bi_size, bbio->map_type);
2195*4882a593Smuzhiyun 		if (generic_io)
2196*4882a593Smuzhiyun 			btrfs_put_bbio(bbio);
2197*4882a593Smuzhiyun 		kfree(rbio);
2198*4882a593Smuzhiyun 		return -EIO;
2199*4882a593Smuzhiyun 	}
2200*4882a593Smuzhiyun 
2201*4882a593Smuzhiyun 	if (generic_io) {
2202*4882a593Smuzhiyun 		btrfs_bio_counter_inc_noblocked(fs_info);
2203*4882a593Smuzhiyun 		rbio->generic_bio_cnt = 1;
2204*4882a593Smuzhiyun 	} else {
2205*4882a593Smuzhiyun 		btrfs_get_bbio(bbio);
2206*4882a593Smuzhiyun 	}
2207*4882a593Smuzhiyun 
2208*4882a593Smuzhiyun 	/*
2209*4882a593Smuzhiyun 	 * Loop retry:
2210*4882a593Smuzhiyun 	 * for 'mirror == 2', reconstruct from all other stripes.
2211*4882a593Smuzhiyun 	 * for 'mirror_num > 2', select a stripe to fail on every retry.
2212*4882a593Smuzhiyun 	 */
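	/*
	 * Worked example: with real_stripes == 6 (4 data + p + q),
	 * mirror_num == 3 gives failb = 6 - 2 = 4 (the p stripe), and
	 * mirror_num == 4 gives failb = 3 (the last data stripe).
	 */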
2213*4882a593Smuzhiyun 	if (mirror_num > 2) {
2214*4882a593Smuzhiyun 		/*
2215*4882a593Smuzhiyun 		 * 'mirror == 3' is to fail the p stripe and
2216*4882a593Smuzhiyun 		 * reconstruct from the q stripe.  'mirror > 3' is to
2217*4882a593Smuzhiyun 		 * fail a data stripe and reconstruct from p+q stripe.
2218*4882a593Smuzhiyun 		 */
2219*4882a593Smuzhiyun 		rbio->failb = rbio->real_stripes - (mirror_num - 1);
2220*4882a593Smuzhiyun 		ASSERT(rbio->failb > 0);
2221*4882a593Smuzhiyun 		if (rbio->failb <= rbio->faila)
2222*4882a593Smuzhiyun 			rbio->failb--;
2223*4882a593Smuzhiyun 	}
2224*4882a593Smuzhiyun 
2225*4882a593Smuzhiyun 	ret = lock_stripe_add(rbio);
2226*4882a593Smuzhiyun 
2227*4882a593Smuzhiyun 	/*
2228*4882a593Smuzhiyun 	 * __raid56_parity_recover will end the bio with
2229*4882a593Smuzhiyun 	 * any errors it hits.  We don't want to return
2230*4882a593Smuzhiyun 	 * its error value up the stack because our caller
2231*4882a593Smuzhiyun 	 * will end up calling bio_endio with any nonzero
2232*4882a593Smuzhiyun 	 * return
2233*4882a593Smuzhiyun 	 */
2234*4882a593Smuzhiyun 	if (ret == 0)
2235*4882a593Smuzhiyun 		__raid56_parity_recover(rbio);
2236*4882a593Smuzhiyun 	/*
2237*4882a593Smuzhiyun 	 * our rbio has been added to the list of
2238*4882a593Smuzhiyun 	 * rbios that will be handled after the
2239*4882a593Smuzhiyun 	 * current lock owner is done
2240*4882a593Smuzhiyun 	 */
2241*4882a593Smuzhiyun 	return 0;
2243*4882a593Smuzhiyun }
2244*4882a593Smuzhiyun 
2245*4882a593Smuzhiyun static void rmw_work(struct btrfs_work *work)
2246*4882a593Smuzhiyun {
2247*4882a593Smuzhiyun 	struct btrfs_raid_bio *rbio;
2248*4882a593Smuzhiyun 
2249*4882a593Smuzhiyun 	rbio = container_of(work, struct btrfs_raid_bio, work);
2250*4882a593Smuzhiyun 	raid56_rmw_stripe(rbio);
2251*4882a593Smuzhiyun }
2252*4882a593Smuzhiyun 
2253*4882a593Smuzhiyun static void read_rebuild_work(struct btrfs_work *work)
2254*4882a593Smuzhiyun {
2255*4882a593Smuzhiyun 	struct btrfs_raid_bio *rbio;
2256*4882a593Smuzhiyun 
2257*4882a593Smuzhiyun 	rbio = container_of(work, struct btrfs_raid_bio, work);
2258*4882a593Smuzhiyun 	__raid56_parity_recover(rbio);
2259*4882a593Smuzhiyun }
2260*4882a593Smuzhiyun 
2261*4882a593Smuzhiyun /*
2262*4882a593Smuzhiyun  * The following code is used to scrub/replace the parity stripe
2263*4882a593Smuzhiyun  *
2264*4882a593Smuzhiyun  * Caller must have already increased bio_counter for getting @bbio.
2265*4882a593Smuzhiyun  *
2266*4882a593Smuzhiyun  * Note: we need to make sure all the pages added into the scrub/replace
2267*4882a593Smuzhiyun  * raid bio are correct and do not change during the scrub/replace; that
2268*4882a593Smuzhiyun  * is, those pages hold only metadata or file data with checksums.
2269*4882a593Smuzhiyun  */
2270*4882a593Smuzhiyun 
2271*4882a593Smuzhiyun struct btrfs_raid_bio *
2272*4882a593Smuzhiyun raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2273*4882a593Smuzhiyun 			       struct btrfs_bio *bbio, u64 stripe_len,
2274*4882a593Smuzhiyun 			       struct btrfs_device *scrub_dev,
2275*4882a593Smuzhiyun 			       unsigned long *dbitmap, int stripe_nsectors)
2276*4882a593Smuzhiyun {
2277*4882a593Smuzhiyun 	struct btrfs_raid_bio *rbio;
2278*4882a593Smuzhiyun 	int i;
2279*4882a593Smuzhiyun 
2280*4882a593Smuzhiyun 	rbio = alloc_rbio(fs_info, bbio, stripe_len);
2281*4882a593Smuzhiyun 	if (IS_ERR(rbio))
2282*4882a593Smuzhiyun 		return NULL;
2283*4882a593Smuzhiyun 	bio_list_add(&rbio->bio_list, bio);
2284*4882a593Smuzhiyun 	/*
2285*4882a593Smuzhiyun 	 * This is a special bio which is used to hold the completion handler
2286*4882a593Smuzhiyun 	 * and make the scrub rbio similar to the other types
2287*4882a593Smuzhiyun 	 */
2288*4882a593Smuzhiyun 	ASSERT(!bio->bi_iter.bi_size);
2289*4882a593Smuzhiyun 	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2290*4882a593Smuzhiyun 
2291*4882a593Smuzhiyun 	/*
2292*4882a593Smuzhiyun 	 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
2293*4882a593Smuzhiyun 	 * to the end position, so this search can start from the first parity
2294*4882a593Smuzhiyun 	 * stripe.
2295*4882a593Smuzhiyun 	 */
2296*4882a593Smuzhiyun 	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2297*4882a593Smuzhiyun 		if (bbio->stripes[i].dev == scrub_dev) {
2298*4882a593Smuzhiyun 			rbio->scrubp = i;
2299*4882a593Smuzhiyun 			break;
2300*4882a593Smuzhiyun 		}
2301*4882a593Smuzhiyun 	}
2302*4882a593Smuzhiyun 	ASSERT(i < rbio->real_stripes);
2303*4882a593Smuzhiyun 
2304*4882a593Smuzhiyun 	/* For now we only support sectorsize equal to page size */
2305*4882a593Smuzhiyun 	ASSERT(fs_info->sectorsize == PAGE_SIZE);
2306*4882a593Smuzhiyun 	ASSERT(rbio->stripe_npages == stripe_nsectors);
2307*4882a593Smuzhiyun 	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2308*4882a593Smuzhiyun 
2309*4882a593Smuzhiyun 	/*
2310*4882a593Smuzhiyun 	 * We have already increased bio_counter when getting bbio, record it
2311*4882a593Smuzhiyun 	 * so we can drop it again in rbio_orig_end_io().
2312*4882a593Smuzhiyun 	 */
2313*4882a593Smuzhiyun 	rbio->generic_bio_cnt = 1;
2314*4882a593Smuzhiyun 
2315*4882a593Smuzhiyun 	return rbio;
2316*4882a593Smuzhiyun }
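
/*
 * Editor's illustration of the scrubp search above (assumed 4-device
 * RAID6 layout; not part of this file): after the BTRFS_MAP_WRITE
 * mapping, bbio->stripes[] holds the data stripes first and the parity
 * stripes last:
 *
 *	index:	0	1	2	3
 *	role:	data0	data1	P	Q
 *
 * so with nr_data == 2 the loop only scans indexes 2..3 and records in
 * rbio->scrubp whichever parity stripe lives on @scrub_dev.
 */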
2317*4882a593Smuzhiyun 
2318*4882a593Smuzhiyun /* Used for both parity scrub and missing. */
2319*4882a593Smuzhiyun void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2320*4882a593Smuzhiyun 			    u64 logical)
2321*4882a593Smuzhiyun {
2322*4882a593Smuzhiyun 	int stripe_offset;
2323*4882a593Smuzhiyun 	int index;
2324*4882a593Smuzhiyun 
2325*4882a593Smuzhiyun 	ASSERT(logical >= rbio->bbio->raid_map[0]);
2326*4882a593Smuzhiyun 	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2327*4882a593Smuzhiyun 				rbio->stripe_len * rbio->nr_data);
2328*4882a593Smuzhiyun 	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2329*4882a593Smuzhiyun 	index = stripe_offset >> PAGE_SHIFT;
2330*4882a593Smuzhiyun 	rbio->bio_pages[index] = page;
2331*4882a593Smuzhiyun }
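
/*
 * Editor's worked example for the index math above (assumed numbers):
 * with 4K pages, raid_map[0] == 1M and two data stripes of 64K each, a
 * page at logical 1M + 72K yields
 *
 *	stripe_offset = 72K = 73728;
 *	index = 73728 >> PAGE_SHIFT = 18;
 *
 * i.e. bio_pages[] is indexed linearly across the whole data area in
 * raid_map order rather than per device.
 */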
2332*4882a593Smuzhiyun 
2333*4882a593Smuzhiyun /*
2334*4882a593Smuzhiyun  * We only scrub the parity for which we have correct data on the same
2335*4882a593Smuzhiyun  * horizontal, so we don't need to allocate pages for all the stripes.
2336*4882a593Smuzhiyun  */
2337*4882a593Smuzhiyun static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2338*4882a593Smuzhiyun {
2339*4882a593Smuzhiyun 	int i;
2340*4882a593Smuzhiyun 	int bit;
2341*4882a593Smuzhiyun 	int index;
2342*4882a593Smuzhiyun 	struct page *page;
2343*4882a593Smuzhiyun 
2344*4882a593Smuzhiyun 	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2345*4882a593Smuzhiyun 		for (i = 0; i < rbio->real_stripes; i++) {
2346*4882a593Smuzhiyun 			index = i * rbio->stripe_npages + bit;
2347*4882a593Smuzhiyun 			if (rbio->stripe_pages[index])
2348*4882a593Smuzhiyun 				continue;
2349*4882a593Smuzhiyun 
2350*4882a593Smuzhiyun 			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2351*4882a593Smuzhiyun 			if (!page)
2352*4882a593Smuzhiyun 				return -ENOMEM;
2353*4882a593Smuzhiyun 			rbio->stripe_pages[index] = page;
2354*4882a593Smuzhiyun 		}
2355*4882a593Smuzhiyun 	}
2356*4882a593Smuzhiyun 	return 0;
2357*4882a593Smuzhiyun }
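
/*
 * Editor's note on the indexing above: stripe_pages[] is a single flat
 * array of real_stripes * stripe_npages entries, laid out stripe-major,
 * so page 'bit' of stripe 'i' lives at
 *
 *	index = i * rbio->stripe_npages + bit;
 *
 * With stripe_npages == 16 (assumed, i.e. 64K stripes of 4K pages),
 * dbitmap bit 3 makes the loop touch indexes 3, 19, 35 and so on: one
 * page per stripe in the same horizontal row.
 */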
2358*4882a593Smuzhiyun 
2359*4882a593Smuzhiyun static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2360*4882a593Smuzhiyun 					 int need_check)
2361*4882a593Smuzhiyun {
2362*4882a593Smuzhiyun 	struct btrfs_bio *bbio = rbio->bbio;
2363*4882a593Smuzhiyun 	void **pointers = rbio->finish_pointers;
2364*4882a593Smuzhiyun 	unsigned long *pbitmap = rbio->finish_pbitmap;
2365*4882a593Smuzhiyun 	int nr_data = rbio->nr_data;
2366*4882a593Smuzhiyun 	int stripe;
2367*4882a593Smuzhiyun 	int pagenr;
2368*4882a593Smuzhiyun 	bool has_qstripe;
2369*4882a593Smuzhiyun 	struct page *p_page = NULL;
2370*4882a593Smuzhiyun 	struct page *q_page = NULL;
2371*4882a593Smuzhiyun 	struct bio_list bio_list;
2372*4882a593Smuzhiyun 	struct bio *bio;
2373*4882a593Smuzhiyun 	int is_replace = 0;
2374*4882a593Smuzhiyun 	int ret;
2375*4882a593Smuzhiyun 
2376*4882a593Smuzhiyun 	bio_list_init(&bio_list);
2377*4882a593Smuzhiyun 
2378*4882a593Smuzhiyun 	if (rbio->real_stripes - rbio->nr_data == 1)
2379*4882a593Smuzhiyun 		has_qstripe = false;
2380*4882a593Smuzhiyun 	else if (rbio->real_stripes - rbio->nr_data == 2)
2381*4882a593Smuzhiyun 		has_qstripe = true;
2382*4882a593Smuzhiyun 	else
2383*4882a593Smuzhiyun 		BUG();
2384*4882a593Smuzhiyun 
2385*4882a593Smuzhiyun 	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2386*4882a593Smuzhiyun 		is_replace = 1;
2387*4882a593Smuzhiyun 		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2388*4882a593Smuzhiyun 	}
2389*4882a593Smuzhiyun 
2390*4882a593Smuzhiyun 	/*
2391*4882a593Smuzhiyun 	 * The higher layers (the scrubber) are unlikely to use this
2392*4882a593Smuzhiyun 	 * area of the disk again soon, so don't cache it.
2394*4882a593Smuzhiyun 	 */
2395*4882a593Smuzhiyun 	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2396*4882a593Smuzhiyun 
2397*4882a593Smuzhiyun 	if (!need_check)
2398*4882a593Smuzhiyun 		goto writeback;
2399*4882a593Smuzhiyun 
2400*4882a593Smuzhiyun 	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2401*4882a593Smuzhiyun 	if (!p_page)
2402*4882a593Smuzhiyun 		goto cleanup;
2403*4882a593Smuzhiyun 	SetPageUptodate(p_page);
2404*4882a593Smuzhiyun 
2405*4882a593Smuzhiyun 	if (has_qstripe) {
2406*4882a593Smuzhiyun 		/* RAID6, allocate and map temp space for the Q stripe */
2407*4882a593Smuzhiyun 		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2408*4882a593Smuzhiyun 		if (!q_page) {
2409*4882a593Smuzhiyun 			__free_page(p_page);
2410*4882a593Smuzhiyun 			goto cleanup;
2411*4882a593Smuzhiyun 		}
2412*4882a593Smuzhiyun 		SetPageUptodate(q_page);
2413*4882a593Smuzhiyun 		pointers[rbio->real_stripes - 1] = kmap(q_page);
2414*4882a593Smuzhiyun 	}
2415*4882a593Smuzhiyun 
2416*4882a593Smuzhiyun 	atomic_set(&rbio->error, 0);
2417*4882a593Smuzhiyun 
2418*4882a593Smuzhiyun 	/* Map the parity stripe just once */
2419*4882a593Smuzhiyun 	pointers[nr_data] = kmap(p_page);
2420*4882a593Smuzhiyun 
2421*4882a593Smuzhiyun 	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2422*4882a593Smuzhiyun 		struct page *p;
2423*4882a593Smuzhiyun 		void *parity;
2424*4882a593Smuzhiyun 		/* first collect one page from each data stripe */
2425*4882a593Smuzhiyun 		for (stripe = 0; stripe < nr_data; stripe++) {
2426*4882a593Smuzhiyun 			p = page_in_rbio(rbio, stripe, pagenr, 0);
2427*4882a593Smuzhiyun 			pointers[stripe] = kmap(p);
2428*4882a593Smuzhiyun 		}
2429*4882a593Smuzhiyun 
2430*4882a593Smuzhiyun 		if (has_qstripe) {
2431*4882a593Smuzhiyun 			/* RAID6, call the library function to fill in our P/Q */
2432*4882a593Smuzhiyun 			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2433*4882a593Smuzhiyun 						pointers);
2434*4882a593Smuzhiyun 		} else {
2435*4882a593Smuzhiyun 			/* raid5 */
2436*4882a593Smuzhiyun 			copy_page(pointers[nr_data], pointers[0]);
2437*4882a593Smuzhiyun 			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2438*4882a593Smuzhiyun 		}
2439*4882a593Smuzhiyun 
2440*4882a593Smuzhiyun 		/* Check scrubbing parity and repair it */
2441*4882a593Smuzhiyun 		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2442*4882a593Smuzhiyun 		parity = kmap(p);
2443*4882a593Smuzhiyun 		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2444*4882a593Smuzhiyun 			copy_page(parity, pointers[rbio->scrubp]);
2445*4882a593Smuzhiyun 		else
2446*4882a593Smuzhiyun 			/* Parity is correct, no writeback needed */
2447*4882a593Smuzhiyun 			bitmap_clear(rbio->dbitmap, pagenr, 1);
2448*4882a593Smuzhiyun 		kunmap(p);
2449*4882a593Smuzhiyun 
2450*4882a593Smuzhiyun 		for (stripe = 0; stripe < nr_data; stripe++)
2451*4882a593Smuzhiyun 			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2452*4882a593Smuzhiyun 	}
2453*4882a593Smuzhiyun 
2454*4882a593Smuzhiyun 	kunmap(p_page);
2455*4882a593Smuzhiyun 	__free_page(p_page);
2456*4882a593Smuzhiyun 	if (q_page) {
2457*4882a593Smuzhiyun 		kunmap(q_page);
2458*4882a593Smuzhiyun 		__free_page(q_page);
2459*4882a593Smuzhiyun 	}
2460*4882a593Smuzhiyun 
2461*4882a593Smuzhiyun writeback:
2462*4882a593Smuzhiyun 	/*
2463*4882a593Smuzhiyun 	 * time to start writing.  Make bios for everything from the
2464*4882a593Smuzhiyun 	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
2465*4882a593Smuzhiyun 	 * everything else.
2466*4882a593Smuzhiyun 	 */
2467*4882a593Smuzhiyun 	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2468*4882a593Smuzhiyun 		struct page *page;
2469*4882a593Smuzhiyun 
2470*4882a593Smuzhiyun 		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2471*4882a593Smuzhiyun 		ret = rbio_add_io_page(rbio, &bio_list,
2472*4882a593Smuzhiyun 			       page, rbio->scrubp, pagenr, rbio->stripe_len);
2473*4882a593Smuzhiyun 		if (ret)
2474*4882a593Smuzhiyun 			goto cleanup;
2475*4882a593Smuzhiyun 	}
2476*4882a593Smuzhiyun 
2477*4882a593Smuzhiyun 	if (!is_replace)
2478*4882a593Smuzhiyun 		goto submit_write;
2479*4882a593Smuzhiyun 
2480*4882a593Smuzhiyun 	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2481*4882a593Smuzhiyun 		struct page *page;
2482*4882a593Smuzhiyun 
2483*4882a593Smuzhiyun 		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2484*4882a593Smuzhiyun 		ret = rbio_add_io_page(rbio, &bio_list, page,
2485*4882a593Smuzhiyun 				       bbio->tgtdev_map[rbio->scrubp],
2486*4882a593Smuzhiyun 				       pagenr, rbio->stripe_len);
2487*4882a593Smuzhiyun 		if (ret)
2488*4882a593Smuzhiyun 			goto cleanup;
2489*4882a593Smuzhiyun 	}
2490*4882a593Smuzhiyun 
2491*4882a593Smuzhiyun submit_write:
2492*4882a593Smuzhiyun 	nr_data = bio_list_size(&bio_list);
2493*4882a593Smuzhiyun 	if (!nr_data) {
2494*4882a593Smuzhiyun 		/* Every parity is right */
2495*4882a593Smuzhiyun 		rbio_orig_end_io(rbio, BLK_STS_OK);
2496*4882a593Smuzhiyun 		return;
2497*4882a593Smuzhiyun 	}
2498*4882a593Smuzhiyun 
2499*4882a593Smuzhiyun 	atomic_set(&rbio->stripes_pending, nr_data);
2500*4882a593Smuzhiyun 
2501*4882a593Smuzhiyun 	while ((bio = bio_list_pop(&bio_list))) {
2502*4882a593Smuzhiyun 		bio->bi_private = rbio;
2503*4882a593Smuzhiyun 		bio->bi_end_io = raid_write_end_io;
2504*4882a593Smuzhiyun 		bio->bi_opf = REQ_OP_WRITE;
2505*4882a593Smuzhiyun 
2506*4882a593Smuzhiyun 		submit_bio(bio);
2507*4882a593Smuzhiyun 	}
2508*4882a593Smuzhiyun 	return;
2509*4882a593Smuzhiyun 
2510*4882a593Smuzhiyun cleanup:
2511*4882a593Smuzhiyun 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2512*4882a593Smuzhiyun 
2513*4882a593Smuzhiyun 	while ((bio = bio_list_pop(&bio_list)))
2514*4882a593Smuzhiyun 		bio_put(bio);
2515*4882a593Smuzhiyun }
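
/*
 * Editor's sketch of the RAID5 branch of the check above (hypothetical
 * standalone helper; the real code reuses the rbio page machinery,
 * copy_page() and run_xor()):
 *
 *	// Return true when the on-disk parity page matches the XOR of
 *	// all data pages in the same horizontal row.
 *	static bool parity_row_ok(void **data, int nr_data, void *parity)
 *	{
 *		u8 *tmp;
 *		int i, j;
 *		bool ok;
 *
 *		tmp = kmalloc(PAGE_SIZE, GFP_NOFS);
 *		if (!tmp)
 *			return false;
 *		memcpy(tmp, data[0], PAGE_SIZE);
 *		for (i = 1; i < nr_data; i++)
 *			for (j = 0; j < PAGE_SIZE; j++)
 *				tmp[j] ^= ((u8 *)data[i])[j];
 *		ok = memcmp(tmp, parity, PAGE_SIZE) == 0;
 *		kfree(tmp);
 *		return ok;
 *	}
 *
 * finish_parity_scrub() performs the equivalent comparison and, on a
 * mismatch, rewrites only the parity pages whose dbitmap bits stay set.
 */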
2516*4882a593Smuzhiyun 
2517*4882a593Smuzhiyun static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2518*4882a593Smuzhiyun {
2519*4882a593Smuzhiyun 	if (stripe >= 0 && stripe < rbio->nr_data)
2520*4882a593Smuzhiyun 		return 1;
2521*4882a593Smuzhiyun 	return 0;
2522*4882a593Smuzhiyun }
2523*4882a593Smuzhiyun 
2524*4882a593Smuzhiyun /*
2525*4882a593Smuzhiyun  * While we're doing the parity check and repair, we could have errors
2526*4882a593Smuzhiyun  * in reading pages off the disk.  This checks for errors and if we're
2527*4882a593Smuzhiyun  * not able to read the page it'll trigger parity reconstruction.  The
2528*4882a593Smuzhiyun  * parity scrub will be finished after we've reconstructed the failed
2529*4882a593Smuzhiyun  * stripes
2530*4882a593Smuzhiyun  */
2531*4882a593Smuzhiyun static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2532*4882a593Smuzhiyun {
2533*4882a593Smuzhiyun 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2534*4882a593Smuzhiyun 		goto cleanup;
2535*4882a593Smuzhiyun 
2536*4882a593Smuzhiyun 	if (rbio->faila >= 0 || rbio->failb >= 0) {
2537*4882a593Smuzhiyun 		int dfail = 0, failp = -1;
2538*4882a593Smuzhiyun 
2539*4882a593Smuzhiyun 		if (is_data_stripe(rbio, rbio->faila))
2540*4882a593Smuzhiyun 			dfail++;
2541*4882a593Smuzhiyun 		else if (is_parity_stripe(rbio->faila))
2542*4882a593Smuzhiyun 			failp = rbio->faila;
2543*4882a593Smuzhiyun 
2544*4882a593Smuzhiyun 		if (is_data_stripe(rbio, rbio->failb))
2545*4882a593Smuzhiyun 			dfail++;
2546*4882a593Smuzhiyun 		else if (is_parity_stripe(rbio->failb))
2547*4882a593Smuzhiyun 			failp = rbio->failb;
2548*4882a593Smuzhiyun 
2549*4882a593Smuzhiyun 		/*
2550*4882a593Smuzhiyun 		 * Because we cannot use the parity being scrubbed to
2551*4882a593Smuzhiyun 		 * repair the data, our repair capability is reduced by
2552*4882a593Smuzhiyun 		 * one. (In the case of RAID5, we cannot repair anything.)
2553*4882a593Smuzhiyun 		 */
2554*4882a593Smuzhiyun 		if (dfail > rbio->bbio->max_errors - 1)
2555*4882a593Smuzhiyun 			goto cleanup;
2556*4882a593Smuzhiyun 
2557*4882a593Smuzhiyun 		/*
2558*4882a593Smuzhiyun 		 * If all the data is good and only the parity is bad,
2559*4882a593Smuzhiyun 		 * just repair the parity.
2560*4882a593Smuzhiyun 		 */
2561*4882a593Smuzhiyun 		if (dfail == 0) {
2562*4882a593Smuzhiyun 			finish_parity_scrub(rbio, 0);
2563*4882a593Smuzhiyun 			return;
2564*4882a593Smuzhiyun 		}
2565*4882a593Smuzhiyun 
2566*4882a593Smuzhiyun 		/*
2567*4882a593Smuzhiyun 		 * Getting here means we have one corrupted data stripe and
2568*4882a593Smuzhiyun 		 * one corrupted parity on RAID6. If the corrupted parity is
2569*4882a593Smuzhiyun 		 * the one being scrubbed, we can luckily use the other parity
2570*4882a593Smuzhiyun 		 * to repair the data; otherwise the data stripe cannot be
2571*4882a593Smuzhiyun 		 * repaired.
2571*4882a593Smuzhiyun 		 */
2572*4882a593Smuzhiyun 		if (failp != rbio->scrubp)
2573*4882a593Smuzhiyun 			goto cleanup;
2574*4882a593Smuzhiyun 
2575*4882a593Smuzhiyun 		__raid_recover_end_io(rbio);
2576*4882a593Smuzhiyun 	} else {
2577*4882a593Smuzhiyun 		finish_parity_scrub(rbio, 1);
2578*4882a593Smuzhiyun 	}
2579*4882a593Smuzhiyun 	return;
2580*4882a593Smuzhiyun 
2581*4882a593Smuzhiyun cleanup:
2582*4882a593Smuzhiyun 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2583*4882a593Smuzhiyun }
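
/*
 * Editor's summary of the decision above for RAID6 (max_errors == 2):
 *
 *	failed stripes				action
 *	------------------------------		------------------------------
 *	two data stripes			dfail > max_errors - 1: give up
 *	one data + the parity being scrubbed	rebuild data from the other,
 *						still-good parity
 *	one data + the other parity		failp != scrubp: give up
 *	parity only (dfail == 0)		just rewrite the parity
 */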
2584*4882a593Smuzhiyun 
2585*4882a593Smuzhiyun /*
2586*4882a593Smuzhiyun  * end io for the read phase of the scrub cycle.  All the bios here are
2587*4882a593Smuzhiyun  * physical stripe bios we've read from the disk so we can recalculate the
2588*4882a593Smuzhiyun  * parity of the stripe.
2589*4882a593Smuzhiyun  *
2590*4882a593Smuzhiyun  * This will usually kick off finish_parity_scrub once all the bios are read
2591*4882a593Smuzhiyun  * in, but it may trigger parity reconstruction if we had any errors along
2592*4882a593Smuzhiyun  * the way
2592*4882a593Smuzhiyun  */
2593*4882a593Smuzhiyun static void raid56_parity_scrub_end_io(struct bio *bio)
2594*4882a593Smuzhiyun {
2595*4882a593Smuzhiyun 	struct btrfs_raid_bio *rbio = bio->bi_private;
2596*4882a593Smuzhiyun 
2597*4882a593Smuzhiyun 	if (bio->bi_status)
2598*4882a593Smuzhiyun 		fail_bio_stripe(rbio, bio);
2599*4882a593Smuzhiyun 	else
2600*4882a593Smuzhiyun 		set_bio_pages_uptodate(bio);
2601*4882a593Smuzhiyun 
2602*4882a593Smuzhiyun 	bio_put(bio);
2603*4882a593Smuzhiyun 
2604*4882a593Smuzhiyun 	if (!atomic_dec_and_test(&rbio->stripes_pending))
2605*4882a593Smuzhiyun 		return;
2606*4882a593Smuzhiyun 
2607*4882a593Smuzhiyun 	/*
2608*4882a593Smuzhiyun 	 * this will normally call finish_parity_scrub to start our
2609*4882a593Smuzhiyun 	 * write, but if there are any failed stripes we'll reconstruct
2610*4882a593Smuzhiyun 	 * from parity first
2611*4882a593Smuzhiyun 	 */
2612*4882a593Smuzhiyun 	validate_rbio_for_parity_scrub(rbio);
2613*4882a593Smuzhiyun }
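
/*
 * Editor's note on the completion pattern above (minimal hypothetical
 * form): stripes_pending is set to the number of submitted bios before
 * any of them can complete, and only the final completion continues:
 *
 *	atomic_set(&ctx->pending, nr_bios);	// before the submit loop
 *
 *	// in each bi_end_io handler:
 *	if (!atomic_dec_and_test(&ctx->pending))
 *		return;				// not the last bio yet
 *	finish(ctx);				// runs exactly once
 */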
2614*4882a593Smuzhiyun 
2615*4882a593Smuzhiyun static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2616*4882a593Smuzhiyun {
2617*4882a593Smuzhiyun 	int bios_to_read = 0;
2618*4882a593Smuzhiyun 	struct bio_list bio_list;
2619*4882a593Smuzhiyun 	int ret;
2620*4882a593Smuzhiyun 	int pagenr;
2621*4882a593Smuzhiyun 	int stripe;
2622*4882a593Smuzhiyun 	struct bio *bio;
2623*4882a593Smuzhiyun 
2624*4882a593Smuzhiyun 	bio_list_init(&bio_list);
2625*4882a593Smuzhiyun 
2626*4882a593Smuzhiyun 	ret = alloc_rbio_essential_pages(rbio);
2627*4882a593Smuzhiyun 	if (ret)
2628*4882a593Smuzhiyun 		goto cleanup;
2629*4882a593Smuzhiyun 
2630*4882a593Smuzhiyun 	atomic_set(&rbio->error, 0);
2631*4882a593Smuzhiyun 	/*
2632*4882a593Smuzhiyun 	 * build a list of bios to read all the missing parts of this
2633*4882a593Smuzhiyun 	 * stripe
2634*4882a593Smuzhiyun 	 */
2635*4882a593Smuzhiyun 	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2636*4882a593Smuzhiyun 		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2637*4882a593Smuzhiyun 			struct page *page;
2638*4882a593Smuzhiyun 			/*
2639*4882a593Smuzhiyun 			 * we want to find all the pages missing from
2640*4882a593Smuzhiyun 			 * the rbio and read them from the disk.  If
2641*4882a593Smuzhiyun 			 * page_in_rbio finds a page in the bio list
2642*4882a593Smuzhiyun 			 * we don't need to read it off the stripe.
2643*4882a593Smuzhiyun 			 */
2644*4882a593Smuzhiyun 			page = page_in_rbio(rbio, stripe, pagenr, 1);
2645*4882a593Smuzhiyun 			if (page)
2646*4882a593Smuzhiyun 				continue;
2647*4882a593Smuzhiyun 
2648*4882a593Smuzhiyun 			page = rbio_stripe_page(rbio, stripe, pagenr);
2649*4882a593Smuzhiyun 			/*
2650*4882a593Smuzhiyun 			 * the bio cache may have handed us an uptodate
2651*4882a593Smuzhiyun 			 * page.  If so, be happy and use it
2652*4882a593Smuzhiyun 			 */
2653*4882a593Smuzhiyun 			if (PageUptodate(page))
2654*4882a593Smuzhiyun 				continue;
2655*4882a593Smuzhiyun 
2656*4882a593Smuzhiyun 			ret = rbio_add_io_page(rbio, &bio_list, page,
2657*4882a593Smuzhiyun 				       stripe, pagenr, rbio->stripe_len);
2658*4882a593Smuzhiyun 			if (ret)
2659*4882a593Smuzhiyun 				goto cleanup;
2660*4882a593Smuzhiyun 		}
2661*4882a593Smuzhiyun 	}
2662*4882a593Smuzhiyun 
2663*4882a593Smuzhiyun 	bios_to_read = bio_list_size(&bio_list);
2664*4882a593Smuzhiyun 	if (!bios_to_read) {
2665*4882a593Smuzhiyun 		/*
2666*4882a593Smuzhiyun 		 * this can happen if others have merged with
2667*4882a593Smuzhiyun 		 * us; it means there is nothing left to read.
2668*4882a593Smuzhiyun 		 * But if there are missing devices it may not be
2669*4882a593Smuzhiyun 		 * safe to do the full stripe write yet.
2670*4882a593Smuzhiyun 		 */
2671*4882a593Smuzhiyun 		goto finish;
2672*4882a593Smuzhiyun 	}
2673*4882a593Smuzhiyun 
2674*4882a593Smuzhiyun 	/*
2675*4882a593Smuzhiyun 	 * the bbio may be freed once we submit the last bio.  Make sure
2676*4882a593Smuzhiyun 	 * not to touch it after that
2677*4882a593Smuzhiyun 	 */
2678*4882a593Smuzhiyun 	atomic_set(&rbio->stripes_pending, bios_to_read);
2679*4882a593Smuzhiyun 	while ((bio = bio_list_pop(&bio_list))) {
2680*4882a593Smuzhiyun 		bio->bi_private = rbio;
2681*4882a593Smuzhiyun 		bio->bi_end_io = raid56_parity_scrub_end_io;
2682*4882a593Smuzhiyun 		bio->bi_opf = REQ_OP_READ;
2683*4882a593Smuzhiyun 
2684*4882a593Smuzhiyun 		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2685*4882a593Smuzhiyun 
2686*4882a593Smuzhiyun 		submit_bio(bio);
2687*4882a593Smuzhiyun 	}
2688*4882a593Smuzhiyun 	/* the actual write will happen once the reads are done */
2689*4882a593Smuzhiyun 	return;
2690*4882a593Smuzhiyun 
2691*4882a593Smuzhiyun cleanup:
2692*4882a593Smuzhiyun 	rbio_orig_end_io(rbio, BLK_STS_IOERR);
2693*4882a593Smuzhiyun 
2694*4882a593Smuzhiyun 	while ((bio = bio_list_pop(&bio_list)))
2695*4882a593Smuzhiyun 		bio_put(bio);
2696*4882a593Smuzhiyun 
2697*4882a593Smuzhiyun 	return;
2698*4882a593Smuzhiyun 
2699*4882a593Smuzhiyun finish:
2700*4882a593Smuzhiyun 	validate_rbio_for_parity_scrub(rbio);
2701*4882a593Smuzhiyun }
2702*4882a593Smuzhiyun 
2703*4882a593Smuzhiyun static void scrub_parity_work(struct btrfs_work *work)
2704*4882a593Smuzhiyun {
2705*4882a593Smuzhiyun 	struct btrfs_raid_bio *rbio;
2706*4882a593Smuzhiyun 
2707*4882a593Smuzhiyun 	rbio = container_of(work, struct btrfs_raid_bio, work);
2708*4882a593Smuzhiyun 	raid56_parity_scrub_stripe(rbio);
2709*4882a593Smuzhiyun }
2710*4882a593Smuzhiyun 
2711*4882a593Smuzhiyun void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2712*4882a593Smuzhiyun {
2713*4882a593Smuzhiyun 	if (!lock_stripe_add(rbio))
2714*4882a593Smuzhiyun 		start_async_work(rbio, scrub_parity_work);
2715*4882a593Smuzhiyun }
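
/*
 * Editor's usage sketch for the scrub entry points above (hedged; the
 * real caller lives in scrub.c):
 *
 *	rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio, stripe_len,
 *					      scrub_dev, dbitmap, nsectors);
 *	if (!rbio)
 *		return -ENOMEM;
 *	// for every checksummed page already read by the scrubber:
 *	raid56_add_scrub_pages(rbio, page, logical);
 *	raid56_parity_submit_scrub_rbio(rbio);
 *
 * The submit either schedules scrub_parity_work() right away or, if the
 * stripe is already locked, leaves the rbio queued to run when the
 * current lock owner finishes.
 */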
2716*4882a593Smuzhiyun 
2717*4882a593Smuzhiyun /* The following code is used for dev replace of a missing RAID 5/6 device. */
2718*4882a593Smuzhiyun 
2719*4882a593Smuzhiyun struct btrfs_raid_bio *
2720*4882a593Smuzhiyun raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2721*4882a593Smuzhiyun 			  struct btrfs_bio *bbio, u64 length)
2722*4882a593Smuzhiyun {
2723*4882a593Smuzhiyun 	struct btrfs_raid_bio *rbio;
2724*4882a593Smuzhiyun 
2725*4882a593Smuzhiyun 	rbio = alloc_rbio(fs_info, bbio, length);
2726*4882a593Smuzhiyun 	if (IS_ERR(rbio))
2727*4882a593Smuzhiyun 		return NULL;
2728*4882a593Smuzhiyun 
2729*4882a593Smuzhiyun 	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2730*4882a593Smuzhiyun 	bio_list_add(&rbio->bio_list, bio);
2731*4882a593Smuzhiyun 	/*
2732*4882a593Smuzhiyun 	 * This is a special bio which is used to hold the completion handler
2733*4882a593Smuzhiyun 	 * and make the rbio similar to the other types
2734*4882a593Smuzhiyun 	 */
2735*4882a593Smuzhiyun 	ASSERT(!bio->bi_iter.bi_size);
2736*4882a593Smuzhiyun 
2737*4882a593Smuzhiyun 	rbio->faila = find_logical_bio_stripe(rbio, bio);
2738*4882a593Smuzhiyun 	if (rbio->faila == -1) {
2739*4882a593Smuzhiyun 		BUG();
2740*4882a593Smuzhiyun 		kfree(rbio);
2741*4882a593Smuzhiyun 		return NULL;
2742*4882a593Smuzhiyun 	}
2743*4882a593Smuzhiyun 
2744*4882a593Smuzhiyun 	/*
2745*4882a593Smuzhiyun 	 * When we get bbio, we have already increased bio_counter, record it
2746*4882a593Smuzhiyun 	 * so we can drop it again in rbio_orig_end_io()
2747*4882a593Smuzhiyun 	 */
2748*4882a593Smuzhiyun 	rbio->generic_bio_cnt = 1;
2749*4882a593Smuzhiyun 
2750*4882a593Smuzhiyun 	return rbio;
2751*4882a593Smuzhiyun }
2752*4882a593Smuzhiyun 
2753*4882a593Smuzhiyun void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2754*4882a593Smuzhiyun {
2755*4882a593Smuzhiyun 	if (!lock_stripe_add(rbio))
2756*4882a593Smuzhiyun 		start_async_work(rbio, read_rebuild_work);
2757*4882a593Smuzhiyun }
2758
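
/*
 * Editor's note (assumed caller, also in scrub.c): the missing-device
 * path mirrors the scrub path: allocate with raid56_alloc_missing_rbio(),
 * attach the pages already read via raid56_add_scrub_pages(), then call
 * raid56_submit_missing_rbio(), which funnels into the same
 * __raid56_parity_recover() machinery used for normal read-rebuild.
 */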