/* xref: /OK3568_Linux_fs/kernel/block/bio.c (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/blk-cgroup.h>
#include <linux/highmem.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"

/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS		4

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
	BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);
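
/*
 * Illustrative sketch (not part of this file): drivers that need their own
 * pools embed a bio_set and set it up with bioset_init(); "my_bio_set" and
 * the module hooks below are hypothetical names for the example.
 *
 *	static struct bio_set my_bio_set;
 *
 *	static int __init my_driver_init(void)
 *	{
 *		return bioset_init(&my_bio_set, BIO_POOL_SIZE, 0,
 *				   BIOSET_NEED_BVECS | BIOSET_NEED_RESCUER);
 *	}
 *
 *	static void __exit my_driver_exit(void)
 *	{
 *		bioset_exit(&my_bio_set);
 *	}
 *
 * BIOSET_NEED_RESCUER is only needed by stacking drivers that may allocate
 * more than one bio beneath submit_bio_noacct(); see the rescuer logic
 * further down in this file.
 */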

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab, *new_bio_slabs;
	unsigned int new_bio_slab_max;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		new_bio_slab_max = bio_slab_max << 1;
		new_bio_slabs = krealloc(bio_slabs,
					 new_bio_slab_max * sizeof(struct bio_slab),
					 GFP_KERNEL);
		if (!new_bio_slabs)
			goto out_unlock;
		bio_slab_max = new_bio_slab_max;
		bio_slabs = new_bio_slabs;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[--idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
	if (!idx)
		return;
	idx--;

	BIO_BUG_ON(idx >= BVEC_POOL_NR);

	if (idx == BVEC_POOL_MAX) {
		mempool_free(bv, pool);
	} else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}

struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	/*
	 * see comment near bvec_array define!
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from. only the
	 * 1-vec entry pool is mempool backed.
	 */
	if (*idx == BVEC_POOL_MAX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fallback to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
		 * is set, retry with the 1-entry mempool
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
			*idx = BVEC_POOL_MAX;
			goto fallback;
		}
	}

	(*idx)++;
	return bvl;
}

void bio_uninit(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio->bi_blkg) {
		blkg_put(bio->bi_blkg);
		bio->bi_blkg = NULL;
	}
#endif
	if (bio_integrity(bio))
		bio_integrity_free(bio);

	bio_crypt_free_ctx(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	bio_uninit(bio);

	if (bs) {
		bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, &bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
void bio_init(struct bio *bio, struct bio_vec *table,
	      unsigned short max_vecs)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);

	bio->bi_io_vec = table;
	bio->bi_max_vecs = max_vecs;
}
EXPORT_SYMBOL(bio_init);
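
/*
 * Illustrative sketch: bio_init() is what makes on-stack bios possible for
 * simple, synchronous I/O that wants to avoid the mempools entirely.  All
 * variables here (bdev, sector, page) are assumed to be provided by the
 * caller:
 *
 *	struct bio bio;
 *	struct bio_vec bvec;
 *
 *	bio_init(&bio, &bvec, 1);
 *	bio_set_dev(&bio, bdev);
 *	bio.bi_iter.bi_sector = sector;
 *	bio.bi_opf = REQ_OP_READ;
 *	bio_add_page(&bio, page, PAGE_SIZE, 0);
 *	submit_bio_wait(&bio);
 *	bio_uninit(&bio);
 */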

/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	bio_uninit(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags;
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (bio->bi_status && !parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the parent bio of @bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io	= bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
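
/*
 * Illustrative sketch of the usual bio_chain() pattern: a driver that must
 * split a bio chains the front piece to the remainder, so the original
 * completion fires only once both halves finish.  "max_sectors" and
 * "my_bio_set" are hypothetical caller-owned values:
 *
 *	struct bio *split;
 *
 *	split = bio_split(bio, max_sectors, GFP_NOIO, &my_bio_set);
 *	bio_chain(split, bio);
 *	submit_bio_noacct(bio);		// resubmit the remainder
 *	bio = split;			// continue processing the front piece
 */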

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		submit_bio_noacct(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;
	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 *   always be able to allocate a bio. This is due to the mempool guarantees.
 *   To make this work, callers must never allocate more than 1 bio at a time
 *   from this pool. Callers that need to allocate more than 1 bio must always
 *   submit the previously allocated bio for IO before attempting to allocate
 *   a new one. Failure to do so can cause deadlocks under memory pressure.
 *
 *   Note that when running under submit_bio_noacct() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   submit_bio_noacct() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   submit_bio_noacct() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from other
 *   mempools. Doing multiple allocations from the same mempool under
 *   submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
 *   for per bio allocations.
 *
 *   RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		/* should not use nobvec bioset for nr_iovecs > 0 */
		if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
				 nr_iovecs > 0))
			return NULL;
		/*
		 * submit_bio_noacct() converts recursion to iteration; this
		 * means if we're running beneath it, any bios we allocate and
		 * submit will not be submitted (and thus freed) until after we
		 * return.
		 *
		 * This exposes us to a potential deadlock if we allocate
		 * multiple bios from the same bio_set() while running
		 * underneath submit_bio_noacct(). If we were to allocate
		 * multiple bios (say a stacking block driver that was splitting
		 * bios), we would deadlock if we exhausted the mempool's
		 * reserve.
		 *
		 * We solve this, and guarantee forward progress, with a rescuer
		 * workqueue per bio_set. If we go to allocate and there are
		 * bios on current->bio_list, we first try the allocation
		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
		 * bios we would be blocking to the rescuer workqueue before
		 * we retry with the original gfp_flags.
		 */

		if (current->bio_list &&
		    (!bio_list_empty(&current->bio_list[0]) ||
		     !bio_list_empty(&current->bio_list[1])) &&
		    bs->rescue_workqueue)
			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

		p = mempool_alloc(&bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(&bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio, NULL, 0);

	if (nr_iovecs > inline_vecs) {
		unsigned long idx = 0;

		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, &bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
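
/*
 * Illustrative allocation sketch: grab a one-segment bio from fs_bio_set,
 * fill it in and submit it.  With __GFP_DIRECT_RECLAIM set (as in GFP_NOIO)
 * the allocation itself cannot fail, per the mempool guarantee above;
 * "bdev", "sector", "page" and "my_end_io" are caller-provided:
 *
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &fs_bio_set);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 */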

void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	__bio_for_each_segment(bv, bio, iter, start) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio_iter);

/**
 * bio_truncate - truncate the bio to the smaller size @new_size
 * @bio:	the bio to be truncated
 * @new_size:	new size for truncating the bio
 *
 * Description:
 *   Truncate the bio to the new size @new_size. If bio_op(bio) is
 *   REQ_OP_READ, zero the truncated part. This function should only
 *   be used for handling corner cases, such as bio eod.
 */
void bio_truncate(struct bio *bio, unsigned new_size)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int done = 0;
	bool truncated = false;

	if (new_size >= bio->bi_iter.bi_size)
		return;

	if (bio_op(bio) != REQ_OP_READ)
		goto exit;

	bio_for_each_segment(bv, bio, iter) {
		if (done + bv.bv_len > new_size) {
			unsigned offset;

			if (!truncated)
				offset = new_size - done;
			else
				offset = 0;
			zero_user(bv.bv_page, bv.bv_offset + offset,
				  bv.bv_len - offset);
			truncated = true;
		}
		done += bv.bv_len;
	}

 exit:
	/*
	 * Don't touch bvec table here and make it really immutable, since
	 * fs bio user has to retrieve all pages via bio_for_each_segment_all
	 * in its .bi_end_io() callback.
	 *
	 * It is enough to truncate bio by updating .bi_size since we can make
	 * correct bvec with the updated .bi_size for drivers.
	 */
	bio->bi_iter.bi_size = new_size;
}

/**
 * guard_bio_eod - truncate a BIO to fit the block device
 * @bio:	bio to truncate
 *
 * This allows us to do IO even on the odd last sectors of a device, even if the
 * block size is some multiple of the physical sector size.
 *
 * We'll just truncate the bio to the size of the device, and clear the end of
 * the buffer head manually.  Truly out-of-range accesses will turn into actual
 * I/O errors, this only handles the "we need to be able to do I/O at the final
 * sector" case.
 */
void guard_bio_eod(struct bio *bio)
{
	sector_t maxsector;
	struct hd_struct *part;

	rcu_read_lock();
	part = __disk_get_part(bio->bi_disk, bio->bi_partno);
	if (part)
		maxsector = part_nr_sects_read(part);
	else
		maxsector = get_capacity(bio->bi_disk);
	rcu_read_unlock();

	if (!maxsector)
		return;

	/*
	 * If the *whole* IO is past the end of the device,
	 * let it through, and the IO layer will turn it into
	 * an EIO.
	 */
	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
		return;

	maxsector -= bio->bi_iter.bi_sector;
	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
		return;

	bio_truncate(bio, maxsector << 9);
}

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

		/*
		 * last put frees it
		 */
		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);
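
/*
 * Illustrative sketch: the normal completion path drops the last reference,
 * so a submitter that wants to keep the bio alive past submission must take
 * its own reference first (this mirrors the bio_get() usage pattern
 * documented in <linux/bio.h>):
 *
 *	bio_get(bio);
 *	submit_bio(bio);
 *	// the bio memory stays valid here even if the I/O has completed
 *	bio_put(bio);
 */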

/**
 * 	__bio_clone_fast - clone a bio that shares the original bio's biovec
 * 	@bio: destination bio
 * 	@bio_src: bio to clone
 *
 *	Clone a &bio. Caller will own the returned bio, but not
 *	the actual data it points to. Reference count of returned
 * 	bio will be one.
 *
 * 	Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

	/*
	 * most users will be overriding ->bi_disk with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_disk = bio_src->bi_disk;
	bio->bi_partno = bio_src->bi_partno;
	bio_set_flag(bio, BIO_CLONED);
	if (bio_flagged(bio_src, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 *	bio_clone_fast - clone a bio that shares the original bio's biovec
 *	@bio: bio to clone
 *	@gfp_mask: allocation priority
 *	@bs: bio_set to allocate from
 *
 * 	Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_crypt_clone(b, bio, gfp_mask) < 0)
		goto err_put;

	if (bio_integrity(bio) &&
	    bio_integrity_clone(b, bio, gfp_mask) < 0)
		goto err_put;

	return b;

err_put:
	bio_put(b);
	return NULL;
}
EXPORT_SYMBOL(bio_clone_fast);
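
/*
 * Illustrative sketch of the common stacking-driver use of bio_clone_fast():
 * clone the incoming bio, point the clone at a backing device, and let the
 * clone's completion drive the original.  Names prefixed "my_" are
 * hypothetical:
 *
 *	static void my_clone_endio(struct bio *clone)
 *	{
 *		struct bio *orig = clone->bi_private;
 *
 *		orig->bi_status = clone->bi_status;
 *		bio_put(clone);
 *		bio_endio(orig);
 *	}
 *
 *	clone = bio_clone_fast(bio, GFP_NOIO, &my_bio_set);
 *	bio_set_dev(clone, my_backing_bdev);
 *	clone->bi_private = bio;
 *	clone->bi_end_io = my_clone_endio;
 *	submit_bio(clone);
 *
 * Note the ordering in my_clone_endio(): the clone shares the original's
 * biovec, so it is put before the original is ended.
 */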

const char *bio_devname(struct bio *bio, char *buf)
{
	return disk_name(bio->bi_disk, bio->bi_partno, buf);
}
EXPORT_SYMBOL(bio_devname);

static inline bool page_is_mergeable(const struct bio_vec *bv,
		struct page *page, unsigned int len, unsigned int off,
		bool *same_page)
{
	size_t bv_end = bv->bv_offset + bv->bv_len;
	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
	phys_addr_t page_addr = page_to_phys(page);

	if (vec_end_addr + 1 != page_addr + off)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
		return false;

	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
	if (*same_page)
		return true;
	return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
}

/*
 * Try to merge a page into a segment, while obeying the hardware segment
 * size limit.  This is not for normal read/write bios, but for passthrough
 * or Zone Append operations that we can't split.
 */
static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
				 struct page *page, unsigned len,
				 unsigned offset, bool *same_page)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;

	if ((addr1 | mask) != (addr2 | mask))
		return false;
	if (bv->bv_len + len > queue_max_segment_size(q))
		return false;
	return __bio_try_merge_page(bio, page, len, offset, same_page);
}

/**
 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 * @max_sectors: maximum number of sectors that can be added
 * @same_page: return if the segment has been merged inside the same page
 *
 * Add a page to a bio while respecting the hardware max_sectors, max_segment
 * and gap limitations.
 */
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page)
{
	struct bio_vec *bvec;

	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
		return 0;

	if (bio->bi_vcnt > 0) {
		if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
			return len;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
		if (bvec_gap_to_prev(q, bvec, offset))
			return 0;
	}

	if (bio_full(bio, len))
		return 0;

	if (bio->bi_vcnt >= queue_max_segments(q))
		return 0;

	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_iter.bi_size += len;
	return len;
}

/**
 * bio_add_pc_page	- attempt to add page to passthrough bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by passthrough bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset)
{
	bool same_page = false;
	return bio_add_hw_page(q, bio, page, len, offset,
			queue_max_hw_sectors(q), &same_page);
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * __bio_try_merge_page - try appending data to an existing bvec.
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add
 * @off: offset of the data relative to @page
 * @same_page: return if the segment has been merged inside the same page
 *
 * Try to add the data at @page + @off to the last bvec of @bio.  This is a
 * useful optimisation for file systems with a block size smaller than the
 * page size.
 *
 * Warn if (@len, @off) crosses pages in case that @same_page is true.
 *
 * Return %true on success or %false on failure.
 */
bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page)
{
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return false;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page_is_mergeable(bv, page, len, off, same_page)) {
			if (bio->bi_iter.bi_size > UINT_MAX - len) {
				*same_page = false;
				return false;
			}
			bv->bv_len += len;
			bio->bi_iter.bi_size += len;
			return true;
		}
	}
	return false;
}
EXPORT_SYMBOL_GPL(__bio_try_merge_page);

/**
 * __bio_add_page - add page(s) to a bio in a new segment
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add, may cross pages
 * @off: offset of the data relative to @page, may cross pages
 *
 * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
 * that @bio has space for another bvec.
 */
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio_full(bio, len));

	bv->bv_page = page;
	bv->bv_offset = off;
	bv->bv_len = len;

	bio->bi_iter.bi_size += len;
	bio->bi_vcnt++;

	if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
		bio_set_flag(bio, BIO_WORKINGSET);
}
EXPORT_SYMBOL_GPL(__bio_add_page);

/**
 *	bio_add_page	-	attempt to add page(s) to bio
 *	@bio: destination bio
 *	@page: start page to add
 *	@len: vec entry length, may cross pages
 *	@offset: vec entry offset relative to @page, may cross pages
 *
 *	Attempt to add page(s) to the bio_vec maplist. This will only fail
 *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	bool same_page = false;

	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
		if (bio_full(bio, len))
			return 0;
		__bio_add_page(bio, page, len, offset);
	}
	return len;
}
EXPORT_SYMBOL(bio_add_page);
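
/*
 * Illustrative sketch: bio_add_page() is all-or-nothing, so bulk callers
 * check the return value and flush the bio once it fills up.
 * "my_next_bio()" is a hypothetical helper that submits the full bio and
 * allocates a fresh one:
 *
 *	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
 *		bio = my_next_bio(bio);
 *		// adding a single page to the new, empty bio cannot fail
 *		bio_add_page(bio, page, PAGE_SIZE, 0);
 *	}
 */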
947*4882a593Smuzhiyun 
bio_release_pages(struct bio * bio,bool mark_dirty)948*4882a593Smuzhiyun void bio_release_pages(struct bio *bio, bool mark_dirty)
949*4882a593Smuzhiyun {
950*4882a593Smuzhiyun 	struct bvec_iter_all iter_all;
951*4882a593Smuzhiyun 	struct bio_vec *bvec;
952*4882a593Smuzhiyun 
953*4882a593Smuzhiyun 	if (bio_flagged(bio, BIO_NO_PAGE_REF))
954*4882a593Smuzhiyun 		return;
955*4882a593Smuzhiyun 
956*4882a593Smuzhiyun 	bio_for_each_segment_all(bvec, bio, iter_all) {
957*4882a593Smuzhiyun 		if (mark_dirty && !PageCompound(bvec->bv_page))
958*4882a593Smuzhiyun 			set_page_dirty_lock(bvec->bv_page);
959*4882a593Smuzhiyun 		put_page(bvec->bv_page);
960*4882a593Smuzhiyun 	}
961*4882a593Smuzhiyun }
962*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(bio_release_pages);
963*4882a593Smuzhiyun 
__bio_iov_bvec_add_pages(struct bio * bio,struct iov_iter * iter)964*4882a593Smuzhiyun static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
965*4882a593Smuzhiyun {
966*4882a593Smuzhiyun 	const struct bio_vec *bv = iter->bvec;
967*4882a593Smuzhiyun 	unsigned int len;
968*4882a593Smuzhiyun 	size_t size;
969*4882a593Smuzhiyun 
970*4882a593Smuzhiyun 	if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))
971*4882a593Smuzhiyun 		return -EINVAL;
972*4882a593Smuzhiyun 
973*4882a593Smuzhiyun 	len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
974*4882a593Smuzhiyun 	size = bio_add_page(bio, bv->bv_page, len,
975*4882a593Smuzhiyun 				bv->bv_offset + iter->iov_offset);
976*4882a593Smuzhiyun 	if (unlikely(size != len))
977*4882a593Smuzhiyun 		return -EINVAL;
978*4882a593Smuzhiyun 	iov_iter_advance(iter, size);
979*4882a593Smuzhiyun 	return 0;
980*4882a593Smuzhiyun }
981*4882a593Smuzhiyun 
bio_put_pages(struct page ** pages,size_t size,size_t off)982*4882a593Smuzhiyun static void bio_put_pages(struct page **pages, size_t size, size_t off)
983*4882a593Smuzhiyun {
984*4882a593Smuzhiyun 	size_t i, nr = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE);
985*4882a593Smuzhiyun 
986*4882a593Smuzhiyun 	for (i = 0; i < nr; i++)
987*4882a593Smuzhiyun 		put_page(pages[i]);
988*4882a593Smuzhiyun }
989*4882a593Smuzhiyun 
990*4882a593Smuzhiyun #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
991*4882a593Smuzhiyun 
992*4882a593Smuzhiyun /**
993*4882a593Smuzhiyun  * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
994*4882a593Smuzhiyun  * @bio: bio to add pages to
995*4882a593Smuzhiyun  * @iter: iov iterator describing the region to be mapped
996*4882a593Smuzhiyun  *
997*4882a593Smuzhiyun  * Pins pages from *iter and appends them to @bio's bvec array. The
998*4882a593Smuzhiyun  * pages will have to be released using put_page() when done.
999*4882a593Smuzhiyun  * For multi-segment *iter, this function only adds pages from the
1000*4882a593Smuzhiyun  * next non-empty segment of the iov iterator.
1001*4882a593Smuzhiyun  */
__bio_iov_iter_get_pages(struct bio * bio,struct iov_iter * iter)1002*4882a593Smuzhiyun static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1003*4882a593Smuzhiyun {
1004*4882a593Smuzhiyun 	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
1005*4882a593Smuzhiyun 	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
1006*4882a593Smuzhiyun 	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
1007*4882a593Smuzhiyun 	struct page **pages = (struct page **)bv;
1008*4882a593Smuzhiyun 	bool same_page = false;
1009*4882a593Smuzhiyun 	ssize_t size, left;
1010*4882a593Smuzhiyun 	unsigned len, i;
1011*4882a593Smuzhiyun 	size_t offset;
1012*4882a593Smuzhiyun 
1013*4882a593Smuzhiyun 	/*
1014*4882a593Smuzhiyun 	 * Move page array up in the allocated memory for the bio vecs as far as
1015*4882a593Smuzhiyun 	 * possible so that we can start filling biovecs from the beginning
1016*4882a593Smuzhiyun 	 * without overwriting the temporary page array.
1017*4882a593Smuzhiyun 	*/
1018*4882a593Smuzhiyun 	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
1019*4882a593Smuzhiyun 	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
1020*4882a593Smuzhiyun 
1021*4882a593Smuzhiyun 	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
1022*4882a593Smuzhiyun 	if (unlikely(size <= 0))
1023*4882a593Smuzhiyun 		return size ? size : -EFAULT;
1024*4882a593Smuzhiyun 
1025*4882a593Smuzhiyun 	for (left = size, i = 0; left > 0; left -= len, i++) {
1026*4882a593Smuzhiyun 		struct page *page = pages[i];
1027*4882a593Smuzhiyun 
1028*4882a593Smuzhiyun 		len = min_t(size_t, PAGE_SIZE - offset, left);
1029*4882a593Smuzhiyun 
1030*4882a593Smuzhiyun 		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
1031*4882a593Smuzhiyun 			if (same_page)
1032*4882a593Smuzhiyun 				put_page(page);
1033*4882a593Smuzhiyun 		} else {
1034*4882a593Smuzhiyun 			if (WARN_ON_ONCE(bio_full(bio, len))) {
1035*4882a593Smuzhiyun 				bio_put_pages(pages + i, left, offset);
1036*4882a593Smuzhiyun 				return -EINVAL;
1037*4882a593Smuzhiyun 			}
1038*4882a593Smuzhiyun 			__bio_add_page(bio, page, len, offset);
1039*4882a593Smuzhiyun 		}
1040*4882a593Smuzhiyun 		offset = 0;
1041*4882a593Smuzhiyun 	}
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun 	iov_iter_advance(iter, size);
1044*4882a593Smuzhiyun 	return 0;
1045*4882a593Smuzhiyun }
1046*4882a593Smuzhiyun 
__bio_iov_append_get_pages(struct bio * bio,struct iov_iter * iter)1047*4882a593Smuzhiyun static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
1048*4882a593Smuzhiyun {
1049*4882a593Smuzhiyun 	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
1050*4882a593Smuzhiyun 	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
1051*4882a593Smuzhiyun 	struct request_queue *q = bio->bi_disk->queue;
1052*4882a593Smuzhiyun 	unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
1053*4882a593Smuzhiyun 	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
1054*4882a593Smuzhiyun 	struct page **pages = (struct page **)bv;
1055*4882a593Smuzhiyun 	ssize_t size, left;
1056*4882a593Smuzhiyun 	unsigned len, i;
1057*4882a593Smuzhiyun 	size_t offset;
1058*4882a593Smuzhiyun 	int ret = 0;
1059*4882a593Smuzhiyun 
1060*4882a593Smuzhiyun 	/*
1061*4882a593Smuzhiyun 	 * Move page array up in the allocated memory for the bio vecs as far as
1062*4882a593Smuzhiyun 	 * possible so that we can start filling biovecs from the beginning
1063*4882a593Smuzhiyun 	 * without overwriting the temporary page array.
1064*4882a593Smuzhiyun 	 */
1065*4882a593Smuzhiyun 	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
1066*4882a593Smuzhiyun 	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
1067*4882a593Smuzhiyun 
1068*4882a593Smuzhiyun 	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
1069*4882a593Smuzhiyun 	if (unlikely(size <= 0))
1070*4882a593Smuzhiyun 		return size ? size : -EFAULT;
1071*4882a593Smuzhiyun 
1072*4882a593Smuzhiyun 	for (left = size, i = 0; left > 0; left -= len, i++) {
1073*4882a593Smuzhiyun 		struct page *page = pages[i];
1074*4882a593Smuzhiyun 		bool same_page = false;
1075*4882a593Smuzhiyun 
1076*4882a593Smuzhiyun 		len = min_t(size_t, PAGE_SIZE - offset, left);
1077*4882a593Smuzhiyun 		if (bio_add_hw_page(q, bio, page, len, offset,
1078*4882a593Smuzhiyun 				max_append_sectors, &same_page) != len) {
1079*4882a593Smuzhiyun 			bio_put_pages(pages + i, left, offset);
1080*4882a593Smuzhiyun 			ret = -EINVAL;
1081*4882a593Smuzhiyun 			break;
1082*4882a593Smuzhiyun 		}
1083*4882a593Smuzhiyun 		if (same_page)
1084*4882a593Smuzhiyun 			put_page(page);
1085*4882a593Smuzhiyun 		offset = 0;
1086*4882a593Smuzhiyun 	}
1087*4882a593Smuzhiyun 
1088*4882a593Smuzhiyun 	iov_iter_advance(iter, size - left);
1089*4882a593Smuzhiyun 	return ret;
1090*4882a593Smuzhiyun }
1091*4882a593Smuzhiyun 
1092*4882a593Smuzhiyun /**
1093*4882a593Smuzhiyun  * bio_iov_iter_get_pages - add user or kernel pages to a bio
1094*4882a593Smuzhiyun  * @bio: bio to add pages to
1095*4882a593Smuzhiyun  * @iter: iov iterator describing the region to be added
1096*4882a593Smuzhiyun  *
1097*4882a593Smuzhiyun  * This takes either an iterator pointing to user memory, or one pointing to
1098*4882a593Smuzhiyun  * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
1099*4882a593Smuzhiyun  * map them into the kernel. On IO completion, the caller should put those
1100*4882a593Smuzhiyun  * pages. If we're adding kernel pages, and the caller told us it's safe to
1101*4882a593Smuzhiyun  * do so, we just have to add the pages to the bio directly. We don't grab an
1102*4882a593Smuzhiyun  * extra reference to those pages (the user should already have that), and we
1103*4882a593Smuzhiyun  * don't put the page on IO completion. The caller needs to check if the bio is
1104*4882a593Smuzhiyun  * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
1105*4882a593Smuzhiyun  * released.
1106*4882a593Smuzhiyun  *
1107*4882a593Smuzhiyun  * The function tries, but does not guarantee, to pin as many pages as
1108*4882a593Smuzhiyun  * fit into the bio, or are requested in @iter, whatever is smaller. If
1109*4882a593Smuzhiyun  * MM encounters an error pinning the requested pages, it stops. Error
1110*4882a593Smuzhiyun  * is returned only if 0 pages could be pinned.
1111*4882a593Smuzhiyun  */
bio_iov_iter_get_pages(struct bio * bio,struct iov_iter * iter)1112*4882a593Smuzhiyun int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1113*4882a593Smuzhiyun {
1114*4882a593Smuzhiyun 	const bool is_bvec = iov_iter_is_bvec(iter);
1115*4882a593Smuzhiyun 	int ret;
1116*4882a593Smuzhiyun 
1117*4882a593Smuzhiyun 	if (WARN_ON_ONCE(bio->bi_vcnt))
1118*4882a593Smuzhiyun 		return -EINVAL;
1119*4882a593Smuzhiyun 
1120*4882a593Smuzhiyun 	do {
1121*4882a593Smuzhiyun 		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
1122*4882a593Smuzhiyun 			if (WARN_ON_ONCE(is_bvec))
1123*4882a593Smuzhiyun 				return -EINVAL;
1124*4882a593Smuzhiyun 			ret = __bio_iov_append_get_pages(bio, iter);
1125*4882a593Smuzhiyun 		} else {
1126*4882a593Smuzhiyun 			if (is_bvec)
1127*4882a593Smuzhiyun 				ret = __bio_iov_bvec_add_pages(bio, iter);
1128*4882a593Smuzhiyun 			else
1129*4882a593Smuzhiyun 				ret = __bio_iov_iter_get_pages(bio, iter);
1130*4882a593Smuzhiyun 		}
1131*4882a593Smuzhiyun 	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 	if (is_bvec)
1134*4882a593Smuzhiyun 		bio_set_flag(bio, BIO_NO_PAGE_REF);
1135*4882a593Smuzhiyun 	return bio->bi_vcnt ? 0 : ret;
1136*4882a593Smuzhiyun }
1137*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
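/*
 * Example (illustrative sketch only, not part of this file): a typical
 * direct-I/O style caller. The bdev, sector and iter variables here are
 * hypothetical, and error handling is trimmed to the essentials.
 *
 *	struct bio *bio;
 *	int ret;
 *
 *	bio = bio_alloc(GFP_KERNEL, iov_iter_npages(iter, BIO_MAX_PAGES));
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *
 *	ret = bio_iov_iter_get_pages(bio, iter);
 *	if (!ret)
 *		ret = submit_bio_wait(bio);
 *	if (!bio_flagged(bio, BIO_NO_PAGE_REF))
 *		bio_release_pages(bio, bio_data_dir(bio) == READ);
 *	bio_put(bio);
 */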
1138*4882a593Smuzhiyun 
1139*4882a593Smuzhiyun static void submit_bio_wait_endio(struct bio *bio)
1140*4882a593Smuzhiyun {
1141*4882a593Smuzhiyun 	complete(bio->bi_private);
1142*4882a593Smuzhiyun }
1143*4882a593Smuzhiyun 
1144*4882a593Smuzhiyun /**
1145*4882a593Smuzhiyun  * submit_bio_wait - submit a bio, and wait until it completes
1146*4882a593Smuzhiyun  * @bio: The &struct bio which describes the I/O
1147*4882a593Smuzhiyun  *
1148*4882a593Smuzhiyun  * Simple wrapper around submit_bio(). Returns 0 on success, or the errno
1149*4882a593Smuzhiyun  * derived from the bio's completion status (->bi_status) on failure.
1150*4882a593Smuzhiyun  *
1151*4882a593Smuzhiyun  * WARNING: Unlike how submit_bio() is usually used, this function does not
1152*4882a593Smuzhiyun  * consume the bio reference. The caller must drop the reference on its
1153*4882a593Smuzhiyun  * own.
1154*4882a593Smuzhiyun  */
1155*4882a593Smuzhiyun int submit_bio_wait(struct bio *bio)
1156*4882a593Smuzhiyun {
1157*4882a593Smuzhiyun 	DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
1158*4882a593Smuzhiyun 	unsigned long hang_check;
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun 	bio->bi_private = &done;
1161*4882a593Smuzhiyun 	bio->bi_end_io = submit_bio_wait_endio;
1162*4882a593Smuzhiyun 	bio->bi_opf |= REQ_SYNC;
1163*4882a593Smuzhiyun 	submit_bio(bio);
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun 	/* Prevent hang_check timer from firing at us during very long I/O */
1166*4882a593Smuzhiyun 	hang_check = sysctl_hung_task_timeout_secs;
1167*4882a593Smuzhiyun 	if (hang_check)
1168*4882a593Smuzhiyun 		while (!wait_for_completion_io_timeout(&done,
1169*4882a593Smuzhiyun 					hang_check * (HZ/2)))
1170*4882a593Smuzhiyun 			;
1171*4882a593Smuzhiyun 	else
1172*4882a593Smuzhiyun 		wait_for_completion_io(&done);
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 	return blk_status_to_errno(bio->bi_status);
1175*4882a593Smuzhiyun }
1176*4882a593Smuzhiyun EXPORT_SYMBOL(submit_bio_wait);
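/*
 * Example (illustrative sketch only, not part of this file): reading a
 * single page synchronously from a hypothetical bdev at a given sector.
 * Per the warning above, the caller still owns the bio reference and
 * must bio_put() it after submit_bio_wait() returns.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *	int err;
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *
 *	err = submit_bio_wait(bio);
 *	bio_put(bio);
 */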
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun /**
1179*4882a593Smuzhiyun  * bio_advance - increment/complete a bio by some number of bytes
1180*4882a593Smuzhiyun  * @bio:	bio to advance
1181*4882a593Smuzhiyun  * @bytes:	number of bytes to complete
1182*4882a593Smuzhiyun  *
1183*4882a593Smuzhiyun  * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
1184*4882a593Smuzhiyun  * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
1185*4882a593Smuzhiyun  * be updated on the last bvec as well.
1186*4882a593Smuzhiyun  *
1187*4882a593Smuzhiyun  * @bio will then represent the remaining, uncompleted portion of the io.
1188*4882a593Smuzhiyun  */
1189*4882a593Smuzhiyun void bio_advance(struct bio *bio, unsigned bytes)
1190*4882a593Smuzhiyun {
1191*4882a593Smuzhiyun 	if (bio_integrity(bio))
1192*4882a593Smuzhiyun 		bio_integrity_advance(bio, bytes);
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	bio_crypt_advance(bio, bytes);
1195*4882a593Smuzhiyun 	bio_advance_iter(bio, &bio->bi_iter, bytes);
1196*4882a593Smuzhiyun }
1197*4882a593Smuzhiyun EXPORT_SYMBOL(bio_advance);
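/*
 * Example (illustrative sketch only, not part of this file): a driver
 * that services a bio in device-sized chunks can advance the bio after
 * each chunk. process_one_chunk() is a hypothetical helper returning the
 * number of bytes it handled.
 *
 *	while (bio->bi_iter.bi_size) {
 *		unsigned int done = process_one_chunk(dev, bio);
 *
 *		bio_advance(bio, done);
 *	}
 *	bio_endio(bio);
 */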
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
1200*4882a593Smuzhiyun 			struct bio *src, struct bvec_iter *src_iter)
1201*4882a593Smuzhiyun {
1202*4882a593Smuzhiyun 	struct bio_vec src_bv, dst_bv;
1203*4882a593Smuzhiyun 	void *src_p, *dst_p;
1204*4882a593Smuzhiyun 	unsigned bytes;
1205*4882a593Smuzhiyun 
1206*4882a593Smuzhiyun 	while (src_iter->bi_size && dst_iter->bi_size) {
1207*4882a593Smuzhiyun 		src_bv = bio_iter_iovec(src, *src_iter);
1208*4882a593Smuzhiyun 		dst_bv = bio_iter_iovec(dst, *dst_iter);
1209*4882a593Smuzhiyun 
1210*4882a593Smuzhiyun 		bytes = min(src_bv.bv_len, dst_bv.bv_len);
1211*4882a593Smuzhiyun 
1212*4882a593Smuzhiyun 		src_p = kmap_atomic(src_bv.bv_page);
1213*4882a593Smuzhiyun 		dst_p = kmap_atomic(dst_bv.bv_page);
1214*4882a593Smuzhiyun 
1215*4882a593Smuzhiyun 		memcpy(dst_p + dst_bv.bv_offset,
1216*4882a593Smuzhiyun 		       src_p + src_bv.bv_offset,
1217*4882a593Smuzhiyun 		       bytes);
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun 		kunmap_atomic(dst_p);
1220*4882a593Smuzhiyun 		kunmap_atomic(src_p);
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 		flush_dcache_page(dst_bv.bv_page);
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun 		bio_advance_iter(src, src_iter, bytes);
1225*4882a593Smuzhiyun 		bio_advance_iter(dst, dst_iter, bytes);
1226*4882a593Smuzhiyun 	}
1227*4882a593Smuzhiyun }
1228*4882a593Smuzhiyun EXPORT_SYMBOL(bio_copy_data_iter);
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun /**
1231*4882a593Smuzhiyun  * bio_copy_data - copy contents of data buffers from one bio to another
1232*4882a593Smuzhiyun  * @src: source bio
1233*4882a593Smuzhiyun  * @dst: destination bio
1234*4882a593Smuzhiyun  *
1235*4882a593Smuzhiyun  * Stops when it reaches the end of either @src or @dst - that is, copies
1236*4882a593Smuzhiyun  * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
1237*4882a593Smuzhiyun  */
1238*4882a593Smuzhiyun void bio_copy_data(struct bio *dst, struct bio *src)
1239*4882a593Smuzhiyun {
1240*4882a593Smuzhiyun 	struct bvec_iter src_iter = src->bi_iter;
1241*4882a593Smuzhiyun 	struct bvec_iter dst_iter = dst->bi_iter;
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun 	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1244*4882a593Smuzhiyun }
1245*4882a593Smuzhiyun EXPORT_SYMBOL(bio_copy_data);
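/*
 * Example (illustrative sketch only, not part of this file): a bounce
 * buffering scheme can use bio_copy_data() to move a completed read back
 * into the original bio. The bounce bio is assumed to carry the original
 * bio in ->bi_private and to own its own pages.
 *
 *	static void bounce_read_end_io(struct bio *bounce)
 *	{
 *		struct bio *orig = bounce->bi_private;
 *
 *		if (!bounce->bi_status)
 *			bio_copy_data(orig, bounce);
 *		orig->bi_status = bounce->bi_status;
 *		bio_endio(orig);
 *		bio_free_pages(bounce);
 *		bio_put(bounce);
 *	}
 */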
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun /**
1248*4882a593Smuzhiyun  * bio_list_copy_data - copy contents of data buffers from one chain of bios to
1249*4882a593Smuzhiyun  * another
1250*4882a593Smuzhiyun  * @src: source bio list
1251*4882a593Smuzhiyun  * @dst: destination bio list
1252*4882a593Smuzhiyun  *
1253*4882a593Smuzhiyun  * Stops when it reaches the end of either the @src list or @dst list - that is,
1254*4882a593Smuzhiyun  * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
1255*4882a593Smuzhiyun  * bios).
1256*4882a593Smuzhiyun  */
1257*4882a593Smuzhiyun void bio_list_copy_data(struct bio *dst, struct bio *src)
1258*4882a593Smuzhiyun {
1259*4882a593Smuzhiyun 	struct bvec_iter src_iter = src->bi_iter;
1260*4882a593Smuzhiyun 	struct bvec_iter dst_iter = dst->bi_iter;
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 	while (1) {
1263*4882a593Smuzhiyun 		if (!src_iter.bi_size) {
1264*4882a593Smuzhiyun 			src = src->bi_next;
1265*4882a593Smuzhiyun 			if (!src)
1266*4882a593Smuzhiyun 				break;
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun 			src_iter = src->bi_iter;
1269*4882a593Smuzhiyun 		}
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun 		if (!dst_iter.bi_size) {
1272*4882a593Smuzhiyun 			dst = dst->bi_next;
1273*4882a593Smuzhiyun 			if (!dst)
1274*4882a593Smuzhiyun 				break;
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun 			dst_iter = dst->bi_iter;
1277*4882a593Smuzhiyun 		}
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1280*4882a593Smuzhiyun 	}
1281*4882a593Smuzhiyun }
1282*4882a593Smuzhiyun EXPORT_SYMBOL(bio_list_copy_data);
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun void bio_free_pages(struct bio *bio)
1285*4882a593Smuzhiyun {
1286*4882a593Smuzhiyun 	struct bio_vec *bvec;
1287*4882a593Smuzhiyun 	struct bvec_iter_all iter_all;
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun 	bio_for_each_segment_all(bvec, bio, iter_all)
1290*4882a593Smuzhiyun 		__free_page(bvec->bv_page);
1291*4882a593Smuzhiyun }
1292*4882a593Smuzhiyun EXPORT_SYMBOL(bio_free_pages);
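/*
 * Example (illustrative sketch only, not part of this file): the usual
 * pairing is with a bio whose pages its owner allocated itself, e.g.
 *
 *	while (nr_pages--) {
 *		struct page *page = alloc_page(GFP_KERNEL);
 *
 *		if (!page || !bio_add_page(bio, page, PAGE_SIZE, 0))
 *			goto cleanup;
 *	}
 *
 * where both the cleanup path and the completion path end with:
 *
 *	bio_free_pages(bio);
 *	bio_put(bio);
 */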
1293*4882a593Smuzhiyun 
1294*4882a593Smuzhiyun /*
1295*4882a593Smuzhiyun  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1296*4882a593Smuzhiyun  * for performing direct-IO in BIOs.
1297*4882a593Smuzhiyun  *
1298*4882a593Smuzhiyun  * The problem is that we cannot run set_page_dirty() from interrupt context
1299*4882a593Smuzhiyun  * because the required locks are not interrupt-safe.  So what we can do is to
1300*4882a593Smuzhiyun  * mark the pages dirty _before_ performing IO.  And in interrupt context,
1301*4882a593Smuzhiyun  * check that the pages are still dirty.   If so, fine.  If not, redirty them
1302*4882a593Smuzhiyun  * in process context.
1303*4882a593Smuzhiyun  *
1304*4882a593Smuzhiyun  * We special-case compound pages here: normally this means reads into hugetlb
1305*4882a593Smuzhiyun  * pages.  The logic in here doesn't really work right for compound pages
1306*4882a593Smuzhiyun  * because the VM does not uniformly chase down the head page in all cases.
1307*4882a593Smuzhiyun  * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1308*4882a593Smuzhiyun  * handle them at all.  So we skip compound pages here at an early stage.
1309*4882a593Smuzhiyun  *
1310*4882a593Smuzhiyun  * Note that this code is very hard to test under normal circumstances because
1311*4882a593Smuzhiyun  * direct-io pins the pages with get_user_pages().  This makes
1312*4882a593Smuzhiyun  * is_page_cache_freeable return false, and the VM will not clean the pages.
1313*4882a593Smuzhiyun  * But other code (e.g., flusher threads) could clean the pages if they are mapped
1314*4882a593Smuzhiyun  * pagecache.
1315*4882a593Smuzhiyun  *
1316*4882a593Smuzhiyun  * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1317*4882a593Smuzhiyun  * deferred bio dirtying paths.
1318*4882a593Smuzhiyun  */
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun /*
1321*4882a593Smuzhiyun  * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1322*4882a593Smuzhiyun  */
1323*4882a593Smuzhiyun void bio_set_pages_dirty(struct bio *bio)
1324*4882a593Smuzhiyun {
1325*4882a593Smuzhiyun 	struct bio_vec *bvec;
1326*4882a593Smuzhiyun 	struct bvec_iter_all iter_all;
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun 	bio_for_each_segment_all(bvec, bio, iter_all) {
1329*4882a593Smuzhiyun 		if (!PageCompound(bvec->bv_page))
1330*4882a593Smuzhiyun 			set_page_dirty_lock(bvec->bv_page);
1331*4882a593Smuzhiyun 	}
1332*4882a593Smuzhiyun }
1333*4882a593Smuzhiyun 
1334*4882a593Smuzhiyun /*
1335*4882a593Smuzhiyun  * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1336*4882a593Smuzhiyun  * If they are, then fine.  If, however, some pages are clean then they must
1337*4882a593Smuzhiyun  * have been written out during the direct-IO read.  So we take another ref on
1338*4882a593Smuzhiyun  * the BIO and re-dirty the pages in process context.
1339*4882a593Smuzhiyun  *
1340*4882a593Smuzhiyun  * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1341*4882a593Smuzhiyun  * here on.  It will run one put_page() against each page and will run one
1342*4882a593Smuzhiyun  * bio_put() against the BIO.
1343*4882a593Smuzhiyun  */
1344*4882a593Smuzhiyun 
1345*4882a593Smuzhiyun static void bio_dirty_fn(struct work_struct *work);
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1348*4882a593Smuzhiyun static DEFINE_SPINLOCK(bio_dirty_lock);
1349*4882a593Smuzhiyun static struct bio *bio_dirty_list;
1350*4882a593Smuzhiyun 
1351*4882a593Smuzhiyun /*
1352*4882a593Smuzhiyun  * This runs in process context
1353*4882a593Smuzhiyun  */
1354*4882a593Smuzhiyun static void bio_dirty_fn(struct work_struct *work)
1355*4882a593Smuzhiyun {
1356*4882a593Smuzhiyun 	struct bio *bio, *next;
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 	spin_lock_irq(&bio_dirty_lock);
1359*4882a593Smuzhiyun 	next = bio_dirty_list;
1360*4882a593Smuzhiyun 	bio_dirty_list = NULL;
1361*4882a593Smuzhiyun 	spin_unlock_irq(&bio_dirty_lock);
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 	while ((bio = next) != NULL) {
1364*4882a593Smuzhiyun 		next = bio->bi_private;
1365*4882a593Smuzhiyun 
1366*4882a593Smuzhiyun 		bio_release_pages(bio, true);
1367*4882a593Smuzhiyun 		bio_put(bio);
1368*4882a593Smuzhiyun 	}
1369*4882a593Smuzhiyun }
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun void bio_check_pages_dirty(struct bio *bio)
1372*4882a593Smuzhiyun {
1373*4882a593Smuzhiyun 	struct bio_vec *bvec;
1374*4882a593Smuzhiyun 	unsigned long flags;
1375*4882a593Smuzhiyun 	struct bvec_iter_all iter_all;
1376*4882a593Smuzhiyun 
1377*4882a593Smuzhiyun 	bio_for_each_segment_all(bvec, bio, iter_all) {
1378*4882a593Smuzhiyun 		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
1379*4882a593Smuzhiyun 			goto defer;
1380*4882a593Smuzhiyun 	}
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun 	bio_release_pages(bio, false);
1383*4882a593Smuzhiyun 	bio_put(bio);
1384*4882a593Smuzhiyun 	return;
1385*4882a593Smuzhiyun defer:
1386*4882a593Smuzhiyun 	spin_lock_irqsave(&bio_dirty_lock, flags);
1387*4882a593Smuzhiyun 	bio->bi_private = bio_dirty_list;
1388*4882a593Smuzhiyun 	bio_dirty_list = bio;
1389*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bio_dirty_lock, flags);
1390*4882a593Smuzhiyun 	schedule_work(&bio_dirty_work);
1391*4882a593Smuzhiyun }
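/*
 * Example (illustrative sketch only, not part of this file): a direct-IO
 * read path marks the pinned pages dirty before submission and lets
 * bio_check_pages_dirty() own the bio on completion; writes just release
 * the pages.
 *
 * On submission, for READs:
 *
 *	bio_set_pages_dirty(bio);
 *	submit_bio(bio);
 *
 * In the completion handler:
 *
 *	static void dio_end_io(struct bio *bio)
 *	{
 *		if (bio_data_dir(bio) == READ) {
 *			bio_check_pages_dirty(bio);	(takes ownership)
 *		} else {
 *			bio_release_pages(bio, false);
 *			bio_put(bio);
 *		}
 *	}
 */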
1392*4882a593Smuzhiyun 
1393*4882a593Smuzhiyun static inline bool bio_remaining_done(struct bio *bio)
1394*4882a593Smuzhiyun {
1395*4882a593Smuzhiyun 	/*
1396*4882a593Smuzhiyun 	 * If we're not chaining, then ->__bi_remaining is always 1 and
1397*4882a593Smuzhiyun 	 * we always end io on the first invocation.
1398*4882a593Smuzhiyun 	 */
1399*4882a593Smuzhiyun 	if (!bio_flagged(bio, BIO_CHAIN))
1400*4882a593Smuzhiyun 		return true;
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1403*4882a593Smuzhiyun 
1404*4882a593Smuzhiyun 	if (atomic_dec_and_test(&bio->__bi_remaining)) {
1405*4882a593Smuzhiyun 		bio_clear_flag(bio, BIO_CHAIN);
1406*4882a593Smuzhiyun 		return true;
1407*4882a593Smuzhiyun 	}
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 	return false;
1410*4882a593Smuzhiyun }
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun /**
1413*4882a593Smuzhiyun  * bio_endio - end I/O on a bio
1414*4882a593Smuzhiyun  * @bio:	bio
1415*4882a593Smuzhiyun  *
1416*4882a593Smuzhiyun  * Description:
1417*4882a593Smuzhiyun  *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1418*4882a593Smuzhiyun  *   way to end I/O on a bio. No one should call bi_end_io() directly on a
1419*4882a593Smuzhiyun  *   bio unless they own it and thus know that it has an end_io function.
1420*4882a593Smuzhiyun  *
1421*4882a593Smuzhiyun  *   bio_endio() can be called several times on a bio that has been chained
1422*4882a593Smuzhiyun  *   using bio_chain().  The ->bi_end_io() function will only be called the
1423*4882a593Smuzhiyun  *   last time.  At this point the BLK_TA_COMPLETE tracing event will be
1424*4882a593Smuzhiyun  *   generated if BIO_TRACE_COMPLETION is set.
1425*4882a593Smuzhiyun  **/
1426*4882a593Smuzhiyun void bio_endio(struct bio *bio)
1427*4882a593Smuzhiyun {
1428*4882a593Smuzhiyun again:
1429*4882a593Smuzhiyun 	if (!bio_remaining_done(bio))
1430*4882a593Smuzhiyun 		return;
1431*4882a593Smuzhiyun 	if (!bio_integrity_endio(bio))
1432*4882a593Smuzhiyun 		return;
1433*4882a593Smuzhiyun 
1434*4882a593Smuzhiyun 	if (bio->bi_disk)
1435*4882a593Smuzhiyun 		rq_qos_done_bio(bio->bi_disk->queue, bio);
1436*4882a593Smuzhiyun 
1437*4882a593Smuzhiyun 	/*
1438*4882a593Smuzhiyun 	 * Need to have a real endio function for chained bios, otherwise
1439*4882a593Smuzhiyun 	 * various corner cases will break (like stacking block devices that
1440*4882a593Smuzhiyun 	 * save/restore bi_end_io) - however, we want to avoid unbounded
1441*4882a593Smuzhiyun 	 * recursion and blowing the stack. Tail call optimization would
1442*4882a593Smuzhiyun 	 * handle this, but compiling with frame pointers also disables
1443*4882a593Smuzhiyun 	 * gcc's sibling call optimization.
1444*4882a593Smuzhiyun 	 */
1445*4882a593Smuzhiyun 	if (bio->bi_end_io == bio_chain_endio) {
1446*4882a593Smuzhiyun 		bio = __bio_chain_endio(bio);
1447*4882a593Smuzhiyun 		goto again;
1448*4882a593Smuzhiyun 	}
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun 	if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1451*4882a593Smuzhiyun 		trace_block_bio_complete(bio->bi_disk->queue, bio);
1452*4882a593Smuzhiyun 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1453*4882a593Smuzhiyun 	}
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun 	blk_throtl_bio_endio(bio);
1456*4882a593Smuzhiyun 	/* release cgroup info */
1457*4882a593Smuzhiyun 	bio_uninit(bio);
1458*4882a593Smuzhiyun 	if (bio->bi_end_io)
1459*4882a593Smuzhiyun 		bio->bi_end_io(bio);
1460*4882a593Smuzhiyun }
1461*4882a593Smuzhiyun EXPORT_SYMBOL(bio_endio);
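/*
 * Example (illustrative sketch only, not part of this file): a simple
 * bio-based driver completes I/O by setting ->bi_status and calling
 * bio_endio(). struct my_cmd and its fields are hypothetical.
 *
 *	static void my_dev_complete(struct my_cmd *cmd)
 *	{
 *		struct bio *bio = cmd->bio;
 *
 *		bio->bi_status = cmd->failed ? BLK_STS_IOERR : BLK_STS_OK;
 *		bio_endio(bio);
 *	}
 */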
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun /**
1464*4882a593Smuzhiyun  * bio_split - split a bio
1465*4882a593Smuzhiyun  * @bio:	bio to split
1466*4882a593Smuzhiyun  * @sectors:	number of sectors to split from the front of @bio
1467*4882a593Smuzhiyun  * @gfp:	gfp mask
1468*4882a593Smuzhiyun  * @bs:		bio set to allocate from
1469*4882a593Smuzhiyun  *
1470*4882a593Smuzhiyun  * Allocates and returns a new bio which represents @sectors from the start of
1471*4882a593Smuzhiyun  * @bio, and updates @bio to represent the remaining sectors.
1472*4882a593Smuzhiyun  *
1473*4882a593Smuzhiyun  * Unless this is a discard request the newly allocated bio will point
1474*4882a593Smuzhiyun  * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1475*4882a593Smuzhiyun  * neither @bio nor @bs are freed before the split bio.
1476*4882a593Smuzhiyun  */
1477*4882a593Smuzhiyun struct bio *bio_split(struct bio *bio, int sectors,
1478*4882a593Smuzhiyun 		      gfp_t gfp, struct bio_set *bs)
1479*4882a593Smuzhiyun {
1480*4882a593Smuzhiyun 	struct bio *split;
1481*4882a593Smuzhiyun 
1482*4882a593Smuzhiyun 	BUG_ON(sectors <= 0);
1483*4882a593Smuzhiyun 	BUG_ON(sectors >= bio_sectors(bio));
1484*4882a593Smuzhiyun 
1485*4882a593Smuzhiyun 	/* Zone append commands cannot be split */
1486*4882a593Smuzhiyun 	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
1487*4882a593Smuzhiyun 		return NULL;
1488*4882a593Smuzhiyun 
1489*4882a593Smuzhiyun 	split = bio_clone_fast(bio, gfp, bs);
1490*4882a593Smuzhiyun 	if (!split)
1491*4882a593Smuzhiyun 		return NULL;
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 	split->bi_iter.bi_size = sectors << 9;
1494*4882a593Smuzhiyun 
1495*4882a593Smuzhiyun 	if (bio_integrity(split))
1496*4882a593Smuzhiyun 		bio_integrity_trim(split);
1497*4882a593Smuzhiyun 
1498*4882a593Smuzhiyun 	bio_advance(bio, split->bi_iter.bi_size);
1499*4882a593Smuzhiyun 
1500*4882a593Smuzhiyun 	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1501*4882a593Smuzhiyun 		bio_set_flag(split, BIO_TRACE_COMPLETION);
1502*4882a593Smuzhiyun 
1503*4882a593Smuzhiyun 	return split;
1504*4882a593Smuzhiyun }
1505*4882a593Smuzhiyun EXPORT_SYMBOL(bio_split);
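/*
 * Example (illustrative sketch only, not part of this file): the common
 * split-and-chain pattern used by stacking drivers when a bio crosses a
 * boundary at max_sectors (compare how raid0 remaps bios):
 *
 *	if (bio_sectors(bio) > max_sectors) {
 *		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, bs);
 *
 *		bio_chain(split, bio);
 *		submit_bio_noacct(bio);
 *		bio = split;
 *	}
 *
 * then map and submit bio, which is now at most max_sectors long.
 */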
1506*4882a593Smuzhiyun 
1507*4882a593Smuzhiyun /**
1508*4882a593Smuzhiyun  * bio_trim - trim a bio
1509*4882a593Smuzhiyun  * @bio:	bio to trim
1510*4882a593Smuzhiyun  * @offset:	number of sectors to trim from the front of @bio
1511*4882a593Smuzhiyun  * @size:	size we want to trim @bio to, in sectors
1512*4882a593Smuzhiyun  */
1513*4882a593Smuzhiyun void bio_trim(struct bio *bio, int offset, int size)
1514*4882a593Smuzhiyun {
1515*4882a593Smuzhiyun 	/* 'bio' is a cloned bio which we need to trim to match
1516*4882a593Smuzhiyun 	 * the given offset and size.
1517*4882a593Smuzhiyun 	 */
1518*4882a593Smuzhiyun 
1519*4882a593Smuzhiyun 	size <<= 9;
1520*4882a593Smuzhiyun 	if (offset == 0 && size == bio->bi_iter.bi_size)
1521*4882a593Smuzhiyun 		return;
1522*4882a593Smuzhiyun 
1523*4882a593Smuzhiyun 	bio_advance(bio, offset << 9);
1524*4882a593Smuzhiyun 	bio->bi_iter.bi_size = size;
1525*4882a593Smuzhiyun 
1526*4882a593Smuzhiyun 	if (bio_integrity(bio))
1527*4882a593Smuzhiyun 		bio_integrity_trim(bio);
1529*4882a593Smuzhiyun }
1530*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(bio_trim);
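/*
 * Example (illustrative sketch only, not part of this file): restricting
 * a fast clone to a sub-range of the original bio, here skip sectors in
 * and len sectors long:
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);
 *
 *	if (clone)
 *		bio_trim(clone, skip, len);
 */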
1531*4882a593Smuzhiyun 
1532*4882a593Smuzhiyun /*
1533*4882a593Smuzhiyun  * create memory pools for biovec's in a bio_set.
1534*4882a593Smuzhiyun  * use the global biovec slabs created for general use.
1535*4882a593Smuzhiyun  */
1536*4882a593Smuzhiyun int biovec_init_pool(mempool_t *pool, int pool_entries)
1537*4882a593Smuzhiyun {
1538*4882a593Smuzhiyun 	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
1539*4882a593Smuzhiyun 
1540*4882a593Smuzhiyun 	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1541*4882a593Smuzhiyun }
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun /*
1544*4882a593Smuzhiyun  * bioset_exit - exit a bioset initialized with bioset_init()
1545*4882a593Smuzhiyun  *
1546*4882a593Smuzhiyun  * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1547*4882a593Smuzhiyun  * kzalloc()).
1548*4882a593Smuzhiyun  */
1549*4882a593Smuzhiyun void bioset_exit(struct bio_set *bs)
1550*4882a593Smuzhiyun {
1551*4882a593Smuzhiyun 	if (bs->rescue_workqueue)
1552*4882a593Smuzhiyun 		destroy_workqueue(bs->rescue_workqueue);
1553*4882a593Smuzhiyun 	bs->rescue_workqueue = NULL;
1554*4882a593Smuzhiyun 
1555*4882a593Smuzhiyun 	mempool_exit(&bs->bio_pool);
1556*4882a593Smuzhiyun 	mempool_exit(&bs->bvec_pool);
1557*4882a593Smuzhiyun 
1558*4882a593Smuzhiyun 	bioset_integrity_free(bs);
1559*4882a593Smuzhiyun 	if (bs->bio_slab)
1560*4882a593Smuzhiyun 		bio_put_slab(bs);
1561*4882a593Smuzhiyun 	bs->bio_slab = NULL;
1562*4882a593Smuzhiyun }
1563*4882a593Smuzhiyun EXPORT_SYMBOL(bioset_exit);
1564*4882a593Smuzhiyun 
1565*4882a593Smuzhiyun /**
1566*4882a593Smuzhiyun  * bioset_init - Initialize a bio_set
1567*4882a593Smuzhiyun  * @bs:		pool to initialize
1568*4882a593Smuzhiyun  * @pool_size:	Number of bio and bio_vecs to cache in the mempool
1569*4882a593Smuzhiyun  * @front_pad:	Number of bytes to allocate in front of the returned bio
1570*4882a593Smuzhiyun  * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
1571*4882a593Smuzhiyun  *              and %BIOSET_NEED_RESCUER
1572*4882a593Smuzhiyun  *
1573*4882a593Smuzhiyun  * Description:
1574*4882a593Smuzhiyun  *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1575*4882a593Smuzhiyun  *    to ask for a number of bytes to be allocated in front of the bio.
1576*4882a593Smuzhiyun  *    Front pad allocation is useful for embedding the bio inside
1577*4882a593Smuzhiyun  *    another structure, to avoid allocating extra data to go with the bio.
1578*4882a593Smuzhiyun  *    Note that the bio must always be embedded at the END of that structure,
1579*4882a593Smuzhiyun  *    or things will break badly.
1580*4882a593Smuzhiyun  *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1581*4882a593Smuzhiyun  *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
1582*4882a593Smuzhiyun  *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
1583*4882a593Smuzhiyun  *    dispatch queued requests when the mempool runs out of space.
1584*4882a593Smuzhiyun  *
1585*4882a593Smuzhiyun  */
1586*4882a593Smuzhiyun int bioset_init(struct bio_set *bs,
1587*4882a593Smuzhiyun 		unsigned int pool_size,
1588*4882a593Smuzhiyun 		unsigned int front_pad,
1589*4882a593Smuzhiyun 		int flags)
1590*4882a593Smuzhiyun {
1591*4882a593Smuzhiyun 	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1592*4882a593Smuzhiyun 
1593*4882a593Smuzhiyun 	bs->front_pad = front_pad;
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun 	spin_lock_init(&bs->rescue_lock);
1596*4882a593Smuzhiyun 	bio_list_init(&bs->rescue_list);
1597*4882a593Smuzhiyun 	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1598*4882a593Smuzhiyun 
1599*4882a593Smuzhiyun 	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1600*4882a593Smuzhiyun 	if (!bs->bio_slab)
1601*4882a593Smuzhiyun 		return -ENOMEM;
1602*4882a593Smuzhiyun 
1603*4882a593Smuzhiyun 	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1604*4882a593Smuzhiyun 		goto bad;
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun 	if ((flags & BIOSET_NEED_BVECS) &&
1607*4882a593Smuzhiyun 	    biovec_init_pool(&bs->bvec_pool, pool_size))
1608*4882a593Smuzhiyun 		goto bad;
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun 	if (!(flags & BIOSET_NEED_RESCUER))
1611*4882a593Smuzhiyun 		return 0;
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun 	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1614*4882a593Smuzhiyun 	if (!bs->rescue_workqueue)
1615*4882a593Smuzhiyun 		goto bad;
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun 	return 0;
1618*4882a593Smuzhiyun bad:
1619*4882a593Smuzhiyun 	bioset_exit(bs);
1620*4882a593Smuzhiyun 	return -ENOMEM;
1621*4882a593Smuzhiyun }
1622*4882a593Smuzhiyun EXPORT_SYMBOL(bioset_init);
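/*
 * Example (illustrative sketch only, not part of this file): a driver
 * embedding the bio at the end of a private structure, using front_pad
 * so a single allocation covers both. All my_* names are hypothetical.
 *
 *	struct my_io {
 *		struct my_dev	*dev;
 *		struct bio	bio;	(must be last)
 *	};
 *	static struct bio_set my_bio_set;
 *
 *	ret = bioset_init(&my_bio_set, BIO_POOL_SIZE,
 *			  offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
 *
 * and later, per I/O:
 *
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &my_bio_set);
 *	io = container_of(bio, struct my_io, bio);
 */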
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun /*
1625*4882a593Smuzhiyun  * Initialize and setup a new bio_set, based on the settings from
1626*4882a593Smuzhiyun  * another bio_set.
1627*4882a593Smuzhiyun  */
1628*4882a593Smuzhiyun int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
1629*4882a593Smuzhiyun {
1630*4882a593Smuzhiyun 	int flags;
1631*4882a593Smuzhiyun 
1632*4882a593Smuzhiyun 	flags = 0;
1633*4882a593Smuzhiyun 	if (src->bvec_pool.min_nr)
1634*4882a593Smuzhiyun 		flags |= BIOSET_NEED_BVECS;
1635*4882a593Smuzhiyun 	if (src->rescue_workqueue)
1636*4882a593Smuzhiyun 		flags |= BIOSET_NEED_RESCUER;
1637*4882a593Smuzhiyun 
1638*4882a593Smuzhiyun 	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
1639*4882a593Smuzhiyun }
1640*4882a593Smuzhiyun EXPORT_SYMBOL(bioset_init_from_src);
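/*
 * Example (illustrative sketch only, not part of this file): cloning the
 * parameters of an existing set into a fresh one, as a stacking driver
 * might when rebuilding its pools:
 *
 *	if (bioset_init_from_src(&new_bs, &old_bs))
 *		return -ENOMEM;
 */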
1641*4882a593Smuzhiyun 
1642*4882a593Smuzhiyun static void __init biovec_init_slabs(void)
1643*4882a593Smuzhiyun {
1644*4882a593Smuzhiyun 	int i;
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun 	for (i = 0; i < BVEC_POOL_NR; i++) {
1647*4882a593Smuzhiyun 		int size;
1648*4882a593Smuzhiyun 		struct biovec_slab *bvs = bvec_slabs + i;
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun 		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
1651*4882a593Smuzhiyun 			bvs->slab = NULL;
1652*4882a593Smuzhiyun 			continue;
1653*4882a593Smuzhiyun 		}
1654*4882a593Smuzhiyun 
1655*4882a593Smuzhiyun 		size = bvs->nr_vecs * sizeof(struct bio_vec);
1656*4882a593Smuzhiyun 		bvs->slab = kmem_cache_create(bvs->name, size, 0,
1657*4882a593Smuzhiyun                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1658*4882a593Smuzhiyun 	}
1659*4882a593Smuzhiyun }
1660*4882a593Smuzhiyun 
1661*4882a593Smuzhiyun static int __init init_bio(void)
1662*4882a593Smuzhiyun {
1663*4882a593Smuzhiyun 	bio_slab_max = 2;
1664*4882a593Smuzhiyun 	bio_slab_nr = 0;
1665*4882a593Smuzhiyun 	bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
1666*4882a593Smuzhiyun 			    GFP_KERNEL);
1667*4882a593Smuzhiyun 
1668*4882a593Smuzhiyun 	BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
1669*4882a593Smuzhiyun 
1670*4882a593Smuzhiyun 	if (!bio_slabs)
1671*4882a593Smuzhiyun 		panic("bio: can't allocate bios\n");
1672*4882a593Smuzhiyun 
1673*4882a593Smuzhiyun 	bio_integrity_init();
1674*4882a593Smuzhiyun 	biovec_init_slabs();
1675*4882a593Smuzhiyun 
1676*4882a593Smuzhiyun 	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
1677*4882a593Smuzhiyun 		panic("bio: can't allocate bios\n");
1678*4882a593Smuzhiyun 
1679*4882a593Smuzhiyun 	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
1680*4882a593Smuzhiyun 		panic("bio: can't create integrity pool\n");
1681*4882a593Smuzhiyun 
1682*4882a593Smuzhiyun 	return 0;
1683*4882a593Smuzhiyun }
1684*4882a593Smuzhiyun subsys_initcall(init_bio);