xref: /OK3568_Linux_fs/kernel/block/blk-lib.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

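/*
 * Allocate a new bio with room for @nr_pages vecs.  If @bio is non-NULL it is
 * chained to the new bio and submitted, so the bio eventually returned to the
 * caller will not complete until every previously submitted bio in the chain
 * has completed.
 */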
struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask, part_offset = 0;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	/* In case the discard granularity isn't set by a buggy device driver */
	if (WARN_ON_ONCE(!q->limits.discard_granularity)) {
		char dev_name[BDEVNAME_SIZE];

		bdevname(bdev, dev_name);
		pr_err_ratelimited("%s: Error: discard_granularity is 0.\n", dev_name);
		return -EOPNOTSUPP;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	/* In case the discard request is in a partition */
	if (bdev_is_partition(bdev))
		part_offset = bdev->bd_part->start_sect;

	while (nr_sects) {
		sector_t granularity_aligned_lba, req_sects;
		sector_t sector_mapped = sector + part_offset;

		granularity_aligned_lba = round_up(sector_mapped,
				q->limits.discard_granularity >> SECTOR_SHIFT);

		/*
		 * Check whether the discard bio starts at a discard_granularity
		 * aligned LBA,
		 * - If no: set (granularity_aligned_lba - sector_mapped) to
		 *   bi_size of the first split bio, then the second bio will
		 *   start at a discard_granularity aligned LBA on the device.
		 * - If yes: use bio_aligned_discard_max_sectors() as the max
		 *   possible bi_size of the first split bio. Then when this bio
		 *   is split in the device driver, the split bios are very
		 *   likely to be aligned to the discard_granularity of the
		 *   device's queue.
		 */
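		/*
		 * Illustrative numbers (assuming a 4 KiB discard_granularity,
		 * i.e. 8 sectors of 512 bytes): for sector_mapped == 10,
		 * round_up(10, 8) == 16, so the first bio covers
		 * 16 - 10 == 6 sectors and the next bio starts at the
		 * granularity-aligned LBA 16.
		 */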
		if (granularity_aligned_lba == sector_mapped)
			req_sects = min_t(sector_t, nr_sects,
					  bio_aligned_discard_max_sectors(q));
		else
			req_sects = min_t(sector_t, nr_sects,
					  granularity_aligned_lba - sector_mapped);

		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);

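/*
 * Usage sketch (illustrative): a hypothetical caller discarding a range and
 * waiting for completion.  The helper name example_discard_range() is an
 * assumption; the block is kept under "#if 0" so it is not built.
 */
#if 0
static int example_discard_range(struct block_device *bdev, sector_t start,
				 sector_t nr_sects)
{
	/* Discard nr_sects sectors starting at start, waiting for completion. */
	return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL, 0);
}
#endif
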
/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *  Generate and issue a number of bios (REQ_OP_WRITE_SAME) carrying the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = bio_allowed_max_sectors(q);

	while (nr_sects) {
		bio = blk_next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);

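/*
 * Usage sketch (illustrative): fill a single page with a repeating byte and
 * ask the device to replicate it across a sector range.  The helper name
 * example_write_same_pattern() is hypothetical; the block is kept under
 * "#if 0" so it is not built.
 */
#if 0
static int example_write_same_pattern(struct block_device *bdev,
				      sector_t start, sector_t nr_sects)
{
	struct page *page = alloc_page(GFP_KERNEL);
	int ret;

	if (!page)
		return -ENOMEM;

	/* The device repeats this page's contents for every logical block. */
	memset(page_address(page), 0xa5, PAGE_SIZE);
	ret = blkdev_issue_write_same(bdev, start, nr_sects, GFP_KERNEL, page);
	__free_page(page);
	return ret;
}
#endif
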
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}

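/*
 * Illustrative arithmetic (assuming 4 KiB pages, i.e. 8 sectors of 512 bytes
 * per page): nr_sects == 1 maps to 1 page, nr_sects == 20 maps to
 * DIV_ROUND_UP(20, 8) == 3 pages, and very large ranges are capped at
 * BIO_MAX_PAGES.
 */
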
static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue zeroout for
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
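
/*
 * Usage sketch (illustrative): zero a sector range, insisting on the WRITE
 * ZEROES offload rather than the page-by-page fallback.  The helper name
 * example_zero_range_offload_only() is hypothetical; the block is kept under
 * "#if 0" so it is not built.
 */
#if 0
static int example_zero_range_offload_only(struct block_device *bdev,
					   sector_t start, sector_t nr_sects)
{
	/*
	 * With BLKDEV_ZERO_NOFALLBACK the call returns -EOPNOTSUPP instead of
	 * falling back to writing zero pages when no offload is available.
	 */
	return blkdev_issue_zeroout(bdev, start, nr_sects, GFP_KERNEL,
				    BLKDEV_ZERO_NOFALLBACK);
}
#endif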