xref: /OK3568_Linux_fs/kernel/block/bio-integrity.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * bio-integrity.c - bio data integrity extensions
 *
 * Copyright (C) 2007, 2008, 2009 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 */

#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/export.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include "blk.h"

#define BIP_INLINE_VECS	4

static struct kmem_cache *bip_slab;
static struct workqueue_struct *kintegrityd_wq;
blk_flush_integrity(void)22*4882a593Smuzhiyun void blk_flush_integrity(void)
23*4882a593Smuzhiyun {
24*4882a593Smuzhiyun 	flush_workqueue(kintegrityd_wq);
25*4882a593Smuzhiyun }
26*4882a593Smuzhiyun 
__bio_integrity_free(struct bio_set * bs,struct bio_integrity_payload * bip)27*4882a593Smuzhiyun static void __bio_integrity_free(struct bio_set *bs,
28*4882a593Smuzhiyun 				 struct bio_integrity_payload *bip)
29*4882a593Smuzhiyun {
30*4882a593Smuzhiyun 	if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
31*4882a593Smuzhiyun 		if (bip->bip_vec)
32*4882a593Smuzhiyun 			bvec_free(&bs->bvec_integrity_pool, bip->bip_vec,
33*4882a593Smuzhiyun 				  bip->bip_slab);
34*4882a593Smuzhiyun 		mempool_free(bip, &bs->bio_integrity_pool);
35*4882a593Smuzhiyun 	} else {
36*4882a593Smuzhiyun 		kfree(bip);
37*4882a593Smuzhiyun 	}
38*4882a593Smuzhiyun }
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun /**
41*4882a593Smuzhiyun  * bio_integrity_alloc - Allocate integrity payload and attach it to bio
42*4882a593Smuzhiyun  * @bio:	bio to attach integrity metadata to
43*4882a593Smuzhiyun  * @gfp_mask:	Memory allocation mask
44*4882a593Smuzhiyun  * @nr_vecs:	Number of integrity metadata scatter-gather elements
45*4882a593Smuzhiyun  *
46*4882a593Smuzhiyun  * Description: This function prepares a bio for attaching integrity
47*4882a593Smuzhiyun  * metadata.  nr_vecs specifies the maximum number of pages containing
48*4882a593Smuzhiyun  * integrity metadata that can be attached.
49*4882a593Smuzhiyun  */
bio_integrity_alloc(struct bio * bio,gfp_t gfp_mask,unsigned int nr_vecs)50*4882a593Smuzhiyun struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
51*4882a593Smuzhiyun 						  gfp_t gfp_mask,
52*4882a593Smuzhiyun 						  unsigned int nr_vecs)
53*4882a593Smuzhiyun {
54*4882a593Smuzhiyun 	struct bio_integrity_payload *bip;
55*4882a593Smuzhiyun 	struct bio_set *bs = bio->bi_pool;
56*4882a593Smuzhiyun 	unsigned inline_vecs;
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun 	if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
59*4882a593Smuzhiyun 		return ERR_PTR(-EOPNOTSUPP);
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun 	if (!bs || !mempool_initialized(&bs->bio_integrity_pool)) {
62*4882a593Smuzhiyun 		bip = kmalloc(struct_size(bip, bip_inline_vecs, nr_vecs), gfp_mask);
63*4882a593Smuzhiyun 		inline_vecs = nr_vecs;
64*4882a593Smuzhiyun 	} else {
65*4882a593Smuzhiyun 		bip = mempool_alloc(&bs->bio_integrity_pool, gfp_mask);
66*4882a593Smuzhiyun 		inline_vecs = BIP_INLINE_VECS;
67*4882a593Smuzhiyun 	}
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun 	if (unlikely(!bip))
70*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun 	memset(bip, 0, sizeof(*bip));
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun 	if (nr_vecs > inline_vecs) {
75*4882a593Smuzhiyun 		unsigned long idx = 0;
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun 		bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx,
78*4882a593Smuzhiyun 					  &bs->bvec_integrity_pool);
79*4882a593Smuzhiyun 		if (!bip->bip_vec)
80*4882a593Smuzhiyun 			goto err;
81*4882a593Smuzhiyun 		bip->bip_max_vcnt = bvec_nr_vecs(idx);
82*4882a593Smuzhiyun 		bip->bip_slab = idx;
83*4882a593Smuzhiyun 	} else {
84*4882a593Smuzhiyun 		bip->bip_vec = bip->bip_inline_vecs;
85*4882a593Smuzhiyun 		bip->bip_max_vcnt = inline_vecs;
86*4882a593Smuzhiyun 	}
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun 	bip->bip_bio = bio;
89*4882a593Smuzhiyun 	bio->bi_integrity = bip;
90*4882a593Smuzhiyun 	bio->bi_opf |= REQ_INTEGRITY;
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun 	return bip;
93*4882a593Smuzhiyun err:
94*4882a593Smuzhiyun 	__bio_integrity_free(bs, bip);
95*4882a593Smuzhiyun 	return ERR_PTR(-ENOMEM);
96*4882a593Smuzhiyun }
97*4882a593Smuzhiyun EXPORT_SYMBOL(bio_integrity_alloc);
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun /**
100*4882a593Smuzhiyun  * bio_integrity_free - Free bio integrity payload
101*4882a593Smuzhiyun  * @bio:	bio containing bip to be freed
102*4882a593Smuzhiyun  *
103*4882a593Smuzhiyun  * Description: Used to free the integrity portion of a bio. Usually
104*4882a593Smuzhiyun  * called from bio_free().
105*4882a593Smuzhiyun  */
bio_integrity_free(struct bio * bio)106*4882a593Smuzhiyun void bio_integrity_free(struct bio *bio)
107*4882a593Smuzhiyun {
108*4882a593Smuzhiyun 	struct bio_integrity_payload *bip = bio_integrity(bio);
109*4882a593Smuzhiyun 	struct bio_set *bs = bio->bi_pool;
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun 	if (bip->bip_flags & BIP_BLOCK_INTEGRITY)
112*4882a593Smuzhiyun 		kfree(page_address(bip->bip_vec->bv_page) +
113*4882a593Smuzhiyun 		      bip->bip_vec->bv_offset);
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun 	__bio_integrity_free(bs, bip);
116*4882a593Smuzhiyun 	bio->bi_integrity = NULL;
117*4882a593Smuzhiyun 	bio->bi_opf &= ~REQ_INTEGRITY;
118*4882a593Smuzhiyun }
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun /**
121*4882a593Smuzhiyun  * bio_integrity_add_page - Attach integrity metadata
122*4882a593Smuzhiyun  * @bio:	bio to update
123*4882a593Smuzhiyun  * @page:	page containing integrity metadata
124*4882a593Smuzhiyun  * @len:	number of bytes of integrity metadata in page
125*4882a593Smuzhiyun  * @offset:	start offset within page
126*4882a593Smuzhiyun  *
127*4882a593Smuzhiyun  * Description: Attach a page containing integrity metadata to bio.
128*4882a593Smuzhiyun  */
bio_integrity_add_page(struct bio * bio,struct page * page,unsigned int len,unsigned int offset)129*4882a593Smuzhiyun int bio_integrity_add_page(struct bio *bio, struct page *page,
130*4882a593Smuzhiyun 			   unsigned int len, unsigned int offset)
131*4882a593Smuzhiyun {
132*4882a593Smuzhiyun 	struct bio_integrity_payload *bip = bio_integrity(bio);
133*4882a593Smuzhiyun 	struct bio_vec *iv;
134*4882a593Smuzhiyun 
135*4882a593Smuzhiyun 	if (bip->bip_vcnt >= bip->bip_max_vcnt) {
136*4882a593Smuzhiyun 		printk(KERN_ERR "%s: bip_vec full\n", __func__);
137*4882a593Smuzhiyun 		return 0;
138*4882a593Smuzhiyun 	}
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	iv = bip->bip_vec + bip->bip_vcnt;
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun 	if (bip->bip_vcnt &&
143*4882a593Smuzhiyun 	    bvec_gap_to_prev(bio->bi_disk->queue,
144*4882a593Smuzhiyun 			     &bip->bip_vec[bip->bip_vcnt - 1], offset))
145*4882a593Smuzhiyun 		return 0;
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun 	iv->bv_page = page;
148*4882a593Smuzhiyun 	iv->bv_len = len;
149*4882a593Smuzhiyun 	iv->bv_offset = offset;
150*4882a593Smuzhiyun 	bip->bip_vcnt++;
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun 	return len;
153*4882a593Smuzhiyun }
154*4882a593Smuzhiyun EXPORT_SYMBOL(bio_integrity_add_page);
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun /**
157*4882a593Smuzhiyun  * bio_integrity_process - Process integrity metadata for a bio
158*4882a593Smuzhiyun  * @bio:	bio to generate/verify integrity metadata for
159*4882a593Smuzhiyun  * @proc_iter:  iterator to process
160*4882a593Smuzhiyun  * @proc_fn:	Pointer to the relevant processing function
161*4882a593Smuzhiyun  */
bio_integrity_process(struct bio * bio,struct bvec_iter * proc_iter,integrity_processing_fn * proc_fn)162*4882a593Smuzhiyun static blk_status_t bio_integrity_process(struct bio *bio,
163*4882a593Smuzhiyun 		struct bvec_iter *proc_iter, integrity_processing_fn *proc_fn)
164*4882a593Smuzhiyun {
165*4882a593Smuzhiyun 	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
166*4882a593Smuzhiyun 	struct blk_integrity_iter iter;
167*4882a593Smuzhiyun 	struct bvec_iter bviter;
168*4882a593Smuzhiyun 	struct bio_vec bv;
169*4882a593Smuzhiyun 	struct bio_integrity_payload *bip = bio_integrity(bio);
170*4882a593Smuzhiyun 	blk_status_t ret = BLK_STS_OK;
171*4882a593Smuzhiyun 	void *prot_buf = page_address(bip->bip_vec->bv_page) +
172*4882a593Smuzhiyun 		bip->bip_vec->bv_offset;
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun 	iter.disk_name = bio->bi_disk->disk_name;
175*4882a593Smuzhiyun 	iter.interval = 1 << bi->interval_exp;
176*4882a593Smuzhiyun 	iter.seed = proc_iter->bi_sector;
177*4882a593Smuzhiyun 	iter.prot_buf = prot_buf;
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun 	__bio_for_each_segment(bv, bio, bviter, *proc_iter) {
180*4882a593Smuzhiyun 		void *kaddr = kmap_atomic(bv.bv_page);
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun 		iter.data_buf = kaddr + bv.bv_offset;
183*4882a593Smuzhiyun 		iter.data_size = bv.bv_len;
184*4882a593Smuzhiyun 
185*4882a593Smuzhiyun 		ret = proc_fn(&iter);
186*4882a593Smuzhiyun 		if (ret) {
187*4882a593Smuzhiyun 			kunmap_atomic(kaddr);
188*4882a593Smuzhiyun 			return ret;
189*4882a593Smuzhiyun 		}
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun 		kunmap_atomic(kaddr);
192*4882a593Smuzhiyun 	}
193*4882a593Smuzhiyun 	return ret;
194*4882a593Smuzhiyun }
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun /**
197*4882a593Smuzhiyun  * bio_integrity_prep - Prepare bio for integrity I/O
198*4882a593Smuzhiyun  * @bio:	bio to prepare
199*4882a593Smuzhiyun  *
200*4882a593Smuzhiyun  * Description:  Checks if the bio already has an integrity payload attached.
201*4882a593Smuzhiyun  * If it does, the payload has been generated by another kernel subsystem,
202*4882a593Smuzhiyun  * and we just pass it through. Otherwise allocates integrity payload.
203*4882a593Smuzhiyun  * The bio must have data direction, target device and start sector set priot
204*4882a593Smuzhiyun  * to calling.  In the WRITE case, integrity metadata will be generated using
205*4882a593Smuzhiyun  * the block device's integrity function.  In the READ case, the buffer
206*4882a593Smuzhiyun  * will be prepared for DMA and a suitable end_io handler set up.
207*4882a593Smuzhiyun  */
bio_integrity_prep(struct bio * bio)208*4882a593Smuzhiyun bool bio_integrity_prep(struct bio *bio)
209*4882a593Smuzhiyun {
210*4882a593Smuzhiyun 	struct bio_integrity_payload *bip;
211*4882a593Smuzhiyun 	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
212*4882a593Smuzhiyun 	struct request_queue *q = bio->bi_disk->queue;
213*4882a593Smuzhiyun 	void *buf;
214*4882a593Smuzhiyun 	unsigned long start, end;
215*4882a593Smuzhiyun 	unsigned int len, nr_pages;
216*4882a593Smuzhiyun 	unsigned int bytes, offset, i;
217*4882a593Smuzhiyun 	unsigned int intervals;
218*4882a593Smuzhiyun 	blk_status_t status;
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun 	if (!bi)
221*4882a593Smuzhiyun 		return true;
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun 	if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
224*4882a593Smuzhiyun 		return true;
225*4882a593Smuzhiyun 
226*4882a593Smuzhiyun 	if (!bio_sectors(bio))
227*4882a593Smuzhiyun 		return true;
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun 	/* Already protected? */
230*4882a593Smuzhiyun 	if (bio_integrity(bio))
231*4882a593Smuzhiyun 		return true;
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun 	if (bio_data_dir(bio) == READ) {
234*4882a593Smuzhiyun 		if (!bi->profile->verify_fn ||
235*4882a593Smuzhiyun 		    !(bi->flags & BLK_INTEGRITY_VERIFY))
236*4882a593Smuzhiyun 			return true;
237*4882a593Smuzhiyun 	} else {
238*4882a593Smuzhiyun 		if (!bi->profile->generate_fn ||
239*4882a593Smuzhiyun 		    !(bi->flags & BLK_INTEGRITY_GENERATE))
240*4882a593Smuzhiyun 			return true;
241*4882a593Smuzhiyun 	}
242*4882a593Smuzhiyun 	intervals = bio_integrity_intervals(bi, bio_sectors(bio));
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun 	/* Allocate kernel buffer for protection data */
245*4882a593Smuzhiyun 	len = intervals * bi->tuple_size;
246*4882a593Smuzhiyun 	buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
247*4882a593Smuzhiyun 	status = BLK_STS_RESOURCE;
248*4882a593Smuzhiyun 	if (unlikely(buf == NULL)) {
249*4882a593Smuzhiyun 		printk(KERN_ERR "could not allocate integrity buffer\n");
250*4882a593Smuzhiyun 		goto err_end_io;
251*4882a593Smuzhiyun 	}
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun 	end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
254*4882a593Smuzhiyun 	start = ((unsigned long) buf) >> PAGE_SHIFT;
255*4882a593Smuzhiyun 	nr_pages = end - start;
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun 	/* Allocate bio integrity payload and integrity vectors */
258*4882a593Smuzhiyun 	bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
259*4882a593Smuzhiyun 	if (IS_ERR(bip)) {
260*4882a593Smuzhiyun 		printk(KERN_ERR "could not allocate data integrity bioset\n");
261*4882a593Smuzhiyun 		kfree(buf);
262*4882a593Smuzhiyun 		status = BLK_STS_RESOURCE;
263*4882a593Smuzhiyun 		goto err_end_io;
264*4882a593Smuzhiyun 	}
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 	bip->bip_flags |= BIP_BLOCK_INTEGRITY;
267*4882a593Smuzhiyun 	bip->bip_iter.bi_size = len;
268*4882a593Smuzhiyun 	bip_set_seed(bip, bio->bi_iter.bi_sector);
269*4882a593Smuzhiyun 
270*4882a593Smuzhiyun 	if (bi->flags & BLK_INTEGRITY_IP_CHECKSUM)
271*4882a593Smuzhiyun 		bip->bip_flags |= BIP_IP_CHECKSUM;
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun 	/* Map it */
274*4882a593Smuzhiyun 	offset = offset_in_page(buf);
275*4882a593Smuzhiyun 	for (i = 0 ; i < nr_pages ; i++) {
276*4882a593Smuzhiyun 		int ret;
277*4882a593Smuzhiyun 		bytes = PAGE_SIZE - offset;
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun 		if (len <= 0)
280*4882a593Smuzhiyun 			break;
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun 		if (bytes > len)
283*4882a593Smuzhiyun 			bytes = len;
284*4882a593Smuzhiyun 
285*4882a593Smuzhiyun 		ret = bio_integrity_add_page(bio, virt_to_page(buf),
286*4882a593Smuzhiyun 					     bytes, offset);
287*4882a593Smuzhiyun 
288*4882a593Smuzhiyun 		if (ret == 0) {
289*4882a593Smuzhiyun 			printk(KERN_ERR "could not attach integrity payload\n");
290*4882a593Smuzhiyun 			status = BLK_STS_RESOURCE;
291*4882a593Smuzhiyun 			goto err_end_io;
292*4882a593Smuzhiyun 		}
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun 		if (ret < bytes)
295*4882a593Smuzhiyun 			break;
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun 		buf += bytes;
298*4882a593Smuzhiyun 		len -= bytes;
299*4882a593Smuzhiyun 		offset = 0;
300*4882a593Smuzhiyun 	}
301*4882a593Smuzhiyun 
302*4882a593Smuzhiyun 	/* Auto-generate integrity metadata if this is a write */
303*4882a593Smuzhiyun 	if (bio_data_dir(bio) == WRITE) {
304*4882a593Smuzhiyun 		bio_integrity_process(bio, &bio->bi_iter,
305*4882a593Smuzhiyun 				      bi->profile->generate_fn);
306*4882a593Smuzhiyun 	} else {
307*4882a593Smuzhiyun 		bip->bio_iter = bio->bi_iter;
308*4882a593Smuzhiyun 	}
309*4882a593Smuzhiyun 	return true;
310*4882a593Smuzhiyun 
311*4882a593Smuzhiyun err_end_io:
312*4882a593Smuzhiyun 	bio->bi_status = status;
313*4882a593Smuzhiyun 	bio_endio(bio);
314*4882a593Smuzhiyun 	return false;
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun }
317*4882a593Smuzhiyun EXPORT_SYMBOL(bio_integrity_prep);
318*4882a593Smuzhiyun 
319*4882a593Smuzhiyun /**
320*4882a593Smuzhiyun  * bio_integrity_verify_fn - Integrity I/O completion worker
321*4882a593Smuzhiyun  * @work:	Work struct stored in bio to be verified
322*4882a593Smuzhiyun  *
323*4882a593Smuzhiyun  * Description: This workqueue function is called to complete a READ
324*4882a593Smuzhiyun  * request.  The function verifies the transferred integrity metadata
325*4882a593Smuzhiyun  * and then calls the original bio end_io function.
326*4882a593Smuzhiyun  */
bio_integrity_verify_fn(struct work_struct * work)327*4882a593Smuzhiyun static void bio_integrity_verify_fn(struct work_struct *work)
328*4882a593Smuzhiyun {
329*4882a593Smuzhiyun 	struct bio_integrity_payload *bip =
330*4882a593Smuzhiyun 		container_of(work, struct bio_integrity_payload, bip_work);
331*4882a593Smuzhiyun 	struct bio *bio = bip->bip_bio;
332*4882a593Smuzhiyun 	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
333*4882a593Smuzhiyun 
334*4882a593Smuzhiyun 	/*
335*4882a593Smuzhiyun 	 * At the moment verify is called bio's iterator was advanced
336*4882a593Smuzhiyun 	 * during split and completion, we need to rewind iterator to
337*4882a593Smuzhiyun 	 * it's original position.
338*4882a593Smuzhiyun 	 */
339*4882a593Smuzhiyun 	bio->bi_status = bio_integrity_process(bio, &bip->bio_iter,
340*4882a593Smuzhiyun 						bi->profile->verify_fn);
341*4882a593Smuzhiyun 	bio_integrity_free(bio);
342*4882a593Smuzhiyun 	bio_endio(bio);
343*4882a593Smuzhiyun }
344*4882a593Smuzhiyun 
345*4882a593Smuzhiyun /**
346*4882a593Smuzhiyun  * __bio_integrity_endio - Integrity I/O completion function
347*4882a593Smuzhiyun  * @bio:	Protected bio
348*4882a593Smuzhiyun  *
349*4882a593Smuzhiyun  * Description: Completion for integrity I/O
350*4882a593Smuzhiyun  *
351*4882a593Smuzhiyun  * Normally I/O completion is done in interrupt context.  However,
352*4882a593Smuzhiyun  * verifying I/O integrity is a time-consuming task which must be run
353*4882a593Smuzhiyun  * in process context.	This function postpones completion
354*4882a593Smuzhiyun  * accordingly.
355*4882a593Smuzhiyun  */
__bio_integrity_endio(struct bio * bio)356*4882a593Smuzhiyun bool __bio_integrity_endio(struct bio *bio)
357*4882a593Smuzhiyun {
358*4882a593Smuzhiyun 	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
359*4882a593Smuzhiyun 	struct bio_integrity_payload *bip = bio_integrity(bio);
360*4882a593Smuzhiyun 
361*4882a593Smuzhiyun 	if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
362*4882a593Smuzhiyun 	    (bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->profile->verify_fn) {
363*4882a593Smuzhiyun 		INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
364*4882a593Smuzhiyun 		queue_work(kintegrityd_wq, &bip->bip_work);
365*4882a593Smuzhiyun 		return false;
366*4882a593Smuzhiyun 	}
367*4882a593Smuzhiyun 
368*4882a593Smuzhiyun 	bio_integrity_free(bio);
369*4882a593Smuzhiyun 	return true;
370*4882a593Smuzhiyun }
371*4882a593Smuzhiyun 
372*4882a593Smuzhiyun /**
373*4882a593Smuzhiyun  * bio_integrity_advance - Advance integrity vector
374*4882a593Smuzhiyun  * @bio:	bio whose integrity vector to update
375*4882a593Smuzhiyun  * @bytes_done:	number of data bytes that have been completed
376*4882a593Smuzhiyun  *
377*4882a593Smuzhiyun  * Description: This function calculates how many integrity bytes the
378*4882a593Smuzhiyun  * number of completed data bytes correspond to and advances the
379*4882a593Smuzhiyun  * integrity vector accordingly.
380*4882a593Smuzhiyun  */
bio_integrity_advance(struct bio * bio,unsigned int bytes_done)381*4882a593Smuzhiyun void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
382*4882a593Smuzhiyun {
383*4882a593Smuzhiyun 	struct bio_integrity_payload *bip = bio_integrity(bio);
384*4882a593Smuzhiyun 	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
385*4882a593Smuzhiyun 	unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
386*4882a593Smuzhiyun 
387*4882a593Smuzhiyun 	bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
388*4882a593Smuzhiyun 	bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
389*4882a593Smuzhiyun }
390*4882a593Smuzhiyun 
391*4882a593Smuzhiyun /**
392*4882a593Smuzhiyun  * bio_integrity_trim - Trim integrity vector
393*4882a593Smuzhiyun  * @bio:	bio whose integrity vector to update
394*4882a593Smuzhiyun  *
395*4882a593Smuzhiyun  * Description: Used to trim the integrity vector in a cloned bio.
396*4882a593Smuzhiyun  */
bio_integrity_trim(struct bio * bio)397*4882a593Smuzhiyun void bio_integrity_trim(struct bio *bio)
398*4882a593Smuzhiyun {
399*4882a593Smuzhiyun 	struct bio_integrity_payload *bip = bio_integrity(bio);
400*4882a593Smuzhiyun 	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
401*4882a593Smuzhiyun 
402*4882a593Smuzhiyun 	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
403*4882a593Smuzhiyun }
404*4882a593Smuzhiyun EXPORT_SYMBOL(bio_integrity_trim);
405*4882a593Smuzhiyun 
406*4882a593Smuzhiyun /**
407*4882a593Smuzhiyun  * bio_integrity_clone - Callback for cloning bios with integrity metadata
408*4882a593Smuzhiyun  * @bio:	New bio
409*4882a593Smuzhiyun  * @bio_src:	Original bio
410*4882a593Smuzhiyun  * @gfp_mask:	Memory allocation mask
411*4882a593Smuzhiyun  *
412*4882a593Smuzhiyun  * Description:	Called to allocate a bip when cloning a bio
413*4882a593Smuzhiyun  */
bio_integrity_clone(struct bio * bio,struct bio * bio_src,gfp_t gfp_mask)414*4882a593Smuzhiyun int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
415*4882a593Smuzhiyun 			gfp_t gfp_mask)
416*4882a593Smuzhiyun {
417*4882a593Smuzhiyun 	struct bio_integrity_payload *bip_src = bio_integrity(bio_src);
418*4882a593Smuzhiyun 	struct bio_integrity_payload *bip;
419*4882a593Smuzhiyun 
420*4882a593Smuzhiyun 	BUG_ON(bip_src == NULL);
421*4882a593Smuzhiyun 
422*4882a593Smuzhiyun 	bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
423*4882a593Smuzhiyun 	if (IS_ERR(bip))
424*4882a593Smuzhiyun 		return PTR_ERR(bip);
425*4882a593Smuzhiyun 
426*4882a593Smuzhiyun 	memcpy(bip->bip_vec, bip_src->bip_vec,
427*4882a593Smuzhiyun 	       bip_src->bip_vcnt * sizeof(struct bio_vec));
428*4882a593Smuzhiyun 
429*4882a593Smuzhiyun 	bip->bip_vcnt = bip_src->bip_vcnt;
430*4882a593Smuzhiyun 	bip->bip_iter = bip_src->bip_iter;
431*4882a593Smuzhiyun 
432*4882a593Smuzhiyun 	return 0;
433*4882a593Smuzhiyun }
434*4882a593Smuzhiyun EXPORT_SYMBOL(bio_integrity_clone);
435*4882a593Smuzhiyun 
bioset_integrity_create(struct bio_set * bs,int pool_size)436*4882a593Smuzhiyun int bioset_integrity_create(struct bio_set *bs, int pool_size)
437*4882a593Smuzhiyun {
438*4882a593Smuzhiyun 	if (mempool_initialized(&bs->bio_integrity_pool))
439*4882a593Smuzhiyun 		return 0;
440*4882a593Smuzhiyun 
441*4882a593Smuzhiyun 	if (mempool_init_slab_pool(&bs->bio_integrity_pool,
442*4882a593Smuzhiyun 				   pool_size, bip_slab))
443*4882a593Smuzhiyun 		return -1;
444*4882a593Smuzhiyun 
445*4882a593Smuzhiyun 	if (biovec_init_pool(&bs->bvec_integrity_pool, pool_size)) {
446*4882a593Smuzhiyun 		mempool_exit(&bs->bio_integrity_pool);
447*4882a593Smuzhiyun 		return -1;
448*4882a593Smuzhiyun 	}
449*4882a593Smuzhiyun 
450*4882a593Smuzhiyun 	return 0;
451*4882a593Smuzhiyun }
452*4882a593Smuzhiyun EXPORT_SYMBOL(bioset_integrity_create);
453*4882a593Smuzhiyun 
bioset_integrity_free(struct bio_set * bs)454*4882a593Smuzhiyun void bioset_integrity_free(struct bio_set *bs)
455*4882a593Smuzhiyun {
456*4882a593Smuzhiyun 	mempool_exit(&bs->bio_integrity_pool);
457*4882a593Smuzhiyun 	mempool_exit(&bs->bvec_integrity_pool);
458*4882a593Smuzhiyun }
459*4882a593Smuzhiyun 
bio_integrity_init(void)460*4882a593Smuzhiyun void __init bio_integrity_init(void)
461*4882a593Smuzhiyun {
462*4882a593Smuzhiyun 	/*
463*4882a593Smuzhiyun 	 * kintegrityd won't block much but may burn a lot of CPU cycles.
464*4882a593Smuzhiyun 	 * Make it highpri CPU intensive wq with max concurrency of 1.
465*4882a593Smuzhiyun 	 */
466*4882a593Smuzhiyun 	kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
467*4882a593Smuzhiyun 					 WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
468*4882a593Smuzhiyun 	if (!kintegrityd_wq)
469*4882a593Smuzhiyun 		panic("Failed to create kintegrityd\n");
470*4882a593Smuzhiyun 
471*4882a593Smuzhiyun 	bip_slab = kmem_cache_create("bio_integrity_payload",
472*4882a593Smuzhiyun 				     sizeof(struct bio_integrity_payload) +
473*4882a593Smuzhiyun 				     sizeof(struct bio_vec) * BIP_INLINE_VECS,
474*4882a593Smuzhiyun 				     0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
475*4882a593Smuzhiyun }
476