/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t pool;
	struct bio_set bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();
	int ret;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	ret = mempool_init_slab_pool(&client->pool, min_ios, _dm_io_cache);
	if (ret)
		goto bad;

	ret = bioset_init(&client->bios, min_ios, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto bad;

	return client;

   bad:
	mempool_exit(&client->pool);
	kfree(client);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_exit(&client->pool);
	bioset_exit(&client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
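
/*
 * Typical client lifecycle, shown only as an illustrative sketch (the
 * surrounding target code and error handling are hypothetical, not
 * part of this file):
 *
 *	struct dm_io_client *client = dm_io_client_create();
 *
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...issue dm_io() requests against 'client'...
 *	dm_io_client_destroy(client);
 */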

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
				       unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
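
/*
 * Worked example of the packing above, on a 64-bit build
 * (DM_IO_MAX_REGIONS == BITS_PER_LONG == 64):
 *
 *	io         = 0x...f40		(64-byte aligned, low 6 bits zero)
 *	region     = 5			(must be < 64)
 *	bi_private = 0x...f40 | 5 = 0x...f45
 *
 * Unpacking uses -64UL == ~63UL as the pointer mask:
 *
 *	io     = 0x...f45 & ~63UL = 0x...f40
 *	region = 0x...f45 &  63   = 5
 */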

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

	mempool_free(io, &io->client->pool);
	fn(error_bits, context);
}

static void dec_count(struct io *io, unsigned int region, blk_status_t error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}

static void endio(struct bio *bio)
{
	struct io *io;
	unsigned region;
	blk_status_t error;

	if (bio->bi_status && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	error = bio->bi_status;
	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	union {
		unsigned context_u;
		struct bvec_iter context_bi;
	};
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
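
/*
 * Consumers drive a dpages iterator with the two ops above: get_page()
 * reports the current page, the usable bytes within it and the
 * starting offset; next_page() advances to the following page.  A
 * minimal consumer loop looks roughly like this (sketch only; 'bytes'
 * is a hypothetical remaining-byte count, not a name used in this
 * file):
 *
 *	while (bytes) {
 *		dp->get_page(dp, &page, &len, &offset);
 *		len = min(len, bytes);
 *		...consume 'len' bytes of 'page' starting at 'offset'...
 *		bytes -= len;
 *		dp->next_page(dp);
 *	}
 */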

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
					     dp->context_bi);

	*p = bvec.bv_page;
	*len = bvec.bv_len;
	*offset = bvec.bv_offset;

	/* avoid figuring it out again in bio_next_page() */
	dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
}

static void bio_next_page(struct dpages *dp)
{
	unsigned int len = (unsigned int)dp->context_bi.bi_sector;

	bvec_iter_advance((struct bio_vec *)dp->context_ptr,
			  &dp->context_bi, len);
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;

	/*
	 * We just use the bvec iterator to retrieve pages, so it is OK
	 * to access the bvec table directly here.
	 */
	dp->context_ptr = bio->bi_io_vec;
	dp->context_bi = bio->bi_iter;
}
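
/*
 * Design note: bio_get_page() stashes the current bvec length in the
 * otherwise-unused bi_sector field of the private bvec_iter copy, so
 * that bio_next_page() can advance by exactly that many bytes without
 * recomputing bvec_iter_bvec().  Because the iterator operates on a
 * private copy of bi_iter, the original bio is never modified.
 */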

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}
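
/*
 * The vm_* and km_* variants differ only in address translation:
 * vmalloc()ed buffers are virtually contiguous but physically
 * scattered, so each page must be looked up with vmalloc_to_page(),
 * whereas directly-mapped (kmalloc) buffers translate via
 * virt_to_page().  Passing a vmalloc address through the km_* path
 * would yield the wrong struct page, which is why callers select the
 * memory type explicitly in dp_init() below.
 */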

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int op, int op_flags, unsigned region,
		      struct dm_io_region *where, struct dpages *dp,
		      struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;
	unsigned int special_cmd_max_sectors;

	/*
	 * Reject unsupported discard, write zeroes and write same requests.
	 */
	if (op == REQ_OP_DISCARD)
		special_cmd_max_sectors = q->limits.max_discard_sectors;
	else if (op == REQ_OP_WRITE_ZEROES)
		special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
	else if (op == REQ_OP_WRITE_SAME)
		special_cmd_max_sectors = q->limits.max_write_same_sectors;
	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
	     op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) {
		atomic_inc(&io->count);
		dec_count(io, region, BLK_STS_NOTSUPP);
		return;
	}

	/*
	 * where->count may be zero if op holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		switch (op) {
		case REQ_OP_DISCARD:
		case REQ_OP_WRITE_ZEROES:
			num_bvecs = 0;
			break;
		case REQ_OP_WRITE_SAME:
			num_bvecs = 1;
			break;
		default:
			num_bvecs = min_t(int, BIO_MAX_PAGES,
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
		}

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, &io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio_set_dev(bio, where->bdev);
		bio->bi_end_io = endio;
		bio_set_op_attrs(bio, op, op_flags);
		store_io_and_region_in_bio(bio, io, region);

		if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (op == REQ_OP_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try to add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(bio);
	} while (remaining);
}
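
/*
 * Illustrative numbers for the splitting loop above, assuming 4 KiB
 * pages and BIO_MAX_PAGES == 256: a read covering a 4 MiB region
 * (8192 sectors) packs at most 256 pages (2048 sectors) into each
 * bio, so the do/while loop submits four bios, each taking its own
 * reference on 'io' via atomic_inc() before submit_bio().
 * 'remaining' tracks the sectors still to be issued and positions
 * each bio at where->sector + (where->count - remaining).
 */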

static void dispatch_io(int op, int op_flags, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		op_flags |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (op_flags & REQ_PREFLUSH))
			do_region(op, op_flags, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}
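
/*
 * Reference-counting protocol: the callers below initialize io->count
 * to 1 before dispatching, each bio submitted by do_region() adds one
 * reference, and each completion (endio) or synchronous rejection
 * drops one.  The final dec_count(io, 0, 0) above releases the
 * dispatcher's own reference, so complete_io() can only run once
 * every submitted bio has finished, never while bios are still being
 * issued.
 */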

struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int op, int op_flags,
		   struct dpages *dp, unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

	if (num_regions > 1 && !op_is_write(op)) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(&client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(op, op_flags, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}
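
/*
 * Note on the WARN_ON() above: a single dpages iterator describes one
 * buffer, and dispatch_io() rewinds it for every region, so a write
 * can fan the same data out to num_regions devices (as in mirroring),
 * but a read into more than one region would make the destinations
 * overwrite each other.  Multi-region requests are therefore
 * write-only; async_io() below enforces the same rule.
 */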

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int op, int op_flags,
		    struct dpages *dp, io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && !op_is_write(op)) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(&client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(op, op_flags, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if (io_req->bi_op == REQ_OP_READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
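
/*
 * Cheat sheet for the memory types handled above:
 *
 *	DM_IO_PAGE_LIST	struct page_list chain plus a starting offset
 *	DM_IO_BIO	pages taken from a bio's bvec table
 *	DM_IO_VMA	vmalloc()ed buffer (cache-flushed here; reads
 *			are invalidated again on completion)
 *	DM_IO_KMEM	directly-mapped kernel memory (kmalloc etc.)
 */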

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), the bios are
 * submitted immediately via submit_bio() and notify.fn is called from
 * the completion path.  (The historical advice to unplug the queue
 * with blk_unplug() or rely on q->unplug_delay predates the removal
 * of the per-queue plugging scheme and no longer applies.)
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_op, io_req->bi_op_flags, &dp,
			       sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_op,
			io_req->bi_op_flags, &dp, io_req->notify.fn,
			io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
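
/*
 * Minimal synchronous usage sketch ('bdev', 'buf' and the sizes are
 * hypothetical; error handling is elided):
 *
 *	struct dm_io_region where = {
 *		.bdev   = bdev,			// target block device
 *		.sector = 0,
 *		.count  = 8,			// 4 KiB in 512-byte sectors
 *	};
 *	struct dm_io_request req = {
 *		.bi_op        = REQ_OP_READ,
 *		.bi_op_flags  = 0,
 *		.mem.type     = DM_IO_KMEM,
 *		.mem.ptr.addr = buf,		// kmalloc()ed buffer
 *		.notify.fn    = NULL,		// NULL => synchronous
 *		.client       = client,
 *	};
 *	unsigned long error_bits;
 *
 *	r = dm_io(&req, 1, &where, &error_bits);
 *
 * Supplying notify.fn and notify.context instead makes the same call
 * asynchronous; the callback then receives the per-region error bits.
 */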

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}