xref: /OK3568_Linux_fs/kernel/drivers/md/dm.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/keyslot-manager.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
	unsigned magic;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	bool inside_dm_io;
	struct bio clone;
};

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 5191977
struct dm_io {
	unsigned magic;
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;
	struct bio *orig_bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	if (!tio->inside_dm_io)
		return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
	return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
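
/*
 * Illustrative sketch (editor-added, not part of the original file): a
 * bio-based target that set ti->per_io_data_size in its constructor can
 * retrieve its per-bio context from within its ->map() or ->end_io()
 * hooks via dm_per_bio_data().  The struct and function names below are
 * hypothetical.
 *
 *	struct example_per_bio_data {
 *		sector_t orig_sector;
 *	};
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_per_bio_data *pb =
 *			dm_per_bio_data(bio, ti->per_io_data_size);
 *
 *		pb->orig_sector = bio->bi_iter.bi_sector;
 *		...
 *		return DM_MAPIO_REMAPPED;
 *	}
 */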

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

#define DEFAULT_SWAP_BIOS	(8 * 1048576 / PAGE_SIZE)
static int swap_bios = DEFAULT_SWAP_BIOS;
static int get_swap_bios(void)
{
	int latch = READ_ONCE(swap_bios);
	if (unlikely(latch <= 0))
		latch = DEFAULT_SWAP_BIOS;
	return latch;
}

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
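
/*
 * Editor-added illustration (not part of the original file): the helpers
 * above clamp user-supplied module parameters rather than rejecting them.
 * Assuming reserved_bio_based_ios is registered as a module parameter
 * further down in this file, as in mainline dm.c, a value of 0 falls back
 * to the default RESERVED_BIO_BASED_IOS (16) and values above
 * DM_RESERVED_MAX_IOS (defined in dm.h) are clamped to that maximum:
 *
 *	# echo 0 > /sys/module/dm_mod/parameters/reserved_bio_based_ios
 *	  -> dm_get_reserved_bio_based_ios() returns 16
 */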

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

#ifdef CONFIG_BLK_DEV_ZONED
int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	struct dm_report_zones_args *args = data;
	sector_t sector_diff = args->tgt->begin - args->start;

	/*
	 * Ignore zones beyond the target range.
	 */
	if (zone->start >= args->start + args->tgt->len)
		return 0;

	/*
	 * Remap the start sector and write pointer position of the zone
	 * to match its position in the target range.
	 */
	zone->start += sector_diff;
	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
		if (zone->cond == BLK_ZONE_COND_FULL)
			zone->wp = zone->start + zone->len;
		else if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->wp = zone->start;
		else
			zone->wp += sector_diff;
	}

	args->next_sector = zone->start + zone->len;
	return args->orig_cb(zone, args->zone_idx++, args->orig_data);
}
EXPORT_SYMBOL_GPL(dm_report_zones_cb);

static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct mapped_device *md = disk->private_data;
	struct dm_table *map;
	int srcu_idx, ret;
	struct dm_report_zones_args args = {
		.next_sector = sector,
		.orig_data = data,
		.orig_cb = cb,
	};

	if (dm_suspended_md(md))
		return -EAGAIN;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map) {
		ret = -EIO;
		goto out;
	}

	do {
		struct dm_target *tgt;

		tgt = dm_table_find_target(map, args.next_sector);
		if (WARN_ON_ONCE(!tgt->type->report_zones)) {
			ret = -EIO;
			goto out;
		}

		args.tgt = tgt;
		ret = tgt->type->report_zones(tgt, &args,
					      nr_zones - args.zone_idx);
		if (ret < 0)
			goto out;
	} while (args.zone_idx < nr_zones &&
		 args.next_sector < get_capacity(disk));

	ret = args.zone_idx;
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
#else
#define dm_blk_report_zones		NULL
#endif /* CONFIG_BLK_DEV_ZONED */

static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
{
	dm_put_live_table(md, srcu_idx);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMDEBUG_LIMIT(
	"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

u64 dm_start_time_ns_from_clone(struct bio *bio)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;

	return jiffies_to_nsecs(io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);

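/*
 * Illustrative sketch (editor-added, not part of the original file): a
 * target's ->end_io() hook can pass the clone bio it was handed to
 * dm_start_time_ns_from_clone() to derive how long the original I/O has
 * been in flight (start_time is jiffies-based, hence the comparison
 * against jiffies_to_nsecs(jiffies)).  The function name below is
 * hypothetical.
 *
 *	static int example_end_io(struct dm_target *ti, struct bio *clone,
 *				  blk_status_t *error)
 *	{
 *		u64 elapsed_ns = jiffies_to_nsecs(jiffies) -
 *				 dm_start_time_ns_from_clone(clone);
 *
 *		feed elapsed_ns into per-target latency statistics, then:
 *		return DM_ENDIO_DONE;
 *	}
 */
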
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;

	io->start_time = bio_start_io_acct(bio);
	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}

static void end_io_acct(struct mapped_device *md, struct bio *bio,
			unsigned long start_time, struct dm_stats_aux *stats_aux)
{
	unsigned long duration = jiffies - start_time;

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, stats_aux);

	smp_wmb();

	bio_end_io_acct(bio, start_time);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);
}

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
	if (!clone)
		return NULL;

	tio = container_of(clone, struct dm_target_io, clone);
	tio->inside_dm_io = true;
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = 0;
	atomic_set(&io->io_count, 1);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->endio_lock);

	start_io_acct(io);

	return io;
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
				      unsigned target_bio_nr, gfp_t gfp_mask)
{
	struct dm_target_io *tio;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
	} else {
		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
		if (!clone)
			return NULL;

		tio = container_of(clone, struct dm_target_io, clone);
		tio->inside_dm_io = false;
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}

static void free_tio(struct dm_target_io *tio)
{
	if (tio->inside_dm_io)
		return;
	bio_put(&tio->clone);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;
	unsigned long start_time = 0;
	struct dm_stats_aux stats_aux;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
				bio_list_add_head(&md->deferred, io->orig_bio);
			else
				/* noflush suspend was interrupted. */
				io->status = BLK_STS_IOERR;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->status;
		bio = io->orig_bio;
		start_time = io->start_time;
		stats_aux = io->stats_aux;
		free_io(md, io);
		end_io_acct(md, bio, start_time, &stats_aux);

		if (io_error == BLK_STS_DM_REQUEUE)
			return;

		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			if (io_error)
				bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}

void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
	return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;
	struct bio *orig_bio = io->orig_bio;

	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bio->bi_disk->queue->limits.max_discard_sectors)
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
			 !bio->bi_disk->queue->limits.max_write_same_sectors)
			disable_write_same(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	/*
	 * For zone-append bios get offset in zone of the written
	 * sector and add that to the original bio sector pos.
	 */
	if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {
		sector_t written_sector = bio->bi_iter.bi_sector;
		struct request_queue *q = orig_bio->bi_disk->queue;
		u64 mask = (u64)blk_queue_zone_sectors(q) - 1;

		orig_bio->bi_iter.bi_sector += written_sector & mask;
	}

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			error = BLK_STS_DM_REQUEUE;
			fallthrough;
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (unlikely(swap_bios_limit(tio->ti, bio))) {
		struct mapped_device *md = io->md;
		up(&md->swap_bios_semaphore);
	}

	free_tio(tio);
	dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
						  sector_t target_offset)
{
	return ti->len - target_offset;
}

static sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
	sector_t target_offset = dm_target_offset(ti, sector);
	sector_t len = max_io_len_target_boundary(ti, target_offset);
	sector_t max_len;

	/*
	 * Does the target need to split IO even further?
	 * - varied (per target) IO splitting is a tenet of DM; this
	 *   explains why stacked chunk_sectors based splitting via
	 *   blk_max_size_offset() isn't possible here. So pass in
	 *   ti->max_io_len to override stacked chunk_sectors.
	 */
	if (ti->max_io_len) {
		max_len = blk_max_size_offset(ti->table->md->queue,
					      target_offset, ti->max_io_len);
		if (len > max_len)
			len = max_len;
	}

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
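
/*
 * Illustrative sketch (editor-added, not part of the original file): a
 * target constructor that needs its I/O split on a fixed chunk boundary
 * (a striping target, for instance) typically calls
 * dm_set_target_max_io_len() with that chunk size so max_io_len() above
 * clamps bios accordingly.  The names below are hypothetical.
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc,
 *			       char **argv)
 *	{
 *		sector_t chunk_sectors = 8;	(4 KiB chunks, for example)
 *		int r;
 *
 *		r = dm_set_target_max_io_len(ti, chunk_sectors);
 *		if (r)
 *			return r;
 *		...
 *		return 0;
 *	}
 */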
1096*4882a593Smuzhiyun 
dm_dax_get_live_target(struct mapped_device * md,sector_t sector,int * srcu_idx)1097*4882a593Smuzhiyun static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
1098*4882a593Smuzhiyun 						sector_t sector, int *srcu_idx)
1099*4882a593Smuzhiyun 	__acquires(md->io_barrier)
1100*4882a593Smuzhiyun {
1101*4882a593Smuzhiyun 	struct dm_table *map;
1102*4882a593Smuzhiyun 	struct dm_target *ti;
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun 	map = dm_get_live_table(md, srcu_idx);
1105*4882a593Smuzhiyun 	if (!map)
1106*4882a593Smuzhiyun 		return NULL;
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun 	ti = dm_table_find_target(map, sector);
1109*4882a593Smuzhiyun 	if (!ti)
1110*4882a593Smuzhiyun 		return NULL;
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun 	return ti;
1113*4882a593Smuzhiyun }
1114*4882a593Smuzhiyun 
dm_dax_direct_access(struct dax_device * dax_dev,pgoff_t pgoff,long nr_pages,void ** kaddr,pfn_t * pfn)1115*4882a593Smuzhiyun static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
1116*4882a593Smuzhiyun 				 long nr_pages, void **kaddr, pfn_t *pfn)
1117*4882a593Smuzhiyun {
1118*4882a593Smuzhiyun 	struct mapped_device *md = dax_get_private(dax_dev);
1119*4882a593Smuzhiyun 	sector_t sector = pgoff * PAGE_SECTORS;
1120*4882a593Smuzhiyun 	struct dm_target *ti;
1121*4882a593Smuzhiyun 	long len, ret = -EIO;
1122*4882a593Smuzhiyun 	int srcu_idx;
1123*4882a593Smuzhiyun 
1124*4882a593Smuzhiyun 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun 	if (!ti)
1127*4882a593Smuzhiyun 		goto out;
1128*4882a593Smuzhiyun 	if (!ti->type->direct_access)
1129*4882a593Smuzhiyun 		goto out;
1130*4882a593Smuzhiyun 	len = max_io_len(ti, sector) / PAGE_SECTORS;
1131*4882a593Smuzhiyun 	if (len < 1)
1132*4882a593Smuzhiyun 		goto out;
1133*4882a593Smuzhiyun 	nr_pages = min(len, nr_pages);
1134*4882a593Smuzhiyun 	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun  out:
1137*4882a593Smuzhiyun 	dm_put_live_table(md, srcu_idx);
1138*4882a593Smuzhiyun 
1139*4882a593Smuzhiyun 	return ret;
1140*4882a593Smuzhiyun }
1141*4882a593Smuzhiyun 
dm_dax_supported(struct dax_device * dax_dev,struct block_device * bdev,int blocksize,sector_t start,sector_t len)1142*4882a593Smuzhiyun static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
1143*4882a593Smuzhiyun 		int blocksize, sector_t start, sector_t len)
1144*4882a593Smuzhiyun {
1145*4882a593Smuzhiyun 	struct mapped_device *md = dax_get_private(dax_dev);
1146*4882a593Smuzhiyun 	struct dm_table *map;
1147*4882a593Smuzhiyun 	bool ret = false;
1148*4882a593Smuzhiyun 	int srcu_idx;
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 	map = dm_get_live_table(md, &srcu_idx);
1151*4882a593Smuzhiyun 	if (!map)
1152*4882a593Smuzhiyun 		goto out;
1153*4882a593Smuzhiyun 
1154*4882a593Smuzhiyun 	ret = dm_table_supports_dax(map, device_not_dax_capable, &blocksize);
1155*4882a593Smuzhiyun 
1156*4882a593Smuzhiyun out:
1157*4882a593Smuzhiyun 	dm_put_live_table(md, srcu_idx);
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun 	return ret;
1160*4882a593Smuzhiyun }
1161*4882a593Smuzhiyun 
dm_dax_copy_from_iter(struct dax_device * dax_dev,pgoff_t pgoff,void * addr,size_t bytes,struct iov_iter * i)1162*4882a593Smuzhiyun static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
1163*4882a593Smuzhiyun 				    void *addr, size_t bytes, struct iov_iter *i)
1164*4882a593Smuzhiyun {
1165*4882a593Smuzhiyun 	struct mapped_device *md = dax_get_private(dax_dev);
1166*4882a593Smuzhiyun 	sector_t sector = pgoff * PAGE_SECTORS;
1167*4882a593Smuzhiyun 	struct dm_target *ti;
1168*4882a593Smuzhiyun 	long ret = 0;
1169*4882a593Smuzhiyun 	int srcu_idx;
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun 	if (!ti)
1174*4882a593Smuzhiyun 		goto out;
1175*4882a593Smuzhiyun 	if (!ti->type->dax_copy_from_iter) {
1176*4882a593Smuzhiyun 		ret = copy_from_iter(addr, bytes, i);
1177*4882a593Smuzhiyun 		goto out;
1178*4882a593Smuzhiyun 	}
1179*4882a593Smuzhiyun 	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
1180*4882a593Smuzhiyun  out:
1181*4882a593Smuzhiyun 	dm_put_live_table(md, srcu_idx);
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 	return ret;
1184*4882a593Smuzhiyun }
1185*4882a593Smuzhiyun 
dm_dax_copy_to_iter(struct dax_device * dax_dev,pgoff_t pgoff,void * addr,size_t bytes,struct iov_iter * i)1186*4882a593Smuzhiyun static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
1187*4882a593Smuzhiyun 		void *addr, size_t bytes, struct iov_iter *i)
1188*4882a593Smuzhiyun {
1189*4882a593Smuzhiyun 	struct mapped_device *md = dax_get_private(dax_dev);
1190*4882a593Smuzhiyun 	sector_t sector = pgoff * PAGE_SECTORS;
1191*4882a593Smuzhiyun 	struct dm_target *ti;
1192*4882a593Smuzhiyun 	long ret = 0;
1193*4882a593Smuzhiyun 	int srcu_idx;
1194*4882a593Smuzhiyun 
1195*4882a593Smuzhiyun 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 	if (!ti)
1198*4882a593Smuzhiyun 		goto out;
1199*4882a593Smuzhiyun 	if (!ti->type->dax_copy_to_iter) {
1200*4882a593Smuzhiyun 		ret = copy_to_iter(addr, bytes, i);
1201*4882a593Smuzhiyun 		goto out;
1202*4882a593Smuzhiyun 	}
1203*4882a593Smuzhiyun 	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
1204*4882a593Smuzhiyun  out:
1205*4882a593Smuzhiyun 	dm_put_live_table(md, srcu_idx);
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	return ret;
1208*4882a593Smuzhiyun }
1209*4882a593Smuzhiyun 
dm_dax_zero_page_range(struct dax_device * dax_dev,pgoff_t pgoff,size_t nr_pages)1210*4882a593Smuzhiyun static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
1211*4882a593Smuzhiyun 				  size_t nr_pages)
1212*4882a593Smuzhiyun {
1213*4882a593Smuzhiyun 	struct mapped_device *md = dax_get_private(dax_dev);
1214*4882a593Smuzhiyun 	sector_t sector = pgoff * PAGE_SECTORS;
1215*4882a593Smuzhiyun 	struct dm_target *ti;
1216*4882a593Smuzhiyun 	int ret = -EIO;
1217*4882a593Smuzhiyun 	int srcu_idx;
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 	if (!ti)
1222*4882a593Smuzhiyun 		goto out;
1223*4882a593Smuzhiyun 	if (WARN_ON(!ti->type->dax_zero_page_range)) {
1224*4882a593Smuzhiyun 		/*
1225*4882a593Smuzhiyun 		 * ->zero_page_range() is mandatory dax operation. If we are
1226*4882a593Smuzhiyun 		 * ->zero_page_range() is a mandatory dax operation. If we
1227*4882a593Smuzhiyun 		 * are here, something is wrong.
1228*4882a593Smuzhiyun 		goto out;
1229*4882a593Smuzhiyun 	}
1230*4882a593Smuzhiyun 	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
1231*4882a593Smuzhiyun  out:
1232*4882a593Smuzhiyun 	dm_put_live_table(md, srcu_idx);
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun 	return ret;
1235*4882a593Smuzhiyun }
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun /*
1238*4882a593Smuzhiyun  * A target may call dm_accept_partial_bio only from the map routine.  It is
1239*4882a593Smuzhiyun  * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
1240*4882a593Smuzhiyun  * operations and REQ_OP_ZONE_APPEND (zone append writes).
1241*4882a593Smuzhiyun  *
1242*4882a593Smuzhiyun  * dm_accept_partial_bio informs DM that the target only wants to process a
1243*4882a593Smuzhiyun  * further n_sectors sectors of the bio and that the rest of the data should
1244*4882a593Smuzhiyun  * be sent in a subsequent bio.
1245*4882a593Smuzhiyun  *
1246*4882a593Smuzhiyun  * A diagram that explains the arithmetic:
1247*4882a593Smuzhiyun  * +--------------------+---------------+-------+
1248*4882a593Smuzhiyun  * |         1          |       2       |   3   |
1249*4882a593Smuzhiyun  * +--------------------+---------------+-------+
1250*4882a593Smuzhiyun  *
1251*4882a593Smuzhiyun  * <-------------- *tio->len_ptr --------------->
1252*4882a593Smuzhiyun  *                      <------- bi_size ------->
1253*4882a593Smuzhiyun  *                      <-- n_sectors -->
1254*4882a593Smuzhiyun  *
1255*4882a593Smuzhiyun  * Region 1 has already been iterated over with bio_advance or a similar function.
1256*4882a593Smuzhiyun  *	(it may be empty if the target doesn't use bio_advance)
1257*4882a593Smuzhiyun  * Region 2 is the remaining bio size that the target wants to process.
1258*4882a593Smuzhiyun  *	(it may be empty if region 1 is non-empty, although there is no reason
1259*4882a593Smuzhiyun  *	 to make it empty)
1260*4882a593Smuzhiyun  * The target requires that region 3 be sent in the next bio.
1261*4882a593Smuzhiyun  *
1262*4882a593Smuzhiyun  * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1263*4882a593Smuzhiyun  * the partially processed part (the sum of regions 1+2) must be the same for all
1264*4882a593Smuzhiyun  * copies of the bio.
1265*4882a593Smuzhiyun  */
dm_accept_partial_bio(struct bio * bio,unsigned n_sectors)1266*4882a593Smuzhiyun void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
1267*4882a593Smuzhiyun {
1268*4882a593Smuzhiyun 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
1269*4882a593Smuzhiyun 	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun 	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
1272*4882a593Smuzhiyun 	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
1273*4882a593Smuzhiyun 	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
1274*4882a593Smuzhiyun 	BUG_ON(bi_size > *tio->len_ptr);
1275*4882a593Smuzhiyun 	BUG_ON(n_sectors > bi_size);
1276*4882a593Smuzhiyun 
1277*4882a593Smuzhiyun 	*tio->len_ptr -= bi_size - n_sectors;
1278*4882a593Smuzhiyun 	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
1279*4882a593Smuzhiyun }
1280*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
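
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * bio-based target's ->map() routine that only handles I/O up to a
 * per-target boundary and uses dm_accept_partial_bio() to hand region 3
 * back to DM core as a follow-up bio.  "example_target", "example_map"
 * and the boundary field are assumptions made for this example only;
 * the boundary is assumed to be a power-of-two sector count.
 */
struct example_target {
	struct dm_dev *dev;
	sector_t start;
	sector_t boundary;
};

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_target *et = ti->private;
	sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
	sector_t max = et->boundary - (offset & (et->boundary - 1));

	/* Accept only the sectors that fit before the boundary (region 2). */
	if (bio_sectors(bio) > max)
		dm_accept_partial_bio(bio, max);

	/* Remap what was accepted and let DM core submit the clone. */
	bio_set_dev(bio, et->dev->bdev);
	bio->bi_iter.bi_sector = et->start + offset;
	return DM_MAPIO_REMAPPED;
}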
1281*4882a593Smuzhiyun 
__set_swap_bios_limit(struct mapped_device * md,int latch)1282*4882a593Smuzhiyun static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
1283*4882a593Smuzhiyun {
1284*4882a593Smuzhiyun 	mutex_lock(&md->swap_bios_lock);
1285*4882a593Smuzhiyun 	while (latch < md->swap_bios) {
1286*4882a593Smuzhiyun 		cond_resched();
1287*4882a593Smuzhiyun 		down(&md->swap_bios_semaphore);
1288*4882a593Smuzhiyun 		md->swap_bios--;
1289*4882a593Smuzhiyun 	}
1290*4882a593Smuzhiyun 	while (latch > md->swap_bios) {
1291*4882a593Smuzhiyun 		cond_resched();
1292*4882a593Smuzhiyun 		up(&md->swap_bios_semaphore);
1293*4882a593Smuzhiyun 		md->swap_bios++;
1294*4882a593Smuzhiyun 	}
1295*4882a593Smuzhiyun 	mutex_unlock(&md->swap_bios_lock);
1296*4882a593Smuzhiyun }
1297*4882a593Smuzhiyun 
__map_bio(struct dm_target_io * tio)1298*4882a593Smuzhiyun static blk_qc_t __map_bio(struct dm_target_io *tio)
1299*4882a593Smuzhiyun {
1300*4882a593Smuzhiyun 	int r;
1301*4882a593Smuzhiyun 	sector_t sector;
1302*4882a593Smuzhiyun 	struct bio *clone = &tio->clone;
1303*4882a593Smuzhiyun 	struct dm_io *io = tio->io;
1304*4882a593Smuzhiyun 	struct dm_target *ti = tio->ti;
1305*4882a593Smuzhiyun 	blk_qc_t ret = BLK_QC_T_NONE;
1306*4882a593Smuzhiyun 
1307*4882a593Smuzhiyun 	clone->bi_end_io = clone_endio;
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun 	/*
1310*4882a593Smuzhiyun 	 * Map the clone.  If r == 0 (DM_MAPIO_SUBMITTED) we don't need
1311*4882a593Smuzhiyun 	 * to do anything; the target has assumed ownership of this io.
1313*4882a593Smuzhiyun 	 */
1314*4882a593Smuzhiyun 	atomic_inc(&io->io_count);
1315*4882a593Smuzhiyun 	sector = clone->bi_iter.bi_sector;
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 	if (unlikely(swap_bios_limit(ti, clone))) {
1318*4882a593Smuzhiyun 		struct mapped_device *md = io->md;
1319*4882a593Smuzhiyun 		int latch = get_swap_bios();
1320*4882a593Smuzhiyun 		if (unlikely(latch != md->swap_bios))
1321*4882a593Smuzhiyun 			__set_swap_bios_limit(md, latch);
1322*4882a593Smuzhiyun 		down(&md->swap_bios_semaphore);
1323*4882a593Smuzhiyun 	}
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun 	r = ti->type->map(ti, clone);
1326*4882a593Smuzhiyun 	switch (r) {
1327*4882a593Smuzhiyun 	case DM_MAPIO_SUBMITTED:
1328*4882a593Smuzhiyun 		break;
1329*4882a593Smuzhiyun 	case DM_MAPIO_REMAPPED:
1330*4882a593Smuzhiyun 		/* the bio has been remapped so dispatch it */
1331*4882a593Smuzhiyun 		trace_block_bio_remap(clone->bi_disk->queue, clone,
1332*4882a593Smuzhiyun 				      bio_dev(io->orig_bio), sector);
1333*4882a593Smuzhiyun 		ret = submit_bio_noacct(clone);
1334*4882a593Smuzhiyun 		break;
1335*4882a593Smuzhiyun 	case DM_MAPIO_KILL:
1336*4882a593Smuzhiyun 		if (unlikely(swap_bios_limit(ti, clone))) {
1337*4882a593Smuzhiyun 			struct mapped_device *md = io->md;
1338*4882a593Smuzhiyun 			up(&md->swap_bios_semaphore);
1339*4882a593Smuzhiyun 		}
1340*4882a593Smuzhiyun 		free_tio(tio);
1341*4882a593Smuzhiyun 		dec_pending(io, BLK_STS_IOERR);
1342*4882a593Smuzhiyun 		break;
1343*4882a593Smuzhiyun 	case DM_MAPIO_REQUEUE:
1344*4882a593Smuzhiyun 		if (unlikely(swap_bios_limit(ti, clone))) {
1345*4882a593Smuzhiyun 			struct mapped_device *md = io->md;
1346*4882a593Smuzhiyun 			up(&md->swap_bios_semaphore);
1347*4882a593Smuzhiyun 		}
1348*4882a593Smuzhiyun 		free_tio(tio);
1349*4882a593Smuzhiyun 		dec_pending(io, BLK_STS_DM_REQUEUE);
1350*4882a593Smuzhiyun 		break;
1351*4882a593Smuzhiyun 	default:
1352*4882a593Smuzhiyun 		DMWARN("unimplemented target map return value: %d", r);
1353*4882a593Smuzhiyun 		BUG();
1354*4882a593Smuzhiyun 	}
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 	return ret;
1357*4882a593Smuzhiyun }
1358*4882a593Smuzhiyun 
bio_setup_sector(struct bio * bio,sector_t sector,unsigned len)1359*4882a593Smuzhiyun static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
1360*4882a593Smuzhiyun {
1361*4882a593Smuzhiyun 	bio->bi_iter.bi_sector = sector;
1362*4882a593Smuzhiyun 	bio->bi_iter.bi_size = to_bytes(len);
1363*4882a593Smuzhiyun }
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun /*
1366*4882a593Smuzhiyun  * Creates a bio that consists of a range of complete bvecs.
1367*4882a593Smuzhiyun  */
clone_bio(struct dm_target_io * tio,struct bio * bio,sector_t sector,unsigned len)1368*4882a593Smuzhiyun static int clone_bio(struct dm_target_io *tio, struct bio *bio,
1369*4882a593Smuzhiyun 		     sector_t sector, unsigned len)
1370*4882a593Smuzhiyun {
1371*4882a593Smuzhiyun 	struct bio *clone = &tio->clone;
1372*4882a593Smuzhiyun 	int r;
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun 	__bio_clone_fast(clone, bio);
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun 	r = bio_crypt_clone(clone, bio, GFP_NOIO);
1377*4882a593Smuzhiyun 	if (r < 0)
1378*4882a593Smuzhiyun 		return r;
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun 	if (bio_integrity(bio)) {
1381*4882a593Smuzhiyun 		if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
1382*4882a593Smuzhiyun 			     !dm_target_passes_integrity(tio->ti->type))) {
1383*4882a593Smuzhiyun 			DMWARN("%s: the target %s doesn't support integrity data.",
1384*4882a593Smuzhiyun 				dm_device_name(tio->io->md),
1385*4882a593Smuzhiyun 				tio->ti->type->name);
1386*4882a593Smuzhiyun 			return -EIO;
1387*4882a593Smuzhiyun 		}
1388*4882a593Smuzhiyun 
1389*4882a593Smuzhiyun 		r = bio_integrity_clone(clone, bio, GFP_NOIO);
1390*4882a593Smuzhiyun 		if (r < 0)
1391*4882a593Smuzhiyun 			return r;
1392*4882a593Smuzhiyun 	}
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun 	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1395*4882a593Smuzhiyun 	clone->bi_iter.bi_size = to_bytes(len);
1396*4882a593Smuzhiyun 
1397*4882a593Smuzhiyun 	if (bio_integrity(bio))
1398*4882a593Smuzhiyun 		bio_integrity_trim(clone);
1399*4882a593Smuzhiyun 
1400*4882a593Smuzhiyun 	return 0;
1401*4882a593Smuzhiyun }
1402*4882a593Smuzhiyun 
alloc_multiple_bios(struct bio_list * blist,struct clone_info * ci,struct dm_target * ti,unsigned num_bios)1403*4882a593Smuzhiyun static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
1404*4882a593Smuzhiyun 				struct dm_target *ti, unsigned num_bios)
1405*4882a593Smuzhiyun {
1406*4882a593Smuzhiyun 	struct dm_target_io *tio;
1407*4882a593Smuzhiyun 	int try;
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 	if (!num_bios)
1410*4882a593Smuzhiyun 		return;
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun 	if (num_bios == 1) {
1413*4882a593Smuzhiyun 		tio = alloc_tio(ci, ti, 0, GFP_NOIO);
1414*4882a593Smuzhiyun 		bio_list_add(blist, &tio->clone);
1415*4882a593Smuzhiyun 		return;
1416*4882a593Smuzhiyun 	}
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun 	for (try = 0; try < 2; try++) {
1419*4882a593Smuzhiyun 		int bio_nr;
1420*4882a593Smuzhiyun 		struct bio *bio;
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun 		if (try)
1423*4882a593Smuzhiyun 			mutex_lock(&ci->io->md->table_devices_lock);
1424*4882a593Smuzhiyun 		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
1425*4882a593Smuzhiyun 			tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
1426*4882a593Smuzhiyun 			if (!tio)
1427*4882a593Smuzhiyun 				break;
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun 			bio_list_add(blist, &tio->clone);
1430*4882a593Smuzhiyun 		}
1431*4882a593Smuzhiyun 		if (try)
1432*4882a593Smuzhiyun 			mutex_unlock(&ci->io->md->table_devices_lock);
1433*4882a593Smuzhiyun 		if (bio_nr == num_bios)
1434*4882a593Smuzhiyun 			return;
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 		while ((bio = bio_list_pop(blist))) {
1437*4882a593Smuzhiyun 			tio = container_of(bio, struct dm_target_io, clone);
1438*4882a593Smuzhiyun 			free_tio(tio);
1439*4882a593Smuzhiyun 		}
1440*4882a593Smuzhiyun 	}
1441*4882a593Smuzhiyun }
1442*4882a593Smuzhiyun 
__clone_and_map_simple_bio(struct clone_info * ci,struct dm_target_io * tio,unsigned * len)1443*4882a593Smuzhiyun static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
1444*4882a593Smuzhiyun 					   struct dm_target_io *tio, unsigned *len)
1445*4882a593Smuzhiyun {
1446*4882a593Smuzhiyun 	struct bio *clone = &tio->clone;
1447*4882a593Smuzhiyun 
1448*4882a593Smuzhiyun 	tio->len_ptr = len;
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun 	__bio_clone_fast(clone, ci->bio);
1451*4882a593Smuzhiyun 	if (len)
1452*4882a593Smuzhiyun 		bio_setup_sector(clone, ci->sector, *len);
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun 	return __map_bio(tio);
1455*4882a593Smuzhiyun }
1456*4882a593Smuzhiyun 
__send_duplicate_bios(struct clone_info * ci,struct dm_target * ti,unsigned num_bios,unsigned * len)1457*4882a593Smuzhiyun static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1458*4882a593Smuzhiyun 				  unsigned num_bios, unsigned *len)
1459*4882a593Smuzhiyun {
1460*4882a593Smuzhiyun 	struct bio_list blist = BIO_EMPTY_LIST;
1461*4882a593Smuzhiyun 	struct bio *bio;
1462*4882a593Smuzhiyun 	struct dm_target_io *tio;
1463*4882a593Smuzhiyun 
1464*4882a593Smuzhiyun 	alloc_multiple_bios(&blist, ci, ti, num_bios);
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 	while ((bio = bio_list_pop(&blist))) {
1467*4882a593Smuzhiyun 		tio = container_of(bio, struct dm_target_io, clone);
1468*4882a593Smuzhiyun 		(void) __clone_and_map_simple_bio(ci, tio, len);
1469*4882a593Smuzhiyun 	}
1470*4882a593Smuzhiyun }
1471*4882a593Smuzhiyun 
__send_empty_flush(struct clone_info * ci)1472*4882a593Smuzhiyun static int __send_empty_flush(struct clone_info *ci)
1473*4882a593Smuzhiyun {
1474*4882a593Smuzhiyun 	unsigned target_nr = 0;
1475*4882a593Smuzhiyun 	struct dm_target *ti;
1476*4882a593Smuzhiyun 	struct bio flush_bio;
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun 	/*
1479*4882a593Smuzhiyun 	 * Use an on-stack bio for this; it's safe since we don't
1480*4882a593Smuzhiyun 	 * need to reference it after submit.  It's just used as
1481*4882a593Smuzhiyun 	 * the basis for the clone(s).
1482*4882a593Smuzhiyun 	 */
1483*4882a593Smuzhiyun 	bio_init(&flush_bio, NULL, 0);
1484*4882a593Smuzhiyun 	flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
1485*4882a593Smuzhiyun 	ci->bio = &flush_bio;
1486*4882a593Smuzhiyun 	ci->sector_count = 0;
1487*4882a593Smuzhiyun 
1488*4882a593Smuzhiyun 	/*
1489*4882a593Smuzhiyun 	 * Empty flush uses a statically initialized bio, as the base for
1490*4882a593Smuzhiyun 	 * cloning.  However, blkg association requires that a bdev is
1491*4882a593Smuzhiyun 	 * associated with a gendisk, which doesn't happen until the bdev is
1492*4882a593Smuzhiyun 	 * opened.  So, blkg association is done at issue time of the flush
1493*4882a593Smuzhiyun 	 * rather than when the device is created in alloc_dev().
1494*4882a593Smuzhiyun 	 */
1495*4882a593Smuzhiyun 	bio_set_dev(ci->bio, ci->io->md->bdev);
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun 	BUG_ON(bio_has_data(ci->bio));
1498*4882a593Smuzhiyun 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
1499*4882a593Smuzhiyun 		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
1500*4882a593Smuzhiyun 
1501*4882a593Smuzhiyun 	bio_uninit(ci->bio);
1502*4882a593Smuzhiyun 	return 0;
1503*4882a593Smuzhiyun }
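
/*
 * Illustrative sketch (not part of the original source): the clones made
 * above are driven by ti->num_flush_bios, which a target declares in its
 * constructor.  "example_flush_ctr" is a hypothetical name used only for
 * this example.
 */
static int example_flush_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	/* Ask DM core to send this target one clone of every empty flush. */
	ti->num_flush_bios = 1;
	return 0;
}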
1504*4882a593Smuzhiyun 
__clone_and_map_data_bio(struct clone_info * ci,struct dm_target * ti,sector_t sector,unsigned * len)1505*4882a593Smuzhiyun static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1506*4882a593Smuzhiyun 				    sector_t sector, unsigned *len)
1507*4882a593Smuzhiyun {
1508*4882a593Smuzhiyun 	struct bio *bio = ci->bio;
1509*4882a593Smuzhiyun 	struct dm_target_io *tio;
1510*4882a593Smuzhiyun 	int r;
1511*4882a593Smuzhiyun 
1512*4882a593Smuzhiyun 	tio = alloc_tio(ci, ti, 0, GFP_NOIO);
1513*4882a593Smuzhiyun 	tio->len_ptr = len;
1514*4882a593Smuzhiyun 	r = clone_bio(tio, bio, sector, *len);
1515*4882a593Smuzhiyun 	if (r < 0) {
1516*4882a593Smuzhiyun 		free_tio(tio);
1517*4882a593Smuzhiyun 		return r;
1518*4882a593Smuzhiyun 	}
1519*4882a593Smuzhiyun 	(void) __map_bio(tio);
1520*4882a593Smuzhiyun 
1521*4882a593Smuzhiyun 	return 0;
1522*4882a593Smuzhiyun }
1523*4882a593Smuzhiyun 
__send_changing_extent_only(struct clone_info * ci,struct dm_target * ti,unsigned num_bios)1524*4882a593Smuzhiyun static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
1525*4882a593Smuzhiyun 				       unsigned num_bios)
1526*4882a593Smuzhiyun {
1527*4882a593Smuzhiyun 	unsigned len;
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun 	/*
1530*4882a593Smuzhiyun 	 * Even though the device advertised support for this type of
1531*4882a593Smuzhiyun 	 * request, that does not mean every target supports it, and
1532*4882a593Smuzhiyun 	 * reconfiguration might also have changed that since the
1533*4882a593Smuzhiyun 	 * check was performed.
1534*4882a593Smuzhiyun 	 */
1535*4882a593Smuzhiyun 	if (!num_bios)
1536*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun 	len = min_t(sector_t, ci->sector_count,
1539*4882a593Smuzhiyun 		    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun 	__send_duplicate_bios(ci, ti, num_bios, &len);
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun 	ci->sector += len;
1544*4882a593Smuzhiyun 	ci->sector_count -= len;
1545*4882a593Smuzhiyun 
1546*4882a593Smuzhiyun 	return 0;
1547*4882a593Smuzhiyun }
1548*4882a593Smuzhiyun 
is_abnormal_io(struct bio * bio)1549*4882a593Smuzhiyun static bool is_abnormal_io(struct bio *bio)
1550*4882a593Smuzhiyun {
1551*4882a593Smuzhiyun 	bool r = false;
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun 	switch (bio_op(bio)) {
1554*4882a593Smuzhiyun 	case REQ_OP_DISCARD:
1555*4882a593Smuzhiyun 	case REQ_OP_SECURE_ERASE:
1556*4882a593Smuzhiyun 	case REQ_OP_WRITE_SAME:
1557*4882a593Smuzhiyun 	case REQ_OP_WRITE_ZEROES:
1558*4882a593Smuzhiyun 		r = true;
1559*4882a593Smuzhiyun 		break;
1560*4882a593Smuzhiyun 	}
1561*4882a593Smuzhiyun 
1562*4882a593Smuzhiyun 	return r;
1563*4882a593Smuzhiyun }
1564*4882a593Smuzhiyun 
__process_abnormal_io(struct clone_info * ci,struct dm_target * ti,int * result)1565*4882a593Smuzhiyun static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
1566*4882a593Smuzhiyun 				  int *result)
1567*4882a593Smuzhiyun {
1568*4882a593Smuzhiyun 	struct bio *bio = ci->bio;
1569*4882a593Smuzhiyun 	unsigned num_bios = 0;
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun 	switch (bio_op(bio)) {
1572*4882a593Smuzhiyun 	case REQ_OP_DISCARD:
1573*4882a593Smuzhiyun 		num_bios = ti->num_discard_bios;
1574*4882a593Smuzhiyun 		break;
1575*4882a593Smuzhiyun 	case REQ_OP_SECURE_ERASE:
1576*4882a593Smuzhiyun 		num_bios = ti->num_secure_erase_bios;
1577*4882a593Smuzhiyun 		break;
1578*4882a593Smuzhiyun 	case REQ_OP_WRITE_SAME:
1579*4882a593Smuzhiyun 		num_bios = ti->num_write_same_bios;
1580*4882a593Smuzhiyun 		break;
1581*4882a593Smuzhiyun 	case REQ_OP_WRITE_ZEROES:
1582*4882a593Smuzhiyun 		num_bios = ti->num_write_zeroes_bios;
1583*4882a593Smuzhiyun 		break;
1584*4882a593Smuzhiyun 	default:
1585*4882a593Smuzhiyun 		return false;
1586*4882a593Smuzhiyun 	}
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun 	*result = __send_changing_extent_only(ci, ti, num_bios);
1589*4882a593Smuzhiyun 	return true;
1590*4882a593Smuzhiyun }
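
/*
 * Illustrative sketch (not part of the original source): the num_*_bios
 * counters consulted above are declared by the target, typically in its
 * constructor.  "example_declare_abnormal_io" is a hypothetical helper
 * used only for this example; a target that passes discards and
 * WRITE ZEROES through might set:
 */
static void example_declare_abnormal_io(struct dm_target *ti)
{
	ti->num_discard_bios = 1;	/* REQ_OP_DISCARD reaches ->map() */
	ti->num_write_zeroes_bios = 1;	/* REQ_OP_WRITE_ZEROES reaches ->map() */
	/*
	 * Counters left at 0 (secure erase, write same) make those ops fail
	 * with -EOPNOTSUPP via __send_changing_extent_only().
	 */
}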
1591*4882a593Smuzhiyun 
1592*4882a593Smuzhiyun /*
1593*4882a593Smuzhiyun  * Select the correct strategy for processing a non-flush bio.
1594*4882a593Smuzhiyun  */
__split_and_process_non_flush(struct clone_info * ci)1595*4882a593Smuzhiyun static int __split_and_process_non_flush(struct clone_info *ci)
1596*4882a593Smuzhiyun {
1597*4882a593Smuzhiyun 	struct dm_target *ti;
1598*4882a593Smuzhiyun 	unsigned len;
1599*4882a593Smuzhiyun 	int r;
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun 	ti = dm_table_find_target(ci->map, ci->sector);
1602*4882a593Smuzhiyun 	if (!ti)
1603*4882a593Smuzhiyun 		return -EIO;
1604*4882a593Smuzhiyun 
1605*4882a593Smuzhiyun 	if (__process_abnormal_io(ci, ti, &r))
1606*4882a593Smuzhiyun 		return r;
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun 	len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun 	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
1611*4882a593Smuzhiyun 	if (r < 0)
1612*4882a593Smuzhiyun 		return r;
1613*4882a593Smuzhiyun 
1614*4882a593Smuzhiyun 	ci->sector += len;
1615*4882a593Smuzhiyun 	ci->sector_count -= len;
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun 	return 0;
1618*4882a593Smuzhiyun }
1619*4882a593Smuzhiyun 
init_clone_info(struct clone_info * ci,struct mapped_device * md,struct dm_table * map,struct bio * bio)1620*4882a593Smuzhiyun static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
1621*4882a593Smuzhiyun 			    struct dm_table *map, struct bio *bio)
1622*4882a593Smuzhiyun {
1623*4882a593Smuzhiyun 	ci->map = map;
1624*4882a593Smuzhiyun 	ci->io = alloc_io(md, bio);
1625*4882a593Smuzhiyun 	ci->sector = bio->bi_iter.bi_sector;
1626*4882a593Smuzhiyun }
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun #define __dm_part_stat_sub(part, field, subnd)	\
1629*4882a593Smuzhiyun 	(part_stat_get(part, field) -= (subnd))
1630*4882a593Smuzhiyun 
1631*4882a593Smuzhiyun /*
1632*4882a593Smuzhiyun  * Entry point to split a bio into clones and submit them to the targets.
1633*4882a593Smuzhiyun  */
__split_and_process_bio(struct mapped_device * md,struct dm_table * map,struct bio * bio)1634*4882a593Smuzhiyun static blk_qc_t __split_and_process_bio(struct mapped_device *md,
1635*4882a593Smuzhiyun 					struct dm_table *map, struct bio *bio)
1636*4882a593Smuzhiyun {
1637*4882a593Smuzhiyun 	struct clone_info ci;
1638*4882a593Smuzhiyun 	blk_qc_t ret = BLK_QC_T_NONE;
1639*4882a593Smuzhiyun 	int error = 0;
1640*4882a593Smuzhiyun 
1641*4882a593Smuzhiyun 	init_clone_info(&ci, md, map, bio);
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 	if (bio->bi_opf & REQ_PREFLUSH) {
1644*4882a593Smuzhiyun 		error = __send_empty_flush(&ci);
1645*4882a593Smuzhiyun 		/* dec_pending submits any data associated with flush */
1646*4882a593Smuzhiyun 	} else if (op_is_zone_mgmt(bio_op(bio))) {
1647*4882a593Smuzhiyun 		ci.bio = bio;
1648*4882a593Smuzhiyun 		ci.sector_count = 0;
1649*4882a593Smuzhiyun 		error = __split_and_process_non_flush(&ci);
1650*4882a593Smuzhiyun 	} else {
1651*4882a593Smuzhiyun 		ci.bio = bio;
1652*4882a593Smuzhiyun 		ci.sector_count = bio_sectors(bio);
1653*4882a593Smuzhiyun 		while (ci.sector_count && !error) {
1654*4882a593Smuzhiyun 			error = __split_and_process_non_flush(&ci);
1655*4882a593Smuzhiyun 			if (current->bio_list && ci.sector_count && !error) {
1656*4882a593Smuzhiyun 				/*
1657*4882a593Smuzhiyun 				 * Remainder must be passed to submit_bio_noacct()
1658*4882a593Smuzhiyun 				 * so that it gets handled *after* bios already submitted
1659*4882a593Smuzhiyun 				 * have been completely processed.
1660*4882a593Smuzhiyun 				 * We take a clone of the original to store in
1661*4882a593Smuzhiyun 				 * ci.io->orig_bio, to be used by end_io_acct()
1662*4882a593Smuzhiyun 				 * and by dec_pending() for completion handling.
1663*4882a593Smuzhiyun 				 */
1664*4882a593Smuzhiyun 				struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
1665*4882a593Smuzhiyun 							  GFP_NOIO, &md->queue->bio_split);
1666*4882a593Smuzhiyun 				ci.io->orig_bio = b;
1667*4882a593Smuzhiyun 
1668*4882a593Smuzhiyun 				/*
1669*4882a593Smuzhiyun 				 * Adjust IO stats for each split, otherwise upon queue
1670*4882a593Smuzhiyun 				 * reentry there will be redundant IO accounting.
1671*4882a593Smuzhiyun 				 * NOTE: this is a stop-gap fix, a proper fix involves
1672*4882a593Smuzhiyun 				 * significant refactoring of DM core's bio splitting
1673*4882a593Smuzhiyun 				 * (by eliminating DM's splitting and just using bio_split)
1674*4882a593Smuzhiyun 				 */
1675*4882a593Smuzhiyun 				part_stat_lock();
1676*4882a593Smuzhiyun 				__dm_part_stat_sub(&dm_disk(md)->part0,
1677*4882a593Smuzhiyun 						   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
1678*4882a593Smuzhiyun 				part_stat_unlock();
1679*4882a593Smuzhiyun 
1680*4882a593Smuzhiyun 				bio_chain(b, bio);
1681*4882a593Smuzhiyun 				trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
1682*4882a593Smuzhiyun 				ret = submit_bio_noacct(bio);
1683*4882a593Smuzhiyun 				break;
1684*4882a593Smuzhiyun 			}
1685*4882a593Smuzhiyun 		}
1686*4882a593Smuzhiyun 	}
1687*4882a593Smuzhiyun 
1688*4882a593Smuzhiyun 	/* drop the extra reference count */
1689*4882a593Smuzhiyun 	dec_pending(ci.io, errno_to_blk_status(error));
1690*4882a593Smuzhiyun 	return ret;
1691*4882a593Smuzhiyun }
1692*4882a593Smuzhiyun 
dm_submit_bio(struct bio * bio)1693*4882a593Smuzhiyun static blk_qc_t dm_submit_bio(struct bio *bio)
1694*4882a593Smuzhiyun {
1695*4882a593Smuzhiyun 	struct mapped_device *md = bio->bi_disk->private_data;
1696*4882a593Smuzhiyun 	blk_qc_t ret = BLK_QC_T_NONE;
1697*4882a593Smuzhiyun 	int srcu_idx;
1698*4882a593Smuzhiyun 	struct dm_table *map;
1699*4882a593Smuzhiyun 
1700*4882a593Smuzhiyun 	map = dm_get_live_table(md, &srcu_idx);
1701*4882a593Smuzhiyun 
1702*4882a593Smuzhiyun 	/* If suspended, or map not yet available, queue this IO for later */
1703*4882a593Smuzhiyun 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
1704*4882a593Smuzhiyun 	    unlikely(!map)) {
1705*4882a593Smuzhiyun 		if (bio->bi_opf & REQ_NOWAIT)
1706*4882a593Smuzhiyun 			bio_wouldblock_error(bio);
1707*4882a593Smuzhiyun 		else if (bio->bi_opf & REQ_RAHEAD)
1708*4882a593Smuzhiyun 			bio_io_error(bio);
1709*4882a593Smuzhiyun 		else
1710*4882a593Smuzhiyun 			queue_io(md, bio);
1711*4882a593Smuzhiyun 		goto out;
1712*4882a593Smuzhiyun 	}
1713*4882a593Smuzhiyun 
1714*4882a593Smuzhiyun 	/*
1715*4882a593Smuzhiyun 	 * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc.),
1716*4882a593Smuzhiyun 	 * otherwise the associated queue_limits won't be imposed.
1717*4882a593Smuzhiyun 	 */
1718*4882a593Smuzhiyun 	if (is_abnormal_io(bio))
1719*4882a593Smuzhiyun 		blk_queue_split(&bio);
1720*4882a593Smuzhiyun 
1721*4882a593Smuzhiyun 	ret = __split_and_process_bio(md, map, bio);
1722*4882a593Smuzhiyun out:
1723*4882a593Smuzhiyun 	dm_put_live_table(md, srcu_idx);
1724*4882a593Smuzhiyun 	return ret;
1725*4882a593Smuzhiyun }
1726*4882a593Smuzhiyun 
1727*4882a593Smuzhiyun /*-----------------------------------------------------------------
1728*4882a593Smuzhiyun  * An IDR is used to keep track of allocated minor numbers.
1729*4882a593Smuzhiyun  *---------------------------------------------------------------*/
free_minor(int minor)1730*4882a593Smuzhiyun static void free_minor(int minor)
1731*4882a593Smuzhiyun {
1732*4882a593Smuzhiyun 	spin_lock(&_minor_lock);
1733*4882a593Smuzhiyun 	idr_remove(&_minor_idr, minor);
1734*4882a593Smuzhiyun 	spin_unlock(&_minor_lock);
1735*4882a593Smuzhiyun }
1736*4882a593Smuzhiyun 
1737*4882a593Smuzhiyun /*
1738*4882a593Smuzhiyun  * See if the device with a specific minor # is free.
1739*4882a593Smuzhiyun  */
specific_minor(int minor)1740*4882a593Smuzhiyun static int specific_minor(int minor)
1741*4882a593Smuzhiyun {
1742*4882a593Smuzhiyun 	int r;
1743*4882a593Smuzhiyun 
1744*4882a593Smuzhiyun 	if (minor >= (1 << MINORBITS))
1745*4882a593Smuzhiyun 		return -EINVAL;
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun 	idr_preload(GFP_KERNEL);
1748*4882a593Smuzhiyun 	spin_lock(&_minor_lock);
1749*4882a593Smuzhiyun 
1750*4882a593Smuzhiyun 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
1751*4882a593Smuzhiyun 
1752*4882a593Smuzhiyun 	spin_unlock(&_minor_lock);
1753*4882a593Smuzhiyun 	idr_preload_end();
1754*4882a593Smuzhiyun 	if (r < 0)
1755*4882a593Smuzhiyun 		return r == -ENOSPC ? -EBUSY : r;
1756*4882a593Smuzhiyun 	return 0;
1757*4882a593Smuzhiyun }
1758*4882a593Smuzhiyun 
next_free_minor(int * minor)1759*4882a593Smuzhiyun static int next_free_minor(int *minor)
1760*4882a593Smuzhiyun {
1761*4882a593Smuzhiyun 	int r;
1762*4882a593Smuzhiyun 
1763*4882a593Smuzhiyun 	idr_preload(GFP_KERNEL);
1764*4882a593Smuzhiyun 	spin_lock(&_minor_lock);
1765*4882a593Smuzhiyun 
1766*4882a593Smuzhiyun 	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun 	spin_unlock(&_minor_lock);
1769*4882a593Smuzhiyun 	idr_preload_end();
1770*4882a593Smuzhiyun 	if (r < 0)
1771*4882a593Smuzhiyun 		return r;
1772*4882a593Smuzhiyun 	*minor = r;
1773*4882a593Smuzhiyun 	return 0;
1774*4882a593Smuzhiyun }
1775*4882a593Smuzhiyun 
1776*4882a593Smuzhiyun static const struct block_device_operations dm_blk_dops;
1777*4882a593Smuzhiyun static const struct block_device_operations dm_rq_blk_dops;
1778*4882a593Smuzhiyun static const struct dax_operations dm_dax_ops;
1779*4882a593Smuzhiyun 
1780*4882a593Smuzhiyun static void dm_wq_work(struct work_struct *work);
1781*4882a593Smuzhiyun 
1782*4882a593Smuzhiyun #ifdef CONFIG_BLK_INLINE_ENCRYPTION
dm_queue_destroy_keyslot_manager(struct request_queue * q)1783*4882a593Smuzhiyun static void dm_queue_destroy_keyslot_manager(struct request_queue *q)
1784*4882a593Smuzhiyun {
1785*4882a593Smuzhiyun 	dm_destroy_keyslot_manager(q->ksm);
1786*4882a593Smuzhiyun }
1787*4882a593Smuzhiyun 
1788*4882a593Smuzhiyun #else /* CONFIG_BLK_INLINE_ENCRYPTION */
1789*4882a593Smuzhiyun 
dm_queue_destroy_keyslot_manager(struct request_queue * q)1790*4882a593Smuzhiyun static inline void dm_queue_destroy_keyslot_manager(struct request_queue *q)
1791*4882a593Smuzhiyun {
1792*4882a593Smuzhiyun }
1793*4882a593Smuzhiyun #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
1794*4882a593Smuzhiyun 
cleanup_mapped_device(struct mapped_device * md)1795*4882a593Smuzhiyun static void cleanup_mapped_device(struct mapped_device *md)
1796*4882a593Smuzhiyun {
1797*4882a593Smuzhiyun 	if (md->wq)
1798*4882a593Smuzhiyun 		destroy_workqueue(md->wq);
1799*4882a593Smuzhiyun 	bioset_exit(&md->bs);
1800*4882a593Smuzhiyun 	bioset_exit(&md->io_bs);
1801*4882a593Smuzhiyun 
1802*4882a593Smuzhiyun 	if (md->dax_dev) {
1803*4882a593Smuzhiyun 		kill_dax(md->dax_dev);
1804*4882a593Smuzhiyun 		put_dax(md->dax_dev);
1805*4882a593Smuzhiyun 		md->dax_dev = NULL;
1806*4882a593Smuzhiyun 	}
1807*4882a593Smuzhiyun 
1808*4882a593Smuzhiyun 	if (md->disk) {
1809*4882a593Smuzhiyun 		spin_lock(&_minor_lock);
1810*4882a593Smuzhiyun 		md->disk->private_data = NULL;
1811*4882a593Smuzhiyun 		spin_unlock(&_minor_lock);
1812*4882a593Smuzhiyun 		del_gendisk(md->disk);
1813*4882a593Smuzhiyun 		put_disk(md->disk);
1814*4882a593Smuzhiyun 	}
1815*4882a593Smuzhiyun 
1816*4882a593Smuzhiyun 	if (md->queue) {
1817*4882a593Smuzhiyun 		dm_queue_destroy_keyslot_manager(md->queue);
1818*4882a593Smuzhiyun 		blk_cleanup_queue(md->queue);
1819*4882a593Smuzhiyun 	}
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun 	cleanup_srcu_struct(&md->io_barrier);
1822*4882a593Smuzhiyun 
1823*4882a593Smuzhiyun 	if (md->bdev) {
1824*4882a593Smuzhiyun 		bdput(md->bdev);
1825*4882a593Smuzhiyun 		md->bdev = NULL;
1826*4882a593Smuzhiyun 	}
1827*4882a593Smuzhiyun 
1828*4882a593Smuzhiyun 	mutex_destroy(&md->suspend_lock);
1829*4882a593Smuzhiyun 	mutex_destroy(&md->type_lock);
1830*4882a593Smuzhiyun 	mutex_destroy(&md->table_devices_lock);
1831*4882a593Smuzhiyun 	mutex_destroy(&md->swap_bios_lock);
1832*4882a593Smuzhiyun 
1833*4882a593Smuzhiyun 	dm_mq_cleanup_mapped_device(md);
1834*4882a593Smuzhiyun }
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun /*
1837*4882a593Smuzhiyun  * Allocate and initialise a blank device with a given minor.
1838*4882a593Smuzhiyun  */
alloc_dev(int minor)1839*4882a593Smuzhiyun static struct mapped_device *alloc_dev(int minor)
1840*4882a593Smuzhiyun {
1841*4882a593Smuzhiyun 	int r, numa_node_id = dm_get_numa_node();
1842*4882a593Smuzhiyun 	struct mapped_device *md;
1843*4882a593Smuzhiyun 	void *old_md;
1844*4882a593Smuzhiyun 
1845*4882a593Smuzhiyun 	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
1846*4882a593Smuzhiyun 	if (!md) {
1847*4882a593Smuzhiyun 		DMWARN("unable to allocate device, out of memory.");
1848*4882a593Smuzhiyun 		return NULL;
1849*4882a593Smuzhiyun 	}
1850*4882a593Smuzhiyun 
1851*4882a593Smuzhiyun 	if (!try_module_get(THIS_MODULE))
1852*4882a593Smuzhiyun 		goto bad_module_get;
1853*4882a593Smuzhiyun 
1854*4882a593Smuzhiyun 	/* get a minor number for the dev */
1855*4882a593Smuzhiyun 	if (minor == DM_ANY_MINOR)
1856*4882a593Smuzhiyun 		r = next_free_minor(&minor);
1857*4882a593Smuzhiyun 	else
1858*4882a593Smuzhiyun 		r = specific_minor(minor);
1859*4882a593Smuzhiyun 	if (r < 0)
1860*4882a593Smuzhiyun 		goto bad_minor;
1861*4882a593Smuzhiyun 
1862*4882a593Smuzhiyun 	r = init_srcu_struct(&md->io_barrier);
1863*4882a593Smuzhiyun 	if (r < 0)
1864*4882a593Smuzhiyun 		goto bad_io_barrier;
1865*4882a593Smuzhiyun 
1866*4882a593Smuzhiyun 	md->numa_node_id = numa_node_id;
1867*4882a593Smuzhiyun 	md->init_tio_pdu = false;
1868*4882a593Smuzhiyun 	md->type = DM_TYPE_NONE;
1869*4882a593Smuzhiyun 	mutex_init(&md->suspend_lock);
1870*4882a593Smuzhiyun 	mutex_init(&md->type_lock);
1871*4882a593Smuzhiyun 	mutex_init(&md->table_devices_lock);
1872*4882a593Smuzhiyun 	spin_lock_init(&md->deferred_lock);
1873*4882a593Smuzhiyun 	atomic_set(&md->holders, 1);
1874*4882a593Smuzhiyun 	atomic_set(&md->open_count, 0);
1875*4882a593Smuzhiyun 	atomic_set(&md->event_nr, 0);
1876*4882a593Smuzhiyun 	atomic_set(&md->uevent_seq, 0);
1877*4882a593Smuzhiyun 	INIT_LIST_HEAD(&md->uevent_list);
1878*4882a593Smuzhiyun 	INIT_LIST_HEAD(&md->table_devices);
1879*4882a593Smuzhiyun 	spin_lock_init(&md->uevent_lock);
1880*4882a593Smuzhiyun 
1881*4882a593Smuzhiyun 	/*
1882*4882a593Smuzhiyun 	 * Default to bio-based until the DM table is loaded and md->type is
1883*4882a593Smuzhiyun 	 * established.  If a request-based table is loaded, blk-mq will
1884*4882a593Smuzhiyun 	 * override accordingly.
1885*4882a593Smuzhiyun 	 */
1886*4882a593Smuzhiyun 	md->queue = blk_alloc_queue(numa_node_id);
1887*4882a593Smuzhiyun 	if (!md->queue)
1888*4882a593Smuzhiyun 		goto bad;
1889*4882a593Smuzhiyun 
1890*4882a593Smuzhiyun 	md->disk = alloc_disk_node(1, md->numa_node_id);
1891*4882a593Smuzhiyun 	if (!md->disk)
1892*4882a593Smuzhiyun 		goto bad;
1893*4882a593Smuzhiyun 
1894*4882a593Smuzhiyun 	init_waitqueue_head(&md->wait);
1895*4882a593Smuzhiyun 	INIT_WORK(&md->work, dm_wq_work);
1896*4882a593Smuzhiyun 	init_waitqueue_head(&md->eventq);
1897*4882a593Smuzhiyun 	init_completion(&md->kobj_holder.completion);
1898*4882a593Smuzhiyun 
1899*4882a593Smuzhiyun 	md->swap_bios = get_swap_bios();
1900*4882a593Smuzhiyun 	sema_init(&md->swap_bios_semaphore, md->swap_bios);
1901*4882a593Smuzhiyun 	mutex_init(&md->swap_bios_lock);
1902*4882a593Smuzhiyun 
1903*4882a593Smuzhiyun 	md->disk->major = _major;
1904*4882a593Smuzhiyun 	md->disk->first_minor = minor;
1905*4882a593Smuzhiyun 	md->disk->fops = &dm_blk_dops;
1906*4882a593Smuzhiyun 	md->disk->queue = md->queue;
1907*4882a593Smuzhiyun 	md->disk->private_data = md;
1908*4882a593Smuzhiyun 	sprintf(md->disk->disk_name, "dm-%d", minor);
1909*4882a593Smuzhiyun 
1910*4882a593Smuzhiyun 	if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
1911*4882a593Smuzhiyun 		md->dax_dev = alloc_dax(md, md->disk->disk_name,
1912*4882a593Smuzhiyun 					&dm_dax_ops, 0);
1913*4882a593Smuzhiyun 		if (IS_ERR(md->dax_dev)) {
1914*4882a593Smuzhiyun 			md->dax_dev = NULL;
1915*4882a593Smuzhiyun 			goto bad;
1916*4882a593Smuzhiyun 		}
1917*4882a593Smuzhiyun 	}
1918*4882a593Smuzhiyun 
1919*4882a593Smuzhiyun 	add_disk_no_queue_reg(md->disk);
1920*4882a593Smuzhiyun 	format_dev_t(md->name, MKDEV(_major, minor));
1921*4882a593Smuzhiyun 
1922*4882a593Smuzhiyun 	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
1923*4882a593Smuzhiyun 	if (!md->wq)
1924*4882a593Smuzhiyun 		goto bad;
1925*4882a593Smuzhiyun 
1926*4882a593Smuzhiyun 	md->bdev = bdget_disk(md->disk, 0);
1927*4882a593Smuzhiyun 	if (!md->bdev)
1928*4882a593Smuzhiyun 		goto bad;
1929*4882a593Smuzhiyun 
1930*4882a593Smuzhiyun 	dm_stats_init(&md->stats);
1931*4882a593Smuzhiyun 
1932*4882a593Smuzhiyun 	/* Populate the mapping, nobody knows we exist yet */
1933*4882a593Smuzhiyun 	spin_lock(&_minor_lock);
1934*4882a593Smuzhiyun 	old_md = idr_replace(&_minor_idr, md, minor);
1935*4882a593Smuzhiyun 	spin_unlock(&_minor_lock);
1936*4882a593Smuzhiyun 
1937*4882a593Smuzhiyun 	BUG_ON(old_md != MINOR_ALLOCED);
1938*4882a593Smuzhiyun 
1939*4882a593Smuzhiyun 	return md;
1940*4882a593Smuzhiyun 
1941*4882a593Smuzhiyun bad:
1942*4882a593Smuzhiyun 	cleanup_mapped_device(md);
1943*4882a593Smuzhiyun bad_io_barrier:
1944*4882a593Smuzhiyun 	free_minor(minor);
1945*4882a593Smuzhiyun bad_minor:
1946*4882a593Smuzhiyun 	module_put(THIS_MODULE);
1947*4882a593Smuzhiyun bad_module_get:
1948*4882a593Smuzhiyun 	kvfree(md);
1949*4882a593Smuzhiyun 	return NULL;
1950*4882a593Smuzhiyun }
1951*4882a593Smuzhiyun 
1952*4882a593Smuzhiyun static void unlock_fs(struct mapped_device *md);
1953*4882a593Smuzhiyun 
free_dev(struct mapped_device * md)1954*4882a593Smuzhiyun static void free_dev(struct mapped_device *md)
1955*4882a593Smuzhiyun {
1956*4882a593Smuzhiyun 	int minor = MINOR(disk_devt(md->disk));
1957*4882a593Smuzhiyun 
1958*4882a593Smuzhiyun 	unlock_fs(md);
1959*4882a593Smuzhiyun 
1960*4882a593Smuzhiyun 	cleanup_mapped_device(md);
1961*4882a593Smuzhiyun 
1962*4882a593Smuzhiyun 	free_table_devices(&md->table_devices);
1963*4882a593Smuzhiyun 	dm_stats_cleanup(&md->stats);
1964*4882a593Smuzhiyun 	free_minor(minor);
1965*4882a593Smuzhiyun 
1966*4882a593Smuzhiyun 	module_put(THIS_MODULE);
1967*4882a593Smuzhiyun 	kvfree(md);
1968*4882a593Smuzhiyun }
1969*4882a593Smuzhiyun 
__bind_mempools(struct mapped_device * md,struct dm_table * t)1970*4882a593Smuzhiyun static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
1971*4882a593Smuzhiyun {
1972*4882a593Smuzhiyun 	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
1973*4882a593Smuzhiyun 	int ret = 0;
1974*4882a593Smuzhiyun 
1975*4882a593Smuzhiyun 	if (dm_table_bio_based(t)) {
1976*4882a593Smuzhiyun 		/*
1977*4882a593Smuzhiyun 		 * The md may already have mempools that need changing.
1978*4882a593Smuzhiyun 		 * If so, reload the biosets because front_pad may have
1979*4882a593Smuzhiyun 		 * changed when a different table was loaded.
1980*4882a593Smuzhiyun 		 */
1981*4882a593Smuzhiyun 		bioset_exit(&md->bs);
1982*4882a593Smuzhiyun 		bioset_exit(&md->io_bs);
1983*4882a593Smuzhiyun 
1984*4882a593Smuzhiyun 	} else if (bioset_initialized(&md->bs)) {
1985*4882a593Smuzhiyun 		/*
1986*4882a593Smuzhiyun 		 * There's no need to reload with request-based dm
1987*4882a593Smuzhiyun 		 * because the size of front_pad doesn't change.
1988*4882a593Smuzhiyun 		 * Note for the future: if you do reload the bioset,
1989*4882a593Smuzhiyun 		 * prepped requests in the queue may still refer to
1990*4882a593Smuzhiyun 		 * bios from the old bioset, so you must walk through
1991*4882a593Smuzhiyun 		 * the queue and unprep them.
1992*4882a593Smuzhiyun 		 */
1993*4882a593Smuzhiyun 		goto out;
1994*4882a593Smuzhiyun 	}
1995*4882a593Smuzhiyun 
1996*4882a593Smuzhiyun 	BUG_ON(!p ||
1997*4882a593Smuzhiyun 	       bioset_initialized(&md->bs) ||
1998*4882a593Smuzhiyun 	       bioset_initialized(&md->io_bs));
1999*4882a593Smuzhiyun 
2000*4882a593Smuzhiyun 	ret = bioset_init_from_src(&md->bs, &p->bs);
2001*4882a593Smuzhiyun 	if (ret)
2002*4882a593Smuzhiyun 		goto out;
2003*4882a593Smuzhiyun 	ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
2004*4882a593Smuzhiyun 	if (ret)
2005*4882a593Smuzhiyun 		bioset_exit(&md->bs);
2006*4882a593Smuzhiyun out:
2007*4882a593Smuzhiyun 	/* mempool bind completed, no longer need any mempools in the table */
2008*4882a593Smuzhiyun 	dm_table_free_md_mempools(t);
2009*4882a593Smuzhiyun 	return ret;
2010*4882a593Smuzhiyun }
2011*4882a593Smuzhiyun 
2012*4882a593Smuzhiyun /*
2013*4882a593Smuzhiyun  * Bind a table to the device.
2014*4882a593Smuzhiyun  */
event_callback(void * context)2015*4882a593Smuzhiyun static void event_callback(void *context)
2016*4882a593Smuzhiyun {
2017*4882a593Smuzhiyun 	unsigned long flags;
2018*4882a593Smuzhiyun 	LIST_HEAD(uevents);
2019*4882a593Smuzhiyun 	struct mapped_device *md = (struct mapped_device *) context;
2020*4882a593Smuzhiyun 
2021*4882a593Smuzhiyun 	spin_lock_irqsave(&md->uevent_lock, flags);
2022*4882a593Smuzhiyun 	list_splice_init(&md->uevent_list, &uevents);
2023*4882a593Smuzhiyun 	spin_unlock_irqrestore(&md->uevent_lock, flags);
2024*4882a593Smuzhiyun 
2025*4882a593Smuzhiyun 	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
2026*4882a593Smuzhiyun 
2027*4882a593Smuzhiyun 	atomic_inc(&md->event_nr);
2028*4882a593Smuzhiyun 	wake_up(&md->eventq);
2029*4882a593Smuzhiyun 	dm_issue_global_event();
2030*4882a593Smuzhiyun }
2031*4882a593Smuzhiyun 
2032*4882a593Smuzhiyun /*
2033*4882a593Smuzhiyun  * Returns old map, which caller must destroy.
2034*4882a593Smuzhiyun  */
__bind(struct mapped_device * md,struct dm_table * t,struct queue_limits * limits)2035*4882a593Smuzhiyun static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2036*4882a593Smuzhiyun 			       struct queue_limits *limits)
2037*4882a593Smuzhiyun {
2038*4882a593Smuzhiyun 	struct dm_table *old_map;
2039*4882a593Smuzhiyun 	struct request_queue *q = md->queue;
2040*4882a593Smuzhiyun 	bool request_based = dm_table_request_based(t);
2041*4882a593Smuzhiyun 	sector_t size;
2042*4882a593Smuzhiyun 	int ret;
2043*4882a593Smuzhiyun 
2044*4882a593Smuzhiyun 	lockdep_assert_held(&md->suspend_lock);
2045*4882a593Smuzhiyun 
2046*4882a593Smuzhiyun 	size = dm_table_get_size(t);
2047*4882a593Smuzhiyun 
2048*4882a593Smuzhiyun 	/*
2049*4882a593Smuzhiyun 	 * Wipe any geometry if the size of the table changed.
2050*4882a593Smuzhiyun 	 */
2051*4882a593Smuzhiyun 	if (size != dm_get_size(md))
2052*4882a593Smuzhiyun 		memset(&md->geometry, 0, sizeof(md->geometry));
2053*4882a593Smuzhiyun 
2054*4882a593Smuzhiyun 	set_capacity(md->disk, size);
2055*4882a593Smuzhiyun 	bd_set_nr_sectors(md->bdev, size);
2056*4882a593Smuzhiyun 
2057*4882a593Smuzhiyun 	dm_table_event_callback(t, event_callback, md);
2058*4882a593Smuzhiyun 
2059*4882a593Smuzhiyun 	/*
2060*4882a593Smuzhiyun 	 * If the old table type wasn't request-based, the queue hasn't been
2061*4882a593Smuzhiyun 	 * stopped during suspension, so stop it here to prevent I/O from
2062*4882a593Smuzhiyun 	 * being mapped before the resume.
2063*4882a593Smuzhiyun 	 * This must be done before setting the queue restrictions, because
2064*4882a593Smuzhiyun 	 * request-based dm may start running as soon as they are set.
2065*4882a593Smuzhiyun 	 */
2066*4882a593Smuzhiyun 	if (request_based)
2067*4882a593Smuzhiyun 		dm_stop_queue(q);
2068*4882a593Smuzhiyun 
2069*4882a593Smuzhiyun 	if (request_based) {
2070*4882a593Smuzhiyun 		/*
2071*4882a593Smuzhiyun 		 * Leverage the fact that request-based DM targets are
2072*4882a593Smuzhiyun 		 * immutable singletons - used to optimize dm_mq_queue_rq.
2073*4882a593Smuzhiyun 		 */
2074*4882a593Smuzhiyun 		md->immutable_target = dm_table_get_immutable_target(t);
2075*4882a593Smuzhiyun 	}
2076*4882a593Smuzhiyun 
2077*4882a593Smuzhiyun 	ret = __bind_mempools(md, t);
2078*4882a593Smuzhiyun 	if (ret) {
2079*4882a593Smuzhiyun 		old_map = ERR_PTR(ret);
2080*4882a593Smuzhiyun 		goto out;
2081*4882a593Smuzhiyun 	}
2082*4882a593Smuzhiyun 
2083*4882a593Smuzhiyun 	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2084*4882a593Smuzhiyun 	rcu_assign_pointer(md->map, (void *)t);
2085*4882a593Smuzhiyun 	md->immutable_target_type = dm_table_get_immutable_target_type(t);
2086*4882a593Smuzhiyun 
2087*4882a593Smuzhiyun 	dm_table_set_restrictions(t, q, limits);
2088*4882a593Smuzhiyun 	if (old_map)
2089*4882a593Smuzhiyun 		dm_sync_table(md);
2090*4882a593Smuzhiyun 
2091*4882a593Smuzhiyun out:
2092*4882a593Smuzhiyun 	return old_map;
2093*4882a593Smuzhiyun }
2094*4882a593Smuzhiyun 
2095*4882a593Smuzhiyun /*
2096*4882a593Smuzhiyun  * Returns unbound table for the caller to free.
2097*4882a593Smuzhiyun  */
__unbind(struct mapped_device * md)2098*4882a593Smuzhiyun static struct dm_table *__unbind(struct mapped_device *md)
2099*4882a593Smuzhiyun {
2100*4882a593Smuzhiyun 	struct dm_table *map = rcu_dereference_protected(md->map, 1);
2101*4882a593Smuzhiyun 
2102*4882a593Smuzhiyun 	if (!map)
2103*4882a593Smuzhiyun 		return NULL;
2104*4882a593Smuzhiyun 
2105*4882a593Smuzhiyun 	dm_table_event_callback(map, NULL, NULL);
2106*4882a593Smuzhiyun 	RCU_INIT_POINTER(md->map, NULL);
2107*4882a593Smuzhiyun 	dm_sync_table(md);
2108*4882a593Smuzhiyun 
2109*4882a593Smuzhiyun 	return map;
2110*4882a593Smuzhiyun }
2111*4882a593Smuzhiyun 
2112*4882a593Smuzhiyun /*
2113*4882a593Smuzhiyun  * Constructor for a new device.
2114*4882a593Smuzhiyun  */
dm_create(int minor,struct mapped_device ** result)2115*4882a593Smuzhiyun int dm_create(int minor, struct mapped_device **result)
2116*4882a593Smuzhiyun {
2117*4882a593Smuzhiyun 	int r;
2118*4882a593Smuzhiyun 	struct mapped_device *md;
2119*4882a593Smuzhiyun 
2120*4882a593Smuzhiyun 	md = alloc_dev(minor);
2121*4882a593Smuzhiyun 	if (!md)
2122*4882a593Smuzhiyun 		return -ENXIO;
2123*4882a593Smuzhiyun 
2124*4882a593Smuzhiyun 	r = dm_sysfs_init(md);
2125*4882a593Smuzhiyun 	if (r) {
2126*4882a593Smuzhiyun 		free_dev(md);
2127*4882a593Smuzhiyun 		return r;
2128*4882a593Smuzhiyun 	}
2129*4882a593Smuzhiyun 
2130*4882a593Smuzhiyun 	*result = md;
2131*4882a593Smuzhiyun 	return 0;
2132*4882a593Smuzhiyun }
2133*4882a593Smuzhiyun 
2134*4882a593Smuzhiyun /*
2135*4882a593Smuzhiyun  * Functions to manage md->type.
2136*4882a593Smuzhiyun  * All callers are required to hold md->type_lock.
2137*4882a593Smuzhiyun  */
dm_lock_md_type(struct mapped_device * md)2138*4882a593Smuzhiyun void dm_lock_md_type(struct mapped_device *md)
2139*4882a593Smuzhiyun {
2140*4882a593Smuzhiyun 	mutex_lock(&md->type_lock);
2141*4882a593Smuzhiyun }
2142*4882a593Smuzhiyun 
dm_unlock_md_type(struct mapped_device * md)2143*4882a593Smuzhiyun void dm_unlock_md_type(struct mapped_device *md)
2144*4882a593Smuzhiyun {
2145*4882a593Smuzhiyun 	mutex_unlock(&md->type_lock);
2146*4882a593Smuzhiyun }
2147*4882a593Smuzhiyun 
dm_set_md_type(struct mapped_device * md,enum dm_queue_mode type)2148*4882a593Smuzhiyun void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
2149*4882a593Smuzhiyun {
2150*4882a593Smuzhiyun 	BUG_ON(!mutex_is_locked(&md->type_lock));
2151*4882a593Smuzhiyun 	md->type = type;
2152*4882a593Smuzhiyun }
2153*4882a593Smuzhiyun 
dm_get_md_type(struct mapped_device * md)2154*4882a593Smuzhiyun enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
2155*4882a593Smuzhiyun {
2156*4882a593Smuzhiyun 	return md->type;
2157*4882a593Smuzhiyun }
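
/*
 * Illustrative sketch (not part of the original source): callers such as
 * the DM ioctl code are expected to bracket type changes with the lock
 * above, roughly as follows.  "example_fixup_type" is a hypothetical name
 * used only for this example.
 */
static void example_fixup_type(struct mapped_device *md)
{
	dm_lock_md_type(md);
	if (dm_get_md_type(md) == DM_TYPE_NONE)
		dm_set_md_type(md, DM_TYPE_BIO_BASED);
	dm_unlock_md_type(md);
}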
2158*4882a593Smuzhiyun 
dm_get_immutable_target_type(struct mapped_device * md)2159*4882a593Smuzhiyun struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2160*4882a593Smuzhiyun {
2161*4882a593Smuzhiyun 	return md->immutable_target_type;
2162*4882a593Smuzhiyun }
2163*4882a593Smuzhiyun 
2164*4882a593Smuzhiyun /*
2165*4882a593Smuzhiyun  * The queue_limits are only valid as long as you have a reference
2166*4882a593Smuzhiyun  * count on 'md'.
2167*4882a593Smuzhiyun  */
dm_get_queue_limits(struct mapped_device * md)2168*4882a593Smuzhiyun struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2169*4882a593Smuzhiyun {
2170*4882a593Smuzhiyun 	BUG_ON(!atomic_read(&md->holders));
2171*4882a593Smuzhiyun 	return &md->queue->limits;
2172*4882a593Smuzhiyun }
2173*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_get_queue_limits);
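
/*
 * Illustrative sketch (not part of the original source): because the
 * limits are only valid while a reference on 'md' is held, a hypothetical
 * caller pairs dm_get_md() (which takes a reference) with dm_put() around
 * any use of them.  "example_logical_block_size" is an assumed name.
 */
static unsigned int example_logical_block_size(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);
	unsigned int lbs = 0;

	if (md) {
		lbs = dm_get_queue_limits(md)->logical_block_size;
		dm_put(md);
	}
	return lbs;
}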
2174*4882a593Smuzhiyun 
2175*4882a593Smuzhiyun /*
2176*4882a593Smuzhiyun  * Setup the DM device's queue based on md's type
2177*4882a593Smuzhiyun  */
dm_setup_md_queue(struct mapped_device * md,struct dm_table * t)2178*4882a593Smuzhiyun int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
2179*4882a593Smuzhiyun {
2180*4882a593Smuzhiyun 	int r;
2181*4882a593Smuzhiyun 	struct queue_limits limits;
2182*4882a593Smuzhiyun 	enum dm_queue_mode type = dm_get_md_type(md);
2183*4882a593Smuzhiyun 
2184*4882a593Smuzhiyun 	switch (type) {
2185*4882a593Smuzhiyun 	case DM_TYPE_REQUEST_BASED:
2186*4882a593Smuzhiyun 		md->disk->fops = &dm_rq_blk_dops;
2187*4882a593Smuzhiyun 		r = dm_mq_init_request_queue(md, t);
2188*4882a593Smuzhiyun 		if (r) {
2189*4882a593Smuzhiyun 			DMERR("Cannot initialize queue for request-based dm mapped device");
2190*4882a593Smuzhiyun 			return r;
2191*4882a593Smuzhiyun 		}
2192*4882a593Smuzhiyun 		break;
2193*4882a593Smuzhiyun 	case DM_TYPE_BIO_BASED:
2194*4882a593Smuzhiyun 	case DM_TYPE_DAX_BIO_BASED:
2195*4882a593Smuzhiyun 		break;
2196*4882a593Smuzhiyun 	case DM_TYPE_NONE:
2197*4882a593Smuzhiyun 		WARN_ON_ONCE(true);
2198*4882a593Smuzhiyun 		break;
2199*4882a593Smuzhiyun 	}
2200*4882a593Smuzhiyun 
2201*4882a593Smuzhiyun 	r = dm_calculate_queue_limits(t, &limits);
2202*4882a593Smuzhiyun 	if (r) {
2203*4882a593Smuzhiyun 		DMERR("Cannot calculate initial queue limits");
2204*4882a593Smuzhiyun 		return r;
2205*4882a593Smuzhiyun 	}
2206*4882a593Smuzhiyun 	dm_table_set_restrictions(t, md->queue, &limits);
2207*4882a593Smuzhiyun 	blk_register_queue(md->disk);
2208*4882a593Smuzhiyun 
2209*4882a593Smuzhiyun 	return 0;
2210*4882a593Smuzhiyun }
2211*4882a593Smuzhiyun 
dm_get_md(dev_t dev)2212*4882a593Smuzhiyun struct mapped_device *dm_get_md(dev_t dev)
2213*4882a593Smuzhiyun {
2214*4882a593Smuzhiyun 	struct mapped_device *md;
2215*4882a593Smuzhiyun 	unsigned minor = MINOR(dev);
2216*4882a593Smuzhiyun 
2217*4882a593Smuzhiyun 	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2218*4882a593Smuzhiyun 		return NULL;
2219*4882a593Smuzhiyun 
2220*4882a593Smuzhiyun 	spin_lock(&_minor_lock);
2221*4882a593Smuzhiyun 
2222*4882a593Smuzhiyun 	md = idr_find(&_minor_idr, minor);
2223*4882a593Smuzhiyun 	if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
2224*4882a593Smuzhiyun 	    test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2225*4882a593Smuzhiyun 		md = NULL;
2226*4882a593Smuzhiyun 		goto out;
2227*4882a593Smuzhiyun 	}
2228*4882a593Smuzhiyun 	dm_get(md);
2229*4882a593Smuzhiyun out:
2230*4882a593Smuzhiyun 	spin_unlock(&_minor_lock);
2231*4882a593Smuzhiyun 
2232*4882a593Smuzhiyun 	return md;
2233*4882a593Smuzhiyun }
2234*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_get_md);
2235*4882a593Smuzhiyun 
dm_get_mdptr(struct mapped_device * md)2236*4882a593Smuzhiyun void *dm_get_mdptr(struct mapped_device *md)
2237*4882a593Smuzhiyun {
2238*4882a593Smuzhiyun 	return md->interface_ptr;
2239*4882a593Smuzhiyun }
2240*4882a593Smuzhiyun 
dm_set_mdptr(struct mapped_device * md,void * ptr)2241*4882a593Smuzhiyun void dm_set_mdptr(struct mapped_device *md, void *ptr)
2242*4882a593Smuzhiyun {
2243*4882a593Smuzhiyun 	md->interface_ptr = ptr;
2244*4882a593Smuzhiyun }
2245*4882a593Smuzhiyun 
dm_get(struct mapped_device * md)2246*4882a593Smuzhiyun void dm_get(struct mapped_device *md)
2247*4882a593Smuzhiyun {
2248*4882a593Smuzhiyun 	atomic_inc(&md->holders);
2249*4882a593Smuzhiyun 	BUG_ON(test_bit(DMF_FREEING, &md->flags));
2250*4882a593Smuzhiyun }
2251*4882a593Smuzhiyun 
dm_hold(struct mapped_device * md)2252*4882a593Smuzhiyun int dm_hold(struct mapped_device *md)
2253*4882a593Smuzhiyun {
2254*4882a593Smuzhiyun 	spin_lock(&_minor_lock);
2255*4882a593Smuzhiyun 	if (test_bit(DMF_FREEING, &md->flags)) {
2256*4882a593Smuzhiyun 		spin_unlock(&_minor_lock);
2257*4882a593Smuzhiyun 		return -EBUSY;
2258*4882a593Smuzhiyun 	}
2259*4882a593Smuzhiyun 	dm_get(md);
2260*4882a593Smuzhiyun 	spin_unlock(&_minor_lock);
2261*4882a593Smuzhiyun 	return 0;
2262*4882a593Smuzhiyun }
2263*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_hold);
2264*4882a593Smuzhiyun 
dm_device_name(struct mapped_device * md)2265*4882a593Smuzhiyun const char *dm_device_name(struct mapped_device *md)
2266*4882a593Smuzhiyun {
2267*4882a593Smuzhiyun 	return md->name;
2268*4882a593Smuzhiyun }
2269*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_device_name);
2270*4882a593Smuzhiyun 
__dm_destroy(struct mapped_device * md,bool wait)2271*4882a593Smuzhiyun static void __dm_destroy(struct mapped_device *md, bool wait)
2272*4882a593Smuzhiyun {
2273*4882a593Smuzhiyun 	struct dm_table *map;
2274*4882a593Smuzhiyun 	int srcu_idx;
2275*4882a593Smuzhiyun 
2276*4882a593Smuzhiyun 	might_sleep();
2277*4882a593Smuzhiyun 
2278*4882a593Smuzhiyun 	spin_lock(&_minor_lock);
2279*4882a593Smuzhiyun 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2280*4882a593Smuzhiyun 	set_bit(DMF_FREEING, &md->flags);
2281*4882a593Smuzhiyun 	spin_unlock(&_minor_lock);
2282*4882a593Smuzhiyun 
2283*4882a593Smuzhiyun 	blk_set_queue_dying(md->queue);
2284*4882a593Smuzhiyun 
2285*4882a593Smuzhiyun 	/*
2286*4882a593Smuzhiyun 	 * Take suspend_lock so that presuspend and postsuspend methods
2287*4882a593Smuzhiyun 	 * do not race with internal suspend.
2288*4882a593Smuzhiyun 	 */
2289*4882a593Smuzhiyun 	mutex_lock(&md->suspend_lock);
2290*4882a593Smuzhiyun 	map = dm_get_live_table(md, &srcu_idx);
2291*4882a593Smuzhiyun 	if (!dm_suspended_md(md)) {
2292*4882a593Smuzhiyun 		dm_table_presuspend_targets(map);
2293*4882a593Smuzhiyun 		set_bit(DMF_SUSPENDED, &md->flags);
2294*4882a593Smuzhiyun 		set_bit(DMF_POST_SUSPENDING, &md->flags);
2295*4882a593Smuzhiyun 		dm_table_postsuspend_targets(map);
2296*4882a593Smuzhiyun 	}
2297*4882a593Smuzhiyun 	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2298*4882a593Smuzhiyun 	dm_put_live_table(md, srcu_idx);
2299*4882a593Smuzhiyun 	mutex_unlock(&md->suspend_lock);
2300*4882a593Smuzhiyun 
2301*4882a593Smuzhiyun 	/*
2302*4882a593Smuzhiyun 	 * Rare, but there may still be I/O requests completing, for
2303*4882a593Smuzhiyun 	 * example.  Wait for all references to disappear.
2304*4882a593Smuzhiyun 	 * No one should increment the reference count of the mapped_device
2305*4882a593Smuzhiyun 	 * after its state becomes DMF_FREEING.
2306*4882a593Smuzhiyun 	 */
2307*4882a593Smuzhiyun 	if (wait)
2308*4882a593Smuzhiyun 		while (atomic_read(&md->holders))
2309*4882a593Smuzhiyun 			msleep(1);
2310*4882a593Smuzhiyun 	else if (atomic_read(&md->holders))
2311*4882a593Smuzhiyun 		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2312*4882a593Smuzhiyun 		       dm_device_name(md), atomic_read(&md->holders));
2313*4882a593Smuzhiyun 
2314*4882a593Smuzhiyun 	dm_sysfs_exit(md);
2315*4882a593Smuzhiyun 	dm_table_destroy(__unbind(md));
2316*4882a593Smuzhiyun 	free_dev(md);
2317*4882a593Smuzhiyun }
2318*4882a593Smuzhiyun 
dm_destroy(struct mapped_device * md)2319*4882a593Smuzhiyun void dm_destroy(struct mapped_device *md)
2320*4882a593Smuzhiyun {
2321*4882a593Smuzhiyun 	__dm_destroy(md, true);
2322*4882a593Smuzhiyun }
2323*4882a593Smuzhiyun 
dm_destroy_immediate(struct mapped_device * md)2324*4882a593Smuzhiyun void dm_destroy_immediate(struct mapped_device *md)
2325*4882a593Smuzhiyun {
2326*4882a593Smuzhiyun 	__dm_destroy(md, false);
2327*4882a593Smuzhiyun }
2328*4882a593Smuzhiyun 
dm_put(struct mapped_device * md)2329*4882a593Smuzhiyun void dm_put(struct mapped_device *md)
2330*4882a593Smuzhiyun {
2331*4882a593Smuzhiyun 	atomic_dec(&md->holders);
2332*4882a593Smuzhiyun }
2333*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_put);
2334*4882a593Smuzhiyun 
md_in_flight_bios(struct mapped_device * md)2335*4882a593Smuzhiyun static bool md_in_flight_bios(struct mapped_device *md)
2336*4882a593Smuzhiyun {
2337*4882a593Smuzhiyun 	int cpu;
2338*4882a593Smuzhiyun 	struct hd_struct *part = &dm_disk(md)->part0;
2339*4882a593Smuzhiyun 	long sum = 0;
2340*4882a593Smuzhiyun 
2341*4882a593Smuzhiyun 	for_each_possible_cpu(cpu) {
2342*4882a593Smuzhiyun 		sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
2343*4882a593Smuzhiyun 		sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
2344*4882a593Smuzhiyun 	}
2345*4882a593Smuzhiyun 
2346*4882a593Smuzhiyun 	return sum != 0;
2347*4882a593Smuzhiyun }
2348*4882a593Smuzhiyun 
dm_wait_for_bios_completion(struct mapped_device * md,long task_state)2349*4882a593Smuzhiyun static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state)
2350*4882a593Smuzhiyun {
2351*4882a593Smuzhiyun 	int r = 0;
2352*4882a593Smuzhiyun 	DEFINE_WAIT(wait);
2353*4882a593Smuzhiyun 
2354*4882a593Smuzhiyun 	while (true) {
2355*4882a593Smuzhiyun 		prepare_to_wait(&md->wait, &wait, task_state);
2356*4882a593Smuzhiyun 
2357*4882a593Smuzhiyun 		if (!md_in_flight_bios(md))
2358*4882a593Smuzhiyun 			break;
2359*4882a593Smuzhiyun 
2360*4882a593Smuzhiyun 		if (signal_pending_state(task_state, current)) {
2361*4882a593Smuzhiyun 			r = -EINTR;
2362*4882a593Smuzhiyun 			break;
2363*4882a593Smuzhiyun 		}
2364*4882a593Smuzhiyun 
2365*4882a593Smuzhiyun 		io_schedule();
2366*4882a593Smuzhiyun 	}
2367*4882a593Smuzhiyun 	finish_wait(&md->wait, &wait);
2368*4882a593Smuzhiyun 
2369*4882a593Smuzhiyun 	smp_rmb();
2370*4882a593Smuzhiyun 
2371*4882a593Smuzhiyun 	return r;
2372*4882a593Smuzhiyun }
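/*
 * The loop above is the standard open-coded wait idiom: publish the waiter
 * with prepare_to_wait(), re-check the condition, and only then sleep, so a
 * wake_up() from the I/O completion path cannot be missed.  A minimal sketch
 * of the same pattern for an arbitrary condition (the names here are
 * illustrative, not part of this file):
 *
 *	DEFINE_WAIT(wait);
 *
 *	while (true) {
 *		prepare_to_wait(&waitq, &wait, TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (signal_pending(current)) {
 *			r = -EINTR;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&waitq, &wait);
 */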
2373*4882a593Smuzhiyun 
dm_wait_for_completion(struct mapped_device * md,long task_state)2374*4882a593Smuzhiyun static int dm_wait_for_completion(struct mapped_device *md, long task_state)
2375*4882a593Smuzhiyun {
2376*4882a593Smuzhiyun 	int r = 0;
2377*4882a593Smuzhiyun 
2378*4882a593Smuzhiyun 	if (!queue_is_mq(md->queue))
2379*4882a593Smuzhiyun 		return dm_wait_for_bios_completion(md, task_state);
2380*4882a593Smuzhiyun 
2381*4882a593Smuzhiyun 	while (true) {
2382*4882a593Smuzhiyun 		if (!blk_mq_queue_inflight(md->queue))
2383*4882a593Smuzhiyun 			break;
2384*4882a593Smuzhiyun 
2385*4882a593Smuzhiyun 		if (signal_pending_state(task_state, current)) {
2386*4882a593Smuzhiyun 			r = -EINTR;
2387*4882a593Smuzhiyun 			break;
2388*4882a593Smuzhiyun 		}
2389*4882a593Smuzhiyun 
2390*4882a593Smuzhiyun 		msleep(5);
2391*4882a593Smuzhiyun 	}
2392*4882a593Smuzhiyun 
2393*4882a593Smuzhiyun 	return r;
2394*4882a593Smuzhiyun }
2395*4882a593Smuzhiyun 
2396*4882a593Smuzhiyun /*
2397*4882a593Smuzhiyun  * Process the deferred bios
2398*4882a593Smuzhiyun  */
dm_wq_work(struct work_struct * work)2399*4882a593Smuzhiyun static void dm_wq_work(struct work_struct *work)
2400*4882a593Smuzhiyun {
2401*4882a593Smuzhiyun 	struct mapped_device *md = container_of(work, struct mapped_device, work);
2402*4882a593Smuzhiyun 	struct bio *bio;
2403*4882a593Smuzhiyun 
2404*4882a593Smuzhiyun 	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2405*4882a593Smuzhiyun 		spin_lock_irq(&md->deferred_lock);
2406*4882a593Smuzhiyun 		bio = bio_list_pop(&md->deferred);
2407*4882a593Smuzhiyun 		spin_unlock_irq(&md->deferred_lock);
2408*4882a593Smuzhiyun 
2409*4882a593Smuzhiyun 		if (!bio)
2410*4882a593Smuzhiyun 			break;
2411*4882a593Smuzhiyun 
2412*4882a593Smuzhiyun 		submit_bio_noacct(bio);
2413*4882a593Smuzhiyun 	}
2414*4882a593Smuzhiyun }
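/*
 * dm_wq_work() is the consumer side of md->deferred.  While
 * DMF_BLOCK_IO_FOR_SUSPEND is set, the submission path queues incoming bios
 * instead of mapping them; a hedged sketch of that producer side (the real
 * helper lives earlier in this file and may differ in detail):
 *
 *	spin_lock_irqsave(&md->deferred_lock, flags);
 *	bio_list_add(&md->deferred, bio);
 *	spin_unlock_irqrestore(&md->deferred_lock, flags);
 *	queue_work(md->wq, &md->work);
 *
 * dm_queue_flush() below clears the flag and kicks md->wq so the list drains
 * through submit_bio_noacct() once the device resumes.
 */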
2415*4882a593Smuzhiyun 
dm_queue_flush(struct mapped_device * md)2416*4882a593Smuzhiyun static void dm_queue_flush(struct mapped_device *md)
2417*4882a593Smuzhiyun {
2418*4882a593Smuzhiyun 	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2419*4882a593Smuzhiyun 	smp_mb__after_atomic();
2420*4882a593Smuzhiyun 	queue_work(md->wq, &md->work);
2421*4882a593Smuzhiyun }
2422*4882a593Smuzhiyun 
2423*4882a593Smuzhiyun /*
2424*4882a593Smuzhiyun  * Swap in a new table, returning the old one for the caller to destroy.
2425*4882a593Smuzhiyun  */
dm_swap_table(struct mapped_device * md,struct dm_table * table)2426*4882a593Smuzhiyun struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2427*4882a593Smuzhiyun {
2428*4882a593Smuzhiyun 	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2429*4882a593Smuzhiyun 	struct queue_limits limits;
2430*4882a593Smuzhiyun 	int r;
2431*4882a593Smuzhiyun 
2432*4882a593Smuzhiyun 	mutex_lock(&md->suspend_lock);
2433*4882a593Smuzhiyun 
2434*4882a593Smuzhiyun 	/* device must be suspended */
2435*4882a593Smuzhiyun 	if (!dm_suspended_md(md))
2436*4882a593Smuzhiyun 		goto out;
2437*4882a593Smuzhiyun 
2438*4882a593Smuzhiyun 	/*
2439*4882a593Smuzhiyun 	 * If the new table has no data devices, retain the existing limits.
2440*4882a593Smuzhiyun 	 * This helps multipath with queue_if_no_path: if all paths disappear,
2441*4882a593Smuzhiyun 	 * new I/O is queued based on these limits until some of the paths
2442*4882a593Smuzhiyun 	 * reappear.
2443*4882a593Smuzhiyun 	 */
2444*4882a593Smuzhiyun 	if (dm_table_has_no_data_devices(table)) {
2445*4882a593Smuzhiyun 		live_map = dm_get_live_table_fast(md);
2446*4882a593Smuzhiyun 		if (live_map)
2447*4882a593Smuzhiyun 			limits = md->queue->limits;
2448*4882a593Smuzhiyun 		dm_put_live_table_fast(md);
2449*4882a593Smuzhiyun 	}
2450*4882a593Smuzhiyun 
2451*4882a593Smuzhiyun 	if (!live_map) {
2452*4882a593Smuzhiyun 		r = dm_calculate_queue_limits(table, &limits);
2453*4882a593Smuzhiyun 		if (r) {
2454*4882a593Smuzhiyun 			map = ERR_PTR(r);
2455*4882a593Smuzhiyun 			goto out;
2456*4882a593Smuzhiyun 		}
2457*4882a593Smuzhiyun 	}
2458*4882a593Smuzhiyun 
2459*4882a593Smuzhiyun 	map = __bind(md, table, &limits);
2460*4882a593Smuzhiyun 	dm_issue_global_event();
2461*4882a593Smuzhiyun 
2462*4882a593Smuzhiyun out:
2463*4882a593Smuzhiyun 	mutex_unlock(&md->suspend_lock);
2464*4882a593Smuzhiyun 	return map;
2465*4882a593Smuzhiyun }
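/*
 * A hedged sketch of how a caller (e.g. the ioctl path) typically drives a
 * table swap; the real ioctl code differs in detail, this only shows the
 * required ordering and the caller's duty to destroy the returned old table:
 *
 *	struct dm_table *old_map;
 *	int r;
 *
 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	if (r)
 *		return r;
 *	old_map = dm_swap_table(md, new_table);
 *	if (IS_ERR(old_map))
 *		return PTR_ERR(old_map);	// device stays suspended
 *	if (old_map)
 *		dm_table_destroy(old_map);
 *	return dm_resume(md);
 */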
2466*4882a593Smuzhiyun 
2467*4882a593Smuzhiyun /*
2468*4882a593Smuzhiyun  * Functions to lock and unlock any filesystem running on the
2469*4882a593Smuzhiyun  * device.
2470*4882a593Smuzhiyun  */
lock_fs(struct mapped_device * md)2471*4882a593Smuzhiyun static int lock_fs(struct mapped_device *md)
2472*4882a593Smuzhiyun {
2473*4882a593Smuzhiyun 	int r;
2474*4882a593Smuzhiyun 
2475*4882a593Smuzhiyun 	WARN_ON(test_bit(DMF_FROZEN, &md->flags));
2476*4882a593Smuzhiyun 
2477*4882a593Smuzhiyun 	r = freeze_bdev(md->bdev);
2478*4882a593Smuzhiyun 	if (!r)
2479*4882a593Smuzhiyun 		set_bit(DMF_FROZEN, &md->flags);
2480*4882a593Smuzhiyun 	return r;
2481*4882a593Smuzhiyun }
2482*4882a593Smuzhiyun 
unlock_fs(struct mapped_device * md)2483*4882a593Smuzhiyun static void unlock_fs(struct mapped_device *md)
2484*4882a593Smuzhiyun {
2485*4882a593Smuzhiyun 	if (!test_bit(DMF_FROZEN, &md->flags))
2486*4882a593Smuzhiyun 		return;
2487*4882a593Smuzhiyun 	thaw_bdev(md->bdev);
2488*4882a593Smuzhiyun 	clear_bit(DMF_FROZEN, &md->flags);
2489*4882a593Smuzhiyun }
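/*
 * lock_fs()/unlock_fs() simply wrap freeze_bdev()/thaw_bdev() and track the
 * result in DMF_FROZEN, so unlock_fs() is a no-op when the filesystem was
 * never frozen (e.g. a noflush suspend, or lock_fs() failed).  A hedged
 * usage sketch mirroring __dm_suspend() below:
 *
 *	if (!noflush && do_lockfs) {
 *		r = lock_fs(md);	// freezes and flushes the fs
 *		if (r)
 *			return r;	// nothing to thaw later
 *	}
 *	...
 *	unlock_fs(md);			// safe even if never frozen
 */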
2490*4882a593Smuzhiyun 
2491*4882a593Smuzhiyun /*
2492*4882a593Smuzhiyun  * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2493*4882a593Smuzhiyun  * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2494*4882a593Smuzhiyun  * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2495*4882a593Smuzhiyun  *
2496*4882a593Smuzhiyun  * If __dm_suspend returns 0, the device is completely quiescent:
2497*4882a593Smuzhiyun  * there is no request-processing activity, and all new requests
2498*4882a593Smuzhiyun  * are added to the md->deferred list.
2499*4882a593Smuzhiyun  */
__dm_suspend(struct mapped_device * md,struct dm_table * map,unsigned suspend_flags,long task_state,int dmf_suspended_flag)2500*4882a593Smuzhiyun static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2501*4882a593Smuzhiyun 			unsigned suspend_flags, long task_state,
2502*4882a593Smuzhiyun 			int dmf_suspended_flag)
2503*4882a593Smuzhiyun {
2504*4882a593Smuzhiyun 	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2505*4882a593Smuzhiyun 	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2506*4882a593Smuzhiyun 	int r;
2507*4882a593Smuzhiyun 
2508*4882a593Smuzhiyun 	lockdep_assert_held(&md->suspend_lock);
2509*4882a593Smuzhiyun 
2510*4882a593Smuzhiyun 	/*
2511*4882a593Smuzhiyun 	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2512*4882a593Smuzhiyun 	 * This flag is cleared before dm_suspend returns.
2513*4882a593Smuzhiyun 	 */
2514*4882a593Smuzhiyun 	if (noflush)
2515*4882a593Smuzhiyun 		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2516*4882a593Smuzhiyun 	else
2517*4882a593Smuzhiyun 		DMDEBUG("%s: suspending with flush", dm_device_name(md));
2518*4882a593Smuzhiyun 
2519*4882a593Smuzhiyun 	/*
2520*4882a593Smuzhiyun 	 * This gets reverted if there's an error later and the targets
2521*4882a593Smuzhiyun 	 * provide the .presuspend_undo hook.
2522*4882a593Smuzhiyun 	 */
2523*4882a593Smuzhiyun 	dm_table_presuspend_targets(map);
2524*4882a593Smuzhiyun 
2525*4882a593Smuzhiyun 	/*
2526*4882a593Smuzhiyun 	 * Flush I/O to the device.
2527*4882a593Smuzhiyun 	 * Any I/O submitted after lock_fs() may not be flushed.
2528*4882a593Smuzhiyun 	 * noflush takes precedence over do_lockfs.
2529*4882a593Smuzhiyun 	 * (lock_fs() flushes I/Os and waits for them to complete.)
2530*4882a593Smuzhiyun 	 */
2531*4882a593Smuzhiyun 	if (!noflush && do_lockfs) {
2532*4882a593Smuzhiyun 		r = lock_fs(md);
2533*4882a593Smuzhiyun 		if (r) {
2534*4882a593Smuzhiyun 			dm_table_presuspend_undo_targets(map);
2535*4882a593Smuzhiyun 			return r;
2536*4882a593Smuzhiyun 		}
2537*4882a593Smuzhiyun 	}
2538*4882a593Smuzhiyun 
2539*4882a593Smuzhiyun 	/*
2540*4882a593Smuzhiyun 	 * Here we must make sure that no processes are submitting requests
2541*4882a593Smuzhiyun 	 * to target drivers i.e. no one may be executing
2542*4882a593Smuzhiyun 	 * to target drivers, i.e. no one may be executing
2543*4882a593Smuzhiyun 	 *
2544*4882a593Smuzhiyun 	 * To get all processes out of __split_and_process_bio in dm_submit_bio,
2545*4882a593Smuzhiyun 	 * we take the write lock. To prevent any process from reentering
2546*4882a593Smuzhiyun 	 * __split_and_process_bio from dm_submit_bio and quiesce the thread
2547*4882a593Smuzhiyun 	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2548*4882a593Smuzhiyun 	 * flush_workqueue(md->wq).
2549*4882a593Smuzhiyun 	 */
2550*4882a593Smuzhiyun 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2551*4882a593Smuzhiyun 	if (map)
2552*4882a593Smuzhiyun 		synchronize_srcu(&md->io_barrier);
2553*4882a593Smuzhiyun 
2554*4882a593Smuzhiyun 	/*
2555*4882a593Smuzhiyun 	 * Stop md->queue before flushing md->wq in case request-based
2556*4882a593Smuzhiyun 	 * dm defers requests to md->wq from md->queue.
2557*4882a593Smuzhiyun 	 */
2558*4882a593Smuzhiyun 	if (dm_request_based(md))
2559*4882a593Smuzhiyun 		dm_stop_queue(md->queue);
2560*4882a593Smuzhiyun 
2561*4882a593Smuzhiyun 	flush_workqueue(md->wq);
2562*4882a593Smuzhiyun 
2563*4882a593Smuzhiyun 	/*
2564*4882a593Smuzhiyun 	 * At this point no more requests are entering target request routines.
2565*4882a593Smuzhiyun 	 * We call dm_wait_for_completion to wait for all existing requests
2566*4882a593Smuzhiyun 	 * to finish.
2567*4882a593Smuzhiyun 	 */
2568*4882a593Smuzhiyun 	r = dm_wait_for_completion(md, task_state);
2569*4882a593Smuzhiyun 	if (!r)
2570*4882a593Smuzhiyun 		set_bit(dmf_suspended_flag, &md->flags);
2571*4882a593Smuzhiyun 
2572*4882a593Smuzhiyun 	if (noflush)
2573*4882a593Smuzhiyun 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2574*4882a593Smuzhiyun 	if (map)
2575*4882a593Smuzhiyun 		synchronize_srcu(&md->io_barrier);
2576*4882a593Smuzhiyun 
2577*4882a593Smuzhiyun 	/* were we interrupted? */
2578*4882a593Smuzhiyun 	if (r < 0) {
2579*4882a593Smuzhiyun 		dm_queue_flush(md);
2580*4882a593Smuzhiyun 
2581*4882a593Smuzhiyun 		if (dm_request_based(md))
2582*4882a593Smuzhiyun 			dm_start_queue(md->queue);
2583*4882a593Smuzhiyun 
2584*4882a593Smuzhiyun 		unlock_fs(md);
2585*4882a593Smuzhiyun 		dm_table_presuspend_undo_targets(map);
2586*4882a593Smuzhiyun 		/* pushback list is already flushed, so skip flush */
2587*4882a593Smuzhiyun 	}
2588*4882a593Smuzhiyun 
2589*4882a593Smuzhiyun 	return r;
2590*4882a593Smuzhiyun }
2591*4882a593Smuzhiyun 
2592*4882a593Smuzhiyun /*
2593*4882a593Smuzhiyun  * We need to be able to change a mapping table under a mounted
2594*4882a593Smuzhiyun  * filesystem.  For example we might want to move some data in
2595*4882a593Smuzhiyun  * the background.  Before the table can be swapped with
2596*4882a593Smuzhiyun  * dm_bind_table, dm_suspend must be called to flush any in
2597*4882a593Smuzhiyun  * flight bios and ensure that any further io gets deferred.
2598*4882a593Smuzhiyun  */
2599*4882a593Smuzhiyun /*
2600*4882a593Smuzhiyun  * Suspend mechanism in request-based dm.
2601*4882a593Smuzhiyun  *
2602*4882a593Smuzhiyun  * 1. Flush all I/Os by lock_fs() if needed.
2603*4882a593Smuzhiyun  * 2. Stop dispatching any I/O by stopping the request_queue.
2604*4882a593Smuzhiyun  * 3. Wait for all in-flight I/Os to be completed or requeued.
2605*4882a593Smuzhiyun  *
2606*4882a593Smuzhiyun  * To abort suspend, start the request_queue.
2607*4882a593Smuzhiyun  */
dm_suspend(struct mapped_device * md,unsigned suspend_flags)2608*4882a593Smuzhiyun int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2609*4882a593Smuzhiyun {
2610*4882a593Smuzhiyun 	struct dm_table *map = NULL;
2611*4882a593Smuzhiyun 	int r = 0;
2612*4882a593Smuzhiyun 
2613*4882a593Smuzhiyun retry:
2614*4882a593Smuzhiyun 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2615*4882a593Smuzhiyun 
2616*4882a593Smuzhiyun 	if (dm_suspended_md(md)) {
2617*4882a593Smuzhiyun 		r = -EINVAL;
2618*4882a593Smuzhiyun 		goto out_unlock;
2619*4882a593Smuzhiyun 	}
2620*4882a593Smuzhiyun 
2621*4882a593Smuzhiyun 	if (dm_suspended_internally_md(md)) {
2622*4882a593Smuzhiyun 		/* already internally suspended, wait for internal resume */
2623*4882a593Smuzhiyun 		mutex_unlock(&md->suspend_lock);
2624*4882a593Smuzhiyun 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2625*4882a593Smuzhiyun 		if (r)
2626*4882a593Smuzhiyun 			return r;
2627*4882a593Smuzhiyun 		goto retry;
2628*4882a593Smuzhiyun 	}
2629*4882a593Smuzhiyun 
2630*4882a593Smuzhiyun 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2631*4882a593Smuzhiyun 
2632*4882a593Smuzhiyun 	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
2633*4882a593Smuzhiyun 	if (r)
2634*4882a593Smuzhiyun 		goto out_unlock;
2635*4882a593Smuzhiyun 
2636*4882a593Smuzhiyun 	set_bit(DMF_POST_SUSPENDING, &md->flags);
2637*4882a593Smuzhiyun 	dm_table_postsuspend_targets(map);
2638*4882a593Smuzhiyun 	clear_bit(DMF_POST_SUSPENDING, &md->flags);
2639*4882a593Smuzhiyun 
2640*4882a593Smuzhiyun out_unlock:
2641*4882a593Smuzhiyun 	mutex_unlock(&md->suspend_lock);
2642*4882a593Smuzhiyun 	return r;
2643*4882a593Smuzhiyun }
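/*
 * A hedged sketch of the two common suspend_flags combinations a caller
 * might pass (the flag names are the ones declared in dm.h):
 *
 *	// Flush: freeze the filesystem and wait for outstanding I/O.
 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *
 *	// Noflush: skip the filesystem freeze; I/O that cannot make
 *	// progress (e.g. multipath with queue_if_no_path) is requeued
 *	// for completion after resume instead of being flushed.
 *	r = dm_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
 */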
2644*4882a593Smuzhiyun 
__dm_resume(struct mapped_device * md,struct dm_table * map)2645*4882a593Smuzhiyun static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2646*4882a593Smuzhiyun {
2647*4882a593Smuzhiyun 	if (map) {
2648*4882a593Smuzhiyun 		int r = dm_table_resume_targets(map);
2649*4882a593Smuzhiyun 		if (r)
2650*4882a593Smuzhiyun 			return r;
2651*4882a593Smuzhiyun 	}
2652*4882a593Smuzhiyun 
2653*4882a593Smuzhiyun 	dm_queue_flush(md);
2654*4882a593Smuzhiyun 
2655*4882a593Smuzhiyun 	/*
2656*4882a593Smuzhiyun 	 * Flushing deferred I/Os must be done after targets are resumed
2657*4882a593Smuzhiyun 	 * so that mapping of targets can work correctly.
2658*4882a593Smuzhiyun 	 * Request-based dm is queueing the deferred I/Os in its request_queue.
2659*4882a593Smuzhiyun 	 */
2660*4882a593Smuzhiyun 	if (dm_request_based(md))
2661*4882a593Smuzhiyun 		dm_start_queue(md->queue);
2662*4882a593Smuzhiyun 
2663*4882a593Smuzhiyun 	unlock_fs(md);
2664*4882a593Smuzhiyun 
2665*4882a593Smuzhiyun 	return 0;
2666*4882a593Smuzhiyun }
2667*4882a593Smuzhiyun 
dm_resume(struct mapped_device * md)2668*4882a593Smuzhiyun int dm_resume(struct mapped_device *md)
2669*4882a593Smuzhiyun {
2670*4882a593Smuzhiyun 	int r;
2671*4882a593Smuzhiyun 	struct dm_table *map = NULL;
2672*4882a593Smuzhiyun 
2673*4882a593Smuzhiyun retry:
2674*4882a593Smuzhiyun 	r = -EINVAL;
2675*4882a593Smuzhiyun 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2676*4882a593Smuzhiyun 
2677*4882a593Smuzhiyun 	if (!dm_suspended_md(md))
2678*4882a593Smuzhiyun 		goto out;
2679*4882a593Smuzhiyun 
2680*4882a593Smuzhiyun 	if (dm_suspended_internally_md(md)) {
2681*4882a593Smuzhiyun 		/* already internally suspended, wait for internal resume */
2682*4882a593Smuzhiyun 		mutex_unlock(&md->suspend_lock);
2683*4882a593Smuzhiyun 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2684*4882a593Smuzhiyun 		if (r)
2685*4882a593Smuzhiyun 			return r;
2686*4882a593Smuzhiyun 		goto retry;
2687*4882a593Smuzhiyun 	}
2688*4882a593Smuzhiyun 
2689*4882a593Smuzhiyun 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2690*4882a593Smuzhiyun 	if (!map || !dm_table_get_size(map))
2691*4882a593Smuzhiyun 		goto out;
2692*4882a593Smuzhiyun 
2693*4882a593Smuzhiyun 	r = __dm_resume(md, map);
2694*4882a593Smuzhiyun 	if (r)
2695*4882a593Smuzhiyun 		goto out;
2696*4882a593Smuzhiyun 
2697*4882a593Smuzhiyun 	clear_bit(DMF_SUSPENDED, &md->flags);
2698*4882a593Smuzhiyun out:
2699*4882a593Smuzhiyun 	mutex_unlock(&md->suspend_lock);
2700*4882a593Smuzhiyun 
2701*4882a593Smuzhiyun 	return r;
2702*4882a593Smuzhiyun }
2703*4882a593Smuzhiyun 
2704*4882a593Smuzhiyun /*
2705*4882a593Smuzhiyun  * Internal suspend/resume works like userspace-driven suspend. It waits
2706*4882a593Smuzhiyun  * until all bios finish and prevents issuing new bios to the target drivers.
2707*4882a593Smuzhiyun  * It may be used only from the kernel.
2708*4882a593Smuzhiyun  */
2709*4882a593Smuzhiyun 
__dm_internal_suspend(struct mapped_device * md,unsigned suspend_flags)2710*4882a593Smuzhiyun static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
2711*4882a593Smuzhiyun {
2712*4882a593Smuzhiyun 	struct dm_table *map = NULL;
2713*4882a593Smuzhiyun 
2714*4882a593Smuzhiyun 	lockdep_assert_held(&md->suspend_lock);
2715*4882a593Smuzhiyun 
2716*4882a593Smuzhiyun 	if (md->internal_suspend_count++)
2717*4882a593Smuzhiyun 		return; /* nested internal suspend */
2718*4882a593Smuzhiyun 
2719*4882a593Smuzhiyun 	if (dm_suspended_md(md)) {
2720*4882a593Smuzhiyun 		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2721*4882a593Smuzhiyun 		return; /* nest suspend */
2722*4882a593Smuzhiyun 	}
2723*4882a593Smuzhiyun 
2724*4882a593Smuzhiyun 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2725*4882a593Smuzhiyun 
2726*4882a593Smuzhiyun 	/*
2727*4882a593Smuzhiyun 	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2728*4882a593Smuzhiyun 	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
2729*4882a593Smuzhiyun 	 * would require changing .presuspend to return an error -- avoid this
2730*4882a593Smuzhiyun 	 * until there is a need for more elaborate variants of internal suspend.
2731*4882a593Smuzhiyun 	 */
2732*4882a593Smuzhiyun 	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
2733*4882a593Smuzhiyun 			    DMF_SUSPENDED_INTERNALLY);
2734*4882a593Smuzhiyun 
2735*4882a593Smuzhiyun 	set_bit(DMF_POST_SUSPENDING, &md->flags);
2736*4882a593Smuzhiyun 	dm_table_postsuspend_targets(map);
2737*4882a593Smuzhiyun 	clear_bit(DMF_POST_SUSPENDING, &md->flags);
2738*4882a593Smuzhiyun }
2739*4882a593Smuzhiyun 
__dm_internal_resume(struct mapped_device * md)2740*4882a593Smuzhiyun static void __dm_internal_resume(struct mapped_device *md)
2741*4882a593Smuzhiyun {
2742*4882a593Smuzhiyun 	BUG_ON(!md->internal_suspend_count);
2743*4882a593Smuzhiyun 
2744*4882a593Smuzhiyun 	if (--md->internal_suspend_count)
2745*4882a593Smuzhiyun 		return; /* resume from nested internal suspend */
2746*4882a593Smuzhiyun 
2747*4882a593Smuzhiyun 	if (dm_suspended_md(md))
2748*4882a593Smuzhiyun 		goto done; /* resume from nested suspend */
2749*4882a593Smuzhiyun 
2750*4882a593Smuzhiyun 	/*
2751*4882a593Smuzhiyun 	 * NOTE: existing callers don't need to call dm_table_resume_targets
2752*4882a593Smuzhiyun 	 * (which may fail -- so best to avoid it for now by passing NULL map)
2753*4882a593Smuzhiyun 	 */
2754*4882a593Smuzhiyun 	(void) __dm_resume(md, NULL);
2755*4882a593Smuzhiyun 
2756*4882a593Smuzhiyun done:
2757*4882a593Smuzhiyun 	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2758*4882a593Smuzhiyun 	smp_mb__after_atomic();
2759*4882a593Smuzhiyun 	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
2760*4882a593Smuzhiyun }
2761*4882a593Smuzhiyun 
dm_internal_suspend_noflush(struct mapped_device * md)2762*4882a593Smuzhiyun void dm_internal_suspend_noflush(struct mapped_device *md)
2763*4882a593Smuzhiyun {
2764*4882a593Smuzhiyun 	mutex_lock(&md->suspend_lock);
2765*4882a593Smuzhiyun 	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
2766*4882a593Smuzhiyun 	mutex_unlock(&md->suspend_lock);
2767*4882a593Smuzhiyun }
2768*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
2769*4882a593Smuzhiyun 
dm_internal_resume(struct mapped_device * md)2770*4882a593Smuzhiyun void dm_internal_resume(struct mapped_device *md)
2771*4882a593Smuzhiyun {
2772*4882a593Smuzhiyun 	mutex_lock(&md->suspend_lock);
2773*4882a593Smuzhiyun 	__dm_internal_resume(md);
2774*4882a593Smuzhiyun 	mutex_unlock(&md->suspend_lock);
2775*4882a593Smuzhiyun }
2776*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_internal_resume);
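/*
 * A hedged sketch of how an in-kernel user pairs the internal variants
 * (the caller and motivation are illustrative, e.g. a target that must
 * quiesce the device around a metadata operation):
 *
 *	dm_internal_suspend_noflush(md);	// quiesce, nestable
 *	... operate on the quiesced device ...
 *	dm_internal_resume(md);			// resumes at nest depth 0
 *
 * Nesting is reference counted via md->internal_suspend_count, and a
 * userspace-driven suspend that is already in effect is preserved across
 * the internal suspend/resume pair.
 */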
2777*4882a593Smuzhiyun 
2778*4882a593Smuzhiyun /*
2779*4882a593Smuzhiyun  * Fast variants of internal suspend/resume hold md->suspend_lock,
2780*4882a593Smuzhiyun  * which prevents interaction with userspace-driven suspend.
2781*4882a593Smuzhiyun  */
2782*4882a593Smuzhiyun 
dm_internal_suspend_fast(struct mapped_device * md)2783*4882a593Smuzhiyun void dm_internal_suspend_fast(struct mapped_device *md)
2784*4882a593Smuzhiyun {
2785*4882a593Smuzhiyun 	mutex_lock(&md->suspend_lock);
2786*4882a593Smuzhiyun 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2787*4882a593Smuzhiyun 		return;
2788*4882a593Smuzhiyun 
2789*4882a593Smuzhiyun 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2790*4882a593Smuzhiyun 	synchronize_srcu(&md->io_barrier);
2791*4882a593Smuzhiyun 	flush_workqueue(md->wq);
2792*4882a593Smuzhiyun 	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2793*4882a593Smuzhiyun }
2794*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
2795*4882a593Smuzhiyun 
dm_internal_resume_fast(struct mapped_device * md)2796*4882a593Smuzhiyun void dm_internal_resume_fast(struct mapped_device *md)
2797*4882a593Smuzhiyun {
2798*4882a593Smuzhiyun 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
2799*4882a593Smuzhiyun 		goto done;
2800*4882a593Smuzhiyun 
2801*4882a593Smuzhiyun 	dm_queue_flush(md);
2802*4882a593Smuzhiyun 
2803*4882a593Smuzhiyun done:
2804*4882a593Smuzhiyun 	mutex_unlock(&md->suspend_lock);
2805*4882a593Smuzhiyun }
2806*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
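/*
 * The _fast variants are intentionally asymmetric about md->suspend_lock:
 * dm_internal_suspend_fast() returns with the lock held (even on its
 * early-return path) and dm_internal_resume_fast() releases it, so the pair
 * brackets a critical section during which userspace-driven suspend/resume
 * cannot run.  A hedged usage sketch:
 *
 *	dm_internal_suspend_fast(md);	// takes md->suspend_lock, quiesces
 *	... short critical section ...
 *	dm_internal_resume_fast(md);	// restarts I/O, drops the lock
 */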
2807*4882a593Smuzhiyun 
2808*4882a593Smuzhiyun /*-----------------------------------------------------------------
2809*4882a593Smuzhiyun  * Event notification.
2810*4882a593Smuzhiyun  *---------------------------------------------------------------*/
dm_kobject_uevent(struct mapped_device * md,enum kobject_action action,unsigned cookie)2811*4882a593Smuzhiyun int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2812*4882a593Smuzhiyun 		       unsigned cookie)
2813*4882a593Smuzhiyun {
2814*4882a593Smuzhiyun 	int r;
2815*4882a593Smuzhiyun 	unsigned noio_flag;
2816*4882a593Smuzhiyun 	char udev_cookie[DM_COOKIE_LENGTH];
2817*4882a593Smuzhiyun 	char *envp[] = { udev_cookie, NULL };
2818*4882a593Smuzhiyun 
2819*4882a593Smuzhiyun 	noio_flag = memalloc_noio_save();
2820*4882a593Smuzhiyun 
2821*4882a593Smuzhiyun 	if (!cookie)
2822*4882a593Smuzhiyun 		r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2823*4882a593Smuzhiyun 	else {
2824*4882a593Smuzhiyun 		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2825*4882a593Smuzhiyun 			 DM_COOKIE_ENV_VAR_NAME, cookie);
2826*4882a593Smuzhiyun 		r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2827*4882a593Smuzhiyun 				       action, envp);
2828*4882a593Smuzhiyun 	}
2829*4882a593Smuzhiyun 
2830*4882a593Smuzhiyun 	memalloc_noio_restore(noio_flag);
2831*4882a593Smuzhiyun 
2832*4882a593Smuzhiyun 	return r;
2833*4882a593Smuzhiyun }
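/*
 * When a cookie is supplied, the uevent carries one extra environment
 * variable built with the snprintf() above.  For example (hypothetical
 * cookie value), cookie == 3735928559 yields "DM_COOKIE=3735928559",
 * which fits DM_COOKIE_LENGTH (24): the variable name is 9 characters,
 * plus '=' and at most 10 digits for a u32.  Userspace (libdevmapper)
 * uses this cookie to synchronize with the udev rule processing that the
 * event triggers.
 */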
2834*4882a593Smuzhiyun 
dm_next_uevent_seq(struct mapped_device * md)2835*4882a593Smuzhiyun uint32_t dm_next_uevent_seq(struct mapped_device *md)
2836*4882a593Smuzhiyun {
2837*4882a593Smuzhiyun 	return atomic_add_return(1, &md->uevent_seq);
2838*4882a593Smuzhiyun }
2839*4882a593Smuzhiyun 
dm_get_event_nr(struct mapped_device * md)2840*4882a593Smuzhiyun uint32_t dm_get_event_nr(struct mapped_device *md)
2841*4882a593Smuzhiyun {
2842*4882a593Smuzhiyun 	return atomic_read(&md->event_nr);
2843*4882a593Smuzhiyun }
2844*4882a593Smuzhiyun 
dm_wait_event(struct mapped_device * md,int event_nr)2845*4882a593Smuzhiyun int dm_wait_event(struct mapped_device *md, int event_nr)
2846*4882a593Smuzhiyun {
2847*4882a593Smuzhiyun 	return wait_event_interruptible(md->eventq,
2848*4882a593Smuzhiyun 			(event_nr != atomic_read(&md->event_nr)));
2849*4882a593Smuzhiyun }
2850*4882a593Smuzhiyun 
dm_uevent_add(struct mapped_device * md,struct list_head * elist)2851*4882a593Smuzhiyun void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2852*4882a593Smuzhiyun {
2853*4882a593Smuzhiyun 	unsigned long flags;
2854*4882a593Smuzhiyun 
2855*4882a593Smuzhiyun 	spin_lock_irqsave(&md->uevent_lock, flags);
2856*4882a593Smuzhiyun 	list_add(elist, &md->uevent_list);
2857*4882a593Smuzhiyun 	spin_unlock_irqrestore(&md->uevent_lock, flags);
2858*4882a593Smuzhiyun }
2859*4882a593Smuzhiyun 
2860*4882a593Smuzhiyun /*
2861*4882a593Smuzhiyun  * The gendisk is only valid as long as you have a reference
2862*4882a593Smuzhiyun  * count on 'md'.
2863*4882a593Smuzhiyun  */
dm_disk(struct mapped_device * md)2864*4882a593Smuzhiyun struct gendisk *dm_disk(struct mapped_device *md)
2865*4882a593Smuzhiyun {
2866*4882a593Smuzhiyun 	return md->disk;
2867*4882a593Smuzhiyun }
2868*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_disk);
2869*4882a593Smuzhiyun 
dm_kobject(struct mapped_device * md)2870*4882a593Smuzhiyun struct kobject *dm_kobject(struct mapped_device *md)
2871*4882a593Smuzhiyun {
2872*4882a593Smuzhiyun 	return &md->kobj_holder.kobj;
2873*4882a593Smuzhiyun }
2874*4882a593Smuzhiyun 
dm_get_from_kobject(struct kobject * kobj)2875*4882a593Smuzhiyun struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2876*4882a593Smuzhiyun {
2877*4882a593Smuzhiyun 	struct mapped_device *md;
2878*4882a593Smuzhiyun 
2879*4882a593Smuzhiyun 	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
2880*4882a593Smuzhiyun 
2881*4882a593Smuzhiyun 	spin_lock(&_minor_lock);
2882*4882a593Smuzhiyun 	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2883*4882a593Smuzhiyun 		md = NULL;
2884*4882a593Smuzhiyun 		goto out;
2885*4882a593Smuzhiyun 	}
2886*4882a593Smuzhiyun 	dm_get(md);
2887*4882a593Smuzhiyun out:
2888*4882a593Smuzhiyun 	spin_unlock(&_minor_lock);
2889*4882a593Smuzhiyun 
2890*4882a593Smuzhiyun 	return md;
2891*4882a593Smuzhiyun }
2892*4882a593Smuzhiyun 
dm_suspended_md(struct mapped_device * md)2893*4882a593Smuzhiyun int dm_suspended_md(struct mapped_device *md)
2894*4882a593Smuzhiyun {
2895*4882a593Smuzhiyun 	return test_bit(DMF_SUSPENDED, &md->flags);
2896*4882a593Smuzhiyun }
2897*4882a593Smuzhiyun 
dm_post_suspending_md(struct mapped_device * md)2898*4882a593Smuzhiyun static int dm_post_suspending_md(struct mapped_device *md)
2899*4882a593Smuzhiyun {
2900*4882a593Smuzhiyun 	return test_bit(DMF_POST_SUSPENDING, &md->flags);
2901*4882a593Smuzhiyun }
2902*4882a593Smuzhiyun 
dm_suspended_internally_md(struct mapped_device * md)2903*4882a593Smuzhiyun int dm_suspended_internally_md(struct mapped_device *md)
2904*4882a593Smuzhiyun {
2905*4882a593Smuzhiyun 	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2906*4882a593Smuzhiyun }
2907*4882a593Smuzhiyun 
dm_test_deferred_remove_flag(struct mapped_device * md)2908*4882a593Smuzhiyun int dm_test_deferred_remove_flag(struct mapped_device *md)
2909*4882a593Smuzhiyun {
2910*4882a593Smuzhiyun 	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
2911*4882a593Smuzhiyun }
2912*4882a593Smuzhiyun 
dm_suspended(struct dm_target * ti)2913*4882a593Smuzhiyun int dm_suspended(struct dm_target *ti)
2914*4882a593Smuzhiyun {
2915*4882a593Smuzhiyun 	return dm_suspended_md(ti->table->md);
2916*4882a593Smuzhiyun }
2917*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_suspended);
2918*4882a593Smuzhiyun 
dm_post_suspending(struct dm_target * ti)2919*4882a593Smuzhiyun int dm_post_suspending(struct dm_target *ti)
2920*4882a593Smuzhiyun {
2921*4882a593Smuzhiyun 	return dm_post_suspending_md(ti->table->md);
2922*4882a593Smuzhiyun }
2923*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_post_suspending);
2924*4882a593Smuzhiyun 
dm_noflush_suspending(struct dm_target * ti)2925*4882a593Smuzhiyun int dm_noflush_suspending(struct dm_target *ti)
2926*4882a593Smuzhiyun {
2927*4882a593Smuzhiyun 	return __noflush_suspending(ti->table->md);
2928*4882a593Smuzhiyun }
2929*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2930*4882a593Smuzhiyun 
dm_alloc_md_mempools(struct mapped_device * md,enum dm_queue_mode type,unsigned integrity,unsigned per_io_data_size,unsigned min_pool_size)2931*4882a593Smuzhiyun struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
2932*4882a593Smuzhiyun 					    unsigned integrity, unsigned per_io_data_size,
2933*4882a593Smuzhiyun 					    unsigned min_pool_size)
2934*4882a593Smuzhiyun {
2935*4882a593Smuzhiyun 	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
2936*4882a593Smuzhiyun 	unsigned int pool_size = 0;
2937*4882a593Smuzhiyun 	unsigned int front_pad, io_front_pad;
2938*4882a593Smuzhiyun 	int ret;
2939*4882a593Smuzhiyun 
2940*4882a593Smuzhiyun 	if (!pools)
2941*4882a593Smuzhiyun 		return NULL;
2942*4882a593Smuzhiyun 
2943*4882a593Smuzhiyun 	switch (type) {
2944*4882a593Smuzhiyun 	case DM_TYPE_BIO_BASED:
2945*4882a593Smuzhiyun 	case DM_TYPE_DAX_BIO_BASED:
2946*4882a593Smuzhiyun 		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
2947*4882a593Smuzhiyun 		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
2948*4882a593Smuzhiyun 		io_front_pad = roundup(front_pad,  __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
2949*4882a593Smuzhiyun 		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
2950*4882a593Smuzhiyun 		if (ret)
2951*4882a593Smuzhiyun 			goto out;
2952*4882a593Smuzhiyun 		if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
2953*4882a593Smuzhiyun 			goto out;
2954*4882a593Smuzhiyun 		break;
2955*4882a593Smuzhiyun 	case DM_TYPE_REQUEST_BASED:
2956*4882a593Smuzhiyun 		pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
2957*4882a593Smuzhiyun 		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
2958*4882a593Smuzhiyun 		/* per_io_data_size is used for blk-mq pdu at queue allocation */
2959*4882a593Smuzhiyun 		break;
2960*4882a593Smuzhiyun 	default:
2961*4882a593Smuzhiyun 		BUG();
2962*4882a593Smuzhiyun 	}
2963*4882a593Smuzhiyun 
2964*4882a593Smuzhiyun 	ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
2965*4882a593Smuzhiyun 	if (ret)
2966*4882a593Smuzhiyun 		goto out;
2967*4882a593Smuzhiyun 
2968*4882a593Smuzhiyun 	if (integrity && bioset_integrity_create(&pools->bs, pool_size))
2969*4882a593Smuzhiyun 		goto out;
2970*4882a593Smuzhiyun 
2971*4882a593Smuzhiyun 	return pools;
2972*4882a593Smuzhiyun 
2973*4882a593Smuzhiyun out:
2974*4882a593Smuzhiyun 	dm_free_md_mempools(pools);
2975*4882a593Smuzhiyun 
2976*4882a593Smuzhiyun 	return NULL;
2977*4882a593Smuzhiyun }
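/*
 * A hedged worked example of the bio-based front_pad arithmetic above,
 * using hypothetical sizes (the real struct layouts vary by config):
 * suppose per_io_data_size = 192, __alignof__(struct dm_target_io) = 8 and
 * offsetof(struct dm_target_io, clone) = 40.  Then
 *
 *	front_pad = roundup(192, 8) + 40 = 232
 *
 * i.e. every bio allocated from pools->bs is preceded by room for the
 * target's per-io data plus the dm_target_io header, so the cloning code
 * can reach its dm_target_io with container_of()-style pointer math.
 * io_front_pad is computed the same way one level up, adding space for the
 * struct dm_io that embeds the first dm_target_io.
 */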
2978*4882a593Smuzhiyun 
dm_free_md_mempools(struct dm_md_mempools * pools)2979*4882a593Smuzhiyun void dm_free_md_mempools(struct dm_md_mempools *pools)
2980*4882a593Smuzhiyun {
2981*4882a593Smuzhiyun 	if (!pools)
2982*4882a593Smuzhiyun 		return;
2983*4882a593Smuzhiyun 
2984*4882a593Smuzhiyun 	bioset_exit(&pools->bs);
2985*4882a593Smuzhiyun 	bioset_exit(&pools->io_bs);
2986*4882a593Smuzhiyun 
2987*4882a593Smuzhiyun 	kfree(pools);
2988*4882a593Smuzhiyun }
2989*4882a593Smuzhiyun 
2990*4882a593Smuzhiyun struct dm_pr {
2991*4882a593Smuzhiyun 	u64	old_key;
2992*4882a593Smuzhiyun 	u64	new_key;
2993*4882a593Smuzhiyun 	u32	flags;
2994*4882a593Smuzhiyun 	bool	fail_early;
2995*4882a593Smuzhiyun };
2996*4882a593Smuzhiyun 
dm_call_pr(struct block_device * bdev,iterate_devices_callout_fn fn,void * data)2997*4882a593Smuzhiyun static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
2998*4882a593Smuzhiyun 		      void *data)
2999*4882a593Smuzhiyun {
3000*4882a593Smuzhiyun 	struct mapped_device *md = bdev->bd_disk->private_data;
3001*4882a593Smuzhiyun 	struct dm_table *table;
3002*4882a593Smuzhiyun 	struct dm_target *ti;
3003*4882a593Smuzhiyun 	int ret = -ENOTTY, srcu_idx;
3004*4882a593Smuzhiyun 
3005*4882a593Smuzhiyun 	table = dm_get_live_table(md, &srcu_idx);
3006*4882a593Smuzhiyun 	if (!table || !dm_table_get_size(table))
3007*4882a593Smuzhiyun 		goto out;
3008*4882a593Smuzhiyun 
3009*4882a593Smuzhiyun 	/* We only support devices that have a single target */
3010*4882a593Smuzhiyun 	if (dm_table_get_num_targets(table) != 1)
3011*4882a593Smuzhiyun 		goto out;
3012*4882a593Smuzhiyun 	ti = dm_table_get_target(table, 0);
3013*4882a593Smuzhiyun 
3014*4882a593Smuzhiyun 	if (dm_suspended_md(md)) {
3015*4882a593Smuzhiyun 		ret = -EAGAIN;
3016*4882a593Smuzhiyun 		goto out;
3017*4882a593Smuzhiyun 	}
3018*4882a593Smuzhiyun 
3019*4882a593Smuzhiyun 	ret = -EINVAL;
3020*4882a593Smuzhiyun 	if (!ti->type->iterate_devices)
3021*4882a593Smuzhiyun 		goto out;
3022*4882a593Smuzhiyun 
3023*4882a593Smuzhiyun 	ret = ti->type->iterate_devices(ti, fn, data);
3024*4882a593Smuzhiyun out:
3025*4882a593Smuzhiyun 	dm_put_live_table(md, srcu_idx);
3026*4882a593Smuzhiyun 	return ret;
3027*4882a593Smuzhiyun }
3028*4882a593Smuzhiyun 
3029*4882a593Smuzhiyun /*
3030*4882a593Smuzhiyun  * For register / unregister we need to manually call out to every path.
3031*4882a593Smuzhiyun  */
__dm_pr_register(struct dm_target * ti,struct dm_dev * dev,sector_t start,sector_t len,void * data)3032*4882a593Smuzhiyun static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
3033*4882a593Smuzhiyun 			    sector_t start, sector_t len, void *data)
3034*4882a593Smuzhiyun {
3035*4882a593Smuzhiyun 	struct dm_pr *pr = data;
3036*4882a593Smuzhiyun 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3037*4882a593Smuzhiyun 
3038*4882a593Smuzhiyun 	if (!ops || !ops->pr_register)
3039*4882a593Smuzhiyun 		return -EOPNOTSUPP;
3040*4882a593Smuzhiyun 	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
3041*4882a593Smuzhiyun }
3042*4882a593Smuzhiyun 
dm_pr_register(struct block_device * bdev,u64 old_key,u64 new_key,u32 flags)3043*4882a593Smuzhiyun static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
3044*4882a593Smuzhiyun 			  u32 flags)
3045*4882a593Smuzhiyun {
3046*4882a593Smuzhiyun 	struct dm_pr pr = {
3047*4882a593Smuzhiyun 		.old_key	= old_key,
3048*4882a593Smuzhiyun 		.new_key	= new_key,
3049*4882a593Smuzhiyun 		.flags		= flags,
3050*4882a593Smuzhiyun 		.fail_early	= true,
3051*4882a593Smuzhiyun 	};
3052*4882a593Smuzhiyun 	int ret;
3053*4882a593Smuzhiyun 
3054*4882a593Smuzhiyun 	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
3055*4882a593Smuzhiyun 	if (ret && new_key) {
3056*4882a593Smuzhiyun 		/* unregister all paths if we failed to register any path */
3057*4882a593Smuzhiyun 		pr.old_key = new_key;
3058*4882a593Smuzhiyun 		pr.new_key = 0;
3059*4882a593Smuzhiyun 		pr.flags = 0;
3060*4882a593Smuzhiyun 		pr.fail_early = false;
3061*4882a593Smuzhiyun 		dm_call_pr(bdev, __dm_pr_register, &pr);
3062*4882a593Smuzhiyun 	}
3063*4882a593Smuzhiyun 
3064*4882a593Smuzhiyun 	return ret;
3065*4882a593Smuzhiyun }
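/*
 * A hedged worked scenario for the rollback above: with three underlying
 * paths, suppose __dm_pr_register() succeeds on paths 1 and 2 but fails on
 * path 3.  dm_call_pr() returns that error, and because new_key is non-zero
 * the code re-runs the iteration as an unregister (old_key = new_key,
 * new_key = 0, fail_early = false) so paths 1 and 2 do not keep a stale
 * registration.  The original error is still returned to the caller.
 */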
3066*4882a593Smuzhiyun 
dm_pr_reserve(struct block_device * bdev,u64 key,enum pr_type type,u32 flags)3067*4882a593Smuzhiyun static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
3068*4882a593Smuzhiyun 			 u32 flags)
3069*4882a593Smuzhiyun {
3070*4882a593Smuzhiyun 	struct mapped_device *md = bdev->bd_disk->private_data;
3071*4882a593Smuzhiyun 	const struct pr_ops *ops;
3072*4882a593Smuzhiyun 	int r, srcu_idx;
3073*4882a593Smuzhiyun 
3074*4882a593Smuzhiyun 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3075*4882a593Smuzhiyun 	if (r < 0)
3076*4882a593Smuzhiyun 		goto out;
3077*4882a593Smuzhiyun 
3078*4882a593Smuzhiyun 	ops = bdev->bd_disk->fops->pr_ops;
3079*4882a593Smuzhiyun 	if (ops && ops->pr_reserve)
3080*4882a593Smuzhiyun 		r = ops->pr_reserve(bdev, key, type, flags);
3081*4882a593Smuzhiyun 	else
3082*4882a593Smuzhiyun 		r = -EOPNOTSUPP;
3083*4882a593Smuzhiyun out:
3084*4882a593Smuzhiyun 	dm_unprepare_ioctl(md, srcu_idx);
3085*4882a593Smuzhiyun 	return r;
3086*4882a593Smuzhiyun }
3087*4882a593Smuzhiyun 
dm_pr_release(struct block_device * bdev,u64 key,enum pr_type type)3088*4882a593Smuzhiyun static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
3089*4882a593Smuzhiyun {
3090*4882a593Smuzhiyun 	struct mapped_device *md = bdev->bd_disk->private_data;
3091*4882a593Smuzhiyun 	const struct pr_ops *ops;
3092*4882a593Smuzhiyun 	int r, srcu_idx;
3093*4882a593Smuzhiyun 
3094*4882a593Smuzhiyun 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3095*4882a593Smuzhiyun 	if (r < 0)
3096*4882a593Smuzhiyun 		goto out;
3097*4882a593Smuzhiyun 
3098*4882a593Smuzhiyun 	ops = bdev->bd_disk->fops->pr_ops;
3099*4882a593Smuzhiyun 	if (ops && ops->pr_release)
3100*4882a593Smuzhiyun 		r = ops->pr_release(bdev, key, type);
3101*4882a593Smuzhiyun 	else
3102*4882a593Smuzhiyun 		r = -EOPNOTSUPP;
3103*4882a593Smuzhiyun out:
3104*4882a593Smuzhiyun 	dm_unprepare_ioctl(md, srcu_idx);
3105*4882a593Smuzhiyun 	return r;
3106*4882a593Smuzhiyun }
3107*4882a593Smuzhiyun 
dm_pr_preempt(struct block_device * bdev,u64 old_key,u64 new_key,enum pr_type type,bool abort)3108*4882a593Smuzhiyun static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
3109*4882a593Smuzhiyun 			 enum pr_type type, bool abort)
3110*4882a593Smuzhiyun {
3111*4882a593Smuzhiyun 	struct mapped_device *md = bdev->bd_disk->private_data;
3112*4882a593Smuzhiyun 	const struct pr_ops *ops;
3113*4882a593Smuzhiyun 	int r, srcu_idx;
3114*4882a593Smuzhiyun 
3115*4882a593Smuzhiyun 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3116*4882a593Smuzhiyun 	if (r < 0)
3117*4882a593Smuzhiyun 		goto out;
3118*4882a593Smuzhiyun 
3119*4882a593Smuzhiyun 	ops = bdev->bd_disk->fops->pr_ops;
3120*4882a593Smuzhiyun 	if (ops && ops->pr_preempt)
3121*4882a593Smuzhiyun 		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
3122*4882a593Smuzhiyun 	else
3123*4882a593Smuzhiyun 		r = -EOPNOTSUPP;
3124*4882a593Smuzhiyun out:
3125*4882a593Smuzhiyun 	dm_unprepare_ioctl(md, srcu_idx);
3126*4882a593Smuzhiyun 	return r;
3127*4882a593Smuzhiyun }
3128*4882a593Smuzhiyun 
dm_pr_clear(struct block_device * bdev,u64 key)3129*4882a593Smuzhiyun static int dm_pr_clear(struct block_device *bdev, u64 key)
3130*4882a593Smuzhiyun {
3131*4882a593Smuzhiyun 	struct mapped_device *md = bdev->bd_disk->private_data;
3132*4882a593Smuzhiyun 	const struct pr_ops *ops;
3133*4882a593Smuzhiyun 	int r, srcu_idx;
3134*4882a593Smuzhiyun 
3135*4882a593Smuzhiyun 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3136*4882a593Smuzhiyun 	if (r < 0)
3137*4882a593Smuzhiyun 		goto out;
3138*4882a593Smuzhiyun 
3139*4882a593Smuzhiyun 	ops = bdev->bd_disk->fops->pr_ops;
3140*4882a593Smuzhiyun 	if (ops && ops->pr_clear)
3141*4882a593Smuzhiyun 		r = ops->pr_clear(bdev, key);
3142*4882a593Smuzhiyun 	else
3143*4882a593Smuzhiyun 		r = -EOPNOTSUPP;
3144*4882a593Smuzhiyun out:
3145*4882a593Smuzhiyun 	dm_unprepare_ioctl(md, srcu_idx);
3146*4882a593Smuzhiyun 	return r;
3147*4882a593Smuzhiyun }
3148*4882a593Smuzhiyun 
3149*4882a593Smuzhiyun static const struct pr_ops dm_pr_ops = {
3150*4882a593Smuzhiyun 	.pr_register	= dm_pr_register,
3151*4882a593Smuzhiyun 	.pr_reserve	= dm_pr_reserve,
3152*4882a593Smuzhiyun 	.pr_release	= dm_pr_release,
3153*4882a593Smuzhiyun 	.pr_preempt	= dm_pr_preempt,
3154*4882a593Smuzhiyun 	.pr_clear	= dm_pr_clear,
3155*4882a593Smuzhiyun };
3156*4882a593Smuzhiyun 
3157*4882a593Smuzhiyun static const struct block_device_operations dm_blk_dops = {
3158*4882a593Smuzhiyun 	.submit_bio = dm_submit_bio,
3159*4882a593Smuzhiyun 	.open = dm_blk_open,
3160*4882a593Smuzhiyun 	.release = dm_blk_close,
3161*4882a593Smuzhiyun 	.ioctl = dm_blk_ioctl,
3162*4882a593Smuzhiyun 	.getgeo = dm_blk_getgeo,
3163*4882a593Smuzhiyun 	.report_zones = dm_blk_report_zones,
3164*4882a593Smuzhiyun 	.pr_ops = &dm_pr_ops,
3165*4882a593Smuzhiyun 	.owner = THIS_MODULE
3166*4882a593Smuzhiyun };
3167*4882a593Smuzhiyun 
3168*4882a593Smuzhiyun static const struct block_device_operations dm_rq_blk_dops = {
3169*4882a593Smuzhiyun 	.open = dm_blk_open,
3170*4882a593Smuzhiyun 	.release = dm_blk_close,
3171*4882a593Smuzhiyun 	.ioctl = dm_blk_ioctl,
3172*4882a593Smuzhiyun 	.getgeo = dm_blk_getgeo,
3173*4882a593Smuzhiyun 	.pr_ops = &dm_pr_ops,
3174*4882a593Smuzhiyun 	.owner = THIS_MODULE
3175*4882a593Smuzhiyun };
3176*4882a593Smuzhiyun 
3177*4882a593Smuzhiyun static const struct dax_operations dm_dax_ops = {
3178*4882a593Smuzhiyun 	.direct_access = dm_dax_direct_access,
3179*4882a593Smuzhiyun 	.dax_supported = dm_dax_supported,
3180*4882a593Smuzhiyun 	.copy_from_iter = dm_dax_copy_from_iter,
3181*4882a593Smuzhiyun 	.copy_to_iter = dm_dax_copy_to_iter,
3182*4882a593Smuzhiyun 	.zero_page_range = dm_dax_zero_page_range,
3183*4882a593Smuzhiyun };
3184*4882a593Smuzhiyun 
3185*4882a593Smuzhiyun /*
3186*4882a593Smuzhiyun  * module hooks
3187*4882a593Smuzhiyun  */
3188*4882a593Smuzhiyun module_init(dm_init);
3189*4882a593Smuzhiyun module_exit(dm_exit);
3190*4882a593Smuzhiyun 
3191*4882a593Smuzhiyun module_param(major, uint, 0);
3192*4882a593Smuzhiyun MODULE_PARM_DESC(major, "The major number of the device mapper");
3193*4882a593Smuzhiyun 
3194*4882a593Smuzhiyun module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
3195*4882a593Smuzhiyun MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3196*4882a593Smuzhiyun 
3197*4882a593Smuzhiyun module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
3198*4882a593Smuzhiyun MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
3199*4882a593Smuzhiyun 
3200*4882a593Smuzhiyun module_param(swap_bios, int, S_IRUGO | S_IWUSR);
3201*4882a593Smuzhiyun MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
3202*4882a593Smuzhiyun 
3203*4882a593Smuzhiyun MODULE_DESCRIPTION(DM_NAME " driver");
3204*4882a593Smuzhiyun MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3205*4882a593Smuzhiyun MODULE_LICENSE("GPL");
3206