/*
 * Copyright (C) 2001-2003 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "linear"

/*
 * Linear: maps a linear range of a device.
 */
struct linear_c {
	struct dm_dev *dev;
	sector_t start;
};

/*
 * Construct a linear mapping: <dev_path> <offset>
 */
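/*
 * Example (illustrative only): the table line
 *	0 2097152 linear /dev/sdb 1024
 * presents sectors 1024..2098175 of /dev/sdb as sectors 0..2097151 of the
 * mapped device.
 */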
static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct linear_c *lc;
	unsigned long long tmp;
	char dummy;
	int ret;

	if (argc != 2) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	lc = kmalloc(sizeof(*lc), GFP_KERNEL);
	if (lc == NULL) {
		ti->error = "Cannot allocate linear context";
		return -ENOMEM;
	}

	ret = -EINVAL;
	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	lc->start = tmp;

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_secure_erase_bios = 1;
	ti->num_write_same_bios = 1;
	ti->num_write_zeroes_bios = 1;
	ti->private = lc;
	return 0;

      bad:
	kfree(lc);
	return ret;
}

static void linear_dtr(struct dm_target *ti)
{
	struct linear_c *lc = (struct linear_c *) ti->private;

	dm_put_device(ti, lc->dev);
	kfree(lc);
}

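/*
 * dm_target_offset() gives the sector relative to the start of this target,
 * so the underlying sector is simply that offset plus lc->start.
 */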
static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct linear_c *lc = ti->private;

	return lc->start + dm_target_offset(ti, bi_sector);
}

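/*
 * Redirect a bio to the underlying device.  Bios that carry no data and are
 * not zone management requests (e.g. empty flushes) are passed on without
 * their sector being remapped.
 */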
static void linear_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct linear_c *lc = ti->private;

	bio_set_dev(bio, lc->dev->bdev);
	if (bio_sectors(bio) || op_is_zone_mgmt(bio_op(bio)))
		bio->bi_iter.bi_sector =
			linear_map_sector(ti, bio->bi_iter.bi_sector);
}

static int linear_map(struct dm_target *ti, struct bio *bio)
{
	linear_map_bio(ti, bio);

	return DM_MAPIO_REMAPPED;
}

static void linear_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct linear_c *lc = (struct linear_c *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s %llu", lc->dev->name,
				(unsigned long long)lc->start);
		break;
	}
}

static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct linear_c *lc = (struct linear_c *) ti->private;
	struct dm_dev *dev = lc->dev;

	*bdev = dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (lc->start ||
	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static int linear_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct linear_c *lc = ti->private;
	sector_t sector = linear_map_sector(ti, args->next_sector);

	args->start = lc->start;
	return blkdev_report_zones(lc->dev->bdev, sector, nr_zones,
				   dm_report_zones_cb, args);
}
#endif

static int linear_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct linear_c *lc = ti->private;

	return fn(ti, lc->dev, lc->start, ti->len, data);
}

#if IS_ENABLED(CONFIG_DAX_DRIVER)
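/*
 * The DAX callbacks below translate the page offset within the target into a
 * sector on the underlying device, and then into a page offset within that
 * device's DAX mapping, before forwarding the request to its dax_device.
 */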
static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	long ret;
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
	if (ret)
		return ret;
	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}

static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
		return 0;
	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}

static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
		return 0;
	return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}

static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
				      size_t nr_pages)
{
	int ret;
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
	if (ret)
		return ret;
	return dax_zero_page_range(dax_dev, pgoff, nr_pages);
}

#else
#define linear_dax_direct_access NULL
#define linear_dax_copy_from_iter NULL
#define linear_dax_copy_to_iter NULL
#define linear_dax_zero_page_range NULL
#endif

static struct target_type linear_target = {
	.name   = "linear",
	.version = {1, 4, 0},
#ifdef CONFIG_BLK_DEV_ZONED
	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
		    DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO,
	.report_zones = linear_report_zones,
#else
	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
		    DM_TARGET_PASSES_CRYPTO,
#endif
	.module = THIS_MODULE,
	.ctr    = linear_ctr,
	.dtr    = linear_dtr,
	.map    = linear_map,
	.status = linear_status,
	.prepare_ioctl = linear_prepare_ioctl,
	.iterate_devices = linear_iterate_devices,
	.direct_access = linear_dax_direct_access,
	.dax_copy_from_iter = linear_dax_copy_from_iter,
	.dax_copy_to_iter = linear_dax_copy_to_iter,
	.dax_zero_page_range = linear_dax_zero_page_range,
};

int __init dm_linear_init(void)
{
	int r = dm_register_target(&linear_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

void dm_linear_exit(void)
{
	dm_unregister_target(&linear_target);
}