// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Red Hat, Inc.
 *
 * This is a test "dust" device, which fails reads on specified
 * sectors, emulating the behavior of a hard disk drive sending
 * a "Read Medium Error" sense.
 */

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX "dust"

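/*
 * Each bad block is tracked as an rbtree node keyed by block number
 * (bb).  The tree (badblocklist) and its element count (badblock_count)
 * are protected by dust_lock.
 */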
struct badblock {
	struct rb_node node;
	sector_t bb;
	unsigned char wr_fail_cnt;
};

struct dust_device {
	struct dm_dev *dev;
	struct rb_root badblocklist;
	unsigned long long badblock_count;
	spinlock_t dust_lock;
	unsigned int blksz;
	int sect_per_block_shift;
	unsigned int sect_per_block;
	sector_t start;
	bool fail_read_on_bb:1;
	bool quiet_mode:1;
};

static struct badblock *dust_rb_search(struct rb_root *root, sector_t blk)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct badblock *bblk = rb_entry(node, struct badblock, node);

		if (bblk->bb > blk)
			node = node->rb_left;
		else if (bblk->bb < blk)
			node = node->rb_right;
		else
			return bblk;
	}

	return NULL;
}

static bool dust_rb_insert(struct rb_root *root, struct badblock *new)
{
	struct badblock *bblk;
	struct rb_node **link = &root->rb_node, *parent = NULL;
	sector_t value = new->bb;

	while (*link) {
		parent = *link;
		bblk = rb_entry(parent, struct badblock, node);

		if (bblk->bb > value)
			link = &(*link)->rb_left;
		else if (bblk->bb < value)
			link = &(*link)->rb_right;
		else
			return false;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, root);

	return true;
}

static int dust_remove_block(struct dust_device *dd, unsigned long long block)
{
	struct badblock *bblock;
	unsigned long flags;

	spin_lock_irqsave(&dd->dust_lock, flags);
	bblock = dust_rb_search(&dd->badblocklist, block);

	if (bblock == NULL) {
		if (!dd->quiet_mode) {
			DMERR("%s: block %llu not found in badblocklist",
			      __func__, block);
		}
		spin_unlock_irqrestore(&dd->dust_lock, flags);
		return -EINVAL;
	}

	rb_erase(&bblock->node, &dd->badblocklist);
	dd->badblock_count--;
	if (!dd->quiet_mode)
		DMINFO("%s: badblock removed at block %llu", __func__, block);
	kfree(bblock);
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	return 0;
}

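/*
 * Add a block to the badblock list.  A nonzero wr_fail_cnt is the
 * number of writes to this block that will fail before a write
 * finally succeeds and removes the block (see __dust_map_write()).
 */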
static int dust_add_block(struct dust_device *dd, unsigned long long block,
			  unsigned char wr_fail_cnt)
{
	struct badblock *bblock;
	unsigned long flags;

	bblock = kmalloc(sizeof(*bblock), GFP_KERNEL);
	if (bblock == NULL) {
		if (!dd->quiet_mode)
			DMERR("%s: badblock allocation failed", __func__);
		return -ENOMEM;
	}

	spin_lock_irqsave(&dd->dust_lock, flags);
	bblock->bb = block;
	bblock->wr_fail_cnt = wr_fail_cnt;
	if (!dust_rb_insert(&dd->badblocklist, bblock)) {
		if (!dd->quiet_mode) {
			DMERR("%s: block %llu already in badblocklist",
			      __func__, block);
		}
		spin_unlock_irqrestore(&dd->dust_lock, flags);
		kfree(bblock);
		return -EINVAL;
	}

	dd->badblock_count++;
	if (!dd->quiet_mode) {
		DMINFO("%s: badblock added at block %llu with write fail count %hhu",
		       __func__, block, wr_fail_cnt);
	}
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	return 0;
}

static int dust_query_block(struct dust_device *dd, unsigned long long block, char *result,
			    unsigned int maxlen, unsigned int *sz_ptr)
{
	struct badblock *bblock;
	unsigned long flags;
	unsigned int sz = *sz_ptr;

	spin_lock_irqsave(&dd->dust_lock, flags);
	bblock = dust_rb_search(&dd->badblocklist, block);
	if (bblock != NULL)
		DMEMIT("%s: block %llu found in badblocklist", __func__, block);
	else
		DMEMIT("%s: block %llu not found in badblocklist", __func__, block);
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	return 1;
}

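/*
 * Fail a read to a block on the badblock list with DM_MAPIO_KILL.
 * Caller must hold dust_lock.
 */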
static int __dust_map_read(struct dust_device *dd, sector_t thisblock)
{
	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);

	if (bblk)
		return DM_MAPIO_KILL;

	return DM_MAPIO_REMAPPED;
}

static int dust_map_read(struct dust_device *dd, sector_t thisblock,
			 bool fail_read_on_bb)
{
	unsigned long flags;
	int r = DM_MAPIO_REMAPPED;

	if (fail_read_on_bb) {
		thisblock >>= dd->sect_per_block_shift;
		spin_lock_irqsave(&dd->dust_lock, flags);
		r = __dust_map_read(dd, thisblock);
		spin_unlock_irqrestore(&dd->dust_lock, flags);
	}

	return r;
}

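/*
 * Handle a write to a block on the badblock list.  While wr_fail_cnt
 * is nonzero the write fails and the counter is decremented; once it
 * reaches zero, the next write succeeds and drops the block from the
 * list, mimicking a drive that remaps a bad sector when written.
 * Caller must hold dust_lock.
 */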
static int __dust_map_write(struct dust_device *dd, sector_t thisblock)
{
	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);

	if (bblk && bblk->wr_fail_cnt > 0) {
		bblk->wr_fail_cnt--;
		return DM_MAPIO_KILL;
	}

	if (bblk) {
		rb_erase(&bblk->node, &dd->badblocklist);
		dd->badblock_count--;
		kfree(bblk);
		if (!dd->quiet_mode) {
			sector_div(thisblock, dd->sect_per_block);
			DMINFO("block %llu removed from badblocklist by write",
			       (unsigned long long)thisblock);
		}
	}

	return DM_MAPIO_REMAPPED;
}

static int dust_map_write(struct dust_device *dd, sector_t thisblock,
			  bool fail_read_on_bb)
{
	unsigned long flags;
	int r = DM_MAPIO_REMAPPED;

	if (fail_read_on_bb) {
		thisblock >>= dd->sect_per_block_shift;
		spin_lock_irqsave(&dd->dust_lock, flags);
		r = __dust_map_write(dd, thisblock);
		spin_unlock_irqrestore(&dd->dust_lock, flags);
	}

	return r;
}

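/*
 * Remap the bio to the underlying device, then apply the badblock
 * policy.  Both the read and write paths are gated on fail_read_on_bb,
 * i.e. on the "enable" message.
 */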
static int dust_map(struct dm_target *ti, struct bio *bio)
{
	struct dust_device *dd = ti->private;
	int r;

	bio_set_dev(bio, dd->dev->bdev);
	bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);

	if (bio_data_dir(bio) == READ)
		r = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
	else
		r = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);

	return r;
}

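/*
 * Free every node in @tree.  Called without dust_lock held: either on
 * a tree already detached from the live device (dust_clear_badblocks())
 * or during target destruction (dust_dtr()).
 */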
static bool __dust_clear_badblocks(struct rb_root *tree,
				   unsigned long long count)
{
	struct rb_node *node = NULL, *nnode = NULL;

	nnode = rb_first(tree);
	if (nnode == NULL) {
		BUG_ON(count != 0);
		return false;
	}

	while (nnode) {
		node = nnode;
		nnode = rb_next(node);
		rb_erase(node, tree);
		count--;
		kfree(node);
	}
	BUG_ON(count != 0);
	BUG_ON(tree->rb_node != NULL);

	return true;
}

static int dust_clear_badblocks(struct dust_device *dd, char *result, unsigned int maxlen,
				unsigned int *sz_ptr)
{
	unsigned long flags;
	struct rb_root badblocklist;
	unsigned long long badblock_count;
	unsigned int sz = *sz_ptr;

	spin_lock_irqsave(&dd->dust_lock, flags);
	badblocklist = dd->badblocklist;
	badblock_count = dd->badblock_count;
	dd->badblocklist = RB_ROOT;
	dd->badblock_count = 0;
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	if (!__dust_clear_badblocks(&badblocklist, badblock_count))
		DMEMIT("%s: no badblocks found", __func__);
	else
		DMEMIT("%s: badblocks cleared", __func__);

	return 1;
}

static int dust_list_badblocks(struct dust_device *dd, char *result, unsigned int maxlen,
				unsigned int *sz_ptr)
{
	unsigned long flags;
	struct rb_root badblocklist;
	struct rb_node *node;
	struct badblock *bblk;
	unsigned int sz = *sz_ptr;
	unsigned long long num = 0;

	spin_lock_irqsave(&dd->dust_lock, flags);
	badblocklist = dd->badblocklist;
	for (node = rb_first(&badblocklist); node; node = rb_next(node)) {
		bblk = rb_entry(node, struct badblock, node);
		DMEMIT("%llu\n", bblk->bb);
		num++;
	}

	spin_unlock_irqrestore(&dd->dust_lock, flags);
	if (!num)
		DMEMIT("No blocks in badblocklist");

	return 1;
}

/*
 * Target parameters:
 *
 * <device_path> <offset> <blksz>
 *
 * device_path: path to the block device
 * offset: offset to data area from start of device_path
 * blksz: block size (minimum 512, maximum 1073741824, must be a power of 2)
 */
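/*
 * Example table line (illustrative; the device path and length below
 * are hypothetical):
 *
 *   dmsetup create dust1 --table '0 33552384 dust /dev/vdb1 0 512'
 */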
static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dust_device *dd;
	unsigned long long tmp;
	char dummy;
	unsigned int blksz;
	unsigned int sect_per_block;
	sector_t DUST_MAX_BLKSZ_SECTORS = 2097152; /* 1 GiB in 512-byte sectors */
	sector_t max_block_sectors = min(ti->len, DUST_MAX_BLKSZ_SECTORS);

	if (argc != 3) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	if (kstrtouint(argv[2], 10, &blksz) || !blksz) {
		ti->error = "Invalid block size parameter";
		return -EINVAL;
	}

	if (blksz < 512) {
		ti->error = "Block size must be at least 512";
		return -EINVAL;
	}

	if (!is_power_of_2(blksz)) {
		ti->error = "Block size must be a power of 2";
		return -EINVAL;
	}

	if (to_sector(blksz) > max_block_sectors) {
		ti->error = "Block size is too large";
		return -EINVAL;
	}

	sect_per_block = (blksz >> SECTOR_SHIFT);

	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
		ti->error = "Invalid device offset sector";
		return -EINVAL;
	}

	dd = kzalloc(sizeof(struct dust_device), GFP_KERNEL);
	if (dd == NULL) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dd->dev)) {
		ti->error = "Device lookup failed";
		kfree(dd);
		return -EINVAL;
	}

	dd->sect_per_block = sect_per_block;
	dd->blksz = blksz;
	dd->start = tmp;

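	/*
	 * blksz is a power of two, so sect_per_block is as well; __ffs()
	 * gives log2(sect_per_block), letting the map paths convert a
	 * sector to a block with a shift instead of a division.
	 */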
	dd->sect_per_block_shift = __ffs(sect_per_block);

	/*
	 * Whether to fail a read on a "bad" block.
	 * Defaults to false; enabled later by message.
	 */
	dd->fail_read_on_bb = false;

	/*
	 * Initialize bad block list rbtree.
	 */
	dd->badblocklist = RB_ROOT;
	dd->badblock_count = 0;
	spin_lock_init(&dd->dust_lock);

	dd->quiet_mode = false;

	BUG_ON(dm_set_target_max_io_len(ti, dd->sect_per_block) != 0);

	ti->num_discard_bios = 1;
	ti->num_flush_bios = 1;
	ti->private = dd;

	return 0;
}

static void dust_dtr(struct dm_target *ti)
{
	struct dust_device *dd = ti->private;

	__dust_clear_badblocks(&dd->badblocklist, dd->badblock_count);
	dm_put_device(ti, dd->dev);
	kfree(dd);
}

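/*
 * Runtime control, via "dmsetup message <device> 0 <message> [args]":
 *
 *   enable / disable       - turn read failures on listed blocks on or off
 *   addbadblock <blk> [n]  - add block <blk>, optionally failing <n> writes
 *   removebadblock <blk>   - remove block <blk> from the badblock list
 *   queryblock <blk>       - report whether <blk> is in the badblock list
 *   countbadblocks         - report the number of blocks in the list
 *   clearbadblocks         - empty the badblock list
 *   listbadblocks          - print every block in the list
 *   quiet                  - toggle quiet mode on or off
 */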
static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
			char *result, unsigned int maxlen)
{
	struct dust_device *dd = ti->private;
	sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
	bool invalid_msg = false;
	int r = -EINVAL;
	unsigned long long tmp, block;
	unsigned char wr_fail_cnt;
	unsigned int tmp_ui;
	unsigned long flags;
	unsigned int sz = 0;
	char dummy;

	if (argc == 1) {
		if (!strcasecmp(argv[0], "addbadblock") ||
		    !strcasecmp(argv[0], "removebadblock") ||
		    !strcasecmp(argv[0], "queryblock")) {
			DMERR("%s requires an additional argument", argv[0]);
		} else if (!strcasecmp(argv[0], "disable")) {
			DMINFO("disabling read failures on bad sectors");
			dd->fail_read_on_bb = false;
			r = 0;
		} else if (!strcasecmp(argv[0], "enable")) {
			DMINFO("enabling read failures on bad sectors");
			dd->fail_read_on_bb = true;
			r = 0;
		} else if (!strcasecmp(argv[0], "countbadblocks")) {
			spin_lock_irqsave(&dd->dust_lock, flags);
			DMEMIT("countbadblocks: %llu badblock(s) found",
			       dd->badblock_count);
			spin_unlock_irqrestore(&dd->dust_lock, flags);
			r = 1;
		} else if (!strcasecmp(argv[0], "clearbadblocks")) {
			r = dust_clear_badblocks(dd, result, maxlen, &sz);
		} else if (!strcasecmp(argv[0], "quiet")) {
			dd->quiet_mode = !dd->quiet_mode;
			r = 0;
		} else if (!strcasecmp(argv[0], "listbadblocks")) {
			r = dust_list_badblocks(dd, result, maxlen, &sz);
		} else {
			invalid_msg = true;
		}
	} else if (argc == 2) {
		if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
			return r;

		block = tmp;
		sector_div(size, dd->sect_per_block);
		if (block > size) {
			DMERR("selected block value out of range");
			return r;
		}

		if (!strcasecmp(argv[0], "addbadblock"))
			r = dust_add_block(dd, block, 0);
		else if (!strcasecmp(argv[0], "removebadblock"))
			r = dust_remove_block(dd, block);
		else if (!strcasecmp(argv[0], "queryblock"))
			r = dust_query_block(dd, block, result, maxlen, &sz);
		else
			invalid_msg = true;

	} else if (argc == 3) {
		if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
			return r;

		if (sscanf(argv[2], "%u%c", &tmp_ui, &dummy) != 1)
			return r;

		block = tmp;
		if (tmp_ui > 255) {
			DMERR("selected write fail count out of range");
			return r;
		}
		wr_fail_cnt = tmp_ui;
		sector_div(size, dd->sect_per_block);
		if (block > size) {
			DMERR("selected block value out of range");
			return r;
		}

		if (!strcasecmp(argv[0], "addbadblock"))
			r = dust_add_block(dd, block, wr_fail_cnt);
		else
			invalid_msg = true;

	} else
		DMERR("invalid number of arguments '%d'", argc);

	if (invalid_msg)
		DMERR("unrecognized message '%s' received", argv[0]);

	return r;
}

static void dust_status(struct dm_target *ti, status_type_t type,
			unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct dust_device *dd = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%s %s %s", dd->dev->name,
		       dd->fail_read_on_bb ? "fail_read_on_bad_block" : "bypass",
		       dd->quiet_mode ? "quiet" : "verbose");
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u", dd->dev->name,
		       (unsigned long long)dd->start, dd->blksz);
		break;
	}
}

static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct dust_device *dd = ti->private;
	struct dm_dev *dev = dd->dev;

	*bdev = dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (dd->start ||
	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
		return 1;

	return 0;
}

static int dust_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn,
				void *data)
{
	struct dust_device *dd = ti->private;

	return fn(ti, dd->dev, dd->start, ti->len, data);
}

static struct target_type dust_target = {
	.name = "dust",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = dust_ctr,
	.dtr = dust_dtr,
	.iterate_devices = dust_iterate_devices,
	.map = dust_map,
	.message = dust_message,
	.status = dust_status,
	.prepare_ioctl = dust_prepare_ioctl,
};

static int __init dm_dust_init(void)
{
	int r = dm_register_target(&dust_target);

	if (r < 0)
		DMERR("dm_register_target failed %d", r);

	return r;
}

static void __exit dm_dust_exit(void)
{
	dm_unregister_target(&dust_target);
}

module_init(dm_dust_init);
module_exit(dm_dust_exit);

MODULE_DESCRIPTION(DM_NAME " dust test target");
MODULE_AUTHOR("Bryan Gurney <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");