// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * faulty.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2004 Neil Brown
 *
 * faulty-device-simulator personality for md
 */


/*
 * The "faulty" personality causes some requests to fail.
 *
 * Possible failure modes are:
 *   reads fail "randomly" but succeed on retry
 *   writes fail "randomly" but succeed on retry
 *   reads for some address fail and then persist until a write
 *   reads for some address fail and then persist irrespective of write
 *   writes for some address fail and persist
 *   all writes fail
 *
 * Different modes can be active at the same time, but only one can be
 * set at array creation.  Others can be added later.
 * A mode can be one-shot or recurrent, with the recurrence being
 * once in every N requests.
 * The bottom 5 bits of the "layout" indicate the mode.  The
 * remainder indicate a period, or 0 for one-shot.
 *
 * There is an implementation limit on the number of concurrently
 * persistently-faulty blocks.  When a new fault is requested that would
 * exceed the limit, it is ignored.
 * All current faults can be cleared using a layout of "0".
 *
 * Requests are always sent to the device.  If they are to fail,
 * we clone the bio and insert a new b_end_io into the chain.
 */

#define	WriteTransient	0
#define	ReadTransient	1
#define	WritePersistent	2
#define	ReadPersistent	3
#define	WriteAll	4 /* doesn't go to device */
#define	ReadFixable	5
#define	Modes	6

#define	ClearErrors	31
#define	ClearFaults	30

#define AllPersist	100 /* internal use only */
#define	NoPersist	101

#define	ModeMask	0x1f
#define	ModeShift	5

#define MaxFault	50
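
/*
 * Worked example of the layout encoding (see faulty_reshape() below):
 * the low ModeShift bits select the mode, the remaining bits give the
 * period.  For instance, a layout of ((10 << ModeShift) | ReadTransient)
 * arms ReadTransient with a period of 10, so every 10th read request
 * fails; a layout of just ReadTransient (period 0) is one-shot and fails
 * only the next read.  The numeric values here are purely illustrative.
 */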
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include <linux/seq_file.h>


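/*
 * Completion handler for a clone that was marked to fail: copy the
 * clone's residual size/sector back to the original bio, drop the
 * clone, and complete the original with an I/O error.
 */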
static void faulty_fail(struct bio *bio)
{
	struct bio *b = bio->bi_private;

	b->bi_iter.bi_size = bio->bi_iter.bi_size;
	b->bi_iter.bi_sector = bio->bi_iter.bi_sector;

	bio_put(bio);

	bio_io_error(b);
}

struct faulty_conf {
	int period[Modes];
	atomic_t counters[Modes];
	sector_t faults[MaxFault];
	int	modes[MaxFault];
	int nfaults;
	struct md_rdev *rdev;
};

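/*
 * Decide whether a fault of the given mode fires for this request.
 * The per-mode counter is decremented on every call; when it reaches
 * zero the fault fires and, for periodic modes, the counter is
 * reloaded from the period.  A zero period with an exhausted counter
 * means the mode is inactive (or its one-shot has been consumed).
 */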
static int check_mode(struct faulty_conf *conf, int mode)
{
	if (conf->period[mode] == 0 &&
	    atomic_read(&conf->counters[mode]) <= 0)
		return 0; /* no failure, no decrement */


	if (atomic_dec_and_test(&conf->counters[mode])) {
		if (conf->period[mode])
			atomic_set(&conf->counters[mode], conf->period[mode]);
		return 1;
	}
	return 0;
}

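/*
 * Check the table of persistently-faulty sectors for an entry inside
 * [start, end) that applies to this direction.  A write to a
 * ReadFixable sector "repairs" it by clearing the entry to NoPersist.
 */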
static int check_sector(struct faulty_conf *conf, sector_t start, sector_t end, int dir)
{
	/* If we find a ReadFixable sector, we fix it ... */
	int i;
	for (i=0; i<conf->nfaults; i++)
		if (conf->faults[i] >= start &&
		    conf->faults[i] < end) {
			/* found it ... */
			switch (conf->modes[i] * 2 + dir) {
			case WritePersistent*2+WRITE: return 1;
			case ReadPersistent*2+READ: return 1;
			case ReadFixable*2+READ: return 1;
			case ReadFixable*2+WRITE:
				conf->modes[i] = NoPersist;
				return 0;
			case AllPersist*2+READ:
			case AllPersist*2+WRITE: return 1;
			default:
				return 0;
			}
		}
	return 0;
}

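/*
 * Record a persistent fault at @start.  If the sector is already in
 * the table, a read fault plus a write fault is merged into AllPersist;
 * otherwise the entry goes into a slot previously cleared to NoPersist,
 * or the next unused slot.  Faults beyond MaxFault are silently
 * ignored.
 */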
static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
{
	int i;
	int n = conf->nfaults;
	for (i=0; i<conf->nfaults; i++)
		if (conf->faults[i] == start) {
			switch(mode) {
			case NoPersist: conf->modes[i] = mode; return;
			case WritePersistent:
				if (conf->modes[i] == ReadPersistent ||
				    conf->modes[i] == ReadFixable)
					conf->modes[i] = AllPersist;
				else
					conf->modes[i] = WritePersistent;
				return;
			case ReadPersistent:
				if (conf->modes[i] == WritePersistent)
					conf->modes[i] = AllPersist;
				else
					conf->modes[i] = ReadPersistent;
				return;
			case ReadFixable:
				if (conf->modes[i] == WritePersistent ||
				    conf->modes[i] == ReadPersistent)
					conf->modes[i] = AllPersist;
				else
					conf->modes[i] = ReadFixable;
				return;
			}
		} else if (conf->modes[i] == NoPersist)
			n = i;

	if (n >= MaxFault)
		return;
	conf->faults[n] = start;
	conf->modes[n] = mode;
	if (conf->nfaults == n)
		conf->nfaults = n+1;
}

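/*
 * Main request hook.  Writes fail immediately while WriteAll is armed;
 * otherwise the request is checked against the persistent fault table
 * and the transient/persistent counters, possibly recording new
 * persistent faults.  A request that is to fail is still sent to the
 * device, but through a clone whose completion handler (faulty_fail)
 * errors the original bio.
 */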
static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
{
	struct faulty_conf *conf = mddev->private;
	int failit = 0;

	if (bio_data_dir(bio) == WRITE) {
		/* write request */
		if (atomic_read(&conf->counters[WriteAll])) {
			/* special case - don't decrement, don't submit_bio_noacct,
			 * just fail immediately
			 */
			bio_io_error(bio);
			return true;
		}

		if (check_sector(conf, bio->bi_iter.bi_sector,
				 bio_end_sector(bio), WRITE))
			failit = 1;
		if (check_mode(conf, WritePersistent)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   WritePersistent);
			failit = 1;
		}
		if (check_mode(conf, WriteTransient))
			failit = 1;
	} else {
		/* read request */
		if (check_sector(conf, bio->bi_iter.bi_sector,
				 bio_end_sector(bio), READ))
			failit = 1;
		if (check_mode(conf, ReadTransient))
			failit = 1;
		if (check_mode(conf, ReadPersistent)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   ReadPersistent);
			failit = 1;
		}
		if (check_mode(conf, ReadFixable)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   ReadFixable);
			failit = 1;
		}
	}
	if (failit) {
		struct bio *b = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);

		bio_set_dev(b, conf->rdev->bdev);
		b->bi_private = bio;
		b->bi_end_io = faulty_fail;
		bio = b;
	} else
		bio_set_dev(bio, conf->rdev->bdev);

	submit_bio_noacct(bio);
	return true;
}

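/*
 * Report the active error counters, their periods and the number of
 * recorded persistent faults in the array status output
 * (e.g. /proc/mdstat).
 */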
static void faulty_status(struct seq_file *seq, struct mddev *mddev)
{
	struct faulty_conf *conf = mddev->private;
	int n;

	if ((n=atomic_read(&conf->counters[WriteTransient])) != 0)
		seq_printf(seq, " WriteTransient=%d(%d)",
			   n, conf->period[WriteTransient]);

	if ((n=atomic_read(&conf->counters[ReadTransient])) != 0)
		seq_printf(seq, " ReadTransient=%d(%d)",
			   n, conf->period[ReadTransient]);

	if ((n=atomic_read(&conf->counters[WritePersistent])) != 0)
		seq_printf(seq, " WritePersistent=%d(%d)",
			   n, conf->period[WritePersistent]);

	if ((n=atomic_read(&conf->counters[ReadPersistent])) != 0)
		seq_printf(seq, " ReadPersistent=%d(%d)",
			   n, conf->period[ReadPersistent]);

	if ((n=atomic_read(&conf->counters[ReadFixable])) != 0)
		seq_printf(seq, " ReadFixable=%d(%d)",
			   n, conf->period[ReadFixable]);

	if ((n=atomic_read(&conf->counters[WriteAll])) != 0)
		seq_printf(seq, " WriteAll");

	seq_printf(seq, " nfaults=%d", conf->nfaults);
}


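/*
 * Apply a new "layout": the low ModeShift bits select a mode and the
 * rest give the period.  ClearFaults empties the persistent fault
 * table, ClearErrors resets every counter and period, and any other
 * valid mode arms its counter.  layout/new_layout are reset to -1 so
 * that the same value can be written again later.
 */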
static int faulty_reshape(struct mddev *mddev)
{
	int mode = mddev->new_layout & ModeMask;
	int count = mddev->new_layout >> ModeShift;
	struct faulty_conf *conf = mddev->private;

	if (mddev->new_layout < 0)
		return 0;

	/* new layout */
	if (mode == ClearFaults)
		conf->nfaults = 0;
	else if (mode == ClearErrors) {
		int i;
		for (i=0 ; i < Modes ; i++) {
			conf->period[i] = 0;
			atomic_set(&conf->counters[i], 0);
		}
	} else if (mode < Modes) {
		conf->period[mode] = count;
		if (!count) count++;
		atomic_set(&conf->counters[mode], count);
	} else
		return -EINVAL;
	mddev->new_layout = -1;
	mddev->layout = -1; /* makes sure further changes come through */
	return 0;
}

static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	WARN_ONCE(raid_disks,
		  "%s does not support generic reshape\n", __func__);

	if (sectors == 0)
		return mddev->dev_sectors;

	return sectors;
}

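/*
 * Personality setup: refuse arrays with a bitmap, allocate the
 * per-array state, remember the underlying component device and stack
 * its queue limits, then apply any initial layout via faulty_reshape().
 */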
static int faulty_run(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int i;
	struct faulty_conf *conf;

	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	conf = kmalloc(sizeof(*conf), GFP_KERNEL);
	if (!conf)
		return -ENOMEM;

	for (i=0; i<Modes; i++) {
		atomic_set(&conf->counters[i], 0);
		conf->period[i] = 0;
	}
	conf->nfaults = 0;

	rdev_for_each(rdev, mddev) {
		conf->rdev = rdev;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
	}

	md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
	mddev->private = conf;

	faulty_reshape(mddev);

	return 0;
}

static void faulty_free(struct mddev *mddev, void *priv)
{
	struct faulty_conf *conf = priv;

	kfree(conf);
}

static struct md_personality faulty_personality =
{
	.name		= "faulty",
	.level		= LEVEL_FAULTY,
	.owner		= THIS_MODULE,
	.make_request	= faulty_make_request,
	.run		= faulty_run,
	.free		= faulty_free,
	.status		= faulty_status,
	.check_reshape	= faulty_reshape,
	.size		= faulty_size,
};

static int __init raid_init(void)
{
	return register_md_personality(&faulty_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&faulty_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fault injection personality for MD");
MODULE_ALIAS("md-personality-10"); /* faulty */
MODULE_ALIAS("md-faulty");
MODULE_ALIAS("md-level--5");