xref: /OK3568_Linux_fs/kernel/drivers/scsi/raid_class.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * raid_class.c - implementation of a simple raid visualisation class
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * This class is designed to allow raid attributes to be visualised and
8*4882a593Smuzhiyun  * manipulated in a form independent of the underlying raid.  Ultimately this
9*4882a593Smuzhiyun  * should work for both hardware and software raids.
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun #include <linux/init.h>
12*4882a593Smuzhiyun #include <linux/module.h>
13*4882a593Smuzhiyun #include <linux/list.h>
14*4882a593Smuzhiyun #include <linux/slab.h>
15*4882a593Smuzhiyun #include <linux/string.h>
16*4882a593Smuzhiyun #include <linux/raid_class.h>
17*4882a593Smuzhiyun #include <scsi/scsi_device.h>
18*4882a593Smuzhiyun #include <scsi/scsi_host.h>
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #define RAID_NUM_ATTRS	3
21*4882a593Smuzhiyun 
/*
 * Per-template bookkeeping: pairs the public raid_template handed to the
 * driver with the driver-supplied function table and the sysfs attribute
 * storage backing it.
 */
struct raid_internal {
	struct raid_template r;
	/* Driver callbacks (cookie, is_raid, get_resync, get_state). */
	struct raid_function_template *f;
	/* The actual attributes */
	struct device_attribute private_attrs[RAID_NUM_ATTRS];
	/* The array of null terminated pointers to attributes
	 * needed by scsi_sysfs.c */
	struct device_attribute *attrs[RAID_NUM_ATTRS + 1];
};
31*4882a593Smuzhiyun 
/*
 * One constituent device of a RAID set: linked into the owning raid_data's
 * component_list and exposed as a child class device named "component-%d".
 */
struct raid_component {
	struct list_head node;	/* entry in raid_data::component_list */
	struct device dev;	/* child class device under the raid device */
	int num;		/* index taken from raid_data::component_count */
};
37*4882a593Smuzhiyun 
/* Recover the raid_internal wrapper from its embedded raid_template. */
#define to_raid_internal(tmpl)	container_of(tmpl, struct raid_internal, r)

/* Transport container -> raid_internal, via the embedded raid_template. */
#define tc_to_raid_internal(tcont) ({					\
	struct raid_template *r =					\
		container_of(tcont, struct raid_template, raid_attrs);	\
	to_raid_internal(r);						\
})

/* Attribute container -> raid_internal (ac is embedded in the tc). */
#define ac_to_raid_internal(acont) ({					\
	struct transport_container *tc =				\
		container_of(acont, struct transport_container, ac);	\
	tc_to_raid_internal(tc);					\
})

/* Class device -> raid_internal, via its attribute container. */
#define device_to_raid_internal(dev) ({				\
	struct attribute_container *ac =				\
		attribute_container_classdev_to_container(dev);	\
	ac_to_raid_internal(ac);					\
})
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun 
raid_match(struct attribute_container * cont,struct device * dev)59*4882a593Smuzhiyun static int raid_match(struct attribute_container *cont, struct device *dev)
60*4882a593Smuzhiyun {
61*4882a593Smuzhiyun 	/* We have to look for every subsystem that could house
62*4882a593Smuzhiyun 	 * emulated RAID devices, so start with SCSI */
63*4882a593Smuzhiyun 	struct raid_internal *i = ac_to_raid_internal(cont);
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun 	if (IS_ENABLED(CONFIG_SCSI) && scsi_is_sdev_device(dev)) {
66*4882a593Smuzhiyun 		struct scsi_device *sdev = to_scsi_device(dev);
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun 		if (i->f->cookie != sdev->host->hostt)
69*4882a593Smuzhiyun 			return 0;
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun 		return i->f->is_raid(dev);
72*4882a593Smuzhiyun 	}
73*4882a593Smuzhiyun 	/* FIXME: look at other subsystems too */
74*4882a593Smuzhiyun 	return 0;
75*4882a593Smuzhiyun }
76*4882a593Smuzhiyun 
raid_setup(struct transport_container * tc,struct device * dev,struct device * cdev)77*4882a593Smuzhiyun static int raid_setup(struct transport_container *tc, struct device *dev,
78*4882a593Smuzhiyun 		       struct device *cdev)
79*4882a593Smuzhiyun {
80*4882a593Smuzhiyun 	struct raid_data *rd;
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun 	BUG_ON(dev_get_drvdata(cdev));
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun 	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
85*4882a593Smuzhiyun 	if (!rd)
86*4882a593Smuzhiyun 		return -ENOMEM;
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun 	INIT_LIST_HEAD(&rd->component_list);
89*4882a593Smuzhiyun 	dev_set_drvdata(cdev, rd);
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun 	return 0;
92*4882a593Smuzhiyun }
93*4882a593Smuzhiyun 
raid_remove(struct transport_container * tc,struct device * dev,struct device * cdev)94*4882a593Smuzhiyun static int raid_remove(struct transport_container *tc, struct device *dev,
95*4882a593Smuzhiyun 		       struct device *cdev)
96*4882a593Smuzhiyun {
97*4882a593Smuzhiyun 	struct raid_data *rd = dev_get_drvdata(cdev);
98*4882a593Smuzhiyun 	struct raid_component *rc, *next;
99*4882a593Smuzhiyun 	dev_printk(KERN_ERR, dev, "RAID REMOVE\n");
100*4882a593Smuzhiyun 	dev_set_drvdata(cdev, NULL);
101*4882a593Smuzhiyun 	list_for_each_entry_safe(rc, next, &rd->component_list, node) {
102*4882a593Smuzhiyun 		list_del(&rc->node);
103*4882a593Smuzhiyun 		dev_printk(KERN_ERR, rc->dev.parent, "RAID COMPONENT REMOVE\n");
104*4882a593Smuzhiyun 		device_unregister(&rc->dev);
105*4882a593Smuzhiyun 	}
106*4882a593Smuzhiyun 	dev_printk(KERN_ERR, dev, "RAID REMOVE DONE\n");
107*4882a593Smuzhiyun 	kfree(rd);
108*4882a593Smuzhiyun 	return 0;
109*4882a593Smuzhiyun }
110*4882a593Smuzhiyun 
/*
 * Transport class "raid_devices": raid_setup() runs on class-device
 * creation, raid_remove() on removal; no configure callback is needed.
 */
static DECLARE_TRANSPORT_CLASS(raid_class,
			       "raid_devices",
			       raid_setup,
			       raid_remove,
			       NULL);
116*4882a593Smuzhiyun 
/* Mapping from raid_state enum values to the strings shown in sysfs. */
static const struct {
	enum raid_state	value;
	char		*name;
} raid_states[] = {
	{ RAID_STATE_UNKNOWN, "unknown" },
	{ RAID_STATE_ACTIVE, "active" },
	{ RAID_STATE_DEGRADED, "degraded" },
	{ RAID_STATE_RESYNCING, "resyncing" },
	{ RAID_STATE_OFFLINE, "offline" },
};
127*4882a593Smuzhiyun 
raid_state_name(enum raid_state state)128*4882a593Smuzhiyun static const char *raid_state_name(enum raid_state state)
129*4882a593Smuzhiyun {
130*4882a593Smuzhiyun 	int i;
131*4882a593Smuzhiyun 	char *name = NULL;
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(raid_states); i++) {
134*4882a593Smuzhiyun 		if (raid_states[i].value == state) {
135*4882a593Smuzhiyun 			name = raid_states[i].name;
136*4882a593Smuzhiyun 			break;
137*4882a593Smuzhiyun 		}
138*4882a593Smuzhiyun 	}
139*4882a593Smuzhiyun 	return name;
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun static struct {
143*4882a593Smuzhiyun 	enum raid_level value;
144*4882a593Smuzhiyun 	char *name;
145*4882a593Smuzhiyun } raid_levels[] = {
146*4882a593Smuzhiyun 	{ RAID_LEVEL_UNKNOWN, "unknown" },
147*4882a593Smuzhiyun 	{ RAID_LEVEL_LINEAR, "linear" },
148*4882a593Smuzhiyun 	{ RAID_LEVEL_0, "raid0" },
149*4882a593Smuzhiyun 	{ RAID_LEVEL_1, "raid1" },
150*4882a593Smuzhiyun 	{ RAID_LEVEL_10, "raid10" },
151*4882a593Smuzhiyun 	{ RAID_LEVEL_1E, "raid1e" },
152*4882a593Smuzhiyun 	{ RAID_LEVEL_3, "raid3" },
153*4882a593Smuzhiyun 	{ RAID_LEVEL_4, "raid4" },
154*4882a593Smuzhiyun 	{ RAID_LEVEL_5, "raid5" },
155*4882a593Smuzhiyun 	{ RAID_LEVEL_50, "raid50" },
156*4882a593Smuzhiyun 	{ RAID_LEVEL_6, "raid6" },
157*4882a593Smuzhiyun 	{ RAID_LEVEL_JBOD, "jbod" },
158*4882a593Smuzhiyun };
159*4882a593Smuzhiyun 
raid_level_name(enum raid_level level)160*4882a593Smuzhiyun static const char *raid_level_name(enum raid_level level)
161*4882a593Smuzhiyun {
162*4882a593Smuzhiyun 	int i;
163*4882a593Smuzhiyun 	char *name = NULL;
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(raid_levels); i++) {
166*4882a593Smuzhiyun 		if (raid_levels[i].value == level) {
167*4882a593Smuzhiyun 			name = raid_levels[i].name;
168*4882a593Smuzhiyun 			break;
169*4882a593Smuzhiyun 		}
170*4882a593Smuzhiyun 	}
171*4882a593Smuzhiyun 	return name;
172*4882a593Smuzhiyun }
173*4882a593Smuzhiyun 
/*
 * raid_attr_show_internal - generate a sysfs show routine raid_show_<attr>.
 * It fetches the class device's raid_data, runs <code> (which may refresh
 * the value via the driver callbacks), then prints <var> with format <fmt>.
 * The output buffer is capped at 20 bytes including the newline.
 */
#define raid_attr_show_internal(attr, fmt, var, code)			\
static ssize_t raid_show_##attr(struct device *dev, 			\
				struct device_attribute *attr, 		\
				char *buf)				\
{									\
	struct raid_data *rd = dev_get_drvdata(dev);			\
	code								\
	return snprintf(buf, 20, #fmt "\n", var);			\
}

/*
 * Read-only attribute whose raw enum value is translated to a name string
 * through raid_<states>_name() before being shown.
 */
#define raid_attr_ro_states(attr, states, code)				\
raid_attr_show_internal(attr, %s, name,					\
	const char *name;						\
	code								\
	name = raid_##states##_name(rd->attr);				\
)									\
static DEVICE_ATTR(attr, S_IRUGO, raid_show_##attr, NULL)


/* Read-only attribute shown as a plain decimal integer. */
#define raid_attr_ro_internal(attr, code)				\
raid_attr_show_internal(attr, %d, rd->attr, code)			\
static DEVICE_ATTR(attr, S_IRUGO, raid_show_##attr, NULL)

/*
 * Optional refresh step: ask the driver to update rd-><attr> via its
 * get_<attr>() callback (if provided) before the value is shown.
 */
#define ATTR_CODE(attr)							\
	struct raid_internal *i = device_to_raid_internal(dev);		\
	if (i->f->get_##attr)						\
		i->f->get_##attr(dev->parent);

#define raid_attr_ro(attr)	raid_attr_ro_internal(attr, )
#define raid_attr_ro_fn(attr)	raid_attr_ro_internal(attr, ATTR_CODE(attr))
#define raid_attr_ro_state(attr)	raid_attr_ro_states(attr, attr, )
#define raid_attr_ro_state_fn(attr)	raid_attr_ro_states(attr, attr, ATTR_CODE(attr))


/* The three sysfs attributes exported for every RAID device. */
raid_attr_ro_state(level);
raid_attr_ro_fn(resync);
raid_attr_ro_state_fn(state);
211*4882a593Smuzhiyun 
/*
 * raid_component_release() - device release callback for a raid component.
 * Runs when the last reference to rc->dev is dropped: it releases the
 * reference taken on the parent in raid_component_add() and frees the
 * raid_component itself.
 */
static void raid_component_release(struct device *dev)
{
	struct raid_component *rc =
		container_of(dev, struct raid_component, dev);
	dev_printk(KERN_ERR, rc->dev.parent, "COMPONENT RELEASE\n");
	put_device(rc->dev.parent);
	kfree(rc);
}
220*4882a593Smuzhiyun 
raid_component_add(struct raid_template * r,struct device * raid_dev,struct device * component_dev)221*4882a593Smuzhiyun int raid_component_add(struct raid_template *r,struct device *raid_dev,
222*4882a593Smuzhiyun 		       struct device *component_dev)
223*4882a593Smuzhiyun {
224*4882a593Smuzhiyun 	struct device *cdev =
225*4882a593Smuzhiyun 		attribute_container_find_class_device(&r->raid_attrs.ac,
226*4882a593Smuzhiyun 						      raid_dev);
227*4882a593Smuzhiyun 	struct raid_component *rc;
228*4882a593Smuzhiyun 	struct raid_data *rd = dev_get_drvdata(cdev);
229*4882a593Smuzhiyun 	int err;
230*4882a593Smuzhiyun 
231*4882a593Smuzhiyun 	rc = kzalloc(sizeof(*rc), GFP_KERNEL);
232*4882a593Smuzhiyun 	if (!rc)
233*4882a593Smuzhiyun 		return -ENOMEM;
234*4882a593Smuzhiyun 
235*4882a593Smuzhiyun 	INIT_LIST_HEAD(&rc->node);
236*4882a593Smuzhiyun 	device_initialize(&rc->dev);
237*4882a593Smuzhiyun 	rc->dev.release = raid_component_release;
238*4882a593Smuzhiyun 	rc->dev.parent = get_device(component_dev);
239*4882a593Smuzhiyun 	rc->num = rd->component_count++;
240*4882a593Smuzhiyun 
241*4882a593Smuzhiyun 	dev_set_name(&rc->dev, "component-%d", rc->num);
242*4882a593Smuzhiyun 	list_add_tail(&rc->node, &rd->component_list);
243*4882a593Smuzhiyun 	rc->dev.class = &raid_class.class;
244*4882a593Smuzhiyun 	err = device_add(&rc->dev);
245*4882a593Smuzhiyun 	if (err)
246*4882a593Smuzhiyun 		goto err_out;
247*4882a593Smuzhiyun 
248*4882a593Smuzhiyun 	return 0;
249*4882a593Smuzhiyun 
250*4882a593Smuzhiyun err_out:
251*4882a593Smuzhiyun 	list_del(&rc->node);
252*4882a593Smuzhiyun 	rd->component_count--;
253*4882a593Smuzhiyun 	put_device(component_dev);
254*4882a593Smuzhiyun 	kfree(rc);
255*4882a593Smuzhiyun 	return err;
256*4882a593Smuzhiyun }
257*4882a593Smuzhiyun EXPORT_SYMBOL(raid_component_add);
258*4882a593Smuzhiyun 
259*4882a593Smuzhiyun struct raid_template *
raid_class_attach(struct raid_function_template * ft)260*4882a593Smuzhiyun raid_class_attach(struct raid_function_template *ft)
261*4882a593Smuzhiyun {
262*4882a593Smuzhiyun 	struct raid_internal *i = kzalloc(sizeof(struct raid_internal),
263*4882a593Smuzhiyun 					  GFP_KERNEL);
264*4882a593Smuzhiyun 	int count = 0;
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 	if (unlikely(!i))
267*4882a593Smuzhiyun 		return NULL;
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	i->f = ft;
270*4882a593Smuzhiyun 
271*4882a593Smuzhiyun 	i->r.raid_attrs.ac.class = &raid_class.class;
272*4882a593Smuzhiyun 	i->r.raid_attrs.ac.match = raid_match;
273*4882a593Smuzhiyun 	i->r.raid_attrs.ac.attrs = &i->attrs[0];
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun 	attribute_container_register(&i->r.raid_attrs.ac);
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun 	i->attrs[count++] = &dev_attr_level;
278*4882a593Smuzhiyun 	i->attrs[count++] = &dev_attr_resync;
279*4882a593Smuzhiyun 	i->attrs[count++] = &dev_attr_state;
280*4882a593Smuzhiyun 
281*4882a593Smuzhiyun 	i->attrs[count] = NULL;
282*4882a593Smuzhiyun 	BUG_ON(count > RAID_NUM_ATTRS);
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun 	return &i->r;
285*4882a593Smuzhiyun }
286*4882a593Smuzhiyun EXPORT_SYMBOL(raid_class_attach);
287*4882a593Smuzhiyun 
/**
 * raid_class_release - undo raid_class_attach()
 * @r:	template previously returned by raid_class_attach()
 *
 * Unregisters the attribute container and frees the internal wrapper.
 * BUG()s if the container cannot be unregistered (class devices still
 * attached).
 */
void
raid_class_release(struct raid_template *r)
{
	struct raid_internal *i = to_raid_internal(r);

	BUG_ON(attribute_container_unregister(&i->r.raid_attrs.ac));

	kfree(i);
}
EXPORT_SYMBOL(raid_class_release);
298*4882a593Smuzhiyun 
/* Module init: register the "raid_devices" transport class. */
static __init int raid_init(void)
{
	return transport_class_register(&raid_class);
}
303*4882a593Smuzhiyun 
/* Module exit: unregister the transport class registered in raid_init(). */
static __exit void raid_exit(void)
{
	transport_class_unregister(&raid_class);
}
308*4882a593Smuzhiyun 
309*4882a593Smuzhiyun MODULE_AUTHOR("James Bottomley");
310*4882a593Smuzhiyun MODULE_DESCRIPTION("RAID device class");
311*4882a593Smuzhiyun MODULE_LICENSE("GPL");
312*4882a593Smuzhiyun 
313*4882a593Smuzhiyun module_init(raid_init);
314*4882a593Smuzhiyun module_exit(raid_exit);
315*4882a593Smuzhiyun 
316