// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Device Mapper Uevent Support (dm-uevent)
 *
 * Copyright IBM Corporation, 2007
 *	Author: Mike Anderson <andmike@linux.vnet.ibm.com>
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/kobject.h>
#include <linux/dm-ioctl.h>
#include <linux/export.h>

#include "dm.h"
#include "dm-uevent.h"

#define DM_MSG_PREFIX "uevent"

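/*
 * Map each dm_uevent_type to the kobject action that is emitted and to
 * the string reported to user space in the DM_ACTION environment
 * variable.  Both path events are sent as "change" uevents.
 */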
static const struct {
	enum dm_uevent_type type;
	enum kobject_action action;
	char *name;
} _dm_uevent_type_names[] = {
	{DM_UEVENT_PATH_FAILED, KOBJ_CHANGE, "PATH_FAILED"},
	{DM_UEVENT_PATH_REINSTATED, KOBJ_CHANGE, "PATH_REINSTATED"},
};

static struct kmem_cache *_dm_event_cache;

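/*
 * A queued uevent.  The environment (@ku_env) is filled in two stages:
 * target and path details when the event is built, and the device
 * name/uuid in dm_send_uevents() just before the event is emitted.
 * Events are chained on @elist until dm core flushes the list.
 */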
struct dm_uevent {
	struct mapped_device *md;
	enum kobject_action action;
	struct kobj_uevent_env ku_env;
	struct list_head elist;
	char name[DM_NAME_LEN];
	char uuid[DM_UUID_LEN];
};

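/* Return an event to the slab cache. */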
static void dm_uevent_free(struct dm_uevent *event)
{
	kmem_cache_free(_dm_event_cache, event);
}

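/*
 * Allocate a zeroed event.  GFP_ATOMIC is used because callers may be
 * in contexts that cannot sleep, e.g. a path failure reported from a
 * request completion path.
 */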
static struct dm_uevent *dm_uevent_alloc(struct mapped_device *md)
{
	struct dm_uevent *event;

	event = kmem_cache_zalloc(_dm_event_cache, GFP_ATOMIC);
	if (!event)
		return NULL;

	INIT_LIST_HEAD(&event->elist);
	event->md = md;

	return event;
}

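/*
 * Build a path event and populate its environment with DM_TARGET,
 * DM_ACTION, DM_SEQNUM, DM_PATH and DM_NR_VALID_PATHS.  Returns the
 * event or ERR_PTR(-ENOMEM) on any failure.
 */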
static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
					      struct dm_target *ti,
					      enum kobject_action action,
					      const char *dm_action,
					      const char *path,
					      unsigned nr_valid_paths)
{
	struct dm_uevent *event;

	event = dm_uevent_alloc(md);
	if (!event) {
		DMERR("%s: dm_uevent_alloc() failed", __func__);
		goto err_nomem;
	}

	event->action = action;

	if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) {
		DMERR("%s: add_uevent_var() for DM_TARGET failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) {
		DMERR("%s: add_uevent_var() for DM_ACTION failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u",
			   dm_next_uevent_seq(md))) {
		DMERR("%s: add_uevent_var() for DM_SEQNUM failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) {
		DMERR("%s: add_uevent_var() for DM_PATH failed", __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%d",
			   nr_valid_paths)) {
		DMERR("%s: add_uevent_var() for DM_NR_VALID_PATHS failed",
		      __func__);
		goto err_add;
	}

	return event;

err_add:
	dm_uevent_free(event);
err_nomem:
	return ERR_PTR(-ENOMEM);
}

/**
 * dm_send_uevents - send uevents for the given list
 *
 * @events:	list of events to send
 * @kobj:	kobject generating the events
 */
void dm_send_uevents(struct list_head *events, struct kobject *kobj)
{
	int r;
	struct dm_uevent *event, *next;

	list_for_each_entry_safe(event, next, events, elist) {
		list_del_init(&event->elist);

		/*
		 * When a device is being removed this copy fails and we
		 * discard these unsent events.
		 */
		if (dm_copy_name_and_uuid(event->md, event->name,
					  event->uuid)) {
			DMINFO("%s: skipping sending uevent for lost device",
			       __func__);
			goto uevent_free;
		}

		if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) {
			DMERR("%s: add_uevent_var() for DM_NAME failed",
			      __func__);
			goto uevent_free;
		}

		if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) {
			DMERR("%s: add_uevent_var() for DM_UUID failed",
			      __func__);
			goto uevent_free;
		}

		r = kobject_uevent_env(kobj, event->action, event->ku_env.envp);
		if (r)
			DMERR("%s: kobject_uevent_env failed", __func__);
uevent_free:
		dm_uevent_free(event);
	}
}
EXPORT_SYMBOL_GPL(dm_send_uevents);
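
/*
 * Illustrative caller sketch (not part of this file): dm core queues
 * events with dm_uevent_add() and later drains the per-device list
 * against the gendisk's kobject, so the "change" uevents appear on the
 * dm block device:
 *
 *	LIST_HEAD(uevents);
 *
 *	list_splice_init(&md->uevent_list, &uevents);
 *	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
 */
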
/**
 * dm_path_uevent - create a new path event and queue it
 *
 * @event_type:		path event type enum
 * @ti:			pointer to a dm_target
 * @path:		string containing pathname
 * @nr_valid_paths:	number of valid paths remaining
 */
void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
		    const char *path, unsigned nr_valid_paths)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	struct dm_uevent *event;

	if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) {
		DMERR("%s: Invalid event_type %d", __func__, event_type);
		return;
	}

	event = dm_build_path_uevent(md, ti,
				     _dm_uevent_type_names[event_type].action,
				     _dm_uevent_type_names[event_type].name,
				     path, nr_valid_paths);
	if (IS_ERR(event))
		return;

	dm_uevent_add(md, &event->elist);
}
EXPORT_SYMBOL_GPL(dm_path_uevent);
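
/*
 * Illustrative caller sketch (not part of this file): the multipath
 * target reports path state changes, e.g. on a path failure:
 *
 *	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
 *		       pgpath->path.dev->name, nr_valid_paths);
 *
 * where m and pgpath stand in for multipath's own structures and are
 * assumed here purely for illustration.
 */
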
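/*
 * Create the slab cache for events.  Called once by dm core during
 * initialisation; dm_uevent_exit() is its counterpart.
 */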
int dm_uevent_init(void)
{
	_dm_event_cache = KMEM_CACHE(dm_uevent, 0);
	if (!_dm_event_cache)
		return -ENOMEM;

	DMINFO("version 1.0.3");

	return 0;
}

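/* Destroy the slab cache when dm core shuts down. */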
void dm_uevent_exit(void)
{
	kmem_cache_destroy(_dm_event_cache);
}
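
/*
 * Illustrative user-space sketch (not part of this file; everything
 * below is an assumption for demonstration): the uevents sent above can
 * be observed by listening on a NETLINK_KOBJECT_UEVENT socket, much as
 * udev does.  A minimal listener:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <linux/netlink.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_nl snl = {
 *			.nl_family = AF_NETLINK,
 *			.nl_groups = 1,		// kernel uevent multicast group
 *		};
 *		char buf[4096];
 *		int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);
 *
 *		if (fd < 0 || bind(fd, (struct sockaddr *)&snl, sizeof(snl)))
 *			return 1;
 *		for (;;) {
 *			ssize_t n = recv(fd, buf, sizeof(buf) - 1, 0);
 *			if (n <= 0)
 *				break;
 *			buf[n] = '\0';
 *			// Payload is "ACTION@devpath" followed by
 *			// NUL-separated KEY=value pairs (DM_ACTION,
 *			// DM_PATH, ...).
 *			for (char *p = buf; p < buf + n; p += strlen(p) + 1)
 *				puts(p);
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */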