xref: /OK3568_Linux_fs/kernel/drivers/base/devcoredump.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright(c) 2014 Intel Mobile Communications GmbH
4*4882a593Smuzhiyun  * Copyright(c) 2015 Intel Deutschland GmbH
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Contact Information:
7*4882a593Smuzhiyun  *  Intel Linux Wireless <ilw@linux.intel.com>
8*4882a593Smuzhiyun  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
9*4882a593Smuzhiyun  *
10*4882a593Smuzhiyun  * Author: Johannes Berg <johannes@sipsolutions.net>
11*4882a593Smuzhiyun  */
12*4882a593Smuzhiyun #include <linux/module.h>
13*4882a593Smuzhiyun #include <linux/device.h>
14*4882a593Smuzhiyun #include <linux/devcoredump.h>
15*4882a593Smuzhiyun #include <linux/list.h>
16*4882a593Smuzhiyun #include <linux/slab.h>
17*4882a593Smuzhiyun #include <linux/fs.h>
18*4882a593Smuzhiyun #include <linux/workqueue.h>
19*4882a593Smuzhiyun 
/* forward declaration; the class itself is defined after its attribute groups */
static struct class devcd_class;

/* global disable flag, for security purposes */
static bool devcd_disabled;

/* if data isn't read by userspace after 5 minutes then delete it */
#define DEVCD_TIMEOUT	(HZ * 60 * 5)
27*4882a593Smuzhiyun 
/*
 * One instance per pending coredump; freed from devcd_dev_release()
 * once the embedded device drops its last reference.
 */
struct devcd_entry {
	struct device devcd_dev;	/* the devcd%d child device */
	void *data;			/* dump payload, owned by this entry */
	size_t datalen;			/* size of @data in bytes */
	/*
	 * Here, mutex is required to serialize the calls to del_wk work between
	 * user/kernel space which happens when devcd is added with device_add()
	 * and that sends uevent to user space. User space reads the uevents,
	 * and calls to devcd_data_write() which try to modify the work which is
	 * not even initialized/queued from devcoredump.
	 *
	 *
	 *
	 *        cpu0(X)                                 cpu1(Y)
	 *
	 *        dev_coredump() uevent sent to user space
	 *        device_add()  ======================> user space process Y reads the
	 *                                              uevents writes to devcd fd
	 *                                              which results into writes to
	 *
	 *                                             devcd_data_write()
	 *                                               mod_delayed_work()
	 *                                                 try_to_grab_pending()
	 *                                                   del_timer()
	 *                                                     debug_assert_init()
	 *       INIT_DELAYED_WORK()
	 *       schedule_delayed_work()
	 *
	 *
	 * Also, mutex alone would not be enough to avoid scheduling of
	 * del_wk work after it gets flushed from a call to devcd_free()
	 * as shown below.
	 *
	 *	disabled_store()
	 *        devcd_free()
	 *          mutex_lock()             devcd_data_write()
	 *          flush_delayed_work()
	 *          mutex_unlock()
	 *                                   mutex_lock()
	 *                                   mod_delayed_work()
	 *                                   mutex_unlock()
	 * So, the delete_work flag is required.
	 */
	struct mutex mutex;
	bool delete_work;		/* true once deletion has been committed */
	struct module *owner;		/* pinned while the dump is alive */
	ssize_t (*read)(char *buffer, loff_t offset, size_t count,
			void *data, size_t datalen);	/* dump read callback */
	void (*free)(void *data);	/* releases @data */
	struct delayed_work del_wk;	/* deletion work (timeout or explicit) */
	struct device *failing_dev;	/* device the dump was created for */
};
80*4882a593Smuzhiyun 
/* map the embedded struct device back to its devcd_entry container */
static struct devcd_entry *dev_to_devcd(struct device *dev)
{
	return container_of(dev, struct devcd_entry, devcd_dev);
}
85*4882a593Smuzhiyun 
/*
 * Final teardown of a dump entry; runs when the last reference to the
 * devcd device is dropped. Frees the payload via the entry's free
 * callback, releases the module pin and the failing-device reference
 * taken in dev_coredumpm(), and frees the entry itself.
 */
static void devcd_dev_release(struct device *dev)
{
	struct devcd_entry *devcd = dev_to_devcd(dev);

	devcd->free(devcd->data);
	module_put(devcd->owner);

	/*
	 * this seems racy, but I don't see a notifier or such on
	 * a struct device to know when it goes away?
	 */
	if (devcd->failing_dev->kobj.sd)
		sysfs_delete_link(&devcd->failing_dev->kobj, &dev->kobj,
				  "devcoredump");

	put_device(devcd->failing_dev);
	kfree(devcd);
}
104*4882a593Smuzhiyun 
devcd_del(struct work_struct * wk)105*4882a593Smuzhiyun static void devcd_del(struct work_struct *wk)
106*4882a593Smuzhiyun {
107*4882a593Smuzhiyun 	struct devcd_entry *devcd;
108*4882a593Smuzhiyun 
109*4882a593Smuzhiyun 	devcd = container_of(wk, struct devcd_entry, del_wk.work);
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun 	device_del(&devcd->devcd_dev);
112*4882a593Smuzhiyun 	put_device(&devcd->devcd_dev);
113*4882a593Smuzhiyun }
114*4882a593Smuzhiyun 
devcd_data_read(struct file * filp,struct kobject * kobj,struct bin_attribute * bin_attr,char * buffer,loff_t offset,size_t count)115*4882a593Smuzhiyun static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
116*4882a593Smuzhiyun 			       struct bin_attribute *bin_attr,
117*4882a593Smuzhiyun 			       char *buffer, loff_t offset, size_t count)
118*4882a593Smuzhiyun {
119*4882a593Smuzhiyun 	struct device *dev = kobj_to_dev(kobj);
120*4882a593Smuzhiyun 	struct devcd_entry *devcd = dev_to_devcd(dev);
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun 	return devcd->read(buffer, offset, count, devcd->data, devcd->datalen);
123*4882a593Smuzhiyun }
124*4882a593Smuzhiyun 
devcd_data_write(struct file * filp,struct kobject * kobj,struct bin_attribute * bin_attr,char * buffer,loff_t offset,size_t count)125*4882a593Smuzhiyun static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
126*4882a593Smuzhiyun 				struct bin_attribute *bin_attr,
127*4882a593Smuzhiyun 				char *buffer, loff_t offset, size_t count)
128*4882a593Smuzhiyun {
129*4882a593Smuzhiyun 	struct device *dev = kobj_to_dev(kobj);
130*4882a593Smuzhiyun 	struct devcd_entry *devcd = dev_to_devcd(dev);
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun 	mutex_lock(&devcd->mutex);
133*4882a593Smuzhiyun 	if (!devcd->delete_work) {
134*4882a593Smuzhiyun 		devcd->delete_work = true;
135*4882a593Smuzhiyun 		mod_delayed_work(system_wq, &devcd->del_wk, 0);
136*4882a593Smuzhiyun 	}
137*4882a593Smuzhiyun 	mutex_unlock(&devcd->mutex);
138*4882a593Smuzhiyun 
139*4882a593Smuzhiyun 	return count;
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun 
/* "data" attribute: reading returns the dump, any write discards it */
static struct bin_attribute devcd_attr_data = {
	.attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, },
	.size = 0,	/* size unknown up front; read callback bounds itself */
	.read = devcd_data_read,
	.write = devcd_data_write,
};

static struct bin_attribute *devcd_dev_bin_attrs[] = {
	&devcd_attr_data, NULL,
};

static const struct attribute_group devcd_dev_group = {
	.bin_attrs = devcd_dev_bin_attrs,
};

/* default attribute groups for every devcd%d device */
static const struct attribute_group *devcd_dev_groups[] = {
	&devcd_dev_group, NULL,
};
160*4882a593Smuzhiyun 
/*
 * class_for_each_device() callback: force deletion of one dump entry.
 * Commits the deletion (so devcd_data_write() won't re-queue del_wk)
 * and waits for the pending/running work to finish, all under the
 * entry mutex; see the ordering diagram in struct devcd_entry.
 */
static int devcd_free(struct device *dev, void *data)
{
	struct devcd_entry *devcd = dev_to_devcd(dev);

	mutex_lock(&devcd->mutex);
	if (!devcd->delete_work)
		devcd->delete_work = true;

	flush_delayed_work(&devcd->del_wk);
	mutex_unlock(&devcd->mutex);
	return 0;
}
173*4882a593Smuzhiyun 
disabled_show(struct class * class,struct class_attribute * attr,char * buf)174*4882a593Smuzhiyun static ssize_t disabled_show(struct class *class, struct class_attribute *attr,
175*4882a593Smuzhiyun 			     char *buf)
176*4882a593Smuzhiyun {
177*4882a593Smuzhiyun 	return sysfs_emit(buf, "%d\n", devcd_disabled);
178*4882a593Smuzhiyun }
179*4882a593Smuzhiyun 
/*
 *
 *	disabled_store()                                	worker()
 *	 class_for_each_device(&devcd_class,
 *		NULL, NULL, devcd_free)
 *         ...
 *         ...
 *	   while ((dev = class_dev_iter_next(&iter))
 *                                                             devcd_del()
 *                                                               device_del()
 *                                                                 put_device() <- last reference
 *             error = fn(dev, data)                           devcd_dev_release()
 *             devcd_free(dev, data)                           kfree(devcd)
 *             mutex_lock(&devcd->mutex);
 *
 *
 * In the above diagram, it looks like disabled_store() could race with a
 * concurrently running devcd_del() and trigger a memory abort while acquiring
 * devcd->mutex, since the mutex would be taken after devcd's memory has been
 * kfree'd once its last reference was dropped via put_device(). However, this
 * cannot happen: fn(dev, data) runs with its own reference to the device (via
 * klist_node), so the put_device() above is not the last reference and the
 * entry stays alive for the duration of the iteration.
 */
203*4882a593Smuzhiyun 
disabled_store(struct class * class,struct class_attribute * attr,const char * buf,size_t count)204*4882a593Smuzhiyun static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
205*4882a593Smuzhiyun 			      const char *buf, size_t count)
206*4882a593Smuzhiyun {
207*4882a593Smuzhiyun 	long tmp = simple_strtol(buf, NULL, 10);
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun 	/*
210*4882a593Smuzhiyun 	 * This essentially makes the attribute write-once, since you can't
211*4882a593Smuzhiyun 	 * go back to not having it disabled. This is intentional, it serves
212*4882a593Smuzhiyun 	 * as a system lockdown feature.
213*4882a593Smuzhiyun 	 */
214*4882a593Smuzhiyun 	if (tmp != 1)
215*4882a593Smuzhiyun 		return -EINVAL;
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun 	devcd_disabled = true;
218*4882a593Smuzhiyun 
219*4882a593Smuzhiyun 	class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun 	return count;
222*4882a593Smuzhiyun }
223*4882a593Smuzhiyun static CLASS_ATTR_RW(disabled);
224*4882a593Smuzhiyun 
/* class-level attributes: only the write-once "disabled" knob */
static struct attribute *devcd_class_attrs[] = {
	&class_attr_disabled.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devcd_class);

/* the /sys/class/devcoredump class; one devcd%d child per pending dump */
static struct class devcd_class = {
	.name		= "devcoredump",
	.owner		= THIS_MODULE,
	.dev_release	= devcd_dev_release,
	.dev_groups	= devcd_dev_groups,
	.class_groups	= devcd_class_groups,
};
238*4882a593Smuzhiyun 
devcd_readv(char * buffer,loff_t offset,size_t count,void * data,size_t datalen)239*4882a593Smuzhiyun static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
240*4882a593Smuzhiyun 			   void *data, size_t datalen)
241*4882a593Smuzhiyun {
242*4882a593Smuzhiyun 	return memory_read_from_buffer(buffer, count, &offset, data, datalen);
243*4882a593Smuzhiyun }
244*4882a593Smuzhiyun 
/* default free callback for vmalloc-backed dumps (dev_coredumpv()) */
static void devcd_freev(void *data)
{
	vfree(data);
}
249*4882a593Smuzhiyun 
/**
 * dev_coredumpv - create device coredump with vmalloc data
 * @dev: the struct device for the crashed device
 * @data: vmalloc data containing the device coredump
 * @datalen: length of the data
 * @gfp: allocation flags
 *
 * This function takes ownership of the vmalloc'ed data and will free
 * it when it is no longer used. See dev_coredumpm() for more information.
 */
void dev_coredumpv(struct device *dev, void *data, size_t datalen,
		   gfp_t gfp)
{
	/* thin wrapper: plain buffer read + vfree-based release */
	dev_coredumpm(dev, NULL, data, datalen, gfp, devcd_readv, devcd_freev);
}
EXPORT_SYMBOL_GPL(dev_coredumpv);
266*4882a593Smuzhiyun 
devcd_match_failing(struct device * dev,const void * failing)267*4882a593Smuzhiyun static int devcd_match_failing(struct device *dev, const void *failing)
268*4882a593Smuzhiyun {
269*4882a593Smuzhiyun 	struct devcd_entry *devcd = dev_to_devcd(dev);
270*4882a593Smuzhiyun 
271*4882a593Smuzhiyun 	return devcd->failing_dev == failing;
272*4882a593Smuzhiyun }
273*4882a593Smuzhiyun 
/**
 * devcd_free_sgtable - free all the memory of the given scatterlist table
 * (i.e. both pages and scatterlist instances)
 * NOTE: if two tables allocated with devcd_alloc_sgtable and then chained
 * using the sg_chain function then that function should be called only once
 * on the chained table
 * @data: pointer to the sg_table to free (void * to match the generic
 *	free-callback signature used by dev_coredumpm())
 */
static void devcd_free_sgtable(void *data)
{
	_devcd_free_sgtable(data);
}
286*4882a593Smuzhiyun 
/**
 * devcd_read_from_sgtable - copy data from sg_table to a given buffer
 * and return the number of bytes read
 * @buffer: the buffer to copy the data to it
 * @offset: start copy from @offset@ bytes from the head of the data
 *	in the given scatterlist
 * @buf_len: the length of the buffer
 * @data: the scatterlist table to copy from
 * @data_len: the length of the data in the sg_table
 *
 * Returns the number of bytes copied, or -EINVAL when @offset lies
 * beyond the end of the data.
 */
static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
				       size_t buf_len, void *data,
				       size_t data_len)
{
	struct scatterlist *table = data;

	if (offset > data_len)
		return -EINVAL;

	/* clamp the request to the data that is actually left */
	if (offset + buf_len > data_len)
		buf_len = data_len - offset;
	return sg_pcopy_to_buffer(table, sg_nents(table), buffer, buf_len,
				  offset);
}
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun /**
313*4882a593Smuzhiyun  * dev_coredumpm - create device coredump with read/free methods
314*4882a593Smuzhiyun  * @dev: the struct device for the crashed device
315*4882a593Smuzhiyun  * @owner: the module that contains the read/free functions, use %THIS_MODULE
316*4882a593Smuzhiyun  * @data: data cookie for the @read/@free functions
317*4882a593Smuzhiyun  * @datalen: length of the data
318*4882a593Smuzhiyun  * @gfp: allocation flags
319*4882a593Smuzhiyun  * @read: function to read from the given buffer
320*4882a593Smuzhiyun  * @free: function to free the given buffer
321*4882a593Smuzhiyun  *
322*4882a593Smuzhiyun  * Creates a new device coredump for the given device. If a previous one hasn't
323*4882a593Smuzhiyun  * been read yet, the new coredump is discarded. The data lifetime is determined
324*4882a593Smuzhiyun  * by the device coredump framework and when it is no longer needed the @free
325*4882a593Smuzhiyun  * function will be called to free the data.
326*4882a593Smuzhiyun  */
dev_coredumpm(struct device * dev,struct module * owner,void * data,size_t datalen,gfp_t gfp,ssize_t (* read)(char * buffer,loff_t offset,size_t count,void * data,size_t datalen),void (* free)(void * data))327*4882a593Smuzhiyun void dev_coredumpm(struct device *dev, struct module *owner,
328*4882a593Smuzhiyun 		   void *data, size_t datalen, gfp_t gfp,
329*4882a593Smuzhiyun 		   ssize_t (*read)(char *buffer, loff_t offset, size_t count,
330*4882a593Smuzhiyun 				   void *data, size_t datalen),
331*4882a593Smuzhiyun 		   void (*free)(void *data))
332*4882a593Smuzhiyun {
333*4882a593Smuzhiyun 	static atomic_t devcd_count = ATOMIC_INIT(0);
334*4882a593Smuzhiyun 	struct devcd_entry *devcd;
335*4882a593Smuzhiyun 	struct device *existing;
336*4882a593Smuzhiyun 
337*4882a593Smuzhiyun 	if (devcd_disabled)
338*4882a593Smuzhiyun 		goto free;
339*4882a593Smuzhiyun 
340*4882a593Smuzhiyun 	existing = class_find_device(&devcd_class, NULL, dev,
341*4882a593Smuzhiyun 				     devcd_match_failing);
342*4882a593Smuzhiyun 	if (existing) {
343*4882a593Smuzhiyun 		put_device(existing);
344*4882a593Smuzhiyun 		goto free;
345*4882a593Smuzhiyun 	}
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun 	if (!try_module_get(owner))
348*4882a593Smuzhiyun 		goto free;
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun 	devcd = kzalloc(sizeof(*devcd), gfp);
351*4882a593Smuzhiyun 	if (!devcd)
352*4882a593Smuzhiyun 		goto put_module;
353*4882a593Smuzhiyun 
354*4882a593Smuzhiyun 	devcd->owner = owner;
355*4882a593Smuzhiyun 	devcd->data = data;
356*4882a593Smuzhiyun 	devcd->datalen = datalen;
357*4882a593Smuzhiyun 	devcd->read = read;
358*4882a593Smuzhiyun 	devcd->free = free;
359*4882a593Smuzhiyun 	devcd->failing_dev = get_device(dev);
360*4882a593Smuzhiyun 	devcd->delete_work = false;
361*4882a593Smuzhiyun 
362*4882a593Smuzhiyun 	mutex_init(&devcd->mutex);
363*4882a593Smuzhiyun 	device_initialize(&devcd->devcd_dev);
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun 	dev_set_name(&devcd->devcd_dev, "devcd%d",
366*4882a593Smuzhiyun 		     atomic_inc_return(&devcd_count));
367*4882a593Smuzhiyun 	devcd->devcd_dev.class = &devcd_class;
368*4882a593Smuzhiyun 
369*4882a593Smuzhiyun 	mutex_lock(&devcd->mutex);
370*4882a593Smuzhiyun 	if (device_add(&devcd->devcd_dev))
371*4882a593Smuzhiyun 		goto put_device;
372*4882a593Smuzhiyun 
373*4882a593Smuzhiyun 	if (sysfs_create_link(&devcd->devcd_dev.kobj, &dev->kobj,
374*4882a593Smuzhiyun 			      "failing_device"))
375*4882a593Smuzhiyun 		/* nothing - symlink will be missing */;
376*4882a593Smuzhiyun 
377*4882a593Smuzhiyun 	if (sysfs_create_link(&dev->kobj, &devcd->devcd_dev.kobj,
378*4882a593Smuzhiyun 			      "devcoredump"))
379*4882a593Smuzhiyun 		/* nothing - symlink will be missing */;
380*4882a593Smuzhiyun 
381*4882a593Smuzhiyun 	INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
382*4882a593Smuzhiyun 	schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
383*4882a593Smuzhiyun 	mutex_unlock(&devcd->mutex);
384*4882a593Smuzhiyun 	return;
385*4882a593Smuzhiyun  put_device:
386*4882a593Smuzhiyun 	put_device(&devcd->devcd_dev);
387*4882a593Smuzhiyun 	mutex_unlock(&devcd->mutex);
388*4882a593Smuzhiyun  put_module:
389*4882a593Smuzhiyun 	module_put(owner);
390*4882a593Smuzhiyun  free:
391*4882a593Smuzhiyun 	free(data);
392*4882a593Smuzhiyun }
393*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(dev_coredumpm);
394*4882a593Smuzhiyun 
/**
 * dev_coredumpsg - create device coredump that uses scatterlist as data
 * parameter
 * @dev: the struct device for the crashed device
 * @table: the dump data
 * @datalen: length of the data
 * @gfp: allocation flags
 *
 * Creates a new device coredump for the given device. If a previous one hasn't
 * been read yet, the new coredump is discarded. The data lifetime is determined
 * by the device coredump framework and when it is no longer needed
 * it will free the data.
 */
void dev_coredumpsg(struct device *dev, struct scatterlist *table,
		    size_t datalen, gfp_t gfp)
{
	/* thin wrapper: sg_table-aware read + free callbacks */
	dev_coredumpm(dev, NULL, table, datalen, gfp, devcd_read_from_sgtable,
		      devcd_free_sgtable);
}
EXPORT_SYMBOL_GPL(dev_coredumpsg);
415*4882a593Smuzhiyun 
/* register /sys/class/devcoredump (and its "disabled" attribute) at boot */
static int __init devcoredump_init(void)
{
	return class_register(&devcd_class);
}
__initcall(devcoredump_init);
421*4882a593Smuzhiyun 
static void __exit devcoredump_exit(void)
{
	/* flush and delete every pending dump before tearing down the class */
	class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
	class_unregister(&devcd_class);
}
__exitcall(devcoredump_exit);
428