xref: /OK3568_Linux_fs/kernel/lib/test_kmod.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * kmod stress test driver
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Copyright (C) 2017 Luis R. Rodriguez <mcgrof@kernel.org>
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * This program is free software; you can redistribute it and/or modify it
7*4882a593Smuzhiyun  * under the terms of the GNU General Public License as published by the Free
8*4882a593Smuzhiyun  * Software Foundation; either version 2 of the License, or at your option any
9*4882a593Smuzhiyun  * later version; or, when distributed separately from the Linux kernel or
10*4882a593Smuzhiyun  * when incorporated into other software packages, subject to the following
11*4882a593Smuzhiyun  * license:
12*4882a593Smuzhiyun  *
13*4882a593Smuzhiyun  * This program is free software; you can redistribute it and/or modify it
14*4882a593Smuzhiyun  * under the terms of copyleft-next (version 0.3.1 or later) as published
15*4882a593Smuzhiyun  * at http://copyleft-next.org/.
16*4882a593Smuzhiyun  */
17*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun /*
20*4882a593Smuzhiyun  * This driver provides an interface to trigger and test the kernel's
21*4882a593Smuzhiyun  * module loader through a series of configurations and a few triggers.
22*4882a593Smuzhiyun  * To test this driver use the following script as root:
23*4882a593Smuzhiyun  *
24*4882a593Smuzhiyun  * tools/testing/selftests/kmod/kmod.sh --help
25*4882a593Smuzhiyun  */
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun #include <linux/kernel.h>
28*4882a593Smuzhiyun #include <linux/module.h>
29*4882a593Smuzhiyun #include <linux/kmod.h>
30*4882a593Smuzhiyun #include <linux/printk.h>
31*4882a593Smuzhiyun #include <linux/kthread.h>
32*4882a593Smuzhiyun #include <linux/sched.h>
33*4882a593Smuzhiyun #include <linux/fs.h>
34*4882a593Smuzhiyun #include <linux/miscdevice.h>
35*4882a593Smuzhiyun #include <linux/vmalloc.h>
36*4882a593Smuzhiyun #include <linux/slab.h>
37*4882a593Smuzhiyun #include <linux/device.h>
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun #define TEST_START_NUM_THREADS	50
40*4882a593Smuzhiyun #define TEST_START_DRIVER	"test_module"
41*4882a593Smuzhiyun #define TEST_START_TEST_FS	"xfs"
42*4882a593Smuzhiyun #define TEST_START_TEST_CASE	TEST_KMOD_DRIVER
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun static bool force_init_test = false;
46*4882a593Smuzhiyun module_param(force_init_test, bool_enable_only, 0644);
47*4882a593Smuzhiyun MODULE_PARM_DESC(force_init_test,
48*4882a593Smuzhiyun 		 "Force kicking a test immediately after driver loads");
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun /*
51*4882a593Smuzhiyun  * For device allocation / registration
52*4882a593Smuzhiyun  */
53*4882a593Smuzhiyun static DEFINE_MUTEX(reg_dev_mutex);
54*4882a593Smuzhiyun static LIST_HEAD(reg_test_devs);
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun /*
57*4882a593Smuzhiyun  * num_test_devs actually represents the *next* ID of the next
58*4882a593Smuzhiyun  * device we will allow to create.
59*4882a593Smuzhiyun  */
60*4882a593Smuzhiyun static int num_test_devs;
61*4882a593Smuzhiyun 
/**
 * enum kmod_test_case - linker table test case
 *
 * If you add a  test case, please be sure to review if you need to set
 * @need_mod_put for your tests case.
 *
 * @__TEST_KMOD_INVALID: placeholder so 0 is never a valid test case
 * @TEST_KMOD_DRIVER: stress tests request_module()
 * @TEST_KMOD_FS_TYPE: stress tests get_fs_type()
 * @__TEST_KMOD_MAX: sentinel, one past the last valid test case
 */
enum kmod_test_case {
	__TEST_KMOD_INVALID = 0,

	TEST_KMOD_DRIVER,
	TEST_KMOD_FS_TYPE,

	__TEST_KMOD_MAX,
};
79*4882a593Smuzhiyun 
/**
 * struct test_config - configuration for a single test run
 *
 * @test_driver: name passed to request_module() for @TEST_KMOD_DRIVER
 * @test_fs: filesystem name passed to get_fs_type() for @TEST_KMOD_FS_TYPE
 * @num_threads: number of kthreads to spawn for the test
 * @test_case: which test case to run, see enum kmod_test_case
 * @test_result: last error collected from the threads, 0 on success
 */
struct test_config {
	char *test_driver;
	char *test_fs;
	unsigned int num_threads;
	enum kmod_test_case test_case;
	int test_result;
};
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun struct kmod_test_device;
89*4882a593Smuzhiyun 
/**
 * kmod_test_device_info - thread info
 *
 * @ret_sync: return value if request_module() is used, sync request for
 * 	@TEST_KMOD_DRIVER
 * @fs_sync: return value of get_fs_type() for @TEST_KMOD_FS_TYPE
 * @task_sync: the kthread running this request; set to NULL by the thread
 *	itself once its work has fully completed
 * @thread_idx: thread ID
 * @test_dev: test device test is being performed under
 * @need_mod_put: Some tests (get_fs_type() is one) requires putting the module
 *	(module_put(fs_sync->owner)) when done, otherwise you will not be able
 *	to unload the respective modules and re-test. We use this to keep
 *	accounting of when we need this and to help out in case we need to
 *	error out and deal with module_put() on error.
 */
struct kmod_test_device_info {
	int ret_sync;
	struct file_system_type *fs_sync;
	struct task_struct *task_sync;
	unsigned int thread_idx;
	struct kmod_test_device *test_dev;
	bool need_mod_put;
};
112*4882a593Smuzhiyun 
/**
 * kmod_test_device - test device to help test kmod
 *
 * @dev_idx: unique ID for test device
 * @config: configuration for the test
 * @misc_dev: we use a misc device under the hood
 * @dev: pointer to misc_dev's own struct device
 * @config_mutex: protects configuration of test
 * @trigger_mutex: the test trigger can only be fired once at a time
 * @thread_mutex: protects @done count, and the @info per each thread
 * @done: number of threads which have completed or failed
 * @test_is_oom: when we run out of memory, use this to halt moving forward
 * @kthreads_done: completion used to signal when all work is done
 * @list: needed to be part of the reg_test_devs
 * @info: array of info for each thread
 */
struct kmod_test_device {
	int dev_idx;
	struct test_config config;
	struct miscdevice misc_dev;
	struct device *dev;
	struct mutex config_mutex;
	struct mutex trigger_mutex;
	struct mutex thread_mutex;

	unsigned int done;

	bool test_is_oom;
	struct completion kthreads_done;
	struct list_head list;

	struct kmod_test_device_info *info;
};
146*4882a593Smuzhiyun 
test_case_str(enum kmod_test_case test_case)147*4882a593Smuzhiyun static const char *test_case_str(enum kmod_test_case test_case)
148*4882a593Smuzhiyun {
149*4882a593Smuzhiyun 	switch (test_case) {
150*4882a593Smuzhiyun 	case TEST_KMOD_DRIVER:
151*4882a593Smuzhiyun 		return "TEST_KMOD_DRIVER";
152*4882a593Smuzhiyun 	case TEST_KMOD_FS_TYPE:
153*4882a593Smuzhiyun 		return "TEST_KMOD_FS_TYPE";
154*4882a593Smuzhiyun 	default:
155*4882a593Smuzhiyun 		return "invalid";
156*4882a593Smuzhiyun 	}
157*4882a593Smuzhiyun }
158*4882a593Smuzhiyun 
/* The miscdevice was stashed in drvdata at registration time. */
static struct miscdevice *dev_to_misc_dev(struct device *dev)
{
	struct miscdevice *misc_dev = dev_get_drvdata(dev);

	return misc_dev;
}
163*4882a593Smuzhiyun 
/* Recover the owning kmod_test_device from its embedded miscdevice. */
static struct kmod_test_device *misc_dev_to_test_dev(struct miscdevice *misc_dev)
{
	return container_of(misc_dev, struct kmod_test_device, misc_dev);
}
168*4882a593Smuzhiyun 
/* Walk struct device -> miscdevice -> kmod_test_device. */
static struct kmod_test_device *dev_to_test_dev(struct device *dev)
{
	return misc_dev_to_test_dev(dev_to_misc_dev(dev));
}
177*4882a593Smuzhiyun 
/* Must run with thread_mutex held */
static void kmod_test_done_check(struct kmod_test_device *test_dev,
				 unsigned int idx)
{
	struct test_config *config = &test_dev->config;

	/* One more thread has checked in (completed or failed). */
	test_dev->done++;
	dev_dbg(test_dev->dev, "Done thread count: %u\n", test_dev->done);

	/*
	 * Once every configured thread has reported, wake whoever is
	 * blocked in wait_for_completion(&test_dev->kthreads_done).
	 */
	if (test_dev->done == config->num_threads) {
		dev_info(test_dev->dev, "Done: %u threads have all run now\n",
			 test_dev->done);
		dev_info(test_dev->dev, "Last thread to run: %u\n", idx);
		complete(&test_dev->kthreads_done);
	}
}
194*4882a593Smuzhiyun 
test_kmod_put_module(struct kmod_test_device_info * info)195*4882a593Smuzhiyun static void test_kmod_put_module(struct kmod_test_device_info *info)
196*4882a593Smuzhiyun {
197*4882a593Smuzhiyun 	struct kmod_test_device *test_dev = info->test_dev;
198*4882a593Smuzhiyun 	struct test_config *config = &test_dev->config;
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun 	if (!info->need_mod_put)
201*4882a593Smuzhiyun 		return;
202*4882a593Smuzhiyun 
203*4882a593Smuzhiyun 	switch (config->test_case) {
204*4882a593Smuzhiyun 	case TEST_KMOD_DRIVER:
205*4882a593Smuzhiyun 		break;
206*4882a593Smuzhiyun 	case TEST_KMOD_FS_TYPE:
207*4882a593Smuzhiyun 		if (info->fs_sync && info->fs_sync->owner)
208*4882a593Smuzhiyun 			module_put(info->fs_sync->owner);
209*4882a593Smuzhiyun 		break;
210*4882a593Smuzhiyun 	default:
211*4882a593Smuzhiyun 		BUG();
212*4882a593Smuzhiyun 	}
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun 	info->need_mod_put = true;
215*4882a593Smuzhiyun }
216*4882a593Smuzhiyun 
/*
 * Thread worker: perform one request for the configured test case.
 *
 * Each kthread runs exactly one request (request_module() or
 * get_fs_type()), records the result in its kmod_test_device_info,
 * drops any module reference taken, then checks in via
 * kmod_test_done_check() under thread_mutex.
 */
static int run_request(void *data)
{
	struct kmod_test_device_info *info = data;
	struct kmod_test_device *test_dev = info->test_dev;
	struct test_config *config = &test_dev->config;

	switch (config->test_case) {
	case TEST_KMOD_DRIVER:
		/* Synchronous load request; 0 on success */
		info->ret_sync = request_module("%s", config->test_driver);
		break;
	case TEST_KMOD_FS_TYPE:
		/*
		 * get_fs_type() takes a reference on the filesystem
		 * module's owner, so flag that a module_put() is owed.
		 */
		info->fs_sync = get_fs_type(config->test_fs);
		info->need_mod_put = true;
		break;
	default:
		/* __trigger_config_run() already checked for test sanity */
		BUG();
		return -EINVAL;
	}

	dev_dbg(test_dev->dev, "Ran thread %u\n", info->thread_idx);

	test_kmod_put_module(info);

	/* NULLing task_sync marks this thread as fully completed. */
	mutex_lock(&test_dev->thread_mutex);
	info->task_sync = NULL;
	kmod_test_done_check(test_dev, info->thread_idx);
	mutex_unlock(&test_dev->thread_mutex);

	return 0;
}
248*4882a593Smuzhiyun 
tally_work_test(struct kmod_test_device_info * info)249*4882a593Smuzhiyun static int tally_work_test(struct kmod_test_device_info *info)
250*4882a593Smuzhiyun {
251*4882a593Smuzhiyun 	struct kmod_test_device *test_dev = info->test_dev;
252*4882a593Smuzhiyun 	struct test_config *config = &test_dev->config;
253*4882a593Smuzhiyun 	int err_ret = 0;
254*4882a593Smuzhiyun 
255*4882a593Smuzhiyun 	switch (config->test_case) {
256*4882a593Smuzhiyun 	case TEST_KMOD_DRIVER:
257*4882a593Smuzhiyun 		/*
258*4882a593Smuzhiyun 		 * Only capture errors, if one is found that's
259*4882a593Smuzhiyun 		 * enough, for now.
260*4882a593Smuzhiyun 		 */
261*4882a593Smuzhiyun 		if (info->ret_sync != 0)
262*4882a593Smuzhiyun 			err_ret = info->ret_sync;
263*4882a593Smuzhiyun 		dev_info(test_dev->dev,
264*4882a593Smuzhiyun 			 "Sync thread %d return status: %d\n",
265*4882a593Smuzhiyun 			 info->thread_idx, info->ret_sync);
266*4882a593Smuzhiyun 		break;
267*4882a593Smuzhiyun 	case TEST_KMOD_FS_TYPE:
268*4882a593Smuzhiyun 		/* For now we make this simple */
269*4882a593Smuzhiyun 		if (!info->fs_sync)
270*4882a593Smuzhiyun 			err_ret = -EINVAL;
271*4882a593Smuzhiyun 		dev_info(test_dev->dev, "Sync thread %u fs: %s\n",
272*4882a593Smuzhiyun 			 info->thread_idx, info->fs_sync ? config->test_fs :
273*4882a593Smuzhiyun 			 "NULL");
274*4882a593Smuzhiyun 		break;
275*4882a593Smuzhiyun 	default:
276*4882a593Smuzhiyun 		BUG();
277*4882a593Smuzhiyun 	}
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun 	return err_ret;
280*4882a593Smuzhiyun }
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun /*
283*4882a593Smuzhiyun  * XXX: add result option to display if all errors did not match.
284*4882a593Smuzhiyun  * For now we just keep any error code if one was found.
285*4882a593Smuzhiyun  *
286*4882a593Smuzhiyun  * If this ran it means *all* tasks were created fine and we
287*4882a593Smuzhiyun  * are now just collecting results.
288*4882a593Smuzhiyun  *
289*4882a593Smuzhiyun  * Only propagate errors, do not override with a subsequent sucess case.
290*4882a593Smuzhiyun  */
tally_up_work(struct kmod_test_device * test_dev)291*4882a593Smuzhiyun static void tally_up_work(struct kmod_test_device *test_dev)
292*4882a593Smuzhiyun {
293*4882a593Smuzhiyun 	struct test_config *config = &test_dev->config;
294*4882a593Smuzhiyun 	struct kmod_test_device_info *info;
295*4882a593Smuzhiyun 	unsigned int idx;
296*4882a593Smuzhiyun 	int err_ret = 0;
297*4882a593Smuzhiyun 	int ret = 0;
298*4882a593Smuzhiyun 
299*4882a593Smuzhiyun 	mutex_lock(&test_dev->thread_mutex);
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 	dev_info(test_dev->dev, "Results:\n");
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun 	for (idx=0; idx < config->num_threads; idx++) {
304*4882a593Smuzhiyun 		info = &test_dev->info[idx];
305*4882a593Smuzhiyun 		ret = tally_work_test(info);
306*4882a593Smuzhiyun 		if (ret)
307*4882a593Smuzhiyun 			err_ret = ret;
308*4882a593Smuzhiyun 	}
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun 	/*
311*4882a593Smuzhiyun 	 * Note: request_module() returns 256 for a module not found even
312*4882a593Smuzhiyun 	 * though modprobe itself returns 1.
313*4882a593Smuzhiyun 	 */
314*4882a593Smuzhiyun 	config->test_result = err_ret;
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun 	mutex_unlock(&test_dev->thread_mutex);
317*4882a593Smuzhiyun }
318*4882a593Smuzhiyun 
try_one_request(struct kmod_test_device * test_dev,unsigned int idx)319*4882a593Smuzhiyun static int try_one_request(struct kmod_test_device *test_dev, unsigned int idx)
320*4882a593Smuzhiyun {
321*4882a593Smuzhiyun 	struct kmod_test_device_info *info = &test_dev->info[idx];
322*4882a593Smuzhiyun 	int fail_ret = -ENOMEM;
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun 	mutex_lock(&test_dev->thread_mutex);
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun 	info->thread_idx = idx;
327*4882a593Smuzhiyun 	info->test_dev = test_dev;
328*4882a593Smuzhiyun 	info->task_sync = kthread_run(run_request, info, "%s-%u",
329*4882a593Smuzhiyun 				      KBUILD_MODNAME, idx);
330*4882a593Smuzhiyun 
331*4882a593Smuzhiyun 	if (!info->task_sync || IS_ERR(info->task_sync)) {
332*4882a593Smuzhiyun 		test_dev->test_is_oom = true;
333*4882a593Smuzhiyun 		dev_err(test_dev->dev, "Setting up thread %u failed\n", idx);
334*4882a593Smuzhiyun 		info->task_sync = NULL;
335*4882a593Smuzhiyun 		goto err_out;
336*4882a593Smuzhiyun 	} else
337*4882a593Smuzhiyun 		dev_dbg(test_dev->dev, "Kicked off thread %u\n", idx);
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun 	mutex_unlock(&test_dev->thread_mutex);
340*4882a593Smuzhiyun 
341*4882a593Smuzhiyun 	return 0;
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun err_out:
344*4882a593Smuzhiyun 	info->ret_sync = fail_ret;
345*4882a593Smuzhiyun 	mutex_unlock(&test_dev->thread_mutex);
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun 	return fail_ret;
348*4882a593Smuzhiyun }
349*4882a593Smuzhiyun 
/*
 * Stop all test threads and clean up any module references still owed.
 *
 * Called when at least one thread failed to start; stops any thread
 * that is still running and performs the module_put() accounting its
 * completion path would otherwise have done.
 */
static void test_dev_kmod_stop_tests(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;
	struct kmod_test_device_info *info;
	unsigned int i;

	dev_info(test_dev->dev, "Ending request_module() tests\n");

	mutex_lock(&test_dev->thread_mutex);

	for (i=0; i < config->num_threads; i++) {
		info = &test_dev->info[i];
		/* A live task_sync means the thread has not finished yet */
		if (info->task_sync && !IS_ERR(info->task_sync)) {
			dev_info(test_dev->dev,
				 "Stopping still-running thread %i\n", i);
			kthread_stop(info->task_sync);
		}

		/*
		 * info->task_sync is well protected, it can only be
		 * NULL or a pointer to a struct. If its NULL we either
		 * never ran, or we did and we completed the work. Completed
		 * tasks *always* put the module for us. This is a sanity
		 * check -- just in case.
		 */
		if (info->task_sync && info->need_mod_put)
			test_kmod_put_module(info);
	}

	mutex_unlock(&test_dev->thread_mutex);
}
381*4882a593Smuzhiyun 
382*4882a593Smuzhiyun /*
383*4882a593Smuzhiyun  * Only wait *iff* we did not run into any errors during all of our thread
384*4882a593Smuzhiyun  * set up. If run into any issues we stop threads and just bail out with
385*4882a593Smuzhiyun  * an error to the trigger. This also means we don't need any tally work
386*4882a593Smuzhiyun  * for any threads which fail.
387*4882a593Smuzhiyun  */
try_requests(struct kmod_test_device * test_dev)388*4882a593Smuzhiyun static int try_requests(struct kmod_test_device *test_dev)
389*4882a593Smuzhiyun {
390*4882a593Smuzhiyun 	struct test_config *config = &test_dev->config;
391*4882a593Smuzhiyun 	unsigned int idx;
392*4882a593Smuzhiyun 	int ret;
393*4882a593Smuzhiyun 	bool any_error = false;
394*4882a593Smuzhiyun 
395*4882a593Smuzhiyun 	for (idx=0; idx < config->num_threads; idx++) {
396*4882a593Smuzhiyun 		if (test_dev->test_is_oom) {
397*4882a593Smuzhiyun 			any_error = true;
398*4882a593Smuzhiyun 			break;
399*4882a593Smuzhiyun 		}
400*4882a593Smuzhiyun 
401*4882a593Smuzhiyun 		ret = try_one_request(test_dev, idx);
402*4882a593Smuzhiyun 		if (ret) {
403*4882a593Smuzhiyun 			any_error = true;
404*4882a593Smuzhiyun 			break;
405*4882a593Smuzhiyun 		}
406*4882a593Smuzhiyun 	}
407*4882a593Smuzhiyun 
408*4882a593Smuzhiyun 	if (!any_error) {
409*4882a593Smuzhiyun 		test_dev->test_is_oom = false;
410*4882a593Smuzhiyun 		dev_info(test_dev->dev,
411*4882a593Smuzhiyun 			 "No errors were found while initializing threads\n");
412*4882a593Smuzhiyun 		wait_for_completion(&test_dev->kthreads_done);
413*4882a593Smuzhiyun 		tally_up_work(test_dev);
414*4882a593Smuzhiyun 	} else {
415*4882a593Smuzhiyun 		test_dev->test_is_oom = true;
416*4882a593Smuzhiyun 		dev_info(test_dev->dev,
417*4882a593Smuzhiyun 			 "At least one thread failed to start, stop all work\n");
418*4882a593Smuzhiyun 		test_dev_kmod_stop_tests(test_dev);
419*4882a593Smuzhiyun 		return -ENOMEM;
420*4882a593Smuzhiyun 	}
421*4882a593Smuzhiyun 
422*4882a593Smuzhiyun 	return 0;
423*4882a593Smuzhiyun }
424*4882a593Smuzhiyun 
run_test_driver(struct kmod_test_device * test_dev)425*4882a593Smuzhiyun static int run_test_driver(struct kmod_test_device *test_dev)
426*4882a593Smuzhiyun {
427*4882a593Smuzhiyun 	struct test_config *config = &test_dev->config;
428*4882a593Smuzhiyun 
429*4882a593Smuzhiyun 	dev_info(test_dev->dev, "Test case: %s (%u)\n",
430*4882a593Smuzhiyun 		 test_case_str(config->test_case),
431*4882a593Smuzhiyun 		 config->test_case);
432*4882a593Smuzhiyun 	dev_info(test_dev->dev, "Test driver to load: %s\n",
433*4882a593Smuzhiyun 		 config->test_driver);
434*4882a593Smuzhiyun 	dev_info(test_dev->dev, "Number of threads to run: %u\n",
435*4882a593Smuzhiyun 		 config->num_threads);
436*4882a593Smuzhiyun 	dev_info(test_dev->dev, "Thread IDs will range from 0 - %u\n",
437*4882a593Smuzhiyun 		 config->num_threads - 1);
438*4882a593Smuzhiyun 
439*4882a593Smuzhiyun 	return try_requests(test_dev);
440*4882a593Smuzhiyun }
441*4882a593Smuzhiyun 
run_test_fs_type(struct kmod_test_device * test_dev)442*4882a593Smuzhiyun static int run_test_fs_type(struct kmod_test_device *test_dev)
443*4882a593Smuzhiyun {
444*4882a593Smuzhiyun 	struct test_config *config = &test_dev->config;
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun 	dev_info(test_dev->dev, "Test case: %s (%u)\n",
447*4882a593Smuzhiyun 		 test_case_str(config->test_case),
448*4882a593Smuzhiyun 		 config->test_case);
449*4882a593Smuzhiyun 	dev_info(test_dev->dev, "Test filesystem to load: %s\n",
450*4882a593Smuzhiyun 		 config->test_fs);
451*4882a593Smuzhiyun 	dev_info(test_dev->dev, "Number of threads to run: %u\n",
452*4882a593Smuzhiyun 		 config->num_threads);
453*4882a593Smuzhiyun 	dev_info(test_dev->dev, "Thread IDs will range from 0 - %u\n",
454*4882a593Smuzhiyun 		 config->num_threads - 1);
455*4882a593Smuzhiyun 
456*4882a593Smuzhiyun 	return try_requests(test_dev);
457*4882a593Smuzhiyun }
458*4882a593Smuzhiyun 
config_show(struct device * dev,struct device_attribute * attr,char * buf)459*4882a593Smuzhiyun static ssize_t config_show(struct device *dev,
460*4882a593Smuzhiyun 			   struct device_attribute *attr,
461*4882a593Smuzhiyun 			   char *buf)
462*4882a593Smuzhiyun {
463*4882a593Smuzhiyun 	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
464*4882a593Smuzhiyun 	struct test_config *config = &test_dev->config;
465*4882a593Smuzhiyun 	int len = 0;
466*4882a593Smuzhiyun 
467*4882a593Smuzhiyun 	mutex_lock(&test_dev->config_mutex);
468*4882a593Smuzhiyun 
469*4882a593Smuzhiyun 	len += snprintf(buf, PAGE_SIZE,
470*4882a593Smuzhiyun 			"Custom trigger configuration for: %s\n",
471*4882a593Smuzhiyun 			dev_name(dev));
472*4882a593Smuzhiyun 
473*4882a593Smuzhiyun 	len += snprintf(buf+len, PAGE_SIZE - len,
474*4882a593Smuzhiyun 			"Number of threads:\t%u\n",
475*4882a593Smuzhiyun 			config->num_threads);
476*4882a593Smuzhiyun 
477*4882a593Smuzhiyun 	len += snprintf(buf+len, PAGE_SIZE - len,
478*4882a593Smuzhiyun 			"Test_case:\t%s (%u)\n",
479*4882a593Smuzhiyun 			test_case_str(config->test_case),
480*4882a593Smuzhiyun 			config->test_case);
481*4882a593Smuzhiyun 
482*4882a593Smuzhiyun 	if (config->test_driver)
483*4882a593Smuzhiyun 		len += snprintf(buf+len, PAGE_SIZE - len,
484*4882a593Smuzhiyun 				"driver:\t%s\n",
485*4882a593Smuzhiyun 				config->test_driver);
486*4882a593Smuzhiyun 	else
487*4882a593Smuzhiyun 		len += snprintf(buf+len, PAGE_SIZE - len,
488*4882a593Smuzhiyun 				"driver:\tEMPTY\n");
489*4882a593Smuzhiyun 
490*4882a593Smuzhiyun 	if (config->test_fs)
491*4882a593Smuzhiyun 		len += snprintf(buf+len, PAGE_SIZE - len,
492*4882a593Smuzhiyun 				"fs:\t%s\n",
493*4882a593Smuzhiyun 				config->test_fs);
494*4882a593Smuzhiyun 	else
495*4882a593Smuzhiyun 		len += snprintf(buf+len, PAGE_SIZE - len,
496*4882a593Smuzhiyun 				"fs:\tEMPTY\n");
497*4882a593Smuzhiyun 
498*4882a593Smuzhiyun 	mutex_unlock(&test_dev->config_mutex);
499*4882a593Smuzhiyun 
500*4882a593Smuzhiyun 	return len;
501*4882a593Smuzhiyun }
502*4882a593Smuzhiyun static DEVICE_ATTR_RO(config);
503*4882a593Smuzhiyun 
504*4882a593Smuzhiyun /*
505*4882a593Smuzhiyun  * This ensures we don't allow kicking threads through if our configuration
506*4882a593Smuzhiyun  * is faulty.
507*4882a593Smuzhiyun  */
__trigger_config_run(struct kmod_test_device * test_dev)508*4882a593Smuzhiyun static int __trigger_config_run(struct kmod_test_device *test_dev)
509*4882a593Smuzhiyun {
510*4882a593Smuzhiyun 	struct test_config *config = &test_dev->config;
511*4882a593Smuzhiyun 
512*4882a593Smuzhiyun 	test_dev->done = 0;
513*4882a593Smuzhiyun 
514*4882a593Smuzhiyun 	switch (config->test_case) {
515*4882a593Smuzhiyun 	case TEST_KMOD_DRIVER:
516*4882a593Smuzhiyun 		return run_test_driver(test_dev);
517*4882a593Smuzhiyun 	case TEST_KMOD_FS_TYPE:
518*4882a593Smuzhiyun 		return run_test_fs_type(test_dev);
519*4882a593Smuzhiyun 	default:
520*4882a593Smuzhiyun 		dev_warn(test_dev->dev,
521*4882a593Smuzhiyun 			 "Invalid test case requested: %u\n",
522*4882a593Smuzhiyun 			 config->test_case);
523*4882a593Smuzhiyun 		return -EINVAL;
524*4882a593Smuzhiyun 	}
525*4882a593Smuzhiyun }
526*4882a593Smuzhiyun 
/*
 * Run one triggered test under both the trigger and config mutexes,
 * logging the overall result from config->test_result.
 */
static int trigger_config_run(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;
	int ret;

	mutex_lock(&test_dev->trigger_mutex);
	mutex_lock(&test_dev->config_mutex);

	ret = __trigger_config_run(test_dev);
	if (ret < 0)
		goto out;
	dev_info(test_dev->dev, "General test result: %d\n",
		 config->test_result);

	/*
	 * We must return 0 after a trigger event unless something went
	 * wrong with the setup of the test. If the test setup went fine
	 * then userspace must just check the result of config->test_result.
	 * One issue with relying on the return from a call in the kernel
	 * is if the kernel returns a positive value using this trigger
	 * will not return the value to userspace, it would be lost.
	 *
	 * By not relying on capturing the return value of tests we are using
	 * through the trigger it also allows us to run tests with set -e and
	 * only fail when something went wrong with the driver upon trigger
	 * requests.
	 */
	ret = 0;

out:
	mutex_unlock(&test_dev->config_mutex);
	mutex_unlock(&test_dev->trigger_mutex);

	return ret;
}
562*4882a593Smuzhiyun 
/*
 * sysfs write handler for trigger_config: any write fires a test run.
 */
static ssize_t
trigger_config_store(struct device *dev,
		     struct device_attribute *attr,
		     const char *buf, size_t count)
{
	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
	int ret;

	if (test_dev->test_is_oom)
		return -ENOMEM;

	/* For all intents and purposes we don't care what userspace
	 * sent this trigger, we care only that we were triggered.
	 * We treat the return value only for capturing issues with
	 * the test setup. At this point all the test variables should
	 * have been allocated so typically this should never fail.
	 */
	ret = trigger_config_run(test_dev);
	if (unlikely(ret < 0))
		goto out;

	/*
	 * Note: any return > 0 will be treated as success
	 * and the error value will not be available to userspace.
	 * Do not rely on trying to send to userspace a test value
	 * return value as positive return errors will be lost.
	 */
	if (WARN_ON(ret > 0))
		return -EINVAL;

	ret = count;
out:
	return ret;
}
597*4882a593Smuzhiyun static DEVICE_ATTR_WO(trigger_config);
598*4882a593Smuzhiyun 
599*4882a593Smuzhiyun /*
600*4882a593Smuzhiyun  * XXX: move to kstrncpy() once merged.
601*4882a593Smuzhiyun  *
602*4882a593Smuzhiyun  * Users should use kfree_const() when freeing these.
603*4882a593Smuzhiyun  */
__kstrncpy(char ** dst,const char * name,size_t count,gfp_t gfp)604*4882a593Smuzhiyun static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
605*4882a593Smuzhiyun {
606*4882a593Smuzhiyun 	*dst = kstrndup(name, count, gfp);
607*4882a593Smuzhiyun 	if (!*dst)
608*4882a593Smuzhiyun 		return -ENOSPC;
609*4882a593Smuzhiyun 	return count;
610*4882a593Smuzhiyun }
611*4882a593Smuzhiyun 
/* Duplicate @name into config->test_driver; free with kfree_const(). */
static int config_copy_test_driver_name(struct test_config *config,
				    const char *name,
				    size_t count)
{
	return __kstrncpy(&config->test_driver, name, count, GFP_KERNEL);
}
618*4882a593Smuzhiyun 
619*4882a593Smuzhiyun 
/* Duplicate @name into config->test_fs; free with kfree_const(). */
static int config_copy_test_fs(struct test_config *config, const char *name,
			       size_t count)
{
	return __kstrncpy(&config->test_fs, name, count, GFP_KERNEL);
}
625*4882a593Smuzhiyun 
__kmod_config_free(struct test_config * config)626*4882a593Smuzhiyun static void __kmod_config_free(struct test_config *config)
627*4882a593Smuzhiyun {
628*4882a593Smuzhiyun 	if (!config)
629*4882a593Smuzhiyun 		return;
630*4882a593Smuzhiyun 
631*4882a593Smuzhiyun 	kfree_const(config->test_driver);
632*4882a593Smuzhiyun 	config->test_driver = NULL;
633*4882a593Smuzhiyun 
634*4882a593Smuzhiyun 	kfree_const(config->test_fs);
635*4882a593Smuzhiyun 	config->test_fs = NULL;
636*4882a593Smuzhiyun }
637*4882a593Smuzhiyun 
kmod_config_free(struct kmod_test_device * test_dev)638*4882a593Smuzhiyun static void kmod_config_free(struct kmod_test_device *test_dev)
639*4882a593Smuzhiyun {
640*4882a593Smuzhiyun 	struct test_config *config;
641*4882a593Smuzhiyun 
642*4882a593Smuzhiyun 	if (!test_dev)
643*4882a593Smuzhiyun 		return;
644*4882a593Smuzhiyun 
645*4882a593Smuzhiyun 	config = &test_dev->config;
646*4882a593Smuzhiyun 
647*4882a593Smuzhiyun 	mutex_lock(&test_dev->config_mutex);
648*4882a593Smuzhiyun 	__kmod_config_free(config);
649*4882a593Smuzhiyun 	mutex_unlock(&test_dev->config_mutex);
650*4882a593Smuzhiyun }
651*4882a593Smuzhiyun 
config_test_driver_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)652*4882a593Smuzhiyun static ssize_t config_test_driver_store(struct device *dev,
653*4882a593Smuzhiyun 					struct device_attribute *attr,
654*4882a593Smuzhiyun 					const char *buf, size_t count)
655*4882a593Smuzhiyun {
656*4882a593Smuzhiyun 	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
657*4882a593Smuzhiyun 	struct test_config *config = &test_dev->config;
658*4882a593Smuzhiyun 	int copied;
659*4882a593Smuzhiyun 
660*4882a593Smuzhiyun 	mutex_lock(&test_dev->config_mutex);
661*4882a593Smuzhiyun 
662*4882a593Smuzhiyun 	kfree_const(config->test_driver);
663*4882a593Smuzhiyun 	config->test_driver = NULL;
664*4882a593Smuzhiyun 
665*4882a593Smuzhiyun 	copied = config_copy_test_driver_name(config, buf, count);
666*4882a593Smuzhiyun 	mutex_unlock(&test_dev->config_mutex);
667*4882a593Smuzhiyun 
668*4882a593Smuzhiyun 	return copied;
669*4882a593Smuzhiyun }
670*4882a593Smuzhiyun 
671*4882a593Smuzhiyun /*
672*4882a593Smuzhiyun  * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
673*4882a593Smuzhiyun  */
config_test_show_str(struct mutex * config_mutex,char * dst,char * src)674*4882a593Smuzhiyun static ssize_t config_test_show_str(struct mutex *config_mutex,
675*4882a593Smuzhiyun 				    char *dst,
676*4882a593Smuzhiyun 				    char *src)
677*4882a593Smuzhiyun {
678*4882a593Smuzhiyun 	int len;
679*4882a593Smuzhiyun 
680*4882a593Smuzhiyun 	mutex_lock(config_mutex);
681*4882a593Smuzhiyun 	len = snprintf(dst, PAGE_SIZE, "%s\n", src);
682*4882a593Smuzhiyun 	mutex_unlock(config_mutex);
683*4882a593Smuzhiyun 
684*4882a593Smuzhiyun 	return len;
685*4882a593Smuzhiyun }
686*4882a593Smuzhiyun 
config_test_driver_show(struct device * dev,struct device_attribute * attr,char * buf)687*4882a593Smuzhiyun static ssize_t config_test_driver_show(struct device *dev,
688*4882a593Smuzhiyun 					struct device_attribute *attr,
689*4882a593Smuzhiyun 					char *buf)
690*4882a593Smuzhiyun {
691*4882a593Smuzhiyun 	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
692*4882a593Smuzhiyun 	struct test_config *config = &test_dev->config;
693*4882a593Smuzhiyun 
694*4882a593Smuzhiyun 	return config_test_show_str(&test_dev->config_mutex, buf,
695*4882a593Smuzhiyun 				    config->test_driver);
696*4882a593Smuzhiyun }
697*4882a593Smuzhiyun static DEVICE_ATTR_RW(config_test_driver);
698*4882a593Smuzhiyun 
config_test_fs_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)699*4882a593Smuzhiyun static ssize_t config_test_fs_store(struct device *dev,
700*4882a593Smuzhiyun 				    struct device_attribute *attr,
701*4882a593Smuzhiyun 				    const char *buf, size_t count)
702*4882a593Smuzhiyun {
703*4882a593Smuzhiyun 	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
704*4882a593Smuzhiyun 	struct test_config *config = &test_dev->config;
705*4882a593Smuzhiyun 	int copied;
706*4882a593Smuzhiyun 
707*4882a593Smuzhiyun 	mutex_lock(&test_dev->config_mutex);
708*4882a593Smuzhiyun 
709*4882a593Smuzhiyun 	kfree_const(config->test_fs);
710*4882a593Smuzhiyun 	config->test_fs = NULL;
711*4882a593Smuzhiyun 
712*4882a593Smuzhiyun 	copied = config_copy_test_fs(config, buf, count);
713*4882a593Smuzhiyun 	mutex_unlock(&test_dev->config_mutex);
714*4882a593Smuzhiyun 
715*4882a593Smuzhiyun 	return copied;
716*4882a593Smuzhiyun }
717*4882a593Smuzhiyun 
config_test_fs_show(struct device * dev,struct device_attribute * attr,char * buf)718*4882a593Smuzhiyun static ssize_t config_test_fs_show(struct device *dev,
719*4882a593Smuzhiyun 				   struct device_attribute *attr,
720*4882a593Smuzhiyun 				   char *buf)
721*4882a593Smuzhiyun {
722*4882a593Smuzhiyun 	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
723*4882a593Smuzhiyun 	struct test_config *config = &test_dev->config;
724*4882a593Smuzhiyun 
725*4882a593Smuzhiyun 	return config_test_show_str(&test_dev->config_mutex, buf,
726*4882a593Smuzhiyun 				    config->test_fs);
727*4882a593Smuzhiyun }
728*4882a593Smuzhiyun static DEVICE_ATTR_RW(config_test_fs);
729*4882a593Smuzhiyun 
/*
 * Point the configuration at @test_case with driver/fs name @test_str,
 * then start a run via trigger_config_run().
 *
 * Returns -EINVAL for an unknown test case, -ENOMEM if copying
 * @test_str failed (test_is_oom is raised so callers can tell the
 * failure was an allocation problem), otherwise the result of
 * trigger_config_run().
 */
static int trigger_config_run_type(struct kmod_test_device *test_dev,
				   enum kmod_test_case test_case,
				   const char *test_str)
{
	int copied = 0;
	struct test_config *config = &test_dev->config;

	mutex_lock(&test_dev->config_mutex);

	switch (test_case) {
	case TEST_KMOD_DRIVER:
		/* Free the old name first; __kstrncpy() allocates a fresh copy. */
		kfree_const(config->test_driver);
		config->test_driver = NULL;
		copied = config_copy_test_driver_name(config, test_str,
						      strlen(test_str));
		break;
	case TEST_KMOD_FS_TYPE:
		kfree_const(config->test_fs);
		config->test_fs = NULL;
		copied = config_copy_test_fs(config, test_str,
					     strlen(test_str));
		break;
	default:
		mutex_unlock(&test_dev->config_mutex);
		return -EINVAL;
	}

	/* Recorded even if the copy above failed; OOM is detected below. */
	config->test_case = test_case;

	mutex_unlock(&test_dev->config_mutex);

	/* A short or non-positive copy means kstrndup() could not allocate. */
	if (copied <= 0 || copied != strlen(test_str)) {
		test_dev->test_is_oom = true;
		return -ENOMEM;
	}

	test_dev->test_is_oom = false;

	return trigger_config_run(test_dev);
}
770*4882a593Smuzhiyun 
free_test_dev_info(struct kmod_test_device * test_dev)771*4882a593Smuzhiyun static void free_test_dev_info(struct kmod_test_device *test_dev)
772*4882a593Smuzhiyun {
773*4882a593Smuzhiyun 	vfree(test_dev->info);
774*4882a593Smuzhiyun 	test_dev->info = NULL;
775*4882a593Smuzhiyun }
776*4882a593Smuzhiyun 
kmod_config_sync_info(struct kmod_test_device * test_dev)777*4882a593Smuzhiyun static int kmod_config_sync_info(struct kmod_test_device *test_dev)
778*4882a593Smuzhiyun {
779*4882a593Smuzhiyun 	struct test_config *config = &test_dev->config;
780*4882a593Smuzhiyun 
781*4882a593Smuzhiyun 	free_test_dev_info(test_dev);
782*4882a593Smuzhiyun 	test_dev->info =
783*4882a593Smuzhiyun 		vzalloc(array_size(sizeof(struct kmod_test_device_info),
784*4882a593Smuzhiyun 				   config->num_threads));
785*4882a593Smuzhiyun 	if (!test_dev->info)
786*4882a593Smuzhiyun 		return -ENOMEM;
787*4882a593Smuzhiyun 
788*4882a593Smuzhiyun 	return 0;
789*4882a593Smuzhiyun }
790*4882a593Smuzhiyun 
791*4882a593Smuzhiyun /*
792*4882a593Smuzhiyun  * Old kernels may not have this, if you want to port this code to
793*4882a593Smuzhiyun  * test it on older kernels.
794*4882a593Smuzhiyun  */
795*4882a593Smuzhiyun #ifdef get_kmod_umh_limit
kmod_init_test_thread_limit(void)796*4882a593Smuzhiyun static unsigned int kmod_init_test_thread_limit(void)
797*4882a593Smuzhiyun {
798*4882a593Smuzhiyun 	return get_kmod_umh_limit();
799*4882a593Smuzhiyun }
800*4882a593Smuzhiyun #else
kmod_init_test_thread_limit(void)801*4882a593Smuzhiyun static unsigned int kmod_init_test_thread_limit(void)
802*4882a593Smuzhiyun {
803*4882a593Smuzhiyun 	return TEST_START_NUM_THREADS;
804*4882a593Smuzhiyun }
805*4882a593Smuzhiyun #endif
806*4882a593Smuzhiyun 
/*
 * Reset @test_dev's configuration to the compiled-in defaults:
 * default driver and fs names, default thread count and test case,
 * and a freshly sized per-thread info array.
 *
 * Caller must hold config_mutex. Returns 0 on success; on failure
 * the partial config is freed, test_is_oom is raised (with a WARN
 * to make the failure loud) and a negative error is returned.
 */
static int __kmod_config_init(struct kmod_test_device *test_dev)
{
	struct test_config *config = &test_dev->config;
	int ret = -ENOMEM, copied;

	__kmod_config_free(config);

	/* A short copy can only mean the duplication failed to allocate. */
	copied = config_copy_test_driver_name(config, TEST_START_DRIVER,
					      strlen(TEST_START_DRIVER));
	if (copied != strlen(TEST_START_DRIVER))
		goto err_out;

	copied = config_copy_test_fs(config, TEST_START_TEST_FS,
				     strlen(TEST_START_TEST_FS));
	if (copied != strlen(TEST_START_TEST_FS))
		goto err_out;

	config->num_threads = kmod_init_test_thread_limit();
	config->test_result = 0;
	config->test_case = TEST_START_TEST_CASE;

	/* Size the per-thread info array for the new thread count. */
	ret = kmod_config_sync_info(test_dev);
	if (ret)
		goto err_out;

	test_dev->test_is_oom = false;

	return 0;

err_out:
	/* Deliberately always warns: init-time OOM should be loud. */
	test_dev->test_is_oom = true;
	WARN_ON(test_dev->test_is_oom);

	__kmod_config_free(config);

	return ret;
}
844*4882a593Smuzhiyun 
reset_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)845*4882a593Smuzhiyun static ssize_t reset_store(struct device *dev,
846*4882a593Smuzhiyun 			   struct device_attribute *attr,
847*4882a593Smuzhiyun 			   const char *buf, size_t count)
848*4882a593Smuzhiyun {
849*4882a593Smuzhiyun 	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
850*4882a593Smuzhiyun 	int ret;
851*4882a593Smuzhiyun 
852*4882a593Smuzhiyun 	mutex_lock(&test_dev->trigger_mutex);
853*4882a593Smuzhiyun 	mutex_lock(&test_dev->config_mutex);
854*4882a593Smuzhiyun 
855*4882a593Smuzhiyun 	ret = __kmod_config_init(test_dev);
856*4882a593Smuzhiyun 	if (ret < 0) {
857*4882a593Smuzhiyun 		ret = -ENOMEM;
858*4882a593Smuzhiyun 		dev_err(dev, "could not alloc settings for config trigger: %d\n",
859*4882a593Smuzhiyun 		       ret);
860*4882a593Smuzhiyun 		goto out;
861*4882a593Smuzhiyun 	}
862*4882a593Smuzhiyun 
863*4882a593Smuzhiyun 	dev_info(dev, "reset\n");
864*4882a593Smuzhiyun 	ret = count;
865*4882a593Smuzhiyun 
866*4882a593Smuzhiyun out:
867*4882a593Smuzhiyun 	mutex_unlock(&test_dev->config_mutex);
868*4882a593Smuzhiyun 	mutex_unlock(&test_dev->trigger_mutex);
869*4882a593Smuzhiyun 
870*4882a593Smuzhiyun 	return ret;
871*4882a593Smuzhiyun }
872*4882a593Smuzhiyun static DEVICE_ATTR_WO(reset);
873*4882a593Smuzhiyun 
/*
 * Parse an unsigned int from @buf and install it in *@config under
 * the config lock, then run @test_sync to propagate the change. If
 * the sync callback fails the previous value is restored (and
 * re-synced, which is expected to succeed) and -EINVAL is returned;
 * otherwise the full @size is returned.
 */
static int test_dev_config_update_uint_sync(struct kmod_test_device *test_dev,
					    const char *buf, size_t size,
					    unsigned int *config,
					    int (*test_sync)(struct kmod_test_device *test_dev))
{
	unsigned long val;
	unsigned int old_val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (val > UINT_MAX)
		return -EINVAL;

	mutex_lock(&test_dev->config_mutex);

	old_val = *config;
	*config = val;

	ret = test_sync(test_dev);
	if (ret) {
		/* Roll back to the known-good value and re-sync with it. */
		*config = old_val;

		ret = test_sync(test_dev);
		WARN_ON(ret);

		mutex_unlock(&test_dev->config_mutex);
		return -EINVAL;
	}

	mutex_unlock(&test_dev->config_mutex);
	/* Always return full write size even if we didn't consume all */
	return size;
}
910*4882a593Smuzhiyun 
/*
 * Parse an unsigned int from @buf and store it in *@config if it
 * lies within [@min, @max]. Returns @size on success or a negative
 * error (parse failure or out-of-range value).
 */
static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev,
					     const char *buf, size_t size,
					     unsigned int *config,
					     unsigned int min,
					     unsigned int max)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (val < min || val > max)
		return -EINVAL;

	mutex_lock(&test_dev->config_mutex);
	*config = val;
	mutex_unlock(&test_dev->config_mutex);

	/* Always return full write size even if we didn't consume all */
	return size;
}
934*4882a593Smuzhiyun 
/*
 * Parse a signed int from @buf and store it in *@config under the
 * config lock. Returns @size on success or a negative error.
 */
static int test_dev_config_update_int(struct kmod_test_device *test_dev,
				      const char *buf, size_t size,
				      int *config)
{
	long val;
	int ret;

	ret = kstrtol(buf, 10, &val);
	if (ret)
		return ret;

	/* Reject values a plain int cannot represent (64-bit long). */
	if (val < INT_MIN || val > INT_MAX)
		return -EINVAL;

	mutex_lock(&test_dev->config_mutex);
	*config = val;
	mutex_unlock(&test_dev->config_mutex);

	/* Always return full write size even if we didn't consume all */
	return size;
}
955*4882a593Smuzhiyun 
test_dev_config_show_int(struct kmod_test_device * test_dev,char * buf,int config)956*4882a593Smuzhiyun static ssize_t test_dev_config_show_int(struct kmod_test_device *test_dev,
957*4882a593Smuzhiyun 					char *buf,
958*4882a593Smuzhiyun 					int config)
959*4882a593Smuzhiyun {
960*4882a593Smuzhiyun 	int val;
961*4882a593Smuzhiyun 
962*4882a593Smuzhiyun 	mutex_lock(&test_dev->config_mutex);
963*4882a593Smuzhiyun 	val = config;
964*4882a593Smuzhiyun 	mutex_unlock(&test_dev->config_mutex);
965*4882a593Smuzhiyun 
966*4882a593Smuzhiyun 	return snprintf(buf, PAGE_SIZE, "%d\n", val);
967*4882a593Smuzhiyun }
968*4882a593Smuzhiyun 
test_dev_config_show_uint(struct kmod_test_device * test_dev,char * buf,unsigned int config)969*4882a593Smuzhiyun static ssize_t test_dev_config_show_uint(struct kmod_test_device *test_dev,
970*4882a593Smuzhiyun 					 char *buf,
971*4882a593Smuzhiyun 					 unsigned int config)
972*4882a593Smuzhiyun {
973*4882a593Smuzhiyun 	unsigned int val;
974*4882a593Smuzhiyun 
975*4882a593Smuzhiyun 	mutex_lock(&test_dev->config_mutex);
976*4882a593Smuzhiyun 	val = config;
977*4882a593Smuzhiyun 	mutex_unlock(&test_dev->config_mutex);
978*4882a593Smuzhiyun 
979*4882a593Smuzhiyun 	return snprintf(buf, PAGE_SIZE, "%u\n", val);
980*4882a593Smuzhiyun }
981*4882a593Smuzhiyun 
test_result_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)982*4882a593Smuzhiyun static ssize_t test_result_store(struct device *dev,
983*4882a593Smuzhiyun 				 struct device_attribute *attr,
984*4882a593Smuzhiyun 				 const char *buf, size_t count)
985*4882a593Smuzhiyun {
986*4882a593Smuzhiyun 	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
987*4882a593Smuzhiyun 	struct test_config *config = &test_dev->config;
988*4882a593Smuzhiyun 
989*4882a593Smuzhiyun 	return test_dev_config_update_int(test_dev, buf, count,
990*4882a593Smuzhiyun 					  &config->test_result);
991*4882a593Smuzhiyun }
992*4882a593Smuzhiyun 
config_num_threads_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)993*4882a593Smuzhiyun static ssize_t config_num_threads_store(struct device *dev,
994*4882a593Smuzhiyun 					struct device_attribute *attr,
995*4882a593Smuzhiyun 					const char *buf, size_t count)
996*4882a593Smuzhiyun {
997*4882a593Smuzhiyun 	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
998*4882a593Smuzhiyun 	struct test_config *config = &test_dev->config;
999*4882a593Smuzhiyun 
1000*4882a593Smuzhiyun 	return test_dev_config_update_uint_sync(test_dev, buf, count,
1001*4882a593Smuzhiyun 						&config->num_threads,
1002*4882a593Smuzhiyun 						kmod_config_sync_info);
1003*4882a593Smuzhiyun }
1004*4882a593Smuzhiyun 
config_num_threads_show(struct device * dev,struct device_attribute * attr,char * buf)1005*4882a593Smuzhiyun static ssize_t config_num_threads_show(struct device *dev,
1006*4882a593Smuzhiyun 				       struct device_attribute *attr,
1007*4882a593Smuzhiyun 				       char *buf)
1008*4882a593Smuzhiyun {
1009*4882a593Smuzhiyun 	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
1010*4882a593Smuzhiyun 	struct test_config *config = &test_dev->config;
1011*4882a593Smuzhiyun 
1012*4882a593Smuzhiyun 	return test_dev_config_show_int(test_dev, buf, config->num_threads);
1013*4882a593Smuzhiyun }
1014*4882a593Smuzhiyun static DEVICE_ATTR_RW(config_num_threads);
1015*4882a593Smuzhiyun 
config_test_case_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1016*4882a593Smuzhiyun static ssize_t config_test_case_store(struct device *dev,
1017*4882a593Smuzhiyun 				      struct device_attribute *attr,
1018*4882a593Smuzhiyun 				      const char *buf, size_t count)
1019*4882a593Smuzhiyun {
1020*4882a593Smuzhiyun 	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
1021*4882a593Smuzhiyun 	struct test_config *config = &test_dev->config;
1022*4882a593Smuzhiyun 
1023*4882a593Smuzhiyun 	return test_dev_config_update_uint_range(test_dev, buf, count,
1024*4882a593Smuzhiyun 						 &config->test_case,
1025*4882a593Smuzhiyun 						 __TEST_KMOD_INVALID + 1,
1026*4882a593Smuzhiyun 						 __TEST_KMOD_MAX - 1);
1027*4882a593Smuzhiyun }
1028*4882a593Smuzhiyun 
config_test_case_show(struct device * dev,struct device_attribute * attr,char * buf)1029*4882a593Smuzhiyun static ssize_t config_test_case_show(struct device *dev,
1030*4882a593Smuzhiyun 				     struct device_attribute *attr,
1031*4882a593Smuzhiyun 				     char *buf)
1032*4882a593Smuzhiyun {
1033*4882a593Smuzhiyun 	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
1034*4882a593Smuzhiyun 	struct test_config *config = &test_dev->config;
1035*4882a593Smuzhiyun 
1036*4882a593Smuzhiyun 	return test_dev_config_show_uint(test_dev, buf, config->test_case);
1037*4882a593Smuzhiyun }
1038*4882a593Smuzhiyun static DEVICE_ATTR_RW(config_test_case);
1039*4882a593Smuzhiyun 
test_result_show(struct device * dev,struct device_attribute * attr,char * buf)1040*4882a593Smuzhiyun static ssize_t test_result_show(struct device *dev,
1041*4882a593Smuzhiyun 				struct device_attribute *attr,
1042*4882a593Smuzhiyun 				char *buf)
1043*4882a593Smuzhiyun {
1044*4882a593Smuzhiyun 	struct kmod_test_device *test_dev = dev_to_test_dev(dev);
1045*4882a593Smuzhiyun 	struct test_config *config = &test_dev->config;
1046*4882a593Smuzhiyun 
1047*4882a593Smuzhiyun 	return test_dev_config_show_int(test_dev, buf, config->test_result);
1048*4882a593Smuzhiyun }
1049*4882a593Smuzhiyun static DEVICE_ATTR_RW(test_result);
1050*4882a593Smuzhiyun 
/* Shorthand: the struct attribute behind DEVICE_ATTR 'name'. */
#define TEST_KMOD_DEV_ATTR(name)		&dev_attr_##name.attr

/* All sysfs attributes exposed by each test_kmod%d misc device. */
static struct attribute *test_dev_attrs[] = {
	TEST_KMOD_DEV_ATTR(trigger_config),
	TEST_KMOD_DEV_ATTR(config),
	TEST_KMOD_DEV_ATTR(reset),

	TEST_KMOD_DEV_ATTR(config_test_driver),
	TEST_KMOD_DEV_ATTR(config_test_fs),
	TEST_KMOD_DEV_ATTR(config_num_threads),
	TEST_KMOD_DEV_ATTR(config_test_case),
	TEST_KMOD_DEV_ATTR(test_result),

	NULL,
};

/* Generates test_dev_groups, assigned to misc_dev->groups at alloc time. */
ATTRIBUTE_GROUPS(test_dev);
1068*4882a593Smuzhiyun 
kmod_config_init(struct kmod_test_device * test_dev)1069*4882a593Smuzhiyun static int kmod_config_init(struct kmod_test_device *test_dev)
1070*4882a593Smuzhiyun {
1071*4882a593Smuzhiyun 	int ret;
1072*4882a593Smuzhiyun 
1073*4882a593Smuzhiyun 	mutex_lock(&test_dev->config_mutex);
1074*4882a593Smuzhiyun 	ret = __kmod_config_init(test_dev);
1075*4882a593Smuzhiyun 	mutex_unlock(&test_dev->config_mutex);
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun 	return ret;
1078*4882a593Smuzhiyun }
1079*4882a593Smuzhiyun 
alloc_test_dev_kmod(int idx)1080*4882a593Smuzhiyun static struct kmod_test_device *alloc_test_dev_kmod(int idx)
1081*4882a593Smuzhiyun {
1082*4882a593Smuzhiyun 	int ret;
1083*4882a593Smuzhiyun 	struct kmod_test_device *test_dev;
1084*4882a593Smuzhiyun 	struct miscdevice *misc_dev;
1085*4882a593Smuzhiyun 
1086*4882a593Smuzhiyun 	test_dev = vzalloc(sizeof(struct kmod_test_device));
1087*4882a593Smuzhiyun 	if (!test_dev)
1088*4882a593Smuzhiyun 		goto err_out;
1089*4882a593Smuzhiyun 
1090*4882a593Smuzhiyun 	mutex_init(&test_dev->config_mutex);
1091*4882a593Smuzhiyun 	mutex_init(&test_dev->trigger_mutex);
1092*4882a593Smuzhiyun 	mutex_init(&test_dev->thread_mutex);
1093*4882a593Smuzhiyun 
1094*4882a593Smuzhiyun 	init_completion(&test_dev->kthreads_done);
1095*4882a593Smuzhiyun 
1096*4882a593Smuzhiyun 	ret = kmod_config_init(test_dev);
1097*4882a593Smuzhiyun 	if (ret < 0) {
1098*4882a593Smuzhiyun 		pr_err("Cannot alloc kmod_config_init()\n");
1099*4882a593Smuzhiyun 		goto err_out_free;
1100*4882a593Smuzhiyun 	}
1101*4882a593Smuzhiyun 
1102*4882a593Smuzhiyun 	test_dev->dev_idx = idx;
1103*4882a593Smuzhiyun 	misc_dev = &test_dev->misc_dev;
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun 	misc_dev->minor = MISC_DYNAMIC_MINOR;
1106*4882a593Smuzhiyun 	misc_dev->name = kasprintf(GFP_KERNEL, "test_kmod%d", idx);
1107*4882a593Smuzhiyun 	if (!misc_dev->name) {
1108*4882a593Smuzhiyun 		pr_err("Cannot alloc misc_dev->name\n");
1109*4882a593Smuzhiyun 		goto err_out_free_config;
1110*4882a593Smuzhiyun 	}
1111*4882a593Smuzhiyun 	misc_dev->groups = test_dev_groups;
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun 	return test_dev;
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun err_out_free_config:
1116*4882a593Smuzhiyun 	free_test_dev_info(test_dev);
1117*4882a593Smuzhiyun 	kmod_config_free(test_dev);
1118*4882a593Smuzhiyun err_out_free:
1119*4882a593Smuzhiyun 	vfree(test_dev);
1120*4882a593Smuzhiyun 	test_dev = NULL;
1121*4882a593Smuzhiyun err_out:
1122*4882a593Smuzhiyun 	return NULL;
1123*4882a593Smuzhiyun }
1124*4882a593Smuzhiyun 
free_test_dev_kmod(struct kmod_test_device * test_dev)1125*4882a593Smuzhiyun static void free_test_dev_kmod(struct kmod_test_device *test_dev)
1126*4882a593Smuzhiyun {
1127*4882a593Smuzhiyun 	if (test_dev) {
1128*4882a593Smuzhiyun 		kfree_const(test_dev->misc_dev.name);
1129*4882a593Smuzhiyun 		test_dev->misc_dev.name = NULL;
1130*4882a593Smuzhiyun 		free_test_dev_info(test_dev);
1131*4882a593Smuzhiyun 		kmod_config_free(test_dev);
1132*4882a593Smuzhiyun 		vfree(test_dev);
1133*4882a593Smuzhiyun 		test_dev = NULL;
1134*4882a593Smuzhiyun 	}
1135*4882a593Smuzhiyun }
1136*4882a593Smuzhiyun 
/*
 * Create, register and track a new test device. reg_dev_mutex is
 * held throughout to serialize registrations and protect the global
 * num_test_devs counter and reg_test_devs list. Returns the new
 * device, or NULL on allocation/registration failure or when the
 * device-count limit is reached.
 */
static struct kmod_test_device *register_test_dev_kmod(void)
{
	struct kmod_test_device *test_dev = NULL;
	int ret;

	mutex_lock(&reg_dev_mutex);

	/* int should suffice for number of devices, test for wrap */
	if (num_test_devs + 1 == INT_MAX) {
		pr_err("reached limit of number of test devices\n");
		goto out;
	}

	test_dev = alloc_test_dev_kmod(num_test_devs);
	if (!test_dev)
		goto out;

	ret = misc_register(&test_dev->misc_dev);
	if (ret) {
		pr_err("could not register misc device: %d\n", ret);
		free_test_dev_kmod(test_dev);
		/* NULL signals failure to the caller. */
		test_dev = NULL;
		goto out;
	}

	/* Only valid after a successful misc_register(). */
	test_dev->dev = test_dev->misc_dev.this_device;
	list_add_tail(&test_dev->list, &reg_test_devs);
	dev_info(test_dev->dev, "interface ready\n");

	num_test_devs++;

out:
	mutex_unlock(&reg_dev_mutex);

	return test_dev;

}
1174*4882a593Smuzhiyun 
test_kmod_init(void)1175*4882a593Smuzhiyun static int __init test_kmod_init(void)
1176*4882a593Smuzhiyun {
1177*4882a593Smuzhiyun 	struct kmod_test_device *test_dev;
1178*4882a593Smuzhiyun 	int ret;
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 	test_dev = register_test_dev_kmod();
1181*4882a593Smuzhiyun 	if (!test_dev) {
1182*4882a593Smuzhiyun 		pr_err("Cannot add first test kmod device\n");
1183*4882a593Smuzhiyun 		return -ENODEV;
1184*4882a593Smuzhiyun 	}
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 	/*
1187*4882a593Smuzhiyun 	 * With some work we might be able to gracefully enable
1188*4882a593Smuzhiyun 	 * testing with this driver built-in, for now this seems
1189*4882a593Smuzhiyun 	 * rather risky. For those willing to try have at it,
1190*4882a593Smuzhiyun 	 * and enable the below. Good luck! If that works, try
1191*4882a593Smuzhiyun 	 * lowering the init level for more fun.
1192*4882a593Smuzhiyun 	 */
1193*4882a593Smuzhiyun 	if (force_init_test) {
1194*4882a593Smuzhiyun 		ret = trigger_config_run_type(test_dev,
1195*4882a593Smuzhiyun 					      TEST_KMOD_DRIVER, "tun");
1196*4882a593Smuzhiyun 		if (WARN_ON(ret))
1197*4882a593Smuzhiyun 			return ret;
1198*4882a593Smuzhiyun 		ret = trigger_config_run_type(test_dev,
1199*4882a593Smuzhiyun 					      TEST_KMOD_FS_TYPE, "btrfs");
1200*4882a593Smuzhiyun 		if (WARN_ON(ret))
1201*4882a593Smuzhiyun 			return ret;
1202*4882a593Smuzhiyun 	}
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun 	return 0;
1205*4882a593Smuzhiyun }
1206*4882a593Smuzhiyun late_initcall(test_kmod_init);
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun static
unregister_test_dev_kmod(struct kmod_test_device * test_dev)1209*4882a593Smuzhiyun void unregister_test_dev_kmod(struct kmod_test_device *test_dev)
1210*4882a593Smuzhiyun {
1211*4882a593Smuzhiyun 	mutex_lock(&test_dev->trigger_mutex);
1212*4882a593Smuzhiyun 	mutex_lock(&test_dev->config_mutex);
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 	test_dev_kmod_stop_tests(test_dev);
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 	dev_info(test_dev->dev, "removing interface\n");
1217*4882a593Smuzhiyun 	misc_deregister(&test_dev->misc_dev);
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun 	mutex_unlock(&test_dev->config_mutex);
1220*4882a593Smuzhiyun 	mutex_unlock(&test_dev->trigger_mutex);
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 	free_test_dev_kmod(test_dev);
1223*4882a593Smuzhiyun }
1224*4882a593Smuzhiyun 
/*
 * Module exit: unregister and free every device on the global list.
 * The safe iterator is required because unregister_test_dev_kmod()
 * frees each entry after list_del().
 */
static void __exit test_kmod_exit(void)
{
	struct kmod_test_device *test_dev, *tmp;

	mutex_lock(&reg_dev_mutex);
	list_for_each_entry_safe(test_dev, tmp, &reg_test_devs, list) {
		list_del(&test_dev->list);
		unregister_test_dev_kmod(test_dev);
	}
	mutex_unlock(&reg_dev_mutex);
}
module_exit(test_kmod_exit);
1237*4882a593Smuzhiyun 
1238*4882a593Smuzhiyun MODULE_AUTHOR("Luis R. Rodriguez <mcgrof@kernel.org>");
1239*4882a593Smuzhiyun MODULE_LICENSE("GPL");
1240