// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/time.h>
#include <linux/numa.h>
#include <linux/nodemask.h>
#include <linux/topology.h>

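/*
 * Each test probe sleeps for TEST_PROBE_DELAY msecs.  Registration of
 * asynchronously probed devices/drivers is expected to return in well under
 * TEST_PROBE_THRESHOLD msecs (the probes run from the async work queue),
 * while synchronously probed ones must take at least the full delay.
 */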
#define TEST_PROBE_DELAY	(5 * 1000)	/* 5 sec */
#define TEST_PROBE_THRESHOLD	(TEST_PROBE_DELAY / 2)

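/*
 * warnings/errors are accumulated by the probe routine and the init path;
 * timeout tells late asynchronous probes to report an error instead of
 * sleeping; async_completed counts the asynchronous probes that finished.
 */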
static atomic_t warnings, errors, timeout, async_completed;

static int test_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	/*
	 * Determine if we have hit the "timeout" limit for the test; if we
	 * have, report it as an error, otherwise sleep for the required
	 * amount of time and then report completion.
	 */
	if (atomic_read(&timeout)) {
		dev_err(dev, "async probe took too long\n");
		atomic_inc(&errors);
	} else {
		dev_dbg(&pdev->dev, "sleeping for %d msecs in probe\n",
			TEST_PROBE_DELAY);
		msleep(TEST_PROBE_DELAY);
		dev_dbg(&pdev->dev, "done sleeping\n");
	}

	/*
	 * If this device is being probed asynchronously, report a NUMA
	 * mismatch when the probe is not running on the node the device
	 * was bound to.
	 */
	if (dev->driver->probe_type == PROBE_PREFER_ASYNCHRONOUS) {
		if (IS_ENABLED(CONFIG_NUMA) &&
		    dev_to_node(dev) != numa_node_id()) {
			dev_warn(dev, "NUMA node mismatch %d != %d\n",
				 dev_to_node(dev), numa_node_id());
			atomic_inc(&warnings);
		}

		atomic_inc(&async_completed);
	}

	return 0;
}

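/*
 * Two drivers sharing the same probe routine: devices bound to async_driver
 * are probed by the driver core from the asynchronous work queue
 * (PROBE_PREFER_ASYNCHRONOUS), while devices bound to sync_driver are probed
 * synchronously from the registration path (PROBE_FORCE_SYNCHRONOUS).
 */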
static struct platform_driver async_driver = {
	.driver = {
		.name = "test_async_driver",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = test_probe,
};

static struct platform_driver sync_driver = {
	.driver = {
		.name = "test_sync_driver",
		.probe_type = PROBE_FORCE_SYNCHRONOUS,
	},
	.probe = test_probe,
};

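/*
 * Room for two asynchronous devices per possible CPU (one set registered
 * before the asynchronous driver, one set after) and for two synchronous
 * devices.
 */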
static struct platform_device *async_dev[NR_CPUS * 2];
static struct platform_device *sync_dev[2];

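/*
 * Allocate a platform device, optionally bind it to a NUMA node, and add it
 * to the device hierarchy.  Returns NULL if allocation fails and an ERR_PTR
 * if platform_device_add() fails.
 */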
static struct platform_device *
test_platform_device_register_node(char *name, int id, int nid)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc(name, id);
	if (!pdev)
		return NULL;

	if (nid != NUMA_NO_NODE)
		set_dev_node(&pdev->dev, nid);

	ret = platform_device_add(pdev);
	if (ret) {
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	return pdev;
}

static int __init test_async_probe_init(void)
{
	struct platform_device **pdev = NULL;
	int async_id = 0, sync_id = 0;
	unsigned long long duration;
	ktime_t calltime, delta;
	int err, nid, cpu;

	pr_info("registering first set of asynchronous devices...\n");

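	/*
	 * Register one asynchronous test device per online CPU, bound to that
	 * CPU's NUMA node.  The driver is not registered yet, so all of these
	 * devices are probed in parallel once it is.
	 */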
	for_each_online_cpu(cpu) {
		nid = cpu_to_node(cpu);
		pdev = &async_dev[async_id];
		*pdev = test_platform_device_register_node("test_async_driver",
							   async_id,
							   nid);
		if (IS_ERR(*pdev)) {
			err = PTR_ERR(*pdev);
			*pdev = NULL;
			pr_err("failed to create async_dev: %d\n", err);
			goto err_unregister_async_devs;
		}

		async_id++;
	}

	pr_info("registering asynchronous driver...\n");
	calltime = ktime_get();
	err = platform_driver_register(&async_driver);
	if (err) {
		pr_err("Failed to register async_driver: %d\n", err);
		goto err_unregister_async_devs;
	}

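	/*
	 * The probes run from the async work queue, so driver registration
	 * itself should return almost immediately.  Taking longer than half
	 * of the probe delay means the probes ran synchronously.
	 */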
	delta = ktime_sub(ktime_get(), calltime);
	duration = (unsigned long long) ktime_to_ms(delta);
	pr_info("registration took %lld msecs\n", duration);
	if (duration > TEST_PROBE_THRESHOLD) {
		pr_err("test failed: probe took too long\n");
		err = -ETIMEDOUT;
		goto err_unregister_async_driver;
	}

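	/*
	 * With the asynchronous driver already registered, each device added
	 * here kicks off its probe from the async work queue, so device
	 * registration should again return well within the threshold.
	 */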
	pr_info("registering second set of asynchronous devices...\n");
	calltime = ktime_get();
	for_each_online_cpu(cpu) {
		nid = cpu_to_node(cpu);
		pdev = &async_dev[async_id];

		*pdev = test_platform_device_register_node("test_async_driver",
							   async_id,
							   nid);
		if (IS_ERR(*pdev)) {
			err = PTR_ERR(*pdev);
			*pdev = NULL;
			pr_err("failed to create async_dev: %d\n", err);
			goto err_unregister_async_driver;
		}

		async_id++;
	}

	delta = ktime_sub(ktime_get(), calltime);
	duration = (unsigned long long) ktime_to_ms(delta);
	dev_info(&(*pdev)->dev,
		 "registration took %lld msecs\n", duration);
	if (duration > TEST_PROBE_THRESHOLD) {
		dev_err(&(*pdev)->dev,
			"test failed: probe took too long\n");
		err = -ETIMEDOUT;
		goto err_unregister_async_driver;
	}

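	/*
	 * The synchronous driver uses PROBE_FORCE_SYNCHRONOUS, so both
	 * platform_driver_register() and platform_device_add() below must
	 * not return until test_probe() has slept for the full
	 * TEST_PROBE_DELAY.  Here the timing check is inverted: returning
	 * too quickly means the probe was not actually synchronous.
	 */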
	pr_info("registering first synchronous device...\n");
	pdev = &sync_dev[sync_id];

	*pdev = test_platform_device_register_node("test_sync_driver",
						   sync_id,
						   NUMA_NO_NODE);
	if (IS_ERR(*pdev)) {
		err = PTR_ERR(*pdev);
		*pdev = NULL;
		pr_err("failed to create sync_dev: %d\n", err);
		goto err_unregister_async_driver;
	}

	sync_id++;

	pr_info("registering synchronous driver...\n");
	calltime = ktime_get();
	err = platform_driver_register(&sync_driver);
	if (err) {
		pr_err("Failed to register sync_driver: %d\n", err);
		goto err_unregister_sync_devs;
	}

	delta = ktime_sub(ktime_get(), calltime);
	duration = (unsigned long long) ktime_to_ms(delta);
	pr_info("registration took %lld msecs\n", duration);
	if (duration < TEST_PROBE_THRESHOLD) {
		dev_err(&(*pdev)->dev,
			"test failed: probe was too quick\n");
		err = -ETIMEDOUT;
		goto err_unregister_sync_driver;
	}

	pr_info("registering second synchronous device...\n");
	pdev = &sync_dev[sync_id];
	calltime = ktime_get();

	*pdev = test_platform_device_register_node("test_sync_driver",
						   sync_id,
						   NUMA_NO_NODE);
	if (IS_ERR(*pdev)) {
		err = PTR_ERR(*pdev);
		*pdev = NULL;
		pr_err("failed to create sync_dev: %d\n", err);
		goto err_unregister_sync_driver;
	}

	sync_id++;

	delta = ktime_sub(ktime_get(), calltime);
	duration = (unsigned long long) ktime_to_ms(delta);
	dev_info(&(*pdev)->dev,
		 "registration took %lld msecs\n", duration);
	if (duration < TEST_PROBE_THRESHOLD) {
		dev_err(&(*pdev)->dev,
			"test failed: probe was too quick\n");
		err = -ETIMEDOUT;
		goto err_unregister_sync_driver;
	}

	/*
	 * The asynchronous probes should have completed while we were taking
	 * care of the synchronous ones.  If any are still outstanding, force
	 * a timeout and fall through to the cleanup path: unregistering the
	 * driver there flushes the remaining asynchronous probe calls.
	 *
	 * Otherwise, if everything completed without errors or warnings,
	 * report successful completion.
	 */
	if (atomic_read(&async_completed) != async_id) {
		pr_err("async events still pending, forcing timeout\n");
		atomic_inc(&timeout);
		err = -ETIMEDOUT;
	} else if (!atomic_read(&errors) && !atomic_read(&warnings)) {
		pr_info("completed successfully\n");
		return 0;
	}

err_unregister_sync_driver:
	platform_driver_unregister(&sync_driver);
err_unregister_sync_devs:
	while (sync_id--)
		platform_device_unregister(sync_dev[sync_id]);
err_unregister_async_driver:
	platform_driver_unregister(&async_driver);
err_unregister_async_devs:
	while (async_id--)
		platform_device_unregister(async_dev[async_id]);

	/*
	 * If err is already set, count that as an additional error for the
	 * test.  Otherwise report an invalid argument error without counting
	 * it, since the only way to get here with err clear is through errors
	 * or warnings reported by the probe routine.
	 */
	if (err)
		atomic_inc(&errors);
	else
		err = -EINVAL;

	pr_err("Test failed with %d errors and %d warnings\n",
	       atomic_read(&errors), atomic_read(&warnings));

	return err;
}
module_init(test_async_probe_init);

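/*
 * Tear everything down in reverse order: drivers first so no new probes can
 * start, then every possible device slot.  Slots that were never populated
 * are left NULL, which platform_device_unregister() tolerates.
 */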
static void __exit test_async_probe_exit(void)
{
	int id = 2;

	platform_driver_unregister(&async_driver);
	platform_driver_unregister(&sync_driver);

	while (id--)
		platform_device_unregister(sync_dev[id]);

	id = NR_CPUS * 2;
	while (id--)
		platform_device_unregister(async_dev[id]);
}
module_exit(test_async_probe_exit);

MODULE_DESCRIPTION("Test module for asynchronous driver probing");
MODULE_AUTHOR("Dmitry Torokhov <dtor@chromium.org>");
MODULE_LICENSE("GPL");
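
/*
 * Typical usage, assuming the usual naming for this self-test (e.g. a
 * CONFIG_TEST_ASYNC_DRIVER_PROBE Kconfig option building a
 * test_async_driver_probe.ko module; adjust to the actual build setup):
 *
 *   modprobe test_async_driver_probe
 *   dmesg | grep test_async
 *
 * The verdict is reported from the module init path, so modprobe itself
 * fails when the test fails.
 */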