xref: /OK3568_Linux_fs/kernel/drivers/powercap/idle_inject.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright 2018 Linaro Limited
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * The idle injection framework provides a way to force CPUs to enter idle
8*4882a593Smuzhiyun  * states for a specified fraction of time over a specified period.
9*4882a593Smuzhiyun  *
10*4882a593Smuzhiyun  * It relies on the smpboot kthreads feature providing common code for CPU
11*4882a593Smuzhiyun  * hotplug and thread [un]parking.
12*4882a593Smuzhiyun  *
13*4882a593Smuzhiyun  * All of the kthreads used for idle injection are created at init time.
14*4882a593Smuzhiyun  *
 * Next, the users of the idle injection framework provide a cpumask via
16*4882a593Smuzhiyun  * its register function. The kthreads will be synchronized with respect to
17*4882a593Smuzhiyun  * this cpumask.
18*4882a593Smuzhiyun  *
19*4882a593Smuzhiyun  * The idle + run duration is specified via separate helpers and that allows
20*4882a593Smuzhiyun  * idle injection to be started.
21*4882a593Smuzhiyun  *
22*4882a593Smuzhiyun  * The idle injection kthreads will call play_idle_precise() with the idle
23*4882a593Smuzhiyun  * duration and max allowed latency specified as per the above.
24*4882a593Smuzhiyun  *
25*4882a593Smuzhiyun  * After all of them have been woken up, a timer is set to start the next idle
26*4882a593Smuzhiyun  * injection cycle.
27*4882a593Smuzhiyun  *
28*4882a593Smuzhiyun  * The timer interrupt handler will wake up the idle injection kthreads for
29*4882a593Smuzhiyun  * all of the CPUs in the cpumask provided by the user.
30*4882a593Smuzhiyun  *
31*4882a593Smuzhiyun  * Idle injection is stopped synchronously and no leftover idle injection
32*4882a593Smuzhiyun  * kthread activity after its completion is guaranteed.
33*4882a593Smuzhiyun  *
34*4882a593Smuzhiyun  * It is up to the user of this framework to provide a lock for higher-level
35*4882a593Smuzhiyun  * synchronization to prevent race conditions like starting idle injection
36*4882a593Smuzhiyun  * while unregistering from the framework.
37*4882a593Smuzhiyun  */
38*4882a593Smuzhiyun #define pr_fmt(fmt) "ii_dev: " fmt
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun #include <linux/cpu.h>
41*4882a593Smuzhiyun #include <linux/hrtimer.h>
42*4882a593Smuzhiyun #include <linux/kthread.h>
43*4882a593Smuzhiyun #include <linux/sched.h>
44*4882a593Smuzhiyun #include <linux/slab.h>
45*4882a593Smuzhiyun #include <linux/smpboot.h>
46*4882a593Smuzhiyun #include <linux/idle_inject.h>
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun #include <uapi/linux/sched/types.h>
49*4882a593Smuzhiyun 
/**
 * struct idle_inject_thread - task on/off switch structure
 * @tsk: task injecting the idle cycles
 * @should_run: whether or not to run the task (for the smpboot kthread API)
 */
struct idle_inject_thread {
	struct task_struct *tsk;
	/* Read by idle_inject_should_run(), set by idle_inject_wakeup(). */
	int should_run;
};
59*4882a593Smuzhiyun 
/**
 * struct idle_inject_device - idle injection data
 * @timer: idle injection period timer
 * @idle_duration_us: duration of CPU idle time to inject
 * @run_duration_us: duration of CPU run time to allow
 * @latency_us: max allowed latency
 * @cpumask: mask of CPUs affected by idle injection
 */
struct idle_inject_device {
	struct hrtimer timer;
	unsigned int idle_duration_us;
	unsigned int run_duration_us;
	unsigned int latency_us;
	/*
	 * Flexible array member: storage for the cpumask is allocated with
	 * the struct (kzalloc of sizeof(*ii_dev) + cpumask_size()) in
	 * idle_inject_register(); access via to_cpumask().
	 */
	unsigned long cpumask[];
};
75*4882a593Smuzhiyun 
/* Per-CPU injection kthread handle and the device (if any) owning that CPU. */
static DEFINE_PER_CPU(struct idle_inject_thread, idle_inject_thread);
static DEFINE_PER_CPU(struct idle_inject_device *, idle_inject_device);
78*4882a593Smuzhiyun 
79*4882a593Smuzhiyun /**
80*4882a593Smuzhiyun  * idle_inject_wakeup - Wake up idle injection threads
81*4882a593Smuzhiyun  * @ii_dev: target idle injection device
82*4882a593Smuzhiyun  *
83*4882a593Smuzhiyun  * Every idle injection task associated with the given idle injection device
84*4882a593Smuzhiyun  * and running on an online CPU will be woken up.
85*4882a593Smuzhiyun  */
idle_inject_wakeup(struct idle_inject_device * ii_dev)86*4882a593Smuzhiyun static void idle_inject_wakeup(struct idle_inject_device *ii_dev)
87*4882a593Smuzhiyun {
88*4882a593Smuzhiyun 	struct idle_inject_thread *iit;
89*4882a593Smuzhiyun 	unsigned int cpu;
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun 	for_each_cpu_and(cpu, to_cpumask(ii_dev->cpumask), cpu_online_mask) {
92*4882a593Smuzhiyun 		iit = per_cpu_ptr(&idle_inject_thread, cpu);
93*4882a593Smuzhiyun 		iit->should_run = 1;
94*4882a593Smuzhiyun 		wake_up_process(iit->tsk);
95*4882a593Smuzhiyun 	}
96*4882a593Smuzhiyun }
97*4882a593Smuzhiyun 
/**
 * idle_inject_timer_fn - idle injection timer function
 * @timer: idle injection hrtimer
 *
 * This function is called when the idle injection timer expires.  It wakes up
 * idle injection tasks associated with the timer and they, in turn, invoke
 * play_idle_precise() to inject a specified amount of CPU idle time.
 *
 * Return: HRTIMER_RESTART.
 */
static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	unsigned int duration_us;
	struct idle_inject_device *ii_dev =
		container_of(timer, struct idle_inject_device, timer);

	/*
	 * The period is run time + idle time.  READ_ONCE() pairs with the
	 * WRITE_ONCE() in idle_inject_set_duration(), which may update these
	 * fields concurrently.
	 */
	duration_us = READ_ONCE(ii_dev->run_duration_us);
	duration_us += READ_ONCE(ii_dev->idle_duration_us);

	idle_inject_wakeup(ii_dev);

	/* Re-arm for the next injection cycle (duration is in microseconds). */
	hrtimer_forward_now(timer, ns_to_ktime(duration_us * NSEC_PER_USEC));

	return HRTIMER_RESTART;
}
123*4882a593Smuzhiyun 
/**
 * idle_inject_fn - idle injection work function
 * @cpu: the CPU owning the task
 *
 * This function calls play_idle_precise() to inject a specified amount of CPU
 * idle time.
 */
static void idle_inject_fn(unsigned int cpu)
{
	struct idle_inject_device *ii_dev;
	struct idle_inject_thread *iit;

	ii_dev = per_cpu(idle_inject_device, cpu);
	iit = per_cpu_ptr(&idle_inject_thread, cpu);

	/*
	 * Let the smpboot main loop know that the task should not run again.
	 * The flag is set back to 1 by idle_inject_wakeup() on the next timer
	 * expiration.
	 */
	iit->should_run = 0;

	/* Durations are stored in microseconds; play_idle_precise() takes ns. */
	play_idle_precise(READ_ONCE(ii_dev->idle_duration_us) * NSEC_PER_USEC,
			  READ_ONCE(ii_dev->latency_us) * NSEC_PER_USEC);
}
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun /**
149*4882a593Smuzhiyun  * idle_inject_set_duration - idle and run duration update helper
150*4882a593Smuzhiyun  * @run_duration_us: CPU run time to allow in microseconds
151*4882a593Smuzhiyun  * @idle_duration_us: CPU idle time to inject in microseconds
152*4882a593Smuzhiyun  */
idle_inject_set_duration(struct idle_inject_device * ii_dev,unsigned int run_duration_us,unsigned int idle_duration_us)153*4882a593Smuzhiyun void idle_inject_set_duration(struct idle_inject_device *ii_dev,
154*4882a593Smuzhiyun 			      unsigned int run_duration_us,
155*4882a593Smuzhiyun 			      unsigned int idle_duration_us)
156*4882a593Smuzhiyun {
157*4882a593Smuzhiyun 	if (run_duration_us && idle_duration_us) {
158*4882a593Smuzhiyun 		WRITE_ONCE(ii_dev->run_duration_us, run_duration_us);
159*4882a593Smuzhiyun 		WRITE_ONCE(ii_dev->idle_duration_us, idle_duration_us);
160*4882a593Smuzhiyun 	}
161*4882a593Smuzhiyun }
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun /**
164*4882a593Smuzhiyun  * idle_inject_get_duration - idle and run duration retrieval helper
165*4882a593Smuzhiyun  * @run_duration_us: memory location to store the current CPU run time
166*4882a593Smuzhiyun  * @idle_duration_us: memory location to store the current CPU idle time
167*4882a593Smuzhiyun  */
idle_inject_get_duration(struct idle_inject_device * ii_dev,unsigned int * run_duration_us,unsigned int * idle_duration_us)168*4882a593Smuzhiyun void idle_inject_get_duration(struct idle_inject_device *ii_dev,
169*4882a593Smuzhiyun 			      unsigned int *run_duration_us,
170*4882a593Smuzhiyun 			      unsigned int *idle_duration_us)
171*4882a593Smuzhiyun {
172*4882a593Smuzhiyun 	*run_duration_us = READ_ONCE(ii_dev->run_duration_us);
173*4882a593Smuzhiyun 	*idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);
174*4882a593Smuzhiyun }
175*4882a593Smuzhiyun 
176*4882a593Smuzhiyun /**
177*4882a593Smuzhiyun  * idle_inject_set_latency - set the maximum latency allowed
178*4882a593Smuzhiyun  * @latency_us: set the latency requirement for the idle state
179*4882a593Smuzhiyun  */
idle_inject_set_latency(struct idle_inject_device * ii_dev,unsigned int latency_us)180*4882a593Smuzhiyun void idle_inject_set_latency(struct idle_inject_device *ii_dev,
181*4882a593Smuzhiyun 			     unsigned int latency_us)
182*4882a593Smuzhiyun {
183*4882a593Smuzhiyun 	WRITE_ONCE(ii_dev->latency_us, latency_us);
184*4882a593Smuzhiyun }
185*4882a593Smuzhiyun 
/**
 * idle_inject_start - start idle injections
 * @ii_dev: idle injection control device structure
 *
 * The function starts idle injection by first waking up all of the idle
 * injection kthreads associated with @ii_dev to let them inject CPU idle
 * time and then sets up a timer to start the next idle injection period.
 *
 * Return: -EINVAL if the CPU idle or CPU run time is not set or 0 on success.
 */
int idle_inject_start(struct idle_inject_device *ii_dev)
{
	unsigned int idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);
	unsigned int run_duration_us = READ_ONCE(ii_dev->run_duration_us);

	/* Both durations must have been set via idle_inject_set_duration(). */
	if (!idle_duration_us || !run_duration_us)
		return -EINVAL;

	pr_debug("Starting injecting idle cycles on CPUs '%*pbl'\n",
		 cpumask_pr_args(to_cpumask(ii_dev->cpumask)));

	idle_inject_wakeup(ii_dev);

	/* Arm the periodic timer for one full idle + run cycle. */
	hrtimer_start(&ii_dev->timer,
		      ns_to_ktime((idle_duration_us + run_duration_us) *
				  NSEC_PER_USEC),
		      HRTIMER_MODE_REL);

	return 0;
}
216*4882a593Smuzhiyun 
/**
 * idle_inject_stop - stops idle injections
 * @ii_dev: idle injection control device structure
 *
 * The function stops idle injection and waits for the threads to finish work.
 * If CPU idle time is being injected when this function runs, then it will
 * wait until the end of the cycle.
 *
 * When it returns, there is no more idle injection kthread activity.  The
 * kthreads are scheduled out and the periodic timer is off.
 */
void idle_inject_stop(struct idle_inject_device *ii_dev)
{
	struct idle_inject_thread *iit;
	unsigned int cpu;

	pr_debug("Stopping idle injection on CPUs '%*pbl'\n",
		 cpumask_pr_args(to_cpumask(ii_dev->cpumask)));

	/* Cancel the periodic timer first so no new cycles are started. */
	hrtimer_cancel(&ii_dev->timer);

	/*
	 * Stopping idle injection requires all of the idle injection kthreads
	 * associated with the given cpumask to be parked and stay that way, so
	 * prevent CPUs from going online at this point.  Any CPUs going online
	 * after the loop below will be covered by clearing the should_run flag
	 * that will cause the smpboot main loop to schedule them out.
	 */
	cpu_hotplug_disable();

	/*
	 * Iterate over all (online + offline) CPUs here in case one of them
	 * goes offline with the should_run flag set so as to prevent its idle
	 * injection kthread from running when the CPU goes online again after
	 * the ii_dev has been freed.
	 */
	for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) {
		iit = per_cpu_ptr(&idle_inject_thread, cpu);
		iit->should_run = 0;

		/* Wait for a thread in the middle of an injection to finish. */
		wait_task_inactive(iit->tsk, 0);
	}

	cpu_hotplug_enable();
}
262*4882a593Smuzhiyun 
/**
 * idle_inject_setup - prepare the current task for idle injection
 * @cpu: not used
 *
 * Called once, this function is in charge of setting the current task's
 * scheduler parameters to make it an RT task.
 */
static void idle_inject_setup(unsigned int cpu)
{
	/* SCHED_FIFO so the injection thread runs promptly when woken. */
	sched_set_fifo(current);
}
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun /**
276*4882a593Smuzhiyun  * idle_inject_should_run - function helper for the smpboot API
277*4882a593Smuzhiyun  * @cpu: CPU the kthread is running on
278*4882a593Smuzhiyun  *
279*4882a593Smuzhiyun  * Return: whether or not the thread can run.
280*4882a593Smuzhiyun  */
idle_inject_should_run(unsigned int cpu)281*4882a593Smuzhiyun static int idle_inject_should_run(unsigned int cpu)
282*4882a593Smuzhiyun {
283*4882a593Smuzhiyun 	struct idle_inject_thread *iit =
284*4882a593Smuzhiyun 		per_cpu_ptr(&idle_inject_thread, cpu);
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun 	return iit->should_run;
287*4882a593Smuzhiyun }
288*4882a593Smuzhiyun 
/**
 * idle_inject_register - initialize idle injection on a set of CPUs
 * @cpumask: CPUs to be affected by idle injection
 *
 * This function creates an idle injection control device structure for the
 * given set of CPUs and initializes the timer associated with it.  It does not
 * start any injection cycles.
 *
 * Return: NULL if memory allocation fails, idle injection control device
 * pointer on success.
 */
struct idle_inject_device *idle_inject_register(struct cpumask *cpumask)
{
	struct idle_inject_device *ii_dev;
	int cpu, cpu_rb;

	/* The cpumask is a flexible array member allocated with the struct. */
	ii_dev = kzalloc(sizeof(*ii_dev) + cpumask_size(), GFP_KERNEL);
	if (!ii_dev)
		return NULL;

	cpumask_copy(to_cpumask(ii_dev->cpumask), cpumask);
	hrtimer_init(&ii_dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ii_dev->timer.function = idle_inject_timer_fn;
	/* No latency constraint by default; idle_inject_set_latency() may tighten it. */
	ii_dev->latency_us = UINT_MAX;

	/* Claim each CPU; a CPU may belong to at most one injection device. */
	for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) {

		if (per_cpu(idle_inject_device, cpu)) {
			pr_err("cpu%d is already registered\n", cpu);
			goto out_rollback;
		}

		per_cpu(idle_inject_device, cpu) = ii_dev;
	}

	return ii_dev;

out_rollback:
	/* Undo the claims made so far, stopping at the CPU that failed. */
	for_each_cpu(cpu_rb, to_cpumask(ii_dev->cpumask)) {
		if (cpu == cpu_rb)
			break;
		per_cpu(idle_inject_device, cpu_rb) = NULL;
	}

	kfree(ii_dev);

	return NULL;
}
337*4882a593Smuzhiyun 
/**
 * idle_inject_unregister - unregister idle injection control device
 * @ii_dev: idle injection control device to unregister
 *
 * The function stops idle injection for the given control device,
 * unregisters its kthreads and frees memory allocated when that device was
 * created.
 */
void idle_inject_unregister(struct idle_inject_device *ii_dev)
{
	unsigned int cpu;

	/* Synchronously stop the timer and all injection kthreads first. */
	idle_inject_stop(ii_dev);

	/* Release every CPU claimed by idle_inject_register(). */
	for_each_cpu(cpu, to_cpumask(ii_dev->cpumask))
		per_cpu(idle_inject_device, cpu) = NULL;

	kfree(ii_dev);
}
357*4882a593Smuzhiyun 
/*
 * smpboot descriptor for the per-CPU "idle_inject/%u" kthreads; the smpboot
 * core handles their creation and [un]parking across CPU hotplug.
 */
static struct smp_hotplug_thread idle_inject_threads = {
	.store = &idle_inject_thread.tsk,
	.setup = idle_inject_setup,
	.thread_fn = idle_inject_fn,
	.thread_comm = "idle_inject/%u",
	.thread_should_run = idle_inject_should_run,
};
365*4882a593Smuzhiyun 
/* Create the per-CPU idle injection kthreads at early boot. */
static int __init idle_inject_init(void)
{
	return smpboot_register_percpu_thread(&idle_inject_threads);
}
early_initcall(idle_inject_init);
371