/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/android_kabi.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(struct timer_list *t);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif
	/*
	 * The last color is reserved as "no color", used for work items
	 * which don't participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* not bound to any CPU, prefer the local CPU */
	WORK_CPU_UNBOUND	= NR_CPUS,

	/*
	 * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
	 * flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,

	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
};

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

struct rcu_work {
	struct work_struct work;
	struct rcu_head rcu;

	/* target workqueue ->rcu uses to queue ->work */
	struct workqueue_struct *wq;
};

/**
 * struct workqueue_attrs - A struct for workqueue attributes.
 *
 * This can be used to change attributes of an unbound workqueue.
 */
struct workqueue_attrs {
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It
	 * only modifies how :c:func:`apply_workqueue_attrs` selects pools and thus
	 * doesn't participate in pool hash calculations or equality comparisons.
	 */
	bool no_numa;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

static inline struct rcu_work *to_rcu_work(struct work_struct *work)
{
	return container_of(work, struct rcu_work, work);
}
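
/*
 * Usage sketch (not part of this header): a delayed work handler is
 * invoked with the embedded work_struct, so to_delayed_work() is the
 * usual way to recover the enclosing delayed_work, and the driver
 * structure around it is then reachable with container_of().  The
 * names my_dev/my_timeout below are hypothetical.
 *
 *	struct my_dev {
 *		struct delayed_work dwork;
 *	};
 *
 *	static void my_timeout(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct my_dev *dev = container_of(dwork, struct my_dev, dwork);
 *
 *		// ... handle the timeout for dev ...
 *	}
 */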

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     (tflags) | TIMER_IRQSAFE),		\
	}

#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
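
/*
 * Usage sketch (not part of this header): DECLARE_WORK() and
 * DECLARE_DELAYED_WORK() build fully initialized, file-scope work
 * items.  The handler my_handler below is hypothetical.
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		// runs in process context on a workqueue worker
 *	}
 *
 *	static DECLARE_WORK(my_work, my_handler);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_handler);
 *
 * The items can then be queued with schedule_work(&my_work) or
 * schedule_delayed_work(&my_dwork, HZ).
 */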

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
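
/*
 * Usage sketch (not part of this header): work items embedded in a
 * dynamically allocated structure are initialized at runtime with
 * INIT_WORK()/INIT_DELAYED_WORK().  my_dev, my_probe, my_handler and
 * my_poll_fn are hypothetical.
 *
 *	struct my_dev {
 *		struct work_struct work;
 *		struct delayed_work poll;
 *	};
 *
 *	static int my_probe(struct my_dev *dev)
 *	{
 *		INIT_WORK(&dev->work, my_handler);
 *		INIT_DELAYED_WORK(&dev->poll, my_poll_fn);
 *		return 0;
 *	}
 */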

#define INIT_RCU_WORK(_work, _func)					\
	INIT_WORK(&(_work)->work, (_func))

#define INIT_RCU_WORK_ONSTACK(_work, _func)				\
	INIT_WORK_ONSTACK(&(_work)->work, (_func))

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if the workqueue.power_efficient kernel param
	 * is specified.  Per-cpu workqueues which are identified to
	 * contribute significantly to power consumption are marked with
	 * this flag, and enabling the power_efficient mode leads to
	 * noticeable power saving at the cost of a small performance
	 * disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are the same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * remaining args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
struct workqueue_struct *alloc_workqueue(const char *fmt,
					 unsigned int flags,
					 int max_active, ...);

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args...: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
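
/*
 * Usage sketch (not part of this header): a driver typically allocates
 * a dedicated workqueue in its init path and checks the result for
 * NULL.  The "mydrv" names are hypothetical.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("mydrv_wq", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *
 *	// strictly serialized variant - one item at a time, in order:
 *	wq = alloc_ordered_workqueue("mydrv_ordered", WQ_MEM_RECLAIM);
 */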

#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

extern void destroy_workqueue(struct workqueue_struct *wq);

struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_work_node(int node, struct workqueue_struct *wq,
			    struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
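
/*
 * Usage sketch (not part of this header): queue_rcu_work() runs the
 * handler after an RCU grace period, which is handy for freeing
 * RCU-protected objects from process context.  my_obj and my_free are
 * hypothetical.
 *
 *	struct my_obj {
 *		struct rcu_work rwork;
 *	};
 *
 *	static void my_free(struct work_struct *work)
 *	{
 *		struct my_obj *obj =
 *			container_of(to_rcu_work(work), struct my_obj, rwork);
 *
 *		kfree(obj);
 *	}
 *
 *	INIT_RCU_WORK(&obj->rwork, my_free);
 *	queue_rcu_work(system_wq, &obj->rwork);
 */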

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);

extern int schedule_on_each_cpu(work_func_t func);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern bool flush_rcu_work(struct rcu_work *rwork);
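
/*
 * Usage sketch (not part of this header): on teardown, cancel work
 * items with the _sync variants so the handler is guaranteed not to be
 * running afterwards, then destroy any dedicated workqueue.  The dev
 * fields are hypothetical.
 *
 *	cancel_delayed_work_sync(&dev->poll);
 *	cancel_work_sync(&dev->work);
 *	destroy_workqueue(dev->wq);
 *
 * Beware of deadlocks: don't call the _sync variants while holding a
 * lock that the work handler itself takes.
 */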

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern struct work_struct *current_work(void);
extern bool current_is_workqueue_rescuer(void);
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
extern void show_workqueue_state(void);
extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 *
 * Memory-ordering properties:  If it returns %true, guarantees that all stores
 * preceding the call to queue_work() in the program order will be visible from
 * the CPU which will execute @work by the time such work executes, e.g.,
 *
 * { x is initially 0 }
 *
 *   CPU0				CPU1
 *
 *   WRITE_ONCE(x, 1);			[ @work is being executed ]
 *   r0 = queue_work(wq, work);		  r1 = READ_ONCE(x);
 *
 * Forbids: r0 == true && r1 == 0
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}
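
/*
 * Usage sketch (not part of this header): publish data first, then
 * queue.  The memory-ordering guarantee above means the handler will
 * observe the store whenever queue_work() returns %true.  The dev
 * fields are hypothetical.
 *
 *	WRITE_ONCE(dev->pending_event, ev);	// publish first
 *	if (queue_work(dev->wq, &dev->work))
 *		;	// guaranteed: the handler sees the store
 */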

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
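
/*
 * Usage sketch (not part of this header): mod_delayed_work() is a
 * common way to debounce bursty events - each call pushes the timeout
 * out, so the handler runs once, HZ/10 after the last event.  on_event
 * and the debounce field are hypothetical.
 *
 *	static void on_event(struct my_dev *dev)
 *	{
 *		mod_delayed_work(system_wq, &dev->debounce, HZ / 10);
 *	}
 */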

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 *
 * Shares the same memory-ordering properties as queue_work(), cf. the
 * DocBook header of queue_work().
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
static inline void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}
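
/*
 * Usage sketch (not part of this header): a self-rearming poller on
 * the kernel-global workqueue.  my_poll_fn and the poll field are
 * hypothetical.
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev =
 *			container_of(to_delayed_work(work), struct my_dev, poll);
 *
 *		// ... sample hardware state ...
 *		schedule_delayed_work(&dev->poll, HZ);	// run again in 1s
 *	}
 */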

#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
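
/*
 * Usage sketch (not part of this header): work_on_cpu() runs @fn
 * synchronously in thread context on @cpu and returns its result;
 * work_on_cpu_safe() additionally holds the CPU hotplug lock so the
 * target CPU can't go away underneath it.  read_state_fn and req are
 * hypothetical.
 *
 *	static long read_state_fn(void *arg)
 *	{
 *		// runs on the requested CPU
 *		return do_per_cpu_read(arg);	// hypothetical helper
 *	}
 *
 *	long val = work_on_cpu_safe(2, read_state_fn, &req);
 */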

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else	/* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif	/* CONFIG_SYSFS */

#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else	/* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif	/* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

void __init workqueue_init_early(void);
void __init workqueue_init(void);

#endif