xref: /OK3568_Linux_fs/kernel/include/linux/wait.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>

#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct wait_queue_entry wait_queue_entry_t;

typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE	0x01
#define WQ_FLAG_WOKEN		0x02
#define WQ_FLAG_BOOKMARK	0x04
#define WQ_FLAG_CUSTOM		0x08
#define WQ_FLAG_DONE		0x10

/*
 * A single wait-queue entry structure:
 */
struct wait_queue_entry {
	unsigned int		flags;
	void			*private;
	wait_queue_func_t	func;
	struct list_head	entry;
};

struct wait_queue_head {
	spinlock_t		lock;
	struct list_head	head;
};
typedef struct wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
	.private	= tsk,							\
	.func		= default_wake_function,				\
	.entry		= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)						\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
	.head		= { &(name).head, &(name).head } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

#define init_waitqueue_head(wq_head)						\
	do {									\
		static struct lock_class_key __key;				\
										\
		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

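/*
 * Illustrative sketch (not part of this header): the two common ways to set
 * up a wait queue head.  A file-scope queue can use the static initializer,
 * while a queue embedded in a dynamically allocated object must be
 * initialized at runtime so lockdep gets a proper lock class.  The names
 * below (my_wq, struct my_device, my_device_setup) are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *
 *	struct my_device {
 *		struct wait_queue_head	wq;
 *		bool			ready;
 *	};
 *
 *	static void my_device_setup(struct my_device *dev)
 *	{
 *		init_waitqueue_head(&dev->wq);
 *		dev->ready = false;
 *	}
 */
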
static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
{
	wq_entry->flags		= 0;
	wq_entry->private	= p;
	wq_entry->func		= default_wake_function;
}

static inline void
init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
{
	wq_entry->flags		= 0;
	wq_entry->private	= NULL;
	wq_entry->func		= func;
}

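/*
 * Illustrative sketch (hypothetical names): init_waitqueue_func_entry() is
 * used when the waiter is a callback rather than a task, as epoll does.
 * The callback runs from __wake_up() with the queue lock held, so it should
 * only do minimal work and defer the rest.
 *
 *	static int my_wake_cb(struct wait_queue_entry *wq_entry, unsigned mode,
 *			      int flags, void *key)
 *	{
 *		struct my_watcher *w = container_of(wq_entry, struct my_watcher, wait);
 *
 *		schedule_work(&w->work);	// defer real work out of the wakeup path
 *		return 1;
 *	}
 *
 *	init_waitqueue_func_entry(&w->wait, my_wake_cb);
 *	add_wait_queue(&dev->wq, &w->wait);
 */
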
/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like::
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq_head))         if (@cond)
 *        wake_up(wq_head);                      break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq_head, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
	return !list_empty(&wq_head->head);
}

/**
 * wq_has_single_sleeper - check if there is only one sleeper
 * @wq_head: wait queue head
 *
 * Returns true if @wq_head has only one sleeper on the list.
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
	return list_is_singular(&wq_head->head);
}

/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has waiting processes
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return waitqueue_active(wq_head);
}

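/*
 * Illustrative waker-side sketch: wq_has_sleeper() already issues the
 * smp_mb() described above, so a writer can pair a store to the condition
 * with a lockless check before calling wake_up().  dev->ready and dev->wq
 * are hypothetical fields; the waiter side must use prepare_to_wait() or a
 * wait_event*() macro so the barriers pair correctly.
 *
 *	static void my_signal_ready(struct my_device *dev)
 *	{
 *		WRITE_ONCE(dev->ready, true);
 *		if (wq_has_sleeper(&dev->wq))
 *			wake_up(&dev->wq);
 *	}
 */
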
extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);

static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add(&wq_entry->entry, &wq_head->head);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq_head, wq_entry);
}

static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add_tail(&wq_entry->entry, &wq_head->head);
}

static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_entry_tail(wq_head, wq_entry);
}

static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_del(&wq_entry->entry);
}

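/*
 * Illustrative sketch of the low-level interface.  The wait_event*() macros
 * below are almost always preferable; the open-coded loop looks roughly like
 * this (dev->ready and dev->wq are hypothetical).  Note the ordering: the
 * task state is set before the condition is re-checked, so a wakeup that
 * races with the check cannot be lost.
 *
 *	DECLARE_WAITQUEUE(wait, current);
 *
 *	add_wait_queue(&dev->wq, &wait);
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (READ_ONCE(dev->ready))
 *			break;
 *		if (signal_pending(current))
 *			break;		// caller decides how to report the signal
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&dev->wq, &wait);
 */
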
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
void __wake_up_pollfree(struct wait_queue_head *wq_head);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE)
#define wake_up_sync(x)			__wake_up_sync((x), TASK_NORMAL)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m)							\
	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m)						\
	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m)					\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m)					\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define wake_up_interruptible_sync_poll_locked(x, m)				\
	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))

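/*
 * Illustrative sketch: a driver that supports poll()/epoll typically keys
 * its wakeups with the poll events that became ready, so only pollers
 * interested in those events are woken.  dev->read_wq is a hypothetical
 * queue that the driver's ->poll() method registers via poll_wait().
 *
 *	// data arrived: wake readers and epoll waiters asking for EPOLLIN
 *	wake_up_interruptible_poll(&dev->read_wq, EPOLLIN | EPOLLRDNORM);
 */
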
/**
 * wake_up_pollfree - signal that a polled waitqueue is going away
 * @wq_head: the wait queue head
 *
 * In the very rare cases where a ->poll() implementation uses a waitqueue whose
 * lifetime is tied to a task rather than to the 'struct file' being polled,
 * this function must be called before the waitqueue is freed so that
 * non-blocking polls (e.g. epoll) are notified that the queue is going away.
 *
 * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
 * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
 */
static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
{
	/*
	 * For performance reasons, we don't always take the queue lock here.
	 * Therefore, we might race with someone removing the last entry from
	 * the queue, and proceed while they still hold the queue lock.
	 * However, rcu_read_lock() is required to be held in such cases, so we
	 * can safely proceed with an RCU-delayed free.
	 */
	if (waitqueue_active(wq_head))
		__wake_up_pollfree(wq_head);
}

#define ___wait_cond_timeout(condition)						\
({										\
	bool __cond = (condition);						\
	if (__cond && !__ret)							\
		__ret = 1;							\
	__cond || !__ret;							\
})

#define ___wait_is_interruptible(state)						\
	(!__builtin_constant_p(state) ||					\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)		\

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
({										\
	__label__ __out;							\
	struct wait_queue_entry __wq_entry;					\
	long __ret = ret;	/* explicit shadow */				\
										\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {								\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
										\
		if (condition)							\
			break;							\
										\
		if (___wait_is_interruptible(state) && __int) {			\
			__ret = __int;						\
			goto __out;						\
		}								\
										\
		cmd;								\
	}									\
	finish_wait(&wq_head, &__wq_entry);					\
__out:	__ret;									\
})

#define __wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition)						\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__wait_event(wq_head, condition);					\
} while (0)

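/*
 * Illustrative sketch of the basic pattern (dev->data_ready and dev->wq are
 * hypothetical): the waiter re-checks the condition after every wakeup, and
 * the producer updates the condition *before* calling wake_up(), as the
 * comment above requires.
 *
 *	// consumer
 *	wait_event(dev->wq, READ_ONCE(dev->data_ready));
 *
 *	// producer
 *	WRITE_ONCE(dev->data_ready, true);
 *	wake_up(&dev->wq);
 */
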
#define __io_wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__io_wait_event(wq_head, condition);					\
} while (0)

#define __wait_event_freezable(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
			    freezable_schedule())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable(wq_head, condition);		\
	__ret;									\
})

#define __wait_event_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_UNINTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq_head, condition, timeout)				\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_timeout(wq_head, condition, timeout);	\
	__ret;									\
})

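/*
 * Illustrative sketch: the return value distinguishes a timeout from
 * success, so callers usually check for 0.  dev->wq, dev->done and the
 * 100ms budget are hypothetical.
 *
 *	long ret = wait_event_timeout(dev->wq, READ_ONCE(dev->done),
 *				      msecs_to_jiffies(100));
 *	if (!ret)
 *		return -ETIMEDOUT;	// condition still false after 100ms
 *	// ret >= 1: condition became true, ret is the remaining jiffies
 */
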
#define __wait_event_freezable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = freezable_schedule_timeout(__ret))

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
	__ret;									\
})

#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
do {										\
	if (condition)								\
		break;								\
	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);		\
} while (0)

#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to be executed before sleep
 * @cmd2: the command to be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq_head, condition, cmd1, cmd2)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_cmd(wq_head, condition, cmd1, cmd2);			\
} while (0)

#define __wait_event_interruptible(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible(wq_head, condition);		\
	__ret;									\
})

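/*
 * Illustrative sketch: the usual idiom in syscall/ioctl paths is to
 * propagate the -ERESTARTSYS result so the signal can be delivered and the
 * call restarted.  dev->wq and dev->done are hypothetical.
 *
 *	int ret = wait_event_interruptible(dev->wq, READ_ONCE(dev->done));
 *	if (ret)
 *		return ret;	// -ERESTARTSYS: interrupted by a signal
 *	// condition is true, continue
 */
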
#define __wait_event_interruptible_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_interruptible_timeout(wq_head,		\
						condition, timeout);		\
	__ret;									\
})

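/*
 * Illustrative sketch of handling all three outcomes (hypothetical fields):
 *
 *	long ret = wait_event_interruptible_timeout(dev->wq,
 *						    READ_ONCE(dev->done),
 *						    msecs_to_jiffies(500));
 *	if (ret < 0)
 *		return ret;		// -ERESTARTSYS: signal
 *	if (ret == 0)
 *		return -ETIMEDOUT;	// condition still false after 500ms
 *	// ret >= 1: condition became true in time
 */
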
#define __wait_event_hrtimeout(wq_head, condition, timeout, state)		\
({										\
	int __ret = 0;								\
	struct hrtimer_sleeper __t;						\
										\
	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,			\
				      HRTIMER_MODE_REL);			\
	if ((timeout) != KTIME_MAX) {						\
		hrtimer_set_expires_range_ns(&__t.timer, timeout,		\
					current->timer_slack_ns);		\
		hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL);		\
	}									\
										\
	__ret = ___wait_event(wq_head, condition, state, 0, 0,			\
		if (!__t.task) {						\
			__ret = -ETIME;						\
			break;							\
		}								\
		schedule());							\
										\
	hrtimer_cancel(&__t.timer);						\
	destroy_hrtimer_on_stack(&__t.timer);					\
	__ret;									\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq_head, condition, timeout)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq_head, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);		\
	__ret;									\
})

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)		\
({										\
	long __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,		\
					       TASK_INTERRUPTIBLE);		\
	__ret;									\
})

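/*
 * Illustrative sketch: the hrtimeout variants take a ktime_t rather than
 * jiffies, which suits sub-jiffy deadlines.  dev->wq, dev->done and the 2ms
 * deadline are hypothetical.
 *
 *	long ret = wait_event_interruptible_hrtimeout(dev->wq,
 *						      READ_ONCE(dev->done),
 *						      ms_to_ktime(2));
 *	if (ret == -ETIME)
 *		return -ETIMEDOUT;	// 2ms deadline expired
 *	if (ret)
 *		return ret;		// -ERESTARTSYS
 */
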
#define __wait_event_interruptible_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_exclusive(wq, condition);	\
	__ret;									\
})

#define __wait_event_killable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,			\
		      schedule())

#define wait_event_killable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable_exclusive(wq, condition);		\
	__ret;									\
})


#define __wait_event_freezable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
			freezable_schedule())

#define wait_event_freezable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable_exclusive(wq, condition);	\
	__ret;									\
})

/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule());	\
} while (0)

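/*
 * Illustrative sketch: a kthread that may sit idle for long periods can use
 * wait_event_idle() so its sleep does not count toward the load average
 * (unlike TASK_UNINTERRUPTIBLE) and cannot be interrupted by signals.
 * my_worker, my_process_work and the dev fields are hypothetical.
 *
 *	static int my_worker(void *arg)
 *	{
 *		struct my_device *dev = arg;
 *
 *		while (!kthread_should_stop()) {
 *			wait_event_idle(dev->wq, READ_ONCE(dev->work_pending) ||
 *					kthread_should_stop());
 *			my_process_work(dev);
 *		}
 *		return 0;
 *	}
 */
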
/**
 * wait_event_idle_exclusive - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, further processes
 * are not considered when this process is woken.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle_exclusive(wq_head, condition)				\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule());	\
} while (0)

#define __wait_event_idle_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_IDLE, 0, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout)			\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_timeout(wq_head, condition, timeout);	\
	__ret;									\
})

#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_IDLE, 1, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, further processes
 * are not considered when this process is woken.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
	__ret;									\
})

extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);

#define __wait_event_interruptible_locked(wq, condition, exclusive, fn)		\
({										\
	int __ret;								\
	DEFINE_WAIT(__wait);							\
	if (exclusive)								\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;				\
	do {									\
		__ret = fn(&(wq), &__wait);					\
		if (__ret)							\
			break;							\
	} while (!(condition));							\
	__remove_wait_queue(&(wq), &__wait);					\
	__set_current_state(TASK_RUNNING);					\
	__ret;									\
})


/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)				\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))

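/*
 * Illustrative sketch: the caller already holds wq.lock; the macro drops it
 * only around the actual sleep and re-takes it before re-checking
 * @condition and before returning.  dev->wq and dev->count are hypothetical.
 *
 *	spin_lock(&dev->wq.lock);
 *	ret = wait_event_interruptible_locked(dev->wq, dev->count > 0);
 *	if (!ret)
 *		dev->count--;		// still under wq.lock here
 *	spin_unlock(&dev->wq.lock);
 */
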
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)			\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, further processes
 * are not considered when this process is woken.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)		\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, further processes
 * are not considered when this process is woken.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)		\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))


#define __wait_event_killable(wq, condition)					\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq_head, condition)					\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable(wq_head, condition);		\
	__ret;									\
})

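/*
 * Illustrative sketch: wait_event_killable() suits long waits in paths where
 * ordinary signals must not abort the operation but a fatal signal (e.g.
 * SIGKILL) should.  dev->wq and dev->done are hypothetical.
 *
 *	int ret = wait_event_killable(dev->wq, READ_ONCE(dev->done));
 *	if (ret)
 *		return ret;	// -ERESTARTSYS: fatal signal received
 */
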
#define __wait_event_killable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_KILLABLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a kill signal.
 *
 * Only kill signals interrupt this process.
 */
#define wait_event_killable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_killable_timeout(wq_head,			\
						condition, timeout);		\
	__ret;									\
})


964*4882a593Smuzhiyun #define __wait_event_lock_irq(wq_head, condition, lock, cmd)			\
965*4882a593Smuzhiyun 	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
966*4882a593Smuzhiyun 			    spin_unlock_irq(&lock);				\
967*4882a593Smuzhiyun 			    cmd;						\
968*4882a593Smuzhiyun 			    schedule();						\
969*4882a593Smuzhiyun 			    spin_lock_irq(&lock))
970*4882a593Smuzhiyun 
971*4882a593Smuzhiyun /**
972*4882a593Smuzhiyun  * wait_event_lock_irq_cmd - sleep until a condition gets true. The
973*4882a593Smuzhiyun  *			     condition is checked under the lock. This
974*4882a593Smuzhiyun  *			     is expected to be called with the lock
975*4882a593Smuzhiyun  *			     taken.
976*4882a593Smuzhiyun  * @wq_head: the waitqueue to wait on
977*4882a593Smuzhiyun  * @condition: a C expression for the event to wait for
978*4882a593Smuzhiyun  * @lock: a locked spinlock_t, which will be released before cmd
979*4882a593Smuzhiyun  *	  and schedule() and reacquired afterwards.
980*4882a593Smuzhiyun  * @cmd: a command which is invoked outside the critical section before
981*4882a593Smuzhiyun  *	 sleep
982*4882a593Smuzhiyun  *
983*4882a593Smuzhiyun  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
984*4882a593Smuzhiyun  * @condition evaluates to true. The @condition is checked each time
985*4882a593Smuzhiyun  * the waitqueue @wq_head is woken up.
986*4882a593Smuzhiyun  *
987*4882a593Smuzhiyun  * wake_up() has to be called after changing any variable that could
988*4882a593Smuzhiyun  * change the result of the wait condition.
989*4882a593Smuzhiyun  *
990*4882a593Smuzhiyun  * This is supposed to be called while holding the lock. The lock is
991*4882a593Smuzhiyun  * dropped before invoking the cmd and going to sleep and is reacquired
992*4882a593Smuzhiyun  * afterwards.
993*4882a593Smuzhiyun  */
994*4882a593Smuzhiyun #define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)			\
995*4882a593Smuzhiyun do {										\
996*4882a593Smuzhiyun 	if (condition)								\
997*4882a593Smuzhiyun 		break;								\
998*4882a593Smuzhiyun 	__wait_event_lock_irq(wq_head, condition, lock, cmd);			\
999*4882a593Smuzhiyun } while (0)
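
/*
 * Illustrative sketch only: wait for a free slot with dev->lock held around
 * the condition check, kicking the hardware each time before sleeping.
 * frob_dev, frob_kick() and the fields are hypothetical.
 *
 *	spin_lock_irq(&dev->lock);
 *	wait_event_lock_irq_cmd(dev->wq, dev->free_slots > 0,
 *				dev->lock, frob_kick(dev));
 *	dev->free_slots--;
 *	spin_unlock_irq(&dev->lock);
 *
 * frob_kick() runs after dev->lock has been dropped, immediately before
 * schedule().
 */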
1000*4882a593Smuzhiyun 
1001*4882a593Smuzhiyun /**
1002*4882a593Smuzhiyun  * wait_event_lock_irq - sleep until a condition gets true. The
1003*4882a593Smuzhiyun  *			 condition is checked under the lock. This
1004*4882a593Smuzhiyun  *			 is expected to be called with the lock
1005*4882a593Smuzhiyun  *			 taken.
1006*4882a593Smuzhiyun  * @wq_head: the waitqueue to wait on
1007*4882a593Smuzhiyun  * @condition: a C expression for the event to wait for
1008*4882a593Smuzhiyun  * @lock: a locked spinlock_t, which will be released before schedule()
1009*4882a593Smuzhiyun  *	  and reacquired afterwards.
1010*4882a593Smuzhiyun  *
1011*4882a593Smuzhiyun  * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
1012*4882a593Smuzhiyun  * @condition evaluates to true. The @condition is checked each time
1013*4882a593Smuzhiyun  * the waitqueue @wq_head is woken up.
1014*4882a593Smuzhiyun  *
1015*4882a593Smuzhiyun  * wake_up() has to be called after changing any variable that could
1016*4882a593Smuzhiyun  * change the result of the wait condition.
1017*4882a593Smuzhiyun  *
1018*4882a593Smuzhiyun  * This is supposed to be called while holding the lock. The lock is
1019*4882a593Smuzhiyun  * dropped before going to sleep and is reacquired afterwards.
1020*4882a593Smuzhiyun  */
1021*4882a593Smuzhiyun #define wait_event_lock_irq(wq_head, condition, lock)				\
1022*4882a593Smuzhiyun do {										\
1023*4882a593Smuzhiyun 	if (condition)								\
1024*4882a593Smuzhiyun 		break;								\
1025*4882a593Smuzhiyun 	__wait_event_lock_irq(wq_head, condition, lock, );			\
1026*4882a593Smuzhiyun } while (0)
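
/*
 * Illustrative sketch only: dequeue under the lock once work is available.
 * All names are hypothetical.
 *
 *	spin_lock_irq(&dev->lock);
 *	wait_event_lock_irq(dev->wq, !list_empty(&dev->pending), dev->lock);
 *	item = list_first_entry(&dev->pending, struct frob_item, node);
 *	list_del(&item->node);
 *	spin_unlock_irq(&dev->lock);
 *
 * Because the condition is evaluated with dev->lock held, no extra re-check
 * is needed after the macro returns.
 */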
1027*4882a593Smuzhiyun 
1028*4882a593Smuzhiyun 
1029*4882a593Smuzhiyun #define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd)	\
1030*4882a593Smuzhiyun 	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
1031*4882a593Smuzhiyun 		      spin_unlock_irq(&lock);					\
1032*4882a593Smuzhiyun 		      cmd;							\
1033*4882a593Smuzhiyun 		      schedule();						\
1034*4882a593Smuzhiyun 		      spin_lock_irq(&lock))
1035*4882a593Smuzhiyun 
1036*4882a593Smuzhiyun /**
1037*4882a593Smuzhiyun  * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
1038*4882a593Smuzhiyun  *		The condition is checked under the lock. This is expected to
1039*4882a593Smuzhiyun  *		be called with the lock taken.
1040*4882a593Smuzhiyun  * @wq_head: the waitqueue to wait on
1041*4882a593Smuzhiyun  * @condition: a C expression for the event to wait for
1042*4882a593Smuzhiyun  * @lock: a locked spinlock_t, which will be released before cmd and
1043*4882a593Smuzhiyun  *	  schedule() and reacquired afterwards.
1044*4882a593Smuzhiyun  * @cmd: a command which is invoked outside the critical section before
1045*4882a593Smuzhiyun  *	 sleep
1046*4882a593Smuzhiyun  *
1047*4882a593Smuzhiyun  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1048*4882a593Smuzhiyun  * @condition evaluates to true or a signal is received. The @condition is
1049*4882a593Smuzhiyun  * checked each time the waitqueue @wq_head is woken up.
1050*4882a593Smuzhiyun  *
1051*4882a593Smuzhiyun  * wake_up() has to be called after changing any variable that could
1052*4882a593Smuzhiyun  * change the result of the wait condition.
1053*4882a593Smuzhiyun  *
1054*4882a593Smuzhiyun  * This is supposed to be called while holding the lock. The lock is
1055*4882a593Smuzhiyun  * dropped before invoking the cmd and going to sleep and is reacquired
1056*4882a593Smuzhiyun  * afterwards.
1057*4882a593Smuzhiyun  *
1058*4882a593Smuzhiyun  * The macro will return -ERESTARTSYS if it was interrupted by a signal
1059*4882a593Smuzhiyun  * and 0 if @condition evaluated to true.
1060*4882a593Smuzhiyun  */
1061*4882a593Smuzhiyun #define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd)	\
1062*4882a593Smuzhiyun ({										\
1063*4882a593Smuzhiyun 	int __ret = 0;								\
1064*4882a593Smuzhiyun 	if (!(condition))							\
1065*4882a593Smuzhiyun 		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
1066*4882a593Smuzhiyun 						condition, lock, cmd);		\
1067*4882a593Smuzhiyun 	__ret;									\
1068*4882a593Smuzhiyun })
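
/*
 * Illustrative sketch only: as the wait_event_lock_irq_cmd() sketch above,
 * but any signal aborts the wait.  Names are hypothetical.  Note that the
 * macro returns with dev->lock held in both the success and the
 * -ERESTARTSYS case.
 *
 *	spin_lock_irq(&dev->lock);
 *	ret = wait_event_interruptible_lock_irq_cmd(dev->wq,
 *						    dev->free_slots > 0,
 *						    dev->lock, frob_kick(dev));
 *	if (!ret)
 *		dev->free_slots--;
 *	spin_unlock_irq(&dev->lock);
 */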
1069*4882a593Smuzhiyun 
1070*4882a593Smuzhiyun /**
1071*4882a593Smuzhiyun  * wait_event_interruptible_lock_irq - sleep until a condition gets true.
1072*4882a593Smuzhiyun  *		The condition is checked under the lock. This is expected
1073*4882a593Smuzhiyun  *		to be called with the lock taken.
1074*4882a593Smuzhiyun  * @wq_head: the waitqueue to wait on
1075*4882a593Smuzhiyun  * @condition: a C expression for the event to wait for
1076*4882a593Smuzhiyun  * @lock: a locked spinlock_t, which will be released before schedule()
1077*4882a593Smuzhiyun  *	  and reacquired afterwards.
1078*4882a593Smuzhiyun  *
1079*4882a593Smuzhiyun  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1080*4882a593Smuzhiyun  * @condition evaluates to true or a signal is received. The @condition is
1081*4882a593Smuzhiyun  * checked each time the waitqueue @wq_head is woken up.
1082*4882a593Smuzhiyun  *
1083*4882a593Smuzhiyun  * wake_up() has to be called after changing any variable that could
1084*4882a593Smuzhiyun  * change the result of the wait condition.
1085*4882a593Smuzhiyun  *
1086*4882a593Smuzhiyun  * This is supposed to be called while holding the lock. The lock is
1087*4882a593Smuzhiyun  * dropped before going to sleep and is reacquired afterwards.
1088*4882a593Smuzhiyun  *
1089*4882a593Smuzhiyun  * The macro will return -ERESTARTSYS if it was interrupted by a signal
1090*4882a593Smuzhiyun  * and 0 if @condition evaluated to true.
1091*4882a593Smuzhiyun  */
1092*4882a593Smuzhiyun #define wait_event_interruptible_lock_irq(wq_head, condition, lock)		\
1093*4882a593Smuzhiyun ({										\
1094*4882a593Smuzhiyun 	int __ret = 0;								\
1095*4882a593Smuzhiyun 	if (!(condition))							\
1096*4882a593Smuzhiyun 		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
1097*4882a593Smuzhiyun 						condition, lock, );		\
1098*4882a593Smuzhiyun 	__ret;									\
1099*4882a593Smuzhiyun })
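
/*
 * Illustrative sketch only: interruptible wait with the condition checked
 * under dev->lock.  Names are hypothetical.
 *
 *	spin_lock_irq(&dev->lock);
 *	ret = wait_event_interruptible_lock_irq(dev->wq,
 *						dev->state == FROB_READY,
 *						dev->lock);
 *	spin_unlock_irq(&dev->lock);
 *	if (ret)
 *		return ret;	(-ERESTARTSYS: a signal arrived first)
 */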
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun #define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state)	\
1102*4882a593Smuzhiyun 	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
1103*4882a593Smuzhiyun 		      state, 0, timeout,					\
1104*4882a593Smuzhiyun 		      spin_unlock_irq(&lock);					\
1105*4882a593Smuzhiyun 		      __ret = schedule_timeout(__ret);				\
1106*4882a593Smuzhiyun 		      spin_lock_irq(&lock));
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun /**
1109*4882a593Smuzhiyun  * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
1110*4882a593Smuzhiyun  *		true or a timeout elapses. The condition is checked under
1111*4882a593Smuzhiyun  *		the lock. This is expected to be called with the lock taken.
1112*4882a593Smuzhiyun  * @wq_head: the waitqueue to wait on
1113*4882a593Smuzhiyun  * @condition: a C expression for the event to wait for
1114*4882a593Smuzhiyun  * @lock: a locked spinlock_t, which will be released before schedule()
1115*4882a593Smuzhiyun  *	  and reacquired afterwards.
1116*4882a593Smuzhiyun  * @timeout: timeout, in jiffies
1117*4882a593Smuzhiyun  *
1118*4882a593Smuzhiyun  * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1119*4882a593Smuzhiyun  * @condition evaluates to true or a signal is received. The @condition is
1120*4882a593Smuzhiyun  * checked each time the waitqueue @wq_head is woken up.
1121*4882a593Smuzhiyun  *
1122*4882a593Smuzhiyun  * wake_up() has to be called after changing any variable that could
1123*4882a593Smuzhiyun  * change the result of the wait condition.
1124*4882a593Smuzhiyun  *
1125*4882a593Smuzhiyun  * This is supposed to be called while holding the lock. The lock is
1126*4882a593Smuzhiyun  * dropped before going to sleep and is reacquired afterwards.
1127*4882a593Smuzhiyun  *
1128*4882a593Smuzhiyun  * The macro returns 0 if the @timeout elapsed, -ERESTARTSYS if it
1129*4882a593Smuzhiyun  * was interrupted by a signal, or the remaining jiffies (at least 1)
1130*4882a593Smuzhiyun  * if the @condition evaluated to true before the @timeout elapsed.
1131*4882a593Smuzhiyun  */
1132*4882a593Smuzhiyun #define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock,	\
1133*4882a593Smuzhiyun 						  timeout)			\
1134*4882a593Smuzhiyun ({										\
1135*4882a593Smuzhiyun 	long __ret = timeout;							\
1136*4882a593Smuzhiyun 	if (!___wait_cond_timeout(condition))					\
1137*4882a593Smuzhiyun 		__ret = __wait_event_lock_irq_timeout(				\
1138*4882a593Smuzhiyun 					wq_head, condition, lock, timeout,	\
1139*4882a593Smuzhiyun 					TASK_INTERRUPTIBLE);			\
1140*4882a593Smuzhiyun 	__ret;									\
1141*4882a593Smuzhiyun })
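
/*
 * Illustrative sketch only: bounded, interruptible wait under dev->lock.
 * Names and the 10 * HZ timeout are hypothetical.
 *
 *	spin_lock_irq(&dev->lock);
 *	ret = wait_event_interruptible_lock_irq_timeout(dev->wq, dev->done,
 *							dev->lock, 10 * HZ);
 *	spin_unlock_irq(&dev->lock);
 *
 *	ret < 0:  interrupted by a signal (-ERESTARTSYS)
 *	ret == 0: timed out with dev->done still false
 *	ret > 0:  dev->done became true; 'ret' jiffies of the timeout remained
 */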
1142*4882a593Smuzhiyun 
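/*
 * wait_event_lock_irq_timeout() is the TASK_UNINTERRUPTIBLE counterpart of
 * wait_event_interruptible_lock_irq_timeout() above: same locking rules and
 * timeout handling, but signals are ignored, so it never returns
 * -ERESTARTSYS.
 */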
1143*4882a593Smuzhiyun #define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)		\
1144*4882a593Smuzhiyun ({										\
1145*4882a593Smuzhiyun 	long __ret = timeout;							\
1146*4882a593Smuzhiyun 	if (!___wait_cond_timeout(condition))					\
1147*4882a593Smuzhiyun 		__ret = __wait_event_lock_irq_timeout(				\
1148*4882a593Smuzhiyun 					wq_head, condition, lock, timeout,	\
1149*4882a593Smuzhiyun 					TASK_UNINTERRUPTIBLE);			\
1150*4882a593Smuzhiyun 	__ret;									\
1151*4882a593Smuzhiyun })
1152*4882a593Smuzhiyun 
1153*4882a593Smuzhiyun /*
1154*4882a593Smuzhiyun  * Waitqueues which are removed from the waitqueue_head at wakeup time
1155*4882a593Smuzhiyun  */
1156*4882a593Smuzhiyun void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1157*4882a593Smuzhiyun bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1158*4882a593Smuzhiyun long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1159*4882a593Smuzhiyun void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
1160*4882a593Smuzhiyun long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
1161*4882a593Smuzhiyun int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1162*4882a593Smuzhiyun int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun #define DEFINE_WAIT_FUNC(name, function)					\
1165*4882a593Smuzhiyun 	struct wait_queue_entry name = {					\
1166*4882a593Smuzhiyun 		.private	= current,					\
1167*4882a593Smuzhiyun 		.func		= function,					\
1168*4882a593Smuzhiyun 		.entry		= LIST_HEAD_INIT((name).entry),			\
1169*4882a593Smuzhiyun 	}
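
/*
 * Illustrative sketch only: the wait_woken() pattern built on
 * DEFINE_WAIT_FUNC() and woken_wake_function().  wq_head, condition and
 * timeout are hypothetical.
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(&wq_head, &wait);
 *	for (;;) {
 *		if (condition)
 *			break;
 *		if (signal_pending(current))
 *			break;
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *		if (!timeout)
 *			break;
 *	}
 *	remove_wait_queue(&wq_head, &wait);
 */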
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
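
/*
 * Illustrative sketch only: the classic open-coded wait loop using
 * DEFINE_WAIT(), prepare_to_wait() and finish_wait().  wq_head and
 * condition are hypothetical.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq_head, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wait);
 */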
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun #define init_wait(wait)								\
1174*4882a593Smuzhiyun 	do {									\
1175*4882a593Smuzhiyun 		(wait)->private = current;					\
1176*4882a593Smuzhiyun 		(wait)->func = autoremove_wake_function;			\
1177*4882a593Smuzhiyun 		INIT_LIST_HEAD(&(wait)->entry);					\
1178*4882a593Smuzhiyun 		(wait)->flags = 0;						\
1179*4882a593Smuzhiyun 	} while (0)
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg);
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun #endif /* _LINUX_WAIT_H */