xref: /OK3568_Linux_fs/kernel/include/linux/sched/idle.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_IDLE_H
#define _LINUX_SCHED_IDLE_H

#include <linux/sched.h>

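/*
 * Idle classification handed to the scheduler's load-balancing code:
 * CPU_NEWLY_IDLE marks a CPU that is about to go idle and is trying to
 * pull work first, CPU_IDLE/CPU_NOT_IDLE describe periodic balancing
 * run from an idle/busy CPU, and CPU_MAX_IDLE_TYPES only sizes the
 * per-idle-type schedstat arrays.
 */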
enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

extern void wake_up_if_idle(int cpu);

/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG

static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED;
	 * this pairs with the barrier in resched_curr().
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED;
	 * this pairs with the barrier in resched_curr().
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

#else
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif
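
/*
 * For context: the barriers above pair with resched_curr() on the waker
 * side, which sets TIF_NEED_RESCHED first and only sends a reschedule
 * IPI when the target is *not* polling, because a polling idle loop will
 * notice the flag by itself. A simplified sketch of that waker path,
 * assuming an atomic "set NEED_RESCHED and test polling" helper such as
 * set_nr_and_not_polling() in kernel/sched/core.c:
 *
 *	if (set_nr_and_not_polling(curr))	// target was not polling
 *		smp_send_reschedule(cpu);	// kick it with an IPI
 *	else
 *		;				// its poll loop sees NEED_RESCHED itself
 */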

static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold it.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}
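
/*
 * Typical use of these helpers, as a simplified sketch of the pattern in
 * kernel/sched/idle.c (not a verbatim copy; "want_polling_idle" stands in
 * for the force-poll decision do_idle() makes):
 *
 *	__current_set_polling();
 *	while (!need_resched()) {
 *		if (want_polling_idle) {
 *			cpu_relax();			// spin, watching NEED_RESCHED
 *		} else if (!current_clr_polling_and_test()) {
 *			arch_cpu_idle();		// really sleep; an IPI wakes us
 *			__current_set_polling();
 *		}
 *	}
 *	current_clr_polling();	// clear the bit and fold NEED_RESCHED
 */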

#endif /* _LINUX_SCHED_IDLE_H */