xref: /OK3568_Linux_fs/kernel/include/linux/freezer.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Freezer declarations */
3 
4 #ifndef FREEZER_H_INCLUDED
5 #define FREEZER_H_INCLUDED
6 
7 #include <linux/debug_locks.h>
8 #include <linux/sched.h>
9 #include <linux/wait.h>
10 #include <linux/atomic.h>
11 
12 #ifdef CONFIG_FREEZER
13 extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
14 extern bool pm_freezing;		/* PM freezing in effect */
15 extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */
16 
17 /*
18  * Timeout for stopping processes
19  */
20 extern unsigned int freeze_timeout_msecs;
21 
22 /*
23  * Check if a process has been frozen
24  */
frozen(struct task_struct * p)25 static inline bool frozen(struct task_struct *p)
26 {
27 	return p->flags & PF_FROZEN;
28 }
29 
frozen_or_skipped(struct task_struct * p)30 static inline bool frozen_or_skipped(struct task_struct *p)
31 {
32 	return p->flags & (PF_FROZEN | PF_FREEZER_SKIP);
33 }
34 
35 extern bool freezing_slow_path(struct task_struct *p);
36 
/*
 * Check if there is a request to freeze a process
 */
static inline bool freezing(struct task_struct *p)
{
	/*
	 * Fast path: system_freezing_cnt counts freezing conditions in
	 * effect system-wide; while it is zero nothing can be freezing,
	 * so skip the out-of-line per-task check entirely.
	 */
	if (likely(!atomic_read(&system_freezing_cnt)))
		return false;
	/* Some freezing condition exists - do the per-task check out of line. */
	return freezing_slow_path(p);
}
46 
47 /* Takes and releases task alloc lock using task_lock() */
48 extern void __thaw_task(struct task_struct *t);
49 
50 extern bool __refrigerator(bool check_kthr_stop);
51 extern int freeze_processes(void);
52 extern int freeze_kernel_threads(void);
53 extern void thaw_processes(void);
54 extern void thaw_kernel_threads(void);
55 
/*
 * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
 * If try_to_freeze causes a lockdep warning it means the caller may deadlock
 */
static inline bool try_to_freeze_unsafe(void)
{
	/* Entering the refrigerator blocks, so this must be a sleepable context. */
	might_sleep();
	if (likely(!freezing(current)))
		return false;
	/* A freeze request is pending for %current - handle it in __refrigerator(). */
	return __refrigerator(false);
}
67 
try_to_freeze(void)68 static inline bool try_to_freeze(void)
69 {
70 	if (!(current->flags & PF_NOFREEZE))
71 		debug_check_no_locks_held();
72 	return try_to_freeze_unsafe();
73 }
74 
75 extern bool freeze_task(struct task_struct *p);
76 extern bool set_freezable(void);
77 
#ifdef CONFIG_CGROUP_FREEZER
extern bool cgroup_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
/* Without the cgroup freezer, no task can ever be cgroup-frozen. */
static inline bool cgroup_freezing(struct task_struct *task)
{
	return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */
86 
87 /*
88  * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
89  * calls wait_for_completion(&vfork) and reset right after it returns from this
90  * function.  Next, the parent should call try_to_freeze() to freeze itself
91  * appropriately in case the child has exited before the freezing of tasks is
92  * complete.  However, we don't want kernel threads to be frozen in unexpected
93  * places, so we allow them to block freeze_processes() instead or to set
94  * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
95  * parent won't really block freeze_processes(), since ____call_usermodehelper()
96  * (the child) does a little before exec/exit and it can't be frozen before
97  * waking up the parent.
98  */
99 
100 
/**
 * freezer_do_not_count - tell freezer to ignore %current
 *
 * Tell freezers to ignore the current task when determining whether the
 * target frozen state is reached.  IOW, the current task will be
 * considered frozen enough by freezers.
 *
 * The caller shouldn't do anything which isn't allowed for a frozen task
 * until freezer_count() is called.  Usually, freezer[_do_not]_count() pair
 * wrap a scheduling operation and nothing much else.
 */
static inline void freezer_do_not_count(void)
{
	/* Only %current's own flags are touched, so no locking is needed here. */
	current->flags |= PF_FREEZER_SKIP;
}
116 
/**
 * freezer_count - tell freezer to stop ignoring %current
 *
 * Undo freezer_do_not_count().  It tells freezers that %current should be
 * considered again and tries to freeze if freezing condition is already in
 * effect.
 */
static inline void freezer_count(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	/*
	 * If freezing is in progress, the following paired with smp_mb()
	 * in freezer_should_skip() ensures that either we see %true
	 * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
	 */
	smp_mb();
	/* Catch up with any freeze request that arrived while we were skipped. */
	try_to_freeze();
}
135 
/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezer_count_unsafe(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	/* Pairs with the smp_mb() in freezer_should_skip(); see freezer_count(). */
	smp_mb();
	/* Lockdep-unchecked variant; see the warning on try_to_freeze_unsafe(). */
	try_to_freeze_unsafe();
}
143 
/**
 * freezer_should_skip - whether to skip a task when determining frozen
 *			 state is reached
 * @p: task in question
 *
 * This function is used by freezers after establishing %true freezing() to
 * test whether a task should be skipped when determining the target frozen
 * state is reached.  IOW, if this function returns %true, @p is considered
 * frozen enough.
 */
static inline bool freezer_should_skip(struct task_struct *p)
{
	/*
	 * The following smp_mb() paired with the one in freezer_count()
	 * ensures that either freezer_count() sees %true freezing() or we
	 * see cleared %PF_FREEZER_SKIP and return %false.  This makes it
	 * impossible for a task to slip frozen state testing after
	 * clearing %PF_FREEZER_SKIP.
	 */
	smp_mb();
	return p->flags & PF_FREEZER_SKIP;
}
166 
/*
 * These functions are intended to be used whenever you want to allow a
 * sleeping task to be frozen.  Note that neither of them returns any clear
 * indication of whether a freeze event happened while in this function.
 */
172 
/* Like schedule(), but should not block the freezer. */
static inline void freezable_schedule(void)
{
	/* Mark %current as "frozen enough" for the duration of the sleep. */
	freezer_do_not_count();
	schedule();
	/* Stop being skippable and honour any freeze request that arrived. */
	freezer_count();
}
180 
/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezable_schedule_unsafe(void)
{
	freezer_do_not_count();
	schedule();
	/* Lockdep-unchecked variant; see the warning on try_to_freeze_unsafe(). */
	freezer_count_unsafe();
}
188 
/*
 * Like schedule_timeout(), but should not block the freezer.  Do not
 * call this with locks held.
 */
static inline long freezable_schedule_timeout(long timeout)
{
	long remaining;

	freezer_do_not_count();
	remaining = schedule_timeout(timeout);
	freezer_count();

	return remaining;
}
201 
/*
 * Like schedule_timeout_interruptible(), but should not block the freezer.
 * Do not call this with locks held.
 */
static inline long freezable_schedule_timeout_interruptible(long timeout)
{
	long remaining;

	freezer_do_not_count();
	remaining = schedule_timeout_interruptible(timeout);
	freezer_count();

	return remaining;
}
214 
/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout)
{
	long remaining;

	freezer_do_not_count();
	remaining = schedule_timeout_interruptible(timeout);
	freezer_count_unsafe();

	return remaining;
}
225 
/* Like schedule_timeout_killable(), but should not block the freezer. */
static inline long freezable_schedule_timeout_killable(long timeout)
{
	long remaining;

	freezer_do_not_count();
	remaining = schedule_timeout_killable(timeout);
	freezer_count();

	return remaining;
}
235 
/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
{
	long remaining;

	freezer_do_not_count();
	remaining = schedule_timeout_killable(timeout);
	freezer_count_unsafe();

	return remaining;
}
245 
246 /*
247  * Like schedule_hrtimeout_range(), but should not block the freezer.  Do not
248  * call this with locks held.
249  */
freezable_schedule_hrtimeout_range(ktime_t * expires,u64 delta,const enum hrtimer_mode mode)250 static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
251 		u64 delta, const enum hrtimer_mode mode)
252 {
253 	int __retval;
254 	freezer_do_not_count();
255 	__retval = schedule_hrtimeout_range(expires, delta, mode);
256 	freezer_count();
257 	return __retval;
258 }
259 
260 /*
261  * Freezer-friendly wrappers around wait_event_interruptible(),
262  * wait_event_killable() and wait_event_interruptible_timeout(), originally
263  * defined in <linux/wait.h>
264  */
265 
/*
 * Freezer-friendly wait_event_killable(): %current is marked skippable
 * while it waits, then honours any pending freeze request on wakeup.
 *
 * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
 */
#define wait_event_freezekillable_unsafe(wq, condition)			\
({									\
	int __retval;							\
	freezer_do_not_count();						\
	__retval = wait_event_killable(wq, (condition));		\
	freezer_count_unsafe();						\
	__retval;							\
})
275 
276 #else /* !CONFIG_FREEZER */
frozen(struct task_struct * p)277 static inline bool frozen(struct task_struct *p) { return false; }
frozen_or_skipped(struct task_struct * p)278 static inline bool frozen_or_skipped(struct task_struct *p) { return false; }
freezing(struct task_struct * p)279 static inline bool freezing(struct task_struct *p) { return false; }
__thaw_task(struct task_struct * t)280 static inline void __thaw_task(struct task_struct *t) {}
281 
__refrigerator(bool check_kthr_stop)282 static inline bool __refrigerator(bool check_kthr_stop) { return false; }
freeze_processes(void)283 static inline int freeze_processes(void) { return -ENOSYS; }
freeze_kernel_threads(void)284 static inline int freeze_kernel_threads(void) { return -ENOSYS; }
thaw_processes(void)285 static inline void thaw_processes(void) {}
thaw_kernel_threads(void)286 static inline void thaw_kernel_threads(void) {}
287 
try_to_freeze_nowarn(void)288 static inline bool try_to_freeze_nowarn(void) { return false; }
try_to_freeze(void)289 static inline bool try_to_freeze(void) { return false; }
290 
freezer_do_not_count(void)291 static inline void freezer_do_not_count(void) {}
freezer_count(void)292 static inline void freezer_count(void) {}
freezer_should_skip(struct task_struct * p)293 static inline int freezer_should_skip(struct task_struct *p) { return 0; }
set_freezable(void)294 static inline void set_freezable(void) {}
295 
/*
 * With CONFIG_FREEZER=n the freezable_* wrappers degenerate to the plain
 * scheduler/wait primitives they wrap.
 */
#define freezable_schedule()  schedule()

#define freezable_schedule_unsafe()  schedule()

#define freezable_schedule_timeout(timeout)  schedule_timeout(timeout)

#define freezable_schedule_timeout_interruptible(timeout)		\
	schedule_timeout_interruptible(timeout)

#define freezable_schedule_timeout_interruptible_unsafe(timeout)	\
	schedule_timeout_interruptible(timeout)

#define freezable_schedule_timeout_killable(timeout)			\
	schedule_timeout_killable(timeout)

#define freezable_schedule_timeout_killable_unsafe(timeout)		\
	schedule_timeout_killable(timeout)

#define freezable_schedule_hrtimeout_range(expires, delta, mode)	\
	schedule_hrtimeout_range(expires, delta, mode)

#define wait_event_freezekillable_unsafe(wq, condition)			\
		wait_event_killable(wq, condition)
319 
320 #endif /* !CONFIG_FREEZER */
321 
322 #endif	/* FREEZER_H_INCLUDED */
323