/* SPDX-License-Identifier: GPL-2.0 */
/* Freezer declarations */

#ifndef FREEZER_H_INCLUDED
#define FREEZER_H_INCLUDED

#include <linux/debug_locks.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_FREEZER
extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
extern bool pm_freezing;		/* PM freezing in effect */
extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */

/*
 * Timeout for stopping processes
 */
extern unsigned int freeze_timeout_msecs;

/*
 * Check if a process has been frozen
 */
static inline bool frozen(struct task_struct *p)
{
	return p->flags & PF_FROZEN;
}

static inline bool frozen_or_skipped(struct task_struct *p)
{
	return p->flags & (PF_FROZEN | PF_FREEZER_SKIP);
}

extern bool freezing_slow_path(struct task_struct *p);

/*
 * Check if there is a request to freeze a process
 */
static inline bool freezing(struct task_struct *p)
{
	if (likely(!atomic_read(&system_freezing_cnt)))
		return false;
	return freezing_slow_path(p);
}

/* Takes and releases task alloc lock using task_lock() */
extern void __thaw_task(struct task_struct *t);

extern bool __refrigerator(bool check_kthr_stop);
extern int freeze_processes(void);
extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);
/*
 * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
 * If try_to_freeze causes a lockdep warning it means the caller may deadlock
 */
static inline bool try_to_freeze_unsafe(void)
{
	might_sleep();
	if (likely(!freezing(current)))
		return false;
	return __refrigerator(false);
}

static inline bool try_to_freeze(void)
{
	if (!(current->flags & PF_NOFREEZE))
		debug_check_no_locks_held();
	return try_to_freeze_unsafe();
}

extern bool freeze_task(struct task_struct *p);
extern bool set_freezable(void);
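
/*
 * Illustrative sketch only, not part of this header's API: a typical
 * freezable kernel thread marks itself freezable once with set_freezable()
 * and then polls try_to_freeze() from its main loop.  The thread function
 * and work helper names below are hypothetical.
 *
 *	static int my_kthread_fn(void *data)
 *	{
 *		set_freezable();
 *		while (!kthread_should_stop()) {
 *			try_to_freeze();
 *			do_my_work();
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */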

#ifdef CONFIG_CGROUP_FREEZER
extern bool cgroup_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
static inline bool cgroup_freezing(struct task_struct *task)
{
	return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */

/*
 * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
 * calls wait_for_completion(&vfork) and reset right after it returns from this
 * function.  Next, the parent should call try_to_freeze() to freeze itself
 * appropriately in case the child has exited before the freezing of tasks is
 * complete.  However, we don't want kernel threads to be frozen in unexpected
 * places, so we allow them to block freeze_processes() instead or to set
 * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
 * parent won't really block freeze_processes(), since ____call_usermodehelper()
 * (the child) does a little before exec/exit and it can't be frozen before
 * waking up the parent.
 */
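
/*
 * Illustrative sketch only: the vfork parent path described above roughly
 * follows this pattern (simplified from wait_for_vfork_done() in
 * kernel/fork.c):
 *
 *	freezer_do_not_count();
 *	wait_for_completion(&vfork);
 *	freezer_count();
 *
 * freezer_count() clears PF_FREEZER_SKIP and then calls try_to_freeze(),
 * so the parent freezes here if a freeze was requested while it waited.
 */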

/**
 * freezer_do_not_count - tell freezer to ignore %current
 *
 * Tell freezers to ignore the current task when determining whether the
 * target frozen state is reached.  IOW, the current task will be
 * considered frozen enough by freezers.
 *
 * The caller shouldn't do anything which isn't allowed for a frozen task
 * until freezer_count() is called.  Usually, freezer[_do_not]_count() pair
 * wrap a scheduling operation and nothing much else.
 */
static inline void freezer_do_not_count(void)
{
	current->flags |= PF_FREEZER_SKIP;
}

/**
 * freezer_count - tell freezer to stop ignoring %current
 *
 * Undo freezer_do_not_count().  It tells freezers that %current should be
 * considered again and tries to freeze if freezing condition is already in
 * effect.
 */
static inline void freezer_count(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	/*
	 * If freezing is in progress, the following paired with smp_mb()
	 * in freezer_should_skip() ensures that either we see %true
	 * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
	 */
	smp_mb();
	try_to_freeze();
}

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezer_count_unsafe(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	smp_mb();
	try_to_freeze_unsafe();
}

/**
 * freezer_should_skip - whether to skip a task when determining frozen
 *			 state is reached
 * @p: task in question
 *
 * This function is used by freezers after establishing %true freezing() to
 * test whether a task should be skipped when determining the target frozen
 * state is reached.  IOW, if this function returns %true, @p is considered
 * frozen enough.
 */
static inline bool freezer_should_skip(struct task_struct *p)
{
	/*
	 * The following smp_mb() paired with the one in freezer_count()
	 * ensures that either freezer_count() sees %true freezing() or we
	 * see cleared %PF_FREEZER_SKIP and return %false.  This makes it
	 * impossible for a task to slip frozen state testing after
	 * clearing %PF_FREEZER_SKIP.
	 */
	smp_mb();
	return p->flags & PF_FREEZER_SKIP;
}
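
/*
 * Illustrative sketch only: on the freezer side, freeze_task() in
 * kernel/freezer.c bails out early along these lines, so a task that has
 * set PF_FREEZER_SKIP is treated as already frozen:
 *
 *	if (freezer_should_skip(p))
 *		return false;
 */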

/*
 * These functions are intended to be used whenever you want to allow a
 * sleeping task to be frozen.  Note that none of them returns any clear
 * indication of whether a freeze event happened while in this function.
 */

/* Like schedule(), but should not block the freezer. */
static inline void freezable_schedule(void)
{
	freezer_do_not_count();
	schedule();
	freezer_count();
}
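
/*
 * Illustrative sketch only: a wait loop that lets the sleeping task be
 * counted as frozen, in the style of wait_event_freezable() from
 * <linux/wait.h>.  The wait-queue head and condition names are hypothetical
 * and signal handling is omitted for brevity.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq_head, &wait, TASK_INTERRUPTIBLE);
 *		if (my_condition)
 *			break;
 *		freezable_schedule();
 *	}
 *	finish_wait(&my_wq_head, &wait);
 */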

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezable_schedule_unsafe(void)
{
	freezer_do_not_count();
	schedule();
	freezer_count_unsafe();
}

/*
 * Like schedule_timeout(), but should not block the freezer.  Do not
 * call this with locks held.
 */
static inline long freezable_schedule_timeout(long timeout)
{
	long __retval;
	freezer_do_not_count();
	__retval = schedule_timeout(timeout);
	freezer_count();
	return __retval;
}

/*
 * Like schedule_timeout_interruptible(), but should not block the freezer.
 * Do not call this with locks held.
 */
static inline long freezable_schedule_timeout_interruptible(long timeout)
{
	long __retval;
	freezer_do_not_count();
	__retval = schedule_timeout_interruptible(timeout);
	freezer_count();
	return __retval;
}

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout)
{
	long __retval;

	freezer_do_not_count();
	__retval = schedule_timeout_interruptible(timeout);
	freezer_count_unsafe();
	return __retval;
}

/* Like schedule_timeout_killable(), but should not block the freezer. */
static inline long freezable_schedule_timeout_killable(long timeout)
{
	long __retval;
	freezer_do_not_count();
	__retval = schedule_timeout_killable(timeout);
	freezer_count();
	return __retval;
}

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
{
	long __retval;
	freezer_do_not_count();
	__retval = schedule_timeout_killable(timeout);
	freezer_count_unsafe();
	return __retval;
}

/*
 * Like schedule_hrtimeout_range(), but should not block the freezer.  Do not
 * call this with locks held.
 */
static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
		u64 delta, const enum hrtimer_mode mode)
{
	int __retval;
	freezer_do_not_count();
	__retval = schedule_hrtimeout_range(expires, delta, mode);
	freezer_count();
	return __retval;
}

/*
 * Freezer-friendly wrappers around wait_event_interruptible(),
 * wait_event_killable() and wait_event_interruptible_timeout(), originally
 * defined in <linux/wait.h>
 */

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
#define wait_event_freezekillable_unsafe(wq, condition)		\
({									\
	int __retval;							\
	freezer_do_not_count();						\
	__retval = wait_event_killable(wq, (condition));		\
	freezer_count_unsafe();						\
	__retval;							\
})

#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool frozen_or_skipped(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
static inline void __thaw_task(struct task_struct *t) {}

static inline bool __refrigerator(bool check_kthr_stop) { return false; }
static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}

static inline bool try_to_freeze_nowarn(void) { return false; }
static inline bool try_to_freeze(void) { return false; }

static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}

#define freezable_schedule()  schedule()

#define freezable_schedule_unsafe()  schedule()

#define freezable_schedule_timeout(timeout)  schedule_timeout(timeout)

#define freezable_schedule_timeout_interruptible(timeout)		\
	schedule_timeout_interruptible(timeout)

#define freezable_schedule_timeout_interruptible_unsafe(timeout)	\
	schedule_timeout_interruptible(timeout)

#define freezable_schedule_timeout_killable(timeout)			\
	schedule_timeout_killable(timeout)

#define freezable_schedule_timeout_killable_unsafe(timeout)		\
	schedule_timeout_killable(timeout)

#define freezable_schedule_hrtimeout_range(expires, delta, mode)	\
	schedule_hrtimeout_range(expires, delta, mode)

#define wait_event_freezekillable_unsafe(wq, condition)		\
		wait_event_killable(wq, condition)

#endif /* !CONFIG_FREEZER */

#endif	/* FREEZER_H_INCLUDED */