/* xref: /OK3568_Linux_fs/kernel/include/linux/oom.h (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INCLUDE_LINUX_OOM_H
#define __INCLUDE_LINUX_OOM_H


#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>
#include <linux/sched/coredump.h> /* MMF_* */
#include <linux/mm.h> /* VM_FAULT* */

struct zonelist;
struct notifier_block;
struct mem_cgroup;
struct task_struct;

18*4882a593Smuzhiyun enum oom_constraint {
19*4882a593Smuzhiyun 	CONSTRAINT_NONE,
20*4882a593Smuzhiyun 	CONSTRAINT_CPUSET,
21*4882a593Smuzhiyun 	CONSTRAINT_MEMORY_POLICY,
22*4882a593Smuzhiyun 	CONSTRAINT_MEMCG,
23*4882a593Smuzhiyun };
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun /*
26*4882a593Smuzhiyun  * Details of the page allocation that triggered the oom killer that are used to
27*4882a593Smuzhiyun  * determine what should be killed.
28*4882a593Smuzhiyun  */
29*4882a593Smuzhiyun struct oom_control {
30*4882a593Smuzhiyun 	/* Used to determine cpuset */
31*4882a593Smuzhiyun 	struct zonelist *zonelist;
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun 	/* Used to determine mempolicy */
34*4882a593Smuzhiyun 	nodemask_t *nodemask;
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun 	/* Memory cgroup in which oom is invoked, or NULL for global oom */
37*4882a593Smuzhiyun 	struct mem_cgroup *memcg;
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun 	/* Used to determine cpuset and node locality requirement */
40*4882a593Smuzhiyun 	const gfp_t gfp_mask;
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun 	/*
43*4882a593Smuzhiyun 	 * order == -1 means the oom kill is required by sysrq, otherwise only
44*4882a593Smuzhiyun 	 * for display purposes.
45*4882a593Smuzhiyun 	 */
46*4882a593Smuzhiyun 	const int order;
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun 	/* Used by oom implementation, do not set */
49*4882a593Smuzhiyun 	unsigned long totalpages;
50*4882a593Smuzhiyun 	struct task_struct *chosen;
51*4882a593Smuzhiyun 	long chosen_points;
52*4882a593Smuzhiyun 	struct task_struct *chosen_non_negative_adj;
53*4882a593Smuzhiyun 	long chosen_non_negative_adj_points;
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun 	/* Used to print the constraint info. */
56*4882a593Smuzhiyun 	enum oom_constraint constraint;
57*4882a593Smuzhiyun };
58*4882a593Smuzhiyun 
extern struct mutex oom_lock;
extern struct mutex oom_adj_mutex;

set_current_oom_origin(void)62*4882a593Smuzhiyun static inline void set_current_oom_origin(void)
63*4882a593Smuzhiyun {
64*4882a593Smuzhiyun 	current->signal->oom_flag_origin = true;
65*4882a593Smuzhiyun }
66*4882a593Smuzhiyun 
clear_current_oom_origin(void)67*4882a593Smuzhiyun static inline void clear_current_oom_origin(void)
68*4882a593Smuzhiyun {
69*4882a593Smuzhiyun 	current->signal->oom_flag_origin = false;
70*4882a593Smuzhiyun }
71*4882a593Smuzhiyun 
oom_task_origin(const struct task_struct * p)72*4882a593Smuzhiyun static inline bool oom_task_origin(const struct task_struct *p)
73*4882a593Smuzhiyun {
74*4882a593Smuzhiyun 	return p->signal->oom_flag_origin;
75*4882a593Smuzhiyun }
76*4882a593Smuzhiyun 
tsk_is_oom_victim(struct task_struct * tsk)77*4882a593Smuzhiyun static inline bool tsk_is_oom_victim(struct task_struct * tsk)
78*4882a593Smuzhiyun {
79*4882a593Smuzhiyun 	return tsk->signal->oom_mm;
80*4882a593Smuzhiyun }
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun /*
83*4882a593Smuzhiyun  * Use this helper if tsk->mm != mm and the victim mm needs a special
84*4882a593Smuzhiyun  * handling. This is guaranteed to stay true after once set.
85*4882a593Smuzhiyun  */
mm_is_oom_victim(struct mm_struct * mm)86*4882a593Smuzhiyun static inline bool mm_is_oom_victim(struct mm_struct *mm)
87*4882a593Smuzhiyun {
88*4882a593Smuzhiyun 	return test_bit(MMF_OOM_VICTIM, &mm->flags);
89*4882a593Smuzhiyun }
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun /*
92*4882a593Smuzhiyun  * Checks whether a page fault on the given mm is still reliable.
93*4882a593Smuzhiyun  * This is no longer true if the oom reaper started to reap the
94*4882a593Smuzhiyun  * address space which is reflected by MMF_UNSTABLE flag set in
95*4882a593Smuzhiyun  * the mm. At that moment any !shared mapping would lose the content
96*4882a593Smuzhiyun  * and could cause a memory corruption (zero pages instead of the
97*4882a593Smuzhiyun  * original content).
98*4882a593Smuzhiyun  *
99*4882a593Smuzhiyun  * User should call this before establishing a page table entry for
100*4882a593Smuzhiyun  * a !shared mapping and under the proper page table lock.
101*4882a593Smuzhiyun  *
102*4882a593Smuzhiyun  * Return 0 when the PF is safe VM_FAULT_SIGBUS otherwise.
103*4882a593Smuzhiyun  */
check_stable_address_space(struct mm_struct * mm)104*4882a593Smuzhiyun static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
105*4882a593Smuzhiyun {
106*4882a593Smuzhiyun 	if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
107*4882a593Smuzhiyun 		return VM_FAULT_SIGBUS;
108*4882a593Smuzhiyun 	return 0;
109*4882a593Smuzhiyun }
110*4882a593Smuzhiyun 
bool __oom_reap_task_mm(struct mm_struct *mm);

long oom_badness(struct task_struct *p,
		unsigned long totalpages);

extern bool out_of_memory(struct oom_control *oc);

extern void exit_oom_victim(void);

extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);

extern bool oom_killer_disable(signed long timeout);
extern void oom_killer_enable(void);

extern struct task_struct *find_lock_task_mm(struct task_struct *p);

/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;

/* call for adding killed process to reaper. */
extern void add_to_oom_reaper(struct task_struct *p);
#endif /* __INCLUDE_LINUX_OOM_H */