// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>
#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>

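/*
 * OOM policy knobs exposed under /proc/sys/vm/: panic_on_oom,
 * oom_kill_allocating_task and oom_dump_tasks.
 */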
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;

/*
 * Serializes oom killer invocations (out_of_memory()) from all contexts to
 * prevent over-eager oom killing (e.g. when the oom killer is invoked
 * from different domains).
 *
 * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled
 * and mark_oom_victim.
 */
DEFINE_MUTEX(oom_lock);
/* Serializes oom_score_adj and oom_score_adj_min updates */
DEFINE_MUTEX(oom_adj_mutex);

static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}

#ifdef CONFIG_NUMA
/**
 * oom_cpuset_eligible() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @oc: pointer to struct oom_control
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 *
 * This function is assuming oom-killer context and 'current' has triggered
 * the oom-killer.
 */
static bool oom_cpuset_eligible(struct task_struct *start,
				struct oom_control *oc)
{
	struct task_struct *tsk;
	bool ret = false;
	const nodemask_t *mask = oc->nodemask;

	if (is_memcg_oom(oc))
		return true;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant. Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * kthread_use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}

/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;
	return false;
}

/*
 * Print out unreclaimable slabs info when unreclaimable slabs amount is greater
 * than all user memory (LRU pages).
 */
static bool is_dump_unreclaim_slabs(void)
{
	unsigned long nr_lru;

	nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
		 global_node_page_state(NR_INACTIVE_ANON) +
		 global_node_page_state(NR_ACTIVE_FILE) +
		 global_node_page_state(NR_INACTIVE_FILE) +
		 global_node_page_state(NR_ISOLATED_ANON) +
		 global_node_page_state(NR_ISOLATED_FILE) +
		 global_node_page_state(NR_UNEVICTABLE);

	return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru);
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of which task we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
long oom_badness(struct task_struct *p, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p))
		return LONG_MIN;

	p = find_lock_task_mm(p);
	if (!p)
		return LONG_MIN;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable, have already been oom reaped or are in
	 * the middle of a vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return LONG_MIN;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		mm_pgtables_bytes(p->mm) / PAGE_SIZE;
	task_unlock(p);

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	return points;
}

static const char * const oom_constraint_text[] = {
	[CONSTRAINT_NONE] = "CONSTRAINT_NONE",
	[CONSTRAINT_CPUSET] = "CONSTRAINT_CPUSET",
	[CONSTRAINT_MEMORY_POLICY] = "CONSTRAINT_MEMORY_POLICY",
	[CONSTRAINT_MEMCG] = "CONSTRAINT_MEMCG",
};

/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type highest_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages() + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * We reach here only when __GFP_NOFAIL is used, so we should avoid
	 * killing current.  We would have to kill a random task in this case.
	 * Ideally this would be CONSTRAINT_THISNODE, but there is no way to
	 * handle that for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			highest_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}

static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	long points;

	if (oom_unkillable_task(task))
		goto next;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP because chances that it would release
	 * any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = LONG_MAX;
		goto select;
	}

	points = oom_badness(task, oc->totalpages);

	if (points == LONG_MIN)
		goto next;

	/*
	 * Check to see if this is the worst task with a non-negative
	 * ADJ score seen so far.
	 */
	if (task->signal->oom_score_adj >= 0 &&
	    points > oc->chosen_non_negative_adj_points) {
		if (oc->chosen_non_negative_adj)
			put_task_struct(oc->chosen_non_negative_adj);
		get_task_struct(task);
		oc->chosen_non_negative_adj = task;
		oc->chosen_non_negative_adj_points = points;
	}

	if (points < oc->chosen_points)
		goto next;

select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen_non_negative_adj)
		put_task_struct(oc->chosen_non_negative_adj);
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen_non_negative_adj = NULL;
	oc->chosen = (void *)-1UL;
	return 1;
}

/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. In case scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	oc->chosen_points = LONG_MIN;
	oc->chosen_non_negative_adj_points = LONG_MIN;
	oc->chosen_non_negative_adj = NULL;

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}

	if (oc->chosen_non_negative_adj) {
		/*
		 * If oc->chosen has a negative ADJ, and we found a task with
		 * a positive ADJ to kill, kill the task with the positive ADJ
		 * instead.
		 */
		if (oc->chosen && oc->chosen->signal->oom_score_adj < 0) {
			put_task_struct(oc->chosen);
			oc->chosen = oc->chosen_non_negative_adj;
			oc->chosen_points = oc->chosen_non_negative_adj_points;
		} else
			put_task_struct(oc->chosen_non_negative_adj);
	}
}

static int dump_task(struct task_struct *p, void *arg)
{
	struct oom_control *oc = arg;
	struct task_struct *task;

	if (oom_unkillable_task(p))
		return 0;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc))
		return 0;

	task = find_lock_task_mm(p);
	if (!task) {
		/*
		 * This is a kthread or all of p's threads have already
		 * detached their mm's.  There's no need to report
		 * them; they can't be oom killed anyway.
		 */
		return 0;
	}

	pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu         %5hd %s\n",
		task->pid, from_kuid(&init_user_ns, task_uid(task)),
		task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
		mm_pgtables_bytes(task->mm),
		get_mm_counter(task->mm, MM_SWAPENTS),
		task->signal->oom_score_adj, task->comm);
	task_unlock(task);

	return 0;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @oc: pointer to struct oom_control
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss,
 * pgtables_bytes, swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct oom_control *oc)
{
	pr_info("Tasks state (memory values in pages):\n");
	pr_info("[  pid  ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			dump_task(p, oc);
		rcu_read_unlock();
	}
}

static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
{
	/* one line summary of the oom killer context. */
	pr_info("oom-kill:constraint=%s,nodemask=%*pbl",
		oom_constraint_text[oc->constraint],
		nodemask_pr_args(oc->nodemask));
	cpuset_print_current_mems_allowed();
	mem_cgroup_print_oom_context(oc->memcg, victim);
	pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid,
		from_kuid(&init_user_ns, task_uid(victim)));
}

static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
		current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	dump_stack();
	if (is_memcg_oom(oc))
		mem_cgroup_print_oom_meminfo(oc->memcg);
	else {
		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
		if (is_dump_unreclaim_slabs())
			dump_unreclaimable_slab();
	}
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc);
	if (p)
		dump_oom_summary(oc, p);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

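/* Convert a count of pages into kilobytes for message output */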
#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}

#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
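/* Victims queued for reaping, chained through tsk->oom_reaper_list */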
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

bool __oom_reap_task_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	bool ret = true;

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable.  No barriers really needed because unmapping
	 * should imply barriers already and the reader would hit a page fault
	 * if it stumbled over reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!can_madv_lru_vma(vma))
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
			struct mmu_notifier_range range;
			struct mmu_gather tlb;

			mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
						vma, mm, vma->vm_start,
						vma->vm_end);
			tlb_gather_mmu(&tlb, mm, range.start, range.end);
			if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
				tlb_finish_mmu(&tlb, range.start, range.end);
				ret = false;
				continue;
			}
			unmap_page_range(&tlb, vma, range.start, range.end, NULL);
			mmu_notifier_invalidate_range_end(&range);
			tlb_finish_mmu(&tlb, range.start, range.end);
		}
	}

	return ret;
}

/*
 * Reaps the address space of the given task.
 *
 * Returns true on success and false if none or part of the address space
 * has been reclaimed and the caller should retry later.
 */
static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = true;

	if (!mmap_read_trylock(mm)) {
		trace_skip_task_reaping(tsk->pid);
		return false;
	}

	/*
	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
	 * under mmap_lock for reading because it serializes against the
	 * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap().
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		trace_skip_task_reaping(tsk->pid);
		goto out_unlock;
	}

	trace_start_task_reaping(tsk->pid);

	/* failed to reap part of the address space. Try again later */
	ret = __oom_reap_task_mm(mm);
	if (!ret)
		goto out_finish;

	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
out_finish:
	trace_finish_task_reaping(tsk->pid);
out_unlock:
	mmap_read_unlock(mm);

	return ret;
}

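/* Number of times the reaper retries taking mmap_lock before giving up on a victim */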
#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the mmap_read_trylock(mm) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES ||
	    test_bit(MMF_OOM_SKIP, &mm->flags))
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	sched_show_task(tsk);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from OOM killer because it has been either reaped or
	 * somebody can't call mmap_write_unlock(mm).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}

static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

static void wake_oom_reaper(struct task_struct *tsk)
{
	/* mm is already queued? */
	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	trace_wake_reaper(tsk->pid);
	wake_up(&oom_reaper_wait);
}

static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */

/*
 * tsk->mm has to be non NULL and caller has to guarantee it is stable (either
 * under task_lock or operate on the current).
 */
static void __mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
		mmgrab(tsk->signal->oom_mm);
		set_bit(MMF_OOM_VICTIM, &mm->flags);
	}
}

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non NULL and caller has to guarantee it is stable (either
 * under task_lock or operate on the current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	__mark_oom_victim(tsk);

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen because OOM killer wouldn't be able to free
	 * any memory and livelock. freezing_slow_path will tell the freezer
	 * that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
	trace_mark_victim(tsk->pid);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
	pr_info("OOM killer enabled.\n");
}

/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new usage of this function should be consulted with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}
	pr_info("OOM killer disabled.\n");

	return true;
}

static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * Caller has to make sure that task->mm is stable (hold task_lock or
 * it operates on the current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without mm because it might have passed its exit_mm and
	 * exit_oom_victim. oom_reaper could have rescued that but do not rely
	 * on that for now. We can consider find_lock_task_mm in future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there are
	 * only small chances it will free some more.
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}

static void __oom_kill_process(struct task_struct *victim, const char *message)
{
	struct task_struct *p;
	struct mm_struct *mm;
	bool can_oom_reap = true;

	p = find_lock_task_mm(victim);
	if (!p) {
		pr_info("%s: OOM victim %d (%s) is already exiting. Skip killing the task\n",
			message, task_pid_nr(victim), victim->comm);
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	mmgrab(mm);

	/* Raise event before sending signal: task reaper must see this */
	count_vm_event(OOM_KILL);
	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);

	/*
	 * We should send SIGKILL before granting access to memory reserves
	 * in order to prevent the OOM victim from depleting the memory
	 * reserves from the user space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
	mark_oom_victim(victim);
	pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%hd\n",
		message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
		K(get_mm_counter(mm, MM_ANONPAGES)),
		K(get_mm_counter(mm, MM_FILEPAGES)),
		K(get_mm_counter(mm, MM_SHMEMPAGES)),
		from_kuid(&init_user_ns, task_uid(victim)),
		mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups, if
	 * any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_lock livelock when an
	 * oom killed thread cannot exit because it requires the semaphore and
	 * it's contended by another thread trying to allocate memory itself.
	 * That thread will now get access to memory reserves since it has a
	 * pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No kthread_use_mm() user needs to read from the userspace so
		 * we are ok to reap it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K

/*
 * Kill provided task unless it's secured by setting
 * oom_score_adj to OOM_SCORE_ADJ_MIN.
 */
static int oom_kill_memcg_member(struct task_struct *task, void *message)
{
	if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
	    !is_global_init(task)) {
		get_task_struct(task);
		__oom_kill_process(task, message);
	}
	return 0;
}

static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *victim = oc->chosen;
	struct mem_cgroup *oom_group;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just give it access to memory reserves
	 * so it can die quickly.
	 */
	task_lock(victim);
	if (task_will_free_mem(victim)) {
		mark_oom_victim(victim);
		wake_oom_reaper(victim);
		task_unlock(victim);
		put_task_struct(victim);
		return;
	}
	task_unlock(victim);

	if (__ratelimit(&oom_rs))
		dump_header(oc, victim);

	/*
	 * Do we need to kill the entire memory cgroup?
	 * Or even one of the ancestor memory cgroups?
	 * Check this out before killing the victim task.
	 */
	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);

	__oom_kill_process(victim, message);

	/*
	 * If necessary, kill all tasks in the selected memory cgroup.
	 */
	if (oom_group) {
		mem_cgroup_print_oom_group(oom_group);
		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
				      (void *)message);
		mem_cgroup_put(oom_group);
	}
}

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (oc->constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0)
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it. The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		wake_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to
	 * make sure to exclude a 0 mask - all other users should have at
	 * least ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has
	 * to invoke the OOM killer even if it is a GFP_NOFS allocation.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	oc->constraint = constrained_alloc(oc);
	if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current) &&
	    oom_cpuset_eligible(current, oc) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! */
	if (!oc->chosen) {
		int ret = false;

		trace_android_vh_oom_check_panic(oc, &ret);
		if (ret)
			return true;

		dump_header(oc, NULL);
		pr_warn("Out of memory and no killable processes...\n");
		/*
		 * If we got here due to an actual allocation at the
		 * system level, we cannot survive this and will enter
		 * an endless loop in the allocator. Bail out now.
		 */
		if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
			panic("System is deadlocked on memory\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL)
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
	return !!oc->chosen;
}

/*
 * The pagefault handler calls here because some allocation has failed. We have
 * to take care of the memcg OOM here because this is the only safe context without
 * any locks held but let the oom killer triggered from the allocation context care
 * about the global OOM.
 */
void pagefault_out_of_memory(void)
{
	static DEFINE_RATELIMIT_STATE(pfoom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (fatal_signal_pending(current))
		return;

	if (__ratelimit(&pfoom_rs))
		pr_warn("Huh VM_FAULT_OOM leaked out to the #PF handler. Retrying PF\n");
}

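/*
 * process_mrelease(2): reap the address space of a dying or already killed
 * process identified by @pidfd, so that its memory can be freed without
 * waiting for the victim to finish exiting on its own.
 */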
SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
{
#ifdef CONFIG_MMU
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	struct task_struct *p;
	unsigned int f_flags;
	bool reap = false;
	struct pid *pid;
	long ret = 0;

	if (flags)
		return -EINVAL;

	pid = pidfd_get_pid(pidfd, &f_flags);
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	task = get_pid_task(pid, PIDTYPE_TGID);
	if (!task) {
		ret = -ESRCH;
		goto put_pid;
	}

	/*
	 * Make sure to choose a thread which still has a reference to mm
	 * during the group exit.
	 */
	p = find_lock_task_mm(task);
	if (!p) {
		ret = -ESRCH;
		goto put_task;
	}

	mm = p->mm;
	mmgrab(mm);

	/*
	 * If we are too late and exit_mmap already checked mm_is_oom_victim
	 * then we will block on mmap_read_lock until exit_mmap releases mmap_lock.
	 */
	set_bit(MMF_OOM_VICTIM, &mm->flags);

	if (task_will_free_mem(p))
		reap = true;
	else {
		/* Error only if the work has not been done already */
		if (!test_bit(MMF_OOM_SKIP, &mm->flags))
			ret = -EINVAL;
	}
	task_unlock(p);

	if (!reap)
		goto drop_mm;

	if (mmap_read_lock_killable(mm)) {
		ret = -EINTR;
		goto drop_mm;
	}
	/*
	 * Check MMF_OOM_SKIP again under mmap_read_lock protection to ensure
	 * possible change in exit_mmap is seen.
	 */
	if (!test_bit(MMF_OOM_SKIP, &mm->flags) && !__oom_reap_task_mm(mm))
		ret = -EAGAIN;
	mmap_read_unlock(mm);

drop_mm:
	mmdrop(mm);
put_task:
	put_task_struct(task);
put_pid:
	put_pid(pid);
	return ret;
#else
	return -ENOSYS;
#endif /* CONFIG_MMU */
}

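/*
 * Queue an already-dying task for the OOM reaper so that its anonymous
 * memory can be reclaimed before the task fully exits.
 */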
void add_to_oom_reaper(struct task_struct *p)
{
	p = find_lock_task_mm(p);
	if (!p)
		return;

	get_task_struct(p);
	if (task_will_free_mem(p)) {
		__mark_oom_victim(p);
		wake_oom_reaper(p);
	}
	task_unlock(p);
	put_task_struct(p);
}