// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux VM pressure
 *
 * Copyright 2012 Linaro Ltd.
 *		  Anton Vorontsov <anton.vorontsov@linaro.org>
 *
 * Based on ideas from Andrew Morton, David Rientjes, KOSAKI Motohiro,
 * Leonid Moiseichuk, Mel Gorman, Minchan Kim and Pekka Enberg.
 */

#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/eventfd.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/printk.h>
#include <linux/vmpressure.h>

#include <trace/hooks/mm.h>

/*
 * The window size (vmpressure_win) is the number of scanned pages before
 * we try to analyze the scanned/reclaimed ratio. So the window is used as
 * a rate-limit tunable for the "low" level notification, and also for
 * averaging the ratio for the medium/critical levels. Using a small
 * window size can cause many false positives, but a too-large window
 * size will delay the notifications.
 *
 * As the vmscan reclaimer logic works with chunks which are multiples of
 * SWAP_CLUSTER_MAX, it makes sense to use it for the window size as well.
 *
 * TODO: Make the window size depend on machine size, as we do for vmstat
 * thresholds. Currently we set it to 512 pages (2MB for 4KB pages).
 */
static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;
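
/*
 * Worked example (illustrative, assuming SWAP_CLUSTER_MAX is 32 as in
 * mainline <linux/swap.h>): the window is 32 * 16 = 512 pages, i.e. 2MB
 * with 4KB pages, so notifications are evaluated at most once per 512
 * scanned pages.
 */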

/*
 * These thresholds are used when we account memory pressure through
 * scanned/reclaimed ratio. The current values were chosen empirically. In
 * essence, they are percentages: the higher the value, the more
 * unsuccessful reclaims there were.
 */
static const unsigned int vmpressure_level_med = 60;
static const unsigned int vmpressure_level_critical = 95;

/*
 * When there are too few pages left to scan, vmpressure() may miss the
 * critical pressure as the number of pages will be less than the "window
 * size". However, in that case the vmscan priority will rise quickly as
 * the reclaimer will try to scan LRUs more deeply.
 *
 * The vmscan logic considers these special priorities:
 *
 * prio == DEF_PRIORITY (12): reclaimer starts with that value
 * prio <= DEF_PRIORITY - 2 : kswapd becomes somewhat overwhelmed
 * prio == 0                : close to OOM, kernel scans every page in an lru
 *
 * Any value in this range is acceptable for this tunable (i.e. from 12 to
 * 0). The current value of vmpressure_level_critical_prio was chosen
 * empirically, but the number, in essence, means that we consider the
 * level critical when the scanning depth is ~10% of the lru size (vmscan
 * scans 'lru_size >> prio' pages, so it is actually 12.5%, or one
 * eighth).
 */
static const unsigned int vmpressure_level_critical_prio = ilog2(100 / 10);
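
/*
 * Worked example (illustrative): ilog2(100 / 10) = ilog2(10) = 3, and a
 * reclaim pass with prio <= 3 scans at least lru_size >> 3, i.e. 1/8 or
 * 12.5% of each lru, which vmpressure_prio() below treats as critical.
 */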

static struct vmpressure *work_to_vmpressure(struct work_struct *work)
{
	return container_of(work, struct vmpressure, work);
}

static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
{
	struct cgroup_subsys_state *css = vmpressure_to_css(vmpr);
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return memcg_to_vmpressure(memcg);
}

enum vmpressure_levels {
	VMPRESSURE_LOW = 0,
	VMPRESSURE_MEDIUM,
	VMPRESSURE_CRITICAL,
	VMPRESSURE_NUM_LEVELS,
};

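/*
 * Event reporting modes (descriptive note; the semantics follow from the
 * filtering in vmpressure_event() below): in the "default" mode an event
 * fires for pressure anywhere in the watched memcg's subtree, but is
 * suppressed once a watcher deeper in the hierarchy has been signalled;
 * "hierarchy" events always propagate up regardless; "local" events fire
 * only when the watched memcg itself is the one under reclaim.
 */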
enum vmpressure_modes {
	VMPRESSURE_NO_PASSTHROUGH = 0,
	VMPRESSURE_HIERARCHY,
	VMPRESSURE_LOCAL,
	VMPRESSURE_NUM_MODES,
};

static const char * const vmpressure_str_levels[] = {
	[VMPRESSURE_LOW] = "low",
	[VMPRESSURE_MEDIUM] = "medium",
	[VMPRESSURE_CRITICAL] = "critical",
};

static const char * const vmpressure_str_modes[] = {
	[VMPRESSURE_NO_PASSTHROUGH] = "default",
	[VMPRESSURE_HIERARCHY] = "hierarchy",
	[VMPRESSURE_LOCAL] = "local",
};

static enum vmpressure_levels vmpressure_level(unsigned long pressure)
{
	if (pressure >= vmpressure_level_critical)
		return VMPRESSURE_CRITICAL;
	else if (pressure >= vmpressure_level_med)
		return VMPRESSURE_MEDIUM;
	return VMPRESSURE_LOW;
}

static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
						    unsigned long reclaimed)
{
	unsigned long scale = scanned + reclaimed;
	unsigned long pressure = 0;

	/*
	 * reclaimed can be greater than scanned for things such as reclaimed
	 * slab pages. shrink_node() just adds reclaimed pages without a
	 * related increment to scanned pages.
	 */
	if (reclaimed >= scanned)
		goto out;
	/*
	 * We calculate the ratio (in percent) of how many pages were
	 * scanned vs. reclaimed in a given time frame (window). Note that
	 * time is in VM reclaimer's "ticks", i.e. number of pages
	 * scanned. This makes it possible to set desired reaction time
	 * and serves as a ratelimit.
	 */
	pressure = scale - (reclaimed * scale / scanned);
	pressure = pressure * 100 / scale;

out:
	pr_debug("%s: %3lu  (s: %lu  r: %lu)\n", __func__, pressure,
		 scanned, reclaimed);

	return vmpressure_level(pressure);
}
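
/*
 * Worked example (illustrative numbers): scanned = 512, reclaimed = 64
 * gives scale = 576, pressure = 576 - (64 * 576 / 512) = 504, and
 * 504 * 100 / 576 = 87 (integer division), which is >= 60 but < 95, so
 * vmpressure_level() reports VMPRESSURE_MEDIUM.
 */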

struct vmpressure_event {
	struct eventfd_ctx *efd;
	enum vmpressure_levels level;
	enum vmpressure_modes mode;
	struct list_head node;
};

static bool vmpressure_event(struct vmpressure *vmpr,
			     const enum vmpressure_levels level,
			     bool ancestor, bool signalled)
{
	struct vmpressure_event *ev;
	bool ret = false;

	mutex_lock(&vmpr->events_lock);
	list_for_each_entry(ev, &vmpr->events, node) {
		if (ancestor && ev->mode == VMPRESSURE_LOCAL)
			continue;
		if (signalled && ev->mode == VMPRESSURE_NO_PASSTHROUGH)
			continue;
		if (level < ev->level)
			continue;
		eventfd_signal(ev->efd, 1);
		ret = true;
	}
	mutex_unlock(&vmpr->events_lock);

	return ret;
}
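
/*
 * Filtering example (illustrative hierarchy a/b/c): for reclaim in c, a
 * "medium,local" watcher on a never fires (ancestor is true by the time
 * the walk reaches a); a "low" watcher on a in the default mode fires
 * only if no watcher on c or b was signalled first; a "low,hierarchy"
 * watcher on a fires whenever the computed level is at least "low".
 */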

static void vmpressure_work_fn(struct work_struct *work)
{
	struct vmpressure *vmpr = work_to_vmpressure(work);
	unsigned long scanned;
	unsigned long reclaimed;
	enum vmpressure_levels level;
	bool ancestor = false;
	bool signalled = false;

	spin_lock(&vmpr->sr_lock);
	/*
	 * Several contexts might be calling vmpressure(), so it is
	 * possible that the work was rescheduled again before the old
	 * work context cleared the counters. In that case we will run
	 * just after the old work returns, but then scanned might be zero
	 * here. No need for any locks here since we don't care if
	 * vmpr->reclaimed is in sync.
	 */
	scanned = vmpr->tree_scanned;
	if (!scanned) {
		spin_unlock(&vmpr->sr_lock);
		return;
	}

	reclaimed = vmpr->tree_reclaimed;
	vmpr->tree_scanned = 0;
	vmpr->tree_reclaimed = 0;
	spin_unlock(&vmpr->sr_lock);

	level = vmpressure_calc_level(scanned, reclaimed);

	do {
		if (vmpressure_event(vmpr, level, ancestor, signalled))
			signalled = true;
		ancestor = true;
	} while ((vmpr = vmpressure_parent(vmpr)));
}
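
/*
 * The loop above walks from the pressured memcg towards the root: the
 * first iteration runs with ancestor == false (the memcg itself), all
 * later ones with ancestor == true, and signalled latches once any
 * eventfd has fired. Together these implement the mode semantics
 * described above enum vmpressure_modes.
 */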

/**
 * vmpressure() - Account memory pressure through scanned/reclaimed ratio
 * @gfp:	reclaimer's gfp mask
 * @memcg:	cgroup memory controller handle
 * @tree:	legacy subtree mode
 * @scanned:	number of pages scanned
 * @reclaimed:	number of pages reclaimed
 *
 * This function should be called from the vmscan reclaim path to account
 * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw
 * pressure index is then further refined and averaged over time.
 *
 * If @tree is set, vmpressure is in traditional userspace reporting
 * mode: @memcg is considered the pressure root and userspace is
 * notified of the entire subtree's reclaim efficiency.
 *
 * If @tree is not set, reclaim efficiency is recorded for @memcg, and
 * only in-kernel users are notified.
 *
 * This function does not return any value.
 */
void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
		unsigned long scanned, unsigned long reclaimed)
{
	struct vmpressure *vmpr;
	bool bypass = false;

	if (mem_cgroup_disabled())
		return;

	vmpr = memcg_to_vmpressure(memcg);

	trace_android_vh_vmpressure(memcg, &bypass);
	if (unlikely(bypass))
		return;

	/*
	 * Here we only want to account pressure that userland is able to
	 * help us with. For example, suppose that DMA zone is under
	 * pressure; if we notify userland about that kind of pressure,
	 * then it will be mostly a waste as it will trigger unnecessary
	 * freeing of memory by userland (since userland is more likely to
	 * have HIGHMEM/MOVABLE pages instead of the DMA fallback). That
	 * is why we include only movable, highmem and FS/IO pages.
	 * Indirect reclaim (kswapd) sets sc->gfp_mask to GFP_KERNEL, so
	 * we account it too.
	 */
	if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS)))
		return;

	/*
	 * If we got here with no pages scanned, then that is an indicator
	 * that reclaimer was unable to find any shrinkable LRUs at the
	 * current scanning depth. But it does not mean that we should
	 * report the critical pressure, yet. If the scanning priority
	 * (scanning depth) goes too high (deep), we will be notified
	 * through vmpressure_prio(). But so far, keep calm.
	 */
	if (!scanned)
		return;

	if (tree) {
		spin_lock(&vmpr->sr_lock);
		scanned = vmpr->tree_scanned += scanned;
		vmpr->tree_reclaimed += reclaimed;
		spin_unlock(&vmpr->sr_lock);

		if (scanned < vmpressure_win)
			return;
		schedule_work(&vmpr->work);
	} else {
		enum vmpressure_levels level;

		/* For now, no users for root-level efficiency */
		if (!memcg || mem_cgroup_is_root(memcg))
			return;

		spin_lock(&vmpr->sr_lock);
		scanned = vmpr->scanned += scanned;
		reclaimed = vmpr->reclaimed += reclaimed;
		if (scanned < vmpressure_win) {
			spin_unlock(&vmpr->sr_lock);
			return;
		}
		vmpr->scanned = vmpr->reclaimed = 0;
		spin_unlock(&vmpr->sr_lock);

		level = vmpressure_calc_level(scanned, reclaimed);

		if (level > VMPRESSURE_LOW) {
			/*
			 * Let the socket buffer allocator know that
			 * we are having trouble reclaiming LRU pages.
			 *
			 * For hysteresis keep the pressure state
			 * asserted for a second in which subsequent
			 * pressure events can occur.
			 */
			memcg->socket_pressure = jiffies + HZ;
		}
	}
}

/**
 * vmpressure_prio() - Account memory pressure through reclaimer priority level
 * @gfp:	reclaimer's gfp mask
 * @memcg:	cgroup memory controller handle
 * @prio:	reclaimer's priority
 *
 * This function should be called from the reclaim path every time the
 * vmscan's reclaiming priority (scanning depth) changes.
 *
 * This function does not return any value.
 */
void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
{
	/*
	 * We only use prio for accounting the critical level. For more info
	 * see the comment for the vmpressure_level_critical_prio variable
	 * above.
	 */
	if (prio > vmpressure_level_critical_prio)
		return;

	/*
	 * OK, the prio is below the threshold, so update the vmpressure
	 * information before the shrinker dives into a long, deep run of
	 * vmscan. Passing scanned = vmpressure_win, reclaimed = 0 to
	 * vmpressure() basically means that we signal the 'critical'
	 * level.
	 */
	vmpressure(gfp, memcg, true, vmpressure_win, 0);
}
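
/*
 * Worked check (illustrative, assuming no other scanned/reclaimed counts
 * were accumulated in the window): with scanned = vmpressure_win and
 * reclaimed = 0, vmpressure_calc_level() computes scale = scanned,
 * pressure = scanned, and scanned * 100 / scanned = 100, which is
 * >= vmpressure_level_critical (95), i.e. VMPRESSURE_CRITICAL.
 */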

/*
 * The longest legal @args string is "critical,hierarchy": the longest
 * level, the longest mode, plus two bytes for the comma and the trailing
 * NUL (8 + 9 + 2 = 19).
 */
#define MAX_VMPRESSURE_ARGS_LEN	(strlen("critical") + strlen("hierarchy") + 2)

/**
 * vmpressure_register_event() - Bind vmpressure notifications to an eventfd
 * @memcg:	memcg that is interested in vmpressure notifications
 * @eventfd:	eventfd context to link notifications with
 * @args:	event arguments (pressure level threshold, optional mode)
 *
 * This function associates eventfd context with the vmpressure
 * infrastructure, so that the notifications will be delivered to the
 * @eventfd. The @args parameter is a comma-delimited string that denotes a
 * pressure level threshold (one of vmpressure_str_levels, i.e. "low", "medium",
 * or "critical") and an optional mode (one of vmpressure_str_modes, i.e.
 * "hierarchy" or "local").
 *
 * To be used as memcg event method.
 *
 * Return: 0 on success, -ENOMEM on allocation failure or -EINVAL if @args
 * could not be parsed.
 */
int vmpressure_register_event(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
	struct vmpressure_event *ev;
	enum vmpressure_modes mode = VMPRESSURE_NO_PASSTHROUGH;
	enum vmpressure_levels level;
	char *spec, *spec_orig;
	char *token;
	int ret = 0;

	spec_orig = spec = kstrndup(args, MAX_VMPRESSURE_ARGS_LEN, GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Find required level */
	token = strsep(&spec, ",");
	ret = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
	if (ret < 0)
		goto out;
	level = ret;

	/* Find optional mode */
	token = strsep(&spec, ",");
	if (token) {
		ret = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
		if (ret < 0)
			goto out;
		mode = ret;
	}

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev) {
		ret = -ENOMEM;
		goto out;
	}

	ev->efd = eventfd;
	ev->level = level;
	ev->mode = mode;

	mutex_lock(&vmpr->events_lock);
	list_add(&ev->node, &vmpr->events);
	mutex_unlock(&vmpr->events_lock);
	ret = 0;
out:
	kfree(spec_orig);
	return ret;
}
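
/*
 * Usage sketch (illustrative; per the cgroup v1 memcg event interface,
 * the cgroup path below is an assumption for the example): userspace
 * creates an eventfd, opens this memcg's memory.pressure_level file and
 * writes "<event_fd> <pressure_level_fd> <level[,mode]>" to
 * cgroup.event_control, e.g. via the cgroup_event_listener helper from
 * tools/cgroup:
 *
 *	# cd /sys/fs/cgroup/memory/foo
 *	# cgroup_event_listener memory.pressure_level "low,hierarchy"
 *
 * after which every eventfd_signal() above wakes the listener.
 */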

/**
 * vmpressure_unregister_event() - Unbind eventfd from vmpressure
 * @memcg:	memcg handle
 * @eventfd:	eventfd context that was used to link vmpressure with @memcg
 *
 * This function does internal manipulations to detach the @eventfd from
 * the vmpressure notifications, and then frees internal resources
 * associated with the @eventfd (but the @eventfd itself is not freed).
 *
 * To be used as memcg event method.
 */
void vmpressure_unregister_event(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
	struct vmpressure_event *ev;

	mutex_lock(&vmpr->events_lock);
	list_for_each_entry(ev, &vmpr->events, node) {
		if (ev->efd != eventfd)
			continue;
		list_del(&ev->node);
		kfree(ev);
		break;
	}
	mutex_unlock(&vmpr->events_lock);
}

/**
 * vmpressure_init() - Initialize vmpressure control structure
 * @vmpr:	Structure to be initialized
 *
 * This function should be called on every allocated vmpressure structure
 * before any usage.
 */
void vmpressure_init(struct vmpressure *vmpr)
{
	spin_lock_init(&vmpr->sr_lock);
	mutex_init(&vmpr->events_lock);
	INIT_LIST_HEAD(&vmpr->events);
	INIT_WORK(&vmpr->work, vmpressure_work_fn);
}

/**
 * vmpressure_cleanup() - shuts down vmpressure control structure
 * @vmpr:	Structure to be cleaned up
 *
 * This function should be called before the structure in which it is
 * embedded is cleaned up.
 */
void vmpressure_cleanup(struct vmpressure *vmpr)
{
	/*
	 * Make sure there is no pending work before eventfd infrastructure
	 * goes away.
	 */
	flush_work(&vmpr->work);
}