// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list. Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail. Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 * Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory. In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may have been used more than once, but are
 * hopefully accessed less frequently than the thrashing set:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages. But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot. This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *      NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *      (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
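 *
 * As a worked example (illustrative numbers, not measurements):
 * suppose NR_inactive = 100 and NR_active = 50, a page is evicted
 * when the nonresident age reads E = 1000, and it refaults when the
 * age reads R = 1040. The refault distance R - E = 40 is within
 * NR_active, so with 40 more inactive slots - carved out of the
 * active list in the worst case - the page would have been activated
 * rather than evicted, and it is activated on refault. Had it come
 * back at R = 1060, the distance of 60 would exceed NR_active: the
 * page could not have stayed resident even with every cache slot at
 * its disposal, so it stays inactive.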
 *
 *
 * Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current active list.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 * Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 * Implementation
 *
 * For each node's LRU lists, a counter for inactive evictions and
 * activations is maintained (node->nonresident_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache
 * slot of the evicted page. This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

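/*
 * A sketch of the word pack_shadow() builds below. Field widths are
 * illustrative; the real ones follow NODES_SHIFT and
 * MEM_CGROUP_ID_SHIFT for the running configuration:
 *
 *   MSB                                                     LSB
 *   +------------------+---------+---------+-------------+---+
 *   | eviction counter | memcgid | node id | workingset? | V |
 *   +------------------+---------+---------+-------------+---+
 *
 * The low bit (V) is claimed by xa_mk_value() to tag the word as an
 * xarray value entry rather than a pointer; that is what the
 * BITS_PER_LONG - BITS_PER_XA_VALUE term in EVICTION_SHIFT accounts
 * for.
 */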
static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction >>= bucket_order;
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << 1) | workingset;

	return xa_mk_value(eviction);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & 1;
	entry >>= 1;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry << bucket_order;
	*workingsetp = workingset;
}

/**
 * workingset_age_nonresident - age non-resident entries as LRU ages
 * @lruvec: the lruvec that was aged
 * @nr_pages: the number of pages to count
 *
 * As in-memory pages are aged, non-resident pages need to be aged as
 * well, in order for the refault distances later on to be comparable
 * to the in-memory dimensions. This function allows reclaim and LRU
 * operations to drive the non-resident aging along in parallel.
 */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
{
	/*
	 * Reclaiming a cgroup means reclaiming all its children in a
	 * round-robin fashion. That means that each cgroup has an LRU
	 * order that is composed of the LRU orders of its child
	 * cgroups; and every page has an LRU position not just in the
	 * cgroup that owns it, but in all of that group's ancestors.
	 *
	 * So when the physical inactive list of a leaf cgroup ages,
	 * the virtual inactive lists of all its parents, including
	 * the root cgroup's, age as well.
	 */
	do {
		atomic_long_add(nr_pages, &lruvec->nonresident_age);
	} while ((lruvec = parent_lruvec(lruvec)));
}

/**
 * workingset_eviction - note the eviction of a page from memory
 * @target_memcg: the cgroup that is causing the reclaim
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @page->mapping->i_pages in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
{
	struct pglist_data *pgdat = page_pgdat(page);
	unsigned long eviction;
	struct lruvec *lruvec;
	int memcgid;

	/* Page is fully exclusive and pins page->mem_cgroup */
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	workingset_age_nonresident(lruvec, thp_nr_pages(page));
	/* XXX: target_memcg can be NULL, go through lruvec */
	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
	eviction = atomic_long_read(&lruvec->nonresident_age);
	return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
}
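
/*
 * A sketch of the expected call pattern, modeled on reclaim's
 * __remove_mapping() (an assumption about the call site; the page
 * must be locked, unmapped and refcount-frozen at this point):
 *
 *	shadow = workingset_eviction(page, target_memcg);
 *	__delete_from_page_cache(page, shadow);
 */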

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @page: the freshly allocated replacement page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the node and the memcg whose memory
 * pressure caused the eviction.
 */
void workingset_refault(struct page *page, void *shadow)
{
	bool file = page_is_file_lru(page);
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
	unsigned long workingset_size;
	struct pglist_data *pgdat;
	struct mem_cgroup *memcg;
	unsigned long eviction;
	struct lruvec *lruvec;
	unsigned long refault;
	bool workingset;
	int memcgid;

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the page's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared page. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	eviction_memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() && !eviction_memcg)
		goto out;
	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
	refault = atomic_long_read(&eviction_lruvec->nonresident_age);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across nonresident_age overflows in most cases. There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old. But it is not impossible for the
	 * nonresident_age to lap a shadow entry in the field, which
	 * can then result in a false small refault distance, leading
	 * to a false activation should this old entry actually
	 * refault again. However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;

	/*
	 * The activation decision for this page is made at the level
	 * where the eviction occurred, as that is where the LRU order
	 * during page reclaim is being determined.
	 *
	 * However, the cgroup that will own the page is the one that
	 * is actually experiencing the refault event.
	 */
	memcg = page_memcg(page);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

	inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);

	/*
	 * Compare the distance to the existing workingset size. We
	 * don't activate pages that couldn't stay resident even if
	 * all the memory was available to the workingset. Whether
	 * workingset competition needs to consider anon or not depends
	 * on having swap.
	 */
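	/*
	 * Summarizing the checks below, the refaulting page competes
	 * against:
	 *
	 *   file refault, no swap:  active_file
	 *   file refault, swap:     active_file + all of anon
	 *   anon refault:           active_file + inactive_file,
	 *                           plus active_anon if swap is left
	 */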
	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
	if (!file) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_FILE);
	}
	if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_ACTIVE_ANON);
		if (file) {
			workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_ANON);
		}
	}
	if (refault_distance > workingset_size)
		goto out;

	SetPageActive(page);
	workingset_age_nonresident(lruvec, thp_nr_pages(page));
	inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file);

	/* Page was active prior to eviction */
	if (workingset) {
		SetPageWorkingset(page);
		/* XXX: Move to lru_cache_add() when it supports new vs putback */
		spin_lock_irq(&page_pgdat(page)->lru_lock);
		lru_note_cost_page(page);
		spin_unlock_irq(&page_pgdat(page)->lru_lock);
		inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file);
	}
out:
	rcu_read_unlock();
}
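
/*
 * A sketch of the typical call site, modeled on the page cache fault
 * path (an assumption about the caller): when the lookup finds a
 * shadow entry where the page used to be, the replacement page is
 * evaluated before it goes onto the LRU:
 *
 *	if (shadow)
 *		workingset_refault(page, shadow);
 *	lru_cache_add(page);
 */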

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = page_memcg_rcu(page);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
	workingset_age_nonresident(lruvec, thp_nr_pages(page));
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload. In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes. To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

static struct list_lru shadow_nodes;

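/*
 * This is installed as the xarray node-update callback for page
 * cache mappings (via mapping_set_update() in mm/internal.h, in
 * kernels of this vintage), so it runs under the i_pages lock
 * whenever a node's entry counts change.
 */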
void workingset_update_node(struct xa_node *node)
{
	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
	VM_WARN_ON_ONCE(!irqs_disabled()); /* For __inc_lruvec_page_state */

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add(&shadow_nodes, &node->private_list);
			__inc_lruvec_slab_state(node, WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del(&shadow_nodes, &node->private_list);
			__dec_lruvec_slab_state(node, WORKINGSET_NODES);
		}
	}
}

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 *
	 * i.e. 1/7 * 1/64 * 8 = 1/56, or about 1.8%.
	 */
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;
		int i;

		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
			pages += lruvec_page_state_local(lruvec,
							 NR_LRU_BASE + i);
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT;
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT;
	} else
#endif
		pages = node_present_pages(sc->nid);

	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);

	if (!nodes)
		return SHRINK_EMPTY;

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg) __must_hold(lru_lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * lru_lock. Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	__dec_lruvec_slab_state(node, WORKINGSET_NODES);

	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	mapping->nrexceptional -= node->nr_values;
	xa_delete_node(node, workingset_update_node);
	__inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	spin_lock_irq(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = 0, /* ->count reports only fully expendable nodes */
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
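	/*
	 * For example (an illustrative configuration): on 64-bit with
	 * NODES_SHIFT=6 and MEM_CGROUP_ID_SHIFT=16, EVICTION_SHIFT is
	 * 1 + 1 + 6 + 16 = 24, leaving 40 timestamp bits. A machine
	 * with 16GB of 4K pages has max_order = 22 <= 40, so
	 * bucket_order stays 0 and no granularity is lost.
	 */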
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
		timestamp_bits, max_order, bucket_order);

	ret = prealloc_shrinker(&workingset_shadow_shrinker);
	if (ret)
		goto err;
	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
			      &workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	register_shrinker_prepared(&workingset_shadow_shrinker);
	return 0;
err_list_lru:
	free_prealloced_shrinker(&workingset_shadow_shrinker);
err:
	return ret;
}
module_init(workingset_init);