// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

#include "../internal.h"
#include "prmtv-common.h"

__damon_pa_mkold(struct page * page,struct vm_area_struct * vma,unsigned long addr,void * arg)19*4882a593Smuzhiyun static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
20*4882a593Smuzhiyun unsigned long addr, void *arg)
21*4882a593Smuzhiyun {
22*4882a593Smuzhiyun struct page_vma_mapped_walk pvmw = {
23*4882a593Smuzhiyun .page = page,
24*4882a593Smuzhiyun .vma = vma,
25*4882a593Smuzhiyun .address = addr,
26*4882a593Smuzhiyun };
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun while (page_vma_mapped_walk(&pvmw)) {
29*4882a593Smuzhiyun addr = pvmw.address;
30*4882a593Smuzhiyun if (pvmw.pte)
31*4882a593Smuzhiyun damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
32*4882a593Smuzhiyun else
33*4882a593Smuzhiyun damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
34*4882a593Smuzhiyun }
35*4882a593Smuzhiyun return true;
36*4882a593Smuzhiyun }
37*4882a593Smuzhiyun
damon_pa_mkold(unsigned long paddr)38*4882a593Smuzhiyun static void damon_pa_mkold(unsigned long paddr)
39*4882a593Smuzhiyun {
40*4882a593Smuzhiyun struct page *page = damon_get_page(PHYS_PFN(paddr));
41*4882a593Smuzhiyun struct rmap_walk_control rwc = {
42*4882a593Smuzhiyun .rmap_one = __damon_pa_mkold,
43*4882a593Smuzhiyun .anon_lock = page_lock_anon_vma_read,
44*4882a593Smuzhiyun };
45*4882a593Smuzhiyun bool need_lock;
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun if (!page)
48*4882a593Smuzhiyun return;
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun if (!page_mapped(page) || !page_rmapping(page)) {
51*4882a593Smuzhiyun set_page_idle(page);
52*4882a593Smuzhiyun goto out;
53*4882a593Smuzhiyun }
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun need_lock = !PageAnon(page) || PageKsm(page);
56*4882a593Smuzhiyun if (need_lock && !trylock_page(page))
57*4882a593Smuzhiyun goto out;
58*4882a593Smuzhiyun
59*4882a593Smuzhiyun rmap_walk(page, &rwc);
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun if (need_lock)
62*4882a593Smuzhiyun unlock_page(page);
63*4882a593Smuzhiyun
64*4882a593Smuzhiyun out:
65*4882a593Smuzhiyun put_page(page);
66*4882a593Smuzhiyun }
67*4882a593Smuzhiyun
__damon_pa_prepare_access_check(struct damon_ctx * ctx,struct damon_region * r)68*4882a593Smuzhiyun static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
69*4882a593Smuzhiyun struct damon_region *r)
70*4882a593Smuzhiyun {
71*4882a593Smuzhiyun r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
72*4882a593Smuzhiyun
73*4882a593Smuzhiyun damon_pa_mkold(r->sampling_addr);
74*4882a593Smuzhiyun }
75*4882a593Smuzhiyun
damon_pa_prepare_access_checks(struct damon_ctx * ctx)76*4882a593Smuzhiyun static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
77*4882a593Smuzhiyun {
78*4882a593Smuzhiyun struct damon_target *t;
79*4882a593Smuzhiyun struct damon_region *r;
80*4882a593Smuzhiyun
81*4882a593Smuzhiyun damon_for_each_target(t, ctx) {
82*4882a593Smuzhiyun damon_for_each_region(r, t)
83*4882a593Smuzhiyun __damon_pa_prepare_access_check(ctx, r);
84*4882a593Smuzhiyun }
85*4882a593Smuzhiyun }
86*4882a593Smuzhiyun
/* Result of an rmap-walk based access check (filled by __damon_pa_young()). */
struct damon_pa_access_chk_result {
	/* size of the page that the access check was done for */
	unsigned long page_sz;
	/* whether the page was accessed since the last "mkold" */
	bool accessed;
};
91*4882a593Smuzhiyun
__damon_pa_young(struct page * page,struct vm_area_struct * vma,unsigned long addr,void * arg)92*4882a593Smuzhiyun static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
93*4882a593Smuzhiyun unsigned long addr, void *arg)
94*4882a593Smuzhiyun {
95*4882a593Smuzhiyun struct damon_pa_access_chk_result *result = arg;
96*4882a593Smuzhiyun struct page_vma_mapped_walk pvmw = {
97*4882a593Smuzhiyun .page = page,
98*4882a593Smuzhiyun .vma = vma,
99*4882a593Smuzhiyun .address = addr,
100*4882a593Smuzhiyun };
101*4882a593Smuzhiyun
102*4882a593Smuzhiyun result->accessed = false;
103*4882a593Smuzhiyun result->page_sz = PAGE_SIZE;
104*4882a593Smuzhiyun while (page_vma_mapped_walk(&pvmw)) {
105*4882a593Smuzhiyun addr = pvmw.address;
106*4882a593Smuzhiyun if (pvmw.pte) {
107*4882a593Smuzhiyun result->accessed = pte_young(*pvmw.pte) ||
108*4882a593Smuzhiyun !page_is_idle(page) ||
109*4882a593Smuzhiyun mmu_notifier_test_young(vma->vm_mm, addr);
110*4882a593Smuzhiyun } else {
111*4882a593Smuzhiyun #ifdef CONFIG_TRANSPARENT_HUGEPAGE
112*4882a593Smuzhiyun result->accessed = pmd_young(*pvmw.pmd) ||
113*4882a593Smuzhiyun !page_is_idle(page) ||
114*4882a593Smuzhiyun mmu_notifier_test_young(vma->vm_mm, addr);
115*4882a593Smuzhiyun result->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
116*4882a593Smuzhiyun #else
117*4882a593Smuzhiyun WARN_ON_ONCE(1);
118*4882a593Smuzhiyun #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
119*4882a593Smuzhiyun }
120*4882a593Smuzhiyun if (result->accessed) {
121*4882a593Smuzhiyun page_vma_mapped_walk_done(&pvmw);
122*4882a593Smuzhiyun break;
123*4882a593Smuzhiyun }
124*4882a593Smuzhiyun }
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun /* If accessed, stop walking */
127*4882a593Smuzhiyun return !result->accessed;
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun
damon_pa_young(unsigned long paddr,unsigned long * page_sz)130*4882a593Smuzhiyun static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
131*4882a593Smuzhiyun {
132*4882a593Smuzhiyun struct page *page = damon_get_page(PHYS_PFN(paddr));
133*4882a593Smuzhiyun struct damon_pa_access_chk_result result = {
134*4882a593Smuzhiyun .page_sz = PAGE_SIZE,
135*4882a593Smuzhiyun .accessed = false,
136*4882a593Smuzhiyun };
137*4882a593Smuzhiyun struct rmap_walk_control rwc = {
138*4882a593Smuzhiyun .arg = &result,
139*4882a593Smuzhiyun .rmap_one = __damon_pa_young,
140*4882a593Smuzhiyun .anon_lock = page_lock_anon_vma_read,
141*4882a593Smuzhiyun };
142*4882a593Smuzhiyun bool need_lock;
143*4882a593Smuzhiyun
144*4882a593Smuzhiyun if (!page)
145*4882a593Smuzhiyun return false;
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun if (!page_mapped(page) || !page_rmapping(page)) {
148*4882a593Smuzhiyun if (page_is_idle(page))
149*4882a593Smuzhiyun result.accessed = false;
150*4882a593Smuzhiyun else
151*4882a593Smuzhiyun result.accessed = true;
152*4882a593Smuzhiyun put_page(page);
153*4882a593Smuzhiyun goto out;
154*4882a593Smuzhiyun }
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun need_lock = !PageAnon(page) || PageKsm(page);
157*4882a593Smuzhiyun if (need_lock && !trylock_page(page)) {
158*4882a593Smuzhiyun put_page(page);
159*4882a593Smuzhiyun return NULL;
160*4882a593Smuzhiyun }
161*4882a593Smuzhiyun
162*4882a593Smuzhiyun rmap_walk(page, &rwc);
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun if (need_lock)
165*4882a593Smuzhiyun unlock_page(page);
166*4882a593Smuzhiyun put_page(page);
167*4882a593Smuzhiyun
168*4882a593Smuzhiyun out:
169*4882a593Smuzhiyun *page_sz = result.page_sz;
170*4882a593Smuzhiyun return result.accessed;
171*4882a593Smuzhiyun }
172*4882a593Smuzhiyun
__damon_pa_check_access(struct damon_ctx * ctx,struct damon_region * r)173*4882a593Smuzhiyun static void __damon_pa_check_access(struct damon_ctx *ctx,
174*4882a593Smuzhiyun struct damon_region *r)
175*4882a593Smuzhiyun {
176*4882a593Smuzhiyun static unsigned long last_addr;
177*4882a593Smuzhiyun static unsigned long last_page_sz = PAGE_SIZE;
178*4882a593Smuzhiyun static bool last_accessed;
179*4882a593Smuzhiyun
180*4882a593Smuzhiyun /* If the region is in the last checked page, reuse the result */
181*4882a593Smuzhiyun if (ALIGN_DOWN(last_addr, last_page_sz) ==
182*4882a593Smuzhiyun ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
183*4882a593Smuzhiyun if (last_accessed)
184*4882a593Smuzhiyun r->nr_accesses++;
185*4882a593Smuzhiyun return;
186*4882a593Smuzhiyun }
187*4882a593Smuzhiyun
188*4882a593Smuzhiyun last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
189*4882a593Smuzhiyun if (last_accessed)
190*4882a593Smuzhiyun r->nr_accesses++;
191*4882a593Smuzhiyun
192*4882a593Smuzhiyun last_addr = r->sampling_addr;
193*4882a593Smuzhiyun }
194*4882a593Smuzhiyun
damon_pa_check_accesses(struct damon_ctx * ctx)195*4882a593Smuzhiyun static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
196*4882a593Smuzhiyun {
197*4882a593Smuzhiyun struct damon_target *t;
198*4882a593Smuzhiyun struct damon_region *r;
199*4882a593Smuzhiyun unsigned int max_nr_accesses = 0;
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun damon_for_each_target(t, ctx) {
202*4882a593Smuzhiyun damon_for_each_region(r, t) {
203*4882a593Smuzhiyun __damon_pa_check_access(ctx, r);
204*4882a593Smuzhiyun max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
205*4882a593Smuzhiyun }
206*4882a593Smuzhiyun }
207*4882a593Smuzhiyun
208*4882a593Smuzhiyun return max_nr_accesses;
209*4882a593Smuzhiyun }
210*4882a593Smuzhiyun
/*
 * The physical address space is always a valid monitoring target: unlike a
 * virtual address space, it is not tied to any process' lifetime.
 */
bool damon_pa_target_valid(void *t)
{
	return true;
}
215*4882a593Smuzhiyun
damon_pa_apply_scheme(struct damon_ctx * ctx,struct damon_target * t,struct damon_region * r,struct damos * scheme)216*4882a593Smuzhiyun static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
217*4882a593Smuzhiyun struct damon_target *t, struct damon_region *r,
218*4882a593Smuzhiyun struct damos *scheme)
219*4882a593Smuzhiyun {
220*4882a593Smuzhiyun unsigned long addr, applied;
221*4882a593Smuzhiyun LIST_HEAD(page_list);
222*4882a593Smuzhiyun
223*4882a593Smuzhiyun if (scheme->action != DAMOS_PAGEOUT)
224*4882a593Smuzhiyun return 0;
225*4882a593Smuzhiyun
226*4882a593Smuzhiyun for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
227*4882a593Smuzhiyun struct page *page = damon_get_page(PHYS_PFN(addr));
228*4882a593Smuzhiyun
229*4882a593Smuzhiyun if (!page)
230*4882a593Smuzhiyun continue;
231*4882a593Smuzhiyun
232*4882a593Smuzhiyun ClearPageReferenced(page);
233*4882a593Smuzhiyun test_and_clear_page_young(page);
234*4882a593Smuzhiyun if (isolate_lru_page(page)) {
235*4882a593Smuzhiyun put_page(page);
236*4882a593Smuzhiyun continue;
237*4882a593Smuzhiyun }
238*4882a593Smuzhiyun if (PageUnevictable(page)) {
239*4882a593Smuzhiyun putback_lru_page(page);
240*4882a593Smuzhiyun } else {
241*4882a593Smuzhiyun list_add(&page->lru, &page_list);
242*4882a593Smuzhiyun put_page(page);
243*4882a593Smuzhiyun }
244*4882a593Smuzhiyun }
245*4882a593Smuzhiyun applied = reclaim_pages(&page_list);
246*4882a593Smuzhiyun cond_resched();
247*4882a593Smuzhiyun return applied * PAGE_SIZE;
248*4882a593Smuzhiyun }
249*4882a593Smuzhiyun
damon_pa_scheme_score(struct damon_ctx * context,struct damon_target * t,struct damon_region * r,struct damos * scheme)250*4882a593Smuzhiyun static int damon_pa_scheme_score(struct damon_ctx *context,
251*4882a593Smuzhiyun struct damon_target *t, struct damon_region *r,
252*4882a593Smuzhiyun struct damos *scheme)
253*4882a593Smuzhiyun {
254*4882a593Smuzhiyun switch (scheme->action) {
255*4882a593Smuzhiyun case DAMOS_PAGEOUT:
256*4882a593Smuzhiyun return damon_pageout_score(context, r, scheme);
257*4882a593Smuzhiyun default:
258*4882a593Smuzhiyun break;
259*4882a593Smuzhiyun }
260*4882a593Smuzhiyun
261*4882a593Smuzhiyun return DAMOS_MAX_SCORE;
262*4882a593Smuzhiyun }
263*4882a593Smuzhiyun
damon_pa_set_primitives(struct damon_ctx * ctx)264*4882a593Smuzhiyun void damon_pa_set_primitives(struct damon_ctx *ctx)
265*4882a593Smuzhiyun {
266*4882a593Smuzhiyun ctx->primitive.init = NULL;
267*4882a593Smuzhiyun ctx->primitive.update = NULL;
268*4882a593Smuzhiyun ctx->primitive.prepare_access_checks = damon_pa_prepare_access_checks;
269*4882a593Smuzhiyun ctx->primitive.check_accesses = damon_pa_check_accesses;
270*4882a593Smuzhiyun ctx->primitive.reset_aggregated = NULL;
271*4882a593Smuzhiyun ctx->primitive.target_valid = damon_pa_target_valid;
272*4882a593Smuzhiyun ctx->primitive.cleanup = NULL;
273*4882a593Smuzhiyun ctx->primitive.apply_scheme = damon_pa_apply_scheme;
274*4882a593Smuzhiyun ctx->primitive.get_scheme_score = damon_pa_scheme_score;
275*4882a593Smuzhiyun }
276