// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for Virtual Address Spaces
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#define pr_fmt(fmt) "damon-va: " fmt

#include <asm-generic/mman-common.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>

#include "prmtv-common.h"

#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif

/*
 * 't->id' should be the pointer to the relevant 'struct pid' having reference
 * count. Caller must put the returned task, unless it is NULL.
 */
static inline struct task_struct *damon_get_task_struct(struct damon_target *t)
{
	return get_pid_task((struct pid *)t->id, PIDTYPE_PID);
}

/*
 * Get the mm_struct of the given target
 *
 * Caller _must_ put the mm_struct after use, unless it is NULL.
 *
 * Returns the mm_struct of the target on success, NULL on failure
 */
static struct mm_struct *damon_get_mm(struct damon_target *t)
{
	struct task_struct *task;
	struct mm_struct *mm;

	task = damon_get_task_struct(t);
	if (!task)
		return NULL;

	mm = get_task_mm(task);
	put_task_struct(task);
	return mm;
}

/*
 * Functions for the initial monitoring target regions construction
 */

/*
 * Size-evenly split a region into 'nr_pieces' small regions
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int damon_va_evenly_split_region(struct damon_target *t,
		struct damon_region *r, unsigned int nr_pieces)
{
	unsigned long sz_orig, sz_piece, orig_end;
	struct damon_region *n = NULL, *next;
	unsigned long start;

	if (!r || !nr_pieces)
		return -EINVAL;

	orig_end = r->ar.end;
	sz_orig = r->ar.end - r->ar.start;
	sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);

	if (!sz_piece)
		return -EINVAL;

	r->ar.end = r->ar.start + sz_piece;
	next = damon_next_region(r);
	for (start = r->ar.end; start + sz_piece <= orig_end;
			start += sz_piece) {
		n = damon_new_region(start, start + sz_piece);
		if (!n)
			return -ENOMEM;
		damon_insert_region(n, r, next, t);
		r = n;
	}
	/* complement last region for possible rounding error */
	if (n)
		n->ar.end = orig_end;

	return 0;
}

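/* Return the size of the given address range */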
static unsigned long sz_range(struct damon_addr_range *r)
{
	return r->end - r->start;
}

/*
 * Find three regions separated by the two biggest unmapped regions
 *
 * vma		the head vma of the target address space
 * regions	an array of three address ranges in which the results are saved
 *
 * This function receives an address space and finds three regions in it which
 * are separated by the two biggest unmapped regions in the space. Please
 * refer to the comment of the '__damon_va_init_regions()' function below to
 * know why this is necessary.
 *
 * Returns 0 on success, or negative error code otherwise.
 */
static int __damon_va_three_regions(struct vm_area_struct *vma,
				    struct damon_addr_range regions[3])
{
	struct damon_addr_range gap = {0}, first_gap = {0}, second_gap = {0};
	struct vm_area_struct *last_vma = NULL;
	unsigned long start = 0;
	struct rb_root rbroot;

	/* Find two biggest gaps so that first_gap > second_gap > others */
	for (; vma; vma = vma->vm_next) {
		if (!last_vma) {
			start = vma->vm_start;
			goto next;
		}

		if (vma->rb_subtree_gap <= sz_range(&second_gap)) {
			rbroot.rb_node = &vma->vm_rb;
			vma = rb_entry(rb_last(&rbroot),
					struct vm_area_struct, vm_rb);
			goto next;
		}

		gap.start = last_vma->vm_end;
		gap.end = vma->vm_start;
		if (sz_range(&gap) > sz_range(&second_gap)) {
			swap(gap, second_gap);
			if (sz_range(&second_gap) > sz_range(&first_gap))
				swap(second_gap, first_gap);
		}
next:
		last_vma = vma;
	}

	if (!sz_range(&second_gap) || !sz_range(&first_gap))
		return -EINVAL;

	/* Sort the two biggest gaps by address */
	if (first_gap.start > second_gap.start)
		swap(first_gap, second_gap);

	/* Store the result */
	regions[0].start = ALIGN(start, DAMON_MIN_REGION);
	regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);
	regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
	regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
	regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
	regions[2].end = ALIGN(last_vma->vm_end, DAMON_MIN_REGION);

	return 0;
}

/*
 * Get the three regions in the given target (task)
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int damon_va_three_regions(struct damon_target *t,
				struct damon_addr_range regions[3])
{
	struct mm_struct *mm;
	int rc;

	mm = damon_get_mm(t);
	if (!mm)
		return -EINVAL;

	mmap_read_lock(mm);
	rc = __damon_va_three_regions(mm->mmap, regions);
	mmap_read_unlock(mm);

	mmput(mm);
	return rc;
}

/*
 * Initialize the monitoring target regions for the given target (task)
 *
 * t	the given target
 *
 * Because only a small number of portions of the entire address space are
 * actually mapped to memory and accessed, monitoring the unmapped regions is
 * wasteful. On the other hand, because we can tolerate some level of noise,
 * tracking every mapping is not strictly required, and doing so could even
 * incur a high overhead if the mappings change frequently or the number of
 * mappings is high. The adaptive regions adjustment mechanism will further
 * help to deal with the noise by simply identifying the unmapped areas as
 * regions that have no access. Moreover, applying the real mappings, which
 * would have many unmapped areas inside, would make the adaptive mechanism
 * quite complex. Nevertheless, too huge unmapped areas inside the monitoring
 * target should be removed so that the adaptive mechanism does not spend time
 * on them.
 *
 * For this reason, we convert the complex mappings to three distinct regions
 * that cover every mapped area of the address space. The two gaps between
 * the three regions are the two biggest unmapped areas in the given address
 * space. In detail, this function first identifies the start and the end of
 * the mappings and the two biggest unmapped areas of the address space.
 * Then, it constructs the three regions as below:
 *
 *     [mappings[0]->start, big_two_unmapped_areas[0]->start)
 *     [big_two_unmapped_areas[0]->end, big_two_unmapped_areas[1]->start)
 *     [big_two_unmapped_areas[1]->end, mappings[nr_mappings - 1]->end)
 *
 * As the usual memory map of a process is as below, the gap between the heap
 * and the uppermost mmap()-ed region, and the gap between the lowermost
 * mmap()-ed region and the stack, will be the two biggest unmapped regions.
 * Because these gaps are exceptionally huge in a usual address space,
 * excluding these two biggest unmapped regions is sufficient as a trade-off.
 *
 *   <heap>
 *   <BIG UNMAPPED REGION 1>
 *   <uppermost mmap()-ed region>
 *   (other mmap()-ed regions and small unmapped regions)
 *   <lowermost mmap()-ed region>
 *   <BIG UNMAPPED REGION 2>
 *   <stack>
 */
static void __damon_va_init_regions(struct damon_ctx *ctx,
				    struct damon_target *t)
{
	struct damon_target *ti;
	struct damon_region *r;
	struct damon_addr_range regions[3];
	unsigned long sz = 0, nr_pieces;
	int i, tidx = 0;

	if (damon_va_three_regions(t, regions)) {
		damon_for_each_target(ti, ctx) {
			if (ti == t)
				break;
			tidx++;
		}
		pr_debug("Failed to get three regions of %dth target\n", tidx);
		return;
	}

	for (i = 0; i < 3; i++)
		sz += regions[i].end - regions[i].start;
	if (ctx->min_nr_regions)
		sz /= ctx->min_nr_regions;
	if (sz < DAMON_MIN_REGION)
		sz = DAMON_MIN_REGION;

	/* Set the initial three regions of the target */
	for (i = 0; i < 3; i++) {
		r = damon_new_region(regions[i].start, regions[i].end);
		if (!r) {
			pr_err("%d'th init region creation failed\n", i);
			return;
		}
		damon_add_region(r, t);

		nr_pieces = (regions[i].end - regions[i].start) / sz;
		damon_va_evenly_split_region(t, r, nr_pieces);
	}
}

/* Initialize '->regions_list' of every target (task) */
static void damon_va_init(struct damon_ctx *ctx)
{
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		/* the user may set the target regions as they want */
		if (!damon_nr_regions(t))
			__damon_va_init_regions(ctx, t);
	}
}

/*
 * Functions for the dynamic monitoring target regions update
 */

/*
 * Check whether a region is intersecting an address range
 *
 * Returns true if it is.
 */
static bool damon_intersect(struct damon_region *r,
		struct damon_addr_range *re)
{
	return !(r->ar.end <= re->start || re->end <= r->ar.start);
}

/*
 * Update damon regions for the three big regions of the given target
 *
 * t		the given target
 * bregions	the three big regions of the target
 */
static void damon_va_apply_three_regions(struct damon_target *t,
		struct damon_addr_range bregions[3])
{
	struct damon_region *r, *next;
	unsigned int i;

	/* Remove regions which are not in the three big regions now */
	damon_for_each_region_safe(r, next, t) {
		for (i = 0; i < 3; i++) {
			if (damon_intersect(r, &bregions[i]))
				break;
		}
		if (i == 3)
			damon_destroy_region(r, t);
	}

	/* Adjust intersecting regions to fit in the three big regions */
	for (i = 0; i < 3; i++) {
		struct damon_region *first = NULL, *last;
		struct damon_region *newr;
		struct damon_addr_range *br;

		br = &bregions[i];
		/* Get the first and last regions which intersect with br */
		damon_for_each_region(r, t) {
			if (damon_intersect(r, br)) {
				if (!first)
					first = r;
				last = r;
			}
			if (r->ar.start >= br->end)
				break;
		}
		if (!first) {
			/* no damon_region intersects with this big region */
			newr = damon_new_region(
					ALIGN_DOWN(br->start,
						DAMON_MIN_REGION),
					ALIGN(br->end, DAMON_MIN_REGION));
			if (!newr)
				continue;
			damon_insert_region(newr, damon_prev_region(r), r, t);
		} else {
			first->ar.start = ALIGN_DOWN(br->start,
					DAMON_MIN_REGION);
			last->ar.end = ALIGN(br->end, DAMON_MIN_REGION);
		}
	}
}

/*
 * Update regions for current memory mappings
 */
static void damon_va_update(struct damon_ctx *ctx)
{
	struct damon_addr_range three_regions[3];
	struct damon_target *t;

	damon_for_each_target(t, ctx) {
		if (damon_va_three_regions(t, three_regions))
			continue;
		damon_va_apply_three_regions(t, three_regions);
	}
}

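/*
 * Clear the accessed bit of the page table entry that maps 'addr', so that a
 * later check can tell whether the entry was referenced again. Handles both
 * huge and regular PMD mappings.
 */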
static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (!pmd_present(*pmd)) {
			spin_unlock(ptl);
			return 0;
		}

		if (pmd_huge(*pmd)) {
			damon_pmdp_mkold(pmd, walk->mm, addr);
			spin_unlock(ptl);
			return 0;
		}
		spin_unlock(ptl);
	}

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return 0;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	damon_ptep_mkold(pte, walk->mm, addr);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
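/*
 * Clear the young bit of a present hugetlb PTE and mark the backing page as
 * idle, recording a prior reference via the page_idle young flag.
 */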
static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
				struct vm_area_struct *vma, unsigned long addr)
{
	bool referenced = false;
	pte_t entry = huge_ptep_get(pte);
	struct page *page = pte_page(entry);

	if (!page)
		return;

	get_page(page);

	if (pte_young(entry)) {
		referenced = true;
		entry = pte_mkold(entry);
		set_huge_pte_at(mm, addr, pte, entry);
	}

#ifdef CONFIG_MMU_NOTIFIER
	if (mmu_notifier_clear_young(mm, addr,
				     addr + huge_page_size(hstate_vma(vma))))
		referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

	if (referenced)
		set_page_young(page);

	set_page_idle(page);
	put_page(page);
}

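/* hugetlb_entry callback of damon_mkold_ops */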
static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(walk->vma);
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	damon_hugetlb_mkold(pte, walk->mm, walk->vma, addr);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_mkold_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_mkold_ops = {
	.pmd_entry = damon_mkold_pmd_entry,
	.hugetlb_entry = damon_mkold_hugetlb_entry,
};

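/*
 * Walk the page table of 'mm' and clear the accessed bit of the entry that
 * maps 'addr', under the mmap read lock.
 */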
static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
	mmap_read_unlock(mm);
}

/*
 * Functions for the access checking of the regions
 */

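/*
 * Prepare the access check of a region by picking a random sampling address
 * in it and clearing the accessed bit of the mapping for that address.
 */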
static void __damon_va_prepare_access_check(struct damon_ctx *ctx,
			struct mm_struct *mm, struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_va_mkold(mm, r->sampling_addr);
}

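/* Prepare the access checks of all regions of all targets */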
static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t)
			__damon_va_prepare_access_check(ctx, mm, r);
		mmput(mm);
	}
}

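/*
 * Private data for damon_young_ops walks: '->young' is set if the checked
 * address was found accessed, and the size of the page containing it is
 * stored via '->page_sz'.
 */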
struct damon_young_walk_private {
	unsigned long *page_sz;
	bool young;
};

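/*
 * Check whether the PMD- or PTE-level entry mapping 'addr' was accessed since
 * the last mkold, using the accessed bit, the page idle flag, and MMU
 * notifiers.
 */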
static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
		unsigned long next, struct mm_walk *walk)
{
	pte_t *pte;
	spinlock_t *ptl;
	struct page *page;
	struct damon_young_walk_private *priv = walk->private;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (!pmd_present(*pmd)) {
			spin_unlock(ptl);
			return 0;
		}

		if (!pmd_huge(*pmd)) {
			spin_unlock(ptl);
			goto regular_page;
		}
		page = damon_get_page(pmd_pfn(*pmd));
		if (!page)
			goto huge_out;
		if (pmd_young(*pmd) || !page_is_idle(page) ||
				mmu_notifier_test_young(walk->mm, addr)) {
			*priv->page_sz = ((1UL) << HPAGE_PMD_SHIFT);
			priv->young = true;
		}
		put_page(page);
huge_out:
		spin_unlock(ptl);
		return 0;
	}

regular_page:
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return -EINVAL;
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte_present(*pte))
		goto out;
	page = damon_get_page(pte_pfn(*pte));
	if (!page)
		goto out;
	if (pte_young(*pte) || !page_is_idle(page) ||
			mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = PAGE_SIZE;
		priv->young = true;
	}
	put_page(page);
out:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
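/* hugetlb_entry callback of damon_young_ops */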
static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct damon_young_walk_private *priv = walk->private;
	struct hstate *h = hstate_vma(walk->vma);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	page = pte_page(entry);
	if (!page)
		goto out;

	get_page(page);

	if (pte_young(entry) || !page_is_idle(page) ||
	    mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = huge_page_size(h);
		priv->young = true;
	}

	put_page(page);

out:
	spin_unlock(ptl);
	return 0;
}
#else
#define damon_young_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static const struct mm_walk_ops damon_young_ops = {
	.pmd_entry = damon_young_pmd_entry,
	.hugetlb_entry = damon_young_hugetlb_entry,
};

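/*
 * Check whether the page containing 'addr' was accessed after the last
 * damon_va_mkold() call for it. If so, '*page_sz' is set to the size of the
 * page. Returns true if the page was accessed.
 */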
static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
		unsigned long *page_sz)
{
	struct damon_young_walk_private arg = {
		.page_sz = page_sz,
		.young = false,
	};

	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
	mmap_read_unlock(mm);
	return arg.young;
}

/*
 * Check whether the region was accessed after the last preparation
 *
 * mm	'mm_struct' for the given virtual address space
 * r	the region to be checked
 */
static void __damon_va_check_access(struct damon_ctx *ctx,
				struct mm_struct *mm, struct damon_region *r)
{
	static struct mm_struct *last_mm;
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (mm == last_mm && (ALIGN_DOWN(last_addr, last_page_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_mm = mm;
	last_addr = r->sampling_addr;
}

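/*
 * Check the access status of all regions of all targets and return the
 * maximum 'nr_accesses' among the checked regions.
 */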
static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct mm_struct *mm;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		mm = damon_get_mm(t);
		if (!mm)
			continue;
		damon_for_each_region(r, t) {
			__damon_va_check_access(ctx, mm, r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
		mmput(mm);
	}

	return max_nr_accesses;
}

/*
 * Functions for the target validity check and cleanup
 */

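/* Return whether the task of the given target still exists */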
bool damon_va_target_valid(void *target)
{
	struct damon_target *t = target;
	struct task_struct *task;

	task = damon_get_task_struct(t);
	if (task) {
		put_task_struct(task);
		return true;
	}

	return false;
}

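/*
 * Apply the given madvise() behavior to the region of the target. Returns
 * the number of bytes the advice was applied to, or zero if madvise() is not
 * supported or fails.
 */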
#ifndef CONFIG_ADVISE_SYSCALLS
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	return 0;
}
#else
static unsigned long damos_madvise(struct damon_target *target,
		struct damon_region *r, int behavior)
{
	struct mm_struct *mm;
	unsigned long start = PAGE_ALIGN(r->ar.start);
	unsigned long len = PAGE_ALIGN(r->ar.end - r->ar.start);
	unsigned long applied;

	mm = damon_get_mm(target);
	if (!mm)
		return 0;

	applied = do_madvise(mm, start, len, behavior) ? 0 : len;
	mmput(mm);

	return applied;
}
#endif /* CONFIG_ADVISE_SYSCALLS */

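/*
 * Apply the DAMOS action of the given scheme to the region by translating it
 * to the corresponding madvise() behavior. DAMOS_STAT and unknown actions
 * are no-ops.
 */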
static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	int madv_action;

	switch (scheme->action) {
	case DAMOS_WILLNEED:
		madv_action = MADV_WILLNEED;
		break;
	case DAMOS_COLD:
		madv_action = MADV_COLD;
		break;
	case DAMOS_PAGEOUT:
		madv_action = MADV_PAGEOUT;
		break;
	case DAMOS_HUGEPAGE:
		madv_action = MADV_HUGEPAGE;
		break;
	case DAMOS_NOHUGEPAGE:
		madv_action = MADV_NOHUGEPAGE;
		break;
	case DAMOS_STAT:
		return 0;
	default:
		return 0;
	}

	return damos_madvise(t, r, madv_action);
}

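/*
 * Return the priority score of the region for the scheme's action. Only
 * DAMOS_PAGEOUT has a dedicated scoring function; other actions get the
 * maximum score.
 */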
static int damon_va_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pageout_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

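/* Set the virtual address space monitoring primitives of the context */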
void damon_va_set_primitives(struct damon_ctx *ctx)
{
	ctx->primitive.init = damon_va_init;
	ctx->primitive.update = damon_va_update;
	ctx->primitive.prepare_access_checks = damon_va_prepare_access_checks;
	ctx->primitive.check_accesses = damon_va_check_accesses;
	ctx->primitive.reset_aggregated = NULL;
	ctx->primitive.target_valid = damon_va_target_valid;
	ctx->primitive.cleanup = NULL;
	ctx->primitive.apply_scheme = damon_va_apply_scheme;
	ctx->primitive.get_scheme_score = damon_va_scheme_score;
}

#include "vaddr-test.h"