1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * DAMON-based page reclamation
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Author: SeongJae Park <sj@kernel.org>
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #define pr_fmt(fmt) "damon-reclaim: " fmt
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun #include <linux/damon.h>
11*4882a593Smuzhiyun #include <linux/ioport.h>
12*4882a593Smuzhiyun #include <linux/module.h>
13*4882a593Smuzhiyun #include <linux/sched.h>
14*4882a593Smuzhiyun #include <linux/workqueue.h>
15*4882a593Smuzhiyun
16*4882a593Smuzhiyun #ifdef MODULE_PARAM_PREFIX
17*4882a593Smuzhiyun #undef MODULE_PARAM_PREFIX
18*4882a593Smuzhiyun #endif
19*4882a593Smuzhiyun #define MODULE_PARAM_PREFIX "damon_reclaim."
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun /*
22*4882a593Smuzhiyun * Enable or disable DAMON_RECLAIM.
23*4882a593Smuzhiyun *
 * You can enable DAMON_RECLAIM by setting the value of this parameter as ``Y``.
25*4882a593Smuzhiyun * Setting it as ``N`` disables DAMON_RECLAIM. Note that DAMON_RECLAIM could
26*4882a593Smuzhiyun * do no real monitoring and reclamation due to the watermarks-based activation
27*4882a593Smuzhiyun * condition. Refer to below descriptions for the watermarks parameter for
28*4882a593Smuzhiyun * this.
29*4882a593Smuzhiyun */
30*4882a593Smuzhiyun static bool enabled __read_mostly;
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun /*
33*4882a593Smuzhiyun * Time threshold for cold memory regions identification in microseconds.
34*4882a593Smuzhiyun *
35*4882a593Smuzhiyun * If a memory region is not accessed for this or longer time, DAMON_RECLAIM
36*4882a593Smuzhiyun * identifies the region as cold, and reclaims. 120 seconds by default.
37*4882a593Smuzhiyun */
38*4882a593Smuzhiyun static unsigned long min_age __read_mostly = 120000000;
39*4882a593Smuzhiyun module_param(min_age, ulong, 0600);
40*4882a593Smuzhiyun
41*4882a593Smuzhiyun /*
42*4882a593Smuzhiyun * Limit of time for trying the reclamation in milliseconds.
43*4882a593Smuzhiyun *
44*4882a593Smuzhiyun * DAMON_RECLAIM tries to use only up to this time within a time window
45*4882a593Smuzhiyun * (quota_reset_interval_ms) for trying reclamation of cold pages. This can be
46*4882a593Smuzhiyun * used for limiting CPU consumption of DAMON_RECLAIM. If the value is zero,
47*4882a593Smuzhiyun * the limit is disabled.
48*4882a593Smuzhiyun *
49*4882a593Smuzhiyun * 10 ms by default.
50*4882a593Smuzhiyun */
51*4882a593Smuzhiyun static unsigned long quota_ms __read_mostly = 10;
52*4882a593Smuzhiyun module_param(quota_ms, ulong, 0600);
53*4882a593Smuzhiyun
54*4882a593Smuzhiyun /*
55*4882a593Smuzhiyun * Limit of size of memory for the reclamation in bytes.
56*4882a593Smuzhiyun *
57*4882a593Smuzhiyun * DAMON_RECLAIM charges amount of memory which it tried to reclaim within a
 * time window (quota_reset_interval_ms) and makes sure that no more than this limit is
59*4882a593Smuzhiyun * tried. This can be used for limiting consumption of CPU and IO. If this
60*4882a593Smuzhiyun * value is zero, the limit is disabled.
61*4882a593Smuzhiyun *
62*4882a593Smuzhiyun * 128 MiB by default.
63*4882a593Smuzhiyun */
64*4882a593Smuzhiyun static unsigned long quota_sz __read_mostly = 128 * 1024 * 1024;
65*4882a593Smuzhiyun module_param(quota_sz, ulong, 0600);
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun /*
68*4882a593Smuzhiyun * The time/size quota charge reset interval in milliseconds.
69*4882a593Smuzhiyun *
70*4882a593Smuzhiyun * The charge reset interval for the quota of time (quota_ms) and size
71*4882a593Smuzhiyun * (quota_sz). That is, DAMON_RECLAIM does not try reclamation for more than
72*4882a593Smuzhiyun * quota_ms milliseconds or quota_sz bytes within quota_reset_interval_ms
73*4882a593Smuzhiyun * milliseconds.
74*4882a593Smuzhiyun *
75*4882a593Smuzhiyun * 1 second by default.
76*4882a593Smuzhiyun */
77*4882a593Smuzhiyun static unsigned long quota_reset_interval_ms __read_mostly = 1000;
78*4882a593Smuzhiyun module_param(quota_reset_interval_ms, ulong, 0600);
79*4882a593Smuzhiyun
80*4882a593Smuzhiyun /*
81*4882a593Smuzhiyun * The watermarks check time interval in microseconds.
82*4882a593Smuzhiyun *
83*4882a593Smuzhiyun * Minimal time to wait before checking the watermarks, when DAMON_RECLAIM is
84*4882a593Smuzhiyun * enabled but inactive due to its watermarks rule. 5 seconds by default.
85*4882a593Smuzhiyun */
86*4882a593Smuzhiyun static unsigned long wmarks_interval __read_mostly = 5000000;
87*4882a593Smuzhiyun module_param(wmarks_interval, ulong, 0600);
88*4882a593Smuzhiyun
89*4882a593Smuzhiyun /*
90*4882a593Smuzhiyun * Free memory rate (per thousand) for the high watermark.
91*4882a593Smuzhiyun *
92*4882a593Smuzhiyun * If free memory of the system in bytes per thousand bytes is higher than
93*4882a593Smuzhiyun * this, DAMON_RECLAIM becomes inactive, so it does nothing but periodically
94*4882a593Smuzhiyun * checks the watermarks. 500 (50%) by default.
95*4882a593Smuzhiyun */
96*4882a593Smuzhiyun static unsigned long wmarks_high __read_mostly = 500;
97*4882a593Smuzhiyun module_param(wmarks_high, ulong, 0600);
98*4882a593Smuzhiyun
99*4882a593Smuzhiyun /*
100*4882a593Smuzhiyun * Free memory rate (per thousand) for the middle watermark.
101*4882a593Smuzhiyun *
102*4882a593Smuzhiyun * If free memory of the system in bytes per thousand bytes is between this and
103*4882a593Smuzhiyun * the low watermark, DAMON_RECLAIM becomes active, so starts the monitoring
104*4882a593Smuzhiyun * and the reclaiming. 400 (40%) by default.
105*4882a593Smuzhiyun */
106*4882a593Smuzhiyun static unsigned long wmarks_mid __read_mostly = 400;
107*4882a593Smuzhiyun module_param(wmarks_mid, ulong, 0600);
108*4882a593Smuzhiyun
109*4882a593Smuzhiyun /*
110*4882a593Smuzhiyun * Free memory rate (per thousand) for the low watermark.
111*4882a593Smuzhiyun *
112*4882a593Smuzhiyun * If free memory of the system in bytes per thousand bytes is lower than this,
113*4882a593Smuzhiyun * DAMON_RECLAIM becomes inactive, so it does nothing but periodically checks
114*4882a593Smuzhiyun * the watermarks. In the case, the system falls back to the LRU-based page
115*4882a593Smuzhiyun * granularity reclamation logic. 200 (20%) by default.
116*4882a593Smuzhiyun */
117*4882a593Smuzhiyun static unsigned long wmarks_low __read_mostly = 200;
118*4882a593Smuzhiyun module_param(wmarks_low, ulong, 0600);
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun /*
121*4882a593Smuzhiyun * Sampling interval for the monitoring in microseconds.
122*4882a593Smuzhiyun *
123*4882a593Smuzhiyun * The sampling interval of DAMON for the cold memory monitoring. Please refer
124*4882a593Smuzhiyun * to the DAMON documentation for more detail. 5 ms by default.
125*4882a593Smuzhiyun */
126*4882a593Smuzhiyun static unsigned long sample_interval __read_mostly = 5000;
127*4882a593Smuzhiyun module_param(sample_interval, ulong, 0600);
128*4882a593Smuzhiyun
129*4882a593Smuzhiyun /*
130*4882a593Smuzhiyun * Aggregation interval for the monitoring in microseconds.
131*4882a593Smuzhiyun *
132*4882a593Smuzhiyun * The aggregation interval of DAMON for the cold memory monitoring. Please
133*4882a593Smuzhiyun * refer to the DAMON documentation for more detail. 100 ms by default.
134*4882a593Smuzhiyun */
135*4882a593Smuzhiyun static unsigned long aggr_interval __read_mostly = 100000;
136*4882a593Smuzhiyun module_param(aggr_interval, ulong, 0600);
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun /*
139*4882a593Smuzhiyun * Minimum number of monitoring regions.
140*4882a593Smuzhiyun *
141*4882a593Smuzhiyun * The minimal number of monitoring regions of DAMON for the cold memory
142*4882a593Smuzhiyun * monitoring. This can be used to set lower-bound of the monitoring quality.
143*4882a593Smuzhiyun * But, setting this too high could result in increased monitoring overhead.
144*4882a593Smuzhiyun * Please refer to the DAMON documentation for more detail. 10 by default.
145*4882a593Smuzhiyun */
146*4882a593Smuzhiyun static unsigned long min_nr_regions __read_mostly = 10;
147*4882a593Smuzhiyun module_param(min_nr_regions, ulong, 0600);
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun /*
150*4882a593Smuzhiyun * Maximum number of monitoring regions.
151*4882a593Smuzhiyun *
152*4882a593Smuzhiyun * The maximum number of monitoring regions of DAMON for the cold memory
153*4882a593Smuzhiyun * monitoring. This can be used to set upper-bound of the monitoring overhead.
154*4882a593Smuzhiyun * However, setting this too low could result in bad monitoring quality.
155*4882a593Smuzhiyun * Please refer to the DAMON documentation for more detail. 1000 by default.
156*4882a593Smuzhiyun */
157*4882a593Smuzhiyun static unsigned long max_nr_regions __read_mostly = 1000;
158*4882a593Smuzhiyun module_param(max_nr_regions, ulong, 0600);
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun /*
161*4882a593Smuzhiyun * Start of the target memory region in physical address.
162*4882a593Smuzhiyun *
163*4882a593Smuzhiyun * The start physical address of memory region that DAMON_RECLAIM will do work
164*4882a593Smuzhiyun * against. By default, biggest System RAM is used as the region.
165*4882a593Smuzhiyun */
166*4882a593Smuzhiyun static unsigned long monitor_region_start __read_mostly;
167*4882a593Smuzhiyun module_param(monitor_region_start, ulong, 0600);
168*4882a593Smuzhiyun
169*4882a593Smuzhiyun /*
170*4882a593Smuzhiyun * End of the target memory region in physical address.
171*4882a593Smuzhiyun *
172*4882a593Smuzhiyun * The end physical address of memory region that DAMON_RECLAIM will do work
173*4882a593Smuzhiyun * against. By default, biggest System RAM is used as the region.
174*4882a593Smuzhiyun */
175*4882a593Smuzhiyun static unsigned long monitor_region_end __read_mostly;
176*4882a593Smuzhiyun module_param(monitor_region_end, ulong, 0600);
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun /*
179*4882a593Smuzhiyun * PID of the DAMON thread
180*4882a593Smuzhiyun *
181*4882a593Smuzhiyun * If DAMON_RECLAIM is enabled, this becomes the PID of the worker thread.
182*4882a593Smuzhiyun * Else, -1.
183*4882a593Smuzhiyun */
184*4882a593Smuzhiyun static int kdamond_pid __read_mostly = -1;
185*4882a593Smuzhiyun module_param(kdamond_pid, int, 0400);
186*4882a593Smuzhiyun
187*4882a593Smuzhiyun /*
188*4882a593Smuzhiyun * Number of memory regions that tried to be reclaimed.
189*4882a593Smuzhiyun */
190*4882a593Smuzhiyun static unsigned long nr_reclaim_tried_regions __read_mostly;
191*4882a593Smuzhiyun module_param(nr_reclaim_tried_regions, ulong, 0400);
192*4882a593Smuzhiyun
193*4882a593Smuzhiyun /*
194*4882a593Smuzhiyun * Total bytes of memory regions that tried to be reclaimed.
195*4882a593Smuzhiyun */
196*4882a593Smuzhiyun static unsigned long bytes_reclaim_tried_regions __read_mostly;
197*4882a593Smuzhiyun module_param(bytes_reclaim_tried_regions, ulong, 0400);
198*4882a593Smuzhiyun
199*4882a593Smuzhiyun /*
 * Number of memory regions that were successfully reclaimed.
201*4882a593Smuzhiyun */
202*4882a593Smuzhiyun static unsigned long nr_reclaimed_regions __read_mostly;
203*4882a593Smuzhiyun module_param(nr_reclaimed_regions, ulong, 0400);
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun /*
 * Total bytes of memory regions that were successfully reclaimed.
207*4882a593Smuzhiyun */
208*4882a593Smuzhiyun static unsigned long bytes_reclaimed_regions __read_mostly;
209*4882a593Smuzhiyun module_param(bytes_reclaimed_regions, ulong, 0400);
210*4882a593Smuzhiyun
211*4882a593Smuzhiyun /*
212*4882a593Smuzhiyun * Number of times that the time/space quota limits have exceeded
213*4882a593Smuzhiyun */
214*4882a593Smuzhiyun static unsigned long nr_quota_exceeds __read_mostly;
215*4882a593Smuzhiyun module_param(nr_quota_exceeds, ulong, 0400);
216*4882a593Smuzhiyun
217*4882a593Smuzhiyun static struct damon_ctx *ctx;
218*4882a593Smuzhiyun static struct damon_target *target;
219*4882a593Smuzhiyun
/*
 * Walk state for finding the biggest 'System RAM' resource: holds the
 * [start, end) physical address range of the largest resource seen so far.
 */
struct damon_reclaim_ram_walk_arg {
	unsigned long start;
	unsigned long end;
};
224*4882a593Smuzhiyun
walk_system_ram(struct resource * res,void * arg)225*4882a593Smuzhiyun static int walk_system_ram(struct resource *res, void *arg)
226*4882a593Smuzhiyun {
227*4882a593Smuzhiyun struct damon_reclaim_ram_walk_arg *a = arg;
228*4882a593Smuzhiyun
229*4882a593Smuzhiyun if (a->end - a->start < res->end - res->start) {
230*4882a593Smuzhiyun a->start = res->start;
231*4882a593Smuzhiyun a->end = res->end;
232*4882a593Smuzhiyun }
233*4882a593Smuzhiyun return 0;
234*4882a593Smuzhiyun }
235*4882a593Smuzhiyun
236*4882a593Smuzhiyun /*
237*4882a593Smuzhiyun * Find biggest 'System RAM' resource and store its start and end address in
238*4882a593Smuzhiyun * @start and @end, respectively. If no System RAM is found, returns false.
239*4882a593Smuzhiyun */
get_monitoring_region(unsigned long * start,unsigned long * end)240*4882a593Smuzhiyun static bool get_monitoring_region(unsigned long *start, unsigned long *end)
241*4882a593Smuzhiyun {
242*4882a593Smuzhiyun struct damon_reclaim_ram_walk_arg arg = {};
243*4882a593Smuzhiyun
244*4882a593Smuzhiyun walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
245*4882a593Smuzhiyun if (arg.end <= arg.start)
246*4882a593Smuzhiyun return false;
247*4882a593Smuzhiyun
248*4882a593Smuzhiyun *start = arg.start;
249*4882a593Smuzhiyun *end = arg.end;
250*4882a593Smuzhiyun return true;
251*4882a593Smuzhiyun }
252*4882a593Smuzhiyun
/*
 * Construct the DAMOS scheme that implements DAMON_RECLAIM's policy:
 * page out any region that received no access for 'min_age' or more
 * microseconds, limited by the time/size quota and (de)activated by the
 * free memory watermarks, all taken from the module parameters.
 *
 * Returns the new scheme, or NULL on allocation failure.  The caller
 * owns the returned scheme until it is handed to damon_set_schemes().
 */
static struct damos *damon_reclaim_new_scheme(void)
{
	struct damos_watermarks wmarks = {
		.metric = DAMOS_WMARK_FREE_MEM_RATE,
		.interval = wmarks_interval,
		.high = wmarks_high,
		.mid = wmarks_mid,
		.low = wmarks_low,
	};
	struct damos_quota quota = {
		/*
		 * Do not try reclamation for more than quota_ms milliseconds
		 * or quota_sz bytes within quota_reset_interval_ms.
		 */
		.ms = quota_ms,
		.sz = quota_sz,
		.reset_interval = quota_reset_interval_ms,
		/* Within the quota, page out older regions first. */
		.weight_sz = 0,
		.weight_nr_accesses = 0,
		.weight_age = 1
	};
	struct damos *scheme = damon_new_scheme(
			/* Find regions having PAGE_SIZE or larger size */
			PAGE_SIZE, ULONG_MAX,
			/* and not accessed at all */
			0, 0,
			/*
			 * for min_age or more micro-seconds (converted to
			 * aggregation intervals, as DAMOS ages are counted in
			 * those), and
			 */
			min_age / aggr_interval, UINT_MAX,
			/* page out those, as soon as found */
			DAMOS_PAGEOUT,
			/* under the quota. */
			&quota,
			/* (De)activate this according to the watermarks. */
			&wmarks);

	return scheme;
}
291*4882a593Smuzhiyun
/*
 * Turn DAMON_RECLAIM on ('on' == true) or off, by starting or stopping the
 * DAMON context with the current module parameter values.
 *
 * Called from the 'enabled' parameter watcher timer.  Returns 0 on success,
 * or a negative error code; on failure the caller keeps the old state.
 */
static int damon_reclaim_turn(bool on)
{
	struct damon_region *region;
	struct damos *scheme;
	int err;

	if (!on) {
		err = damon_stop(&ctx, 1);
		if (!err)
			kdamond_pid = -1;
		return err;
	}

	err = damon_set_attrs(ctx, sample_interval, aggr_interval, 0,
			min_nr_regions, max_nr_regions);
	if (err)
		return err;

	if (monitor_region_start > monitor_region_end)
		return -EINVAL;
	/* If the user gave no region, fall back to the biggest System RAM. */
	if (!monitor_region_start && !monitor_region_end &&
			!get_monitoring_region(&monitor_region_start,
				&monitor_region_end))
		return -EINVAL;
	/* DAMON will free this on its own when finish monitoring */
	region = damon_new_region(monitor_region_start, monitor_region_end);
	if (!region)
		return -ENOMEM;
	damon_add_region(region, target);

	/* Will be freed by 'damon_set_schemes()' below */
	scheme = damon_reclaim_new_scheme();
	if (!scheme) {
		err = -ENOMEM;
		goto free_region_out;
	}
	err = damon_set_schemes(ctx, &scheme, 1);
	if (err)
		goto free_scheme_out;

	err = damon_start(&ctx, 1);
	if (!err) {
		/* Export the worker thread's PID via the module parameter. */
		kdamond_pid = ctx->kdamond->pid;
		return 0;
	}

free_scheme_out:
	damon_destroy_scheme(scheme);
free_region_out:
	damon_destroy_region(region, target);
	return err;
}
344*4882a593Smuzhiyun
345*4882a593Smuzhiyun #define ENABLE_CHECK_INTERVAL_MS 1000
346*4882a593Smuzhiyun static struct delayed_work damon_reclaim_timer;
damon_reclaim_timer_fn(struct work_struct * work)347*4882a593Smuzhiyun static void damon_reclaim_timer_fn(struct work_struct *work)
348*4882a593Smuzhiyun {
349*4882a593Smuzhiyun static bool last_enabled;
350*4882a593Smuzhiyun bool now_enabled;
351*4882a593Smuzhiyun
352*4882a593Smuzhiyun now_enabled = enabled;
353*4882a593Smuzhiyun if (last_enabled != now_enabled) {
354*4882a593Smuzhiyun if (!damon_reclaim_turn(now_enabled))
355*4882a593Smuzhiyun last_enabled = now_enabled;
356*4882a593Smuzhiyun else
357*4882a593Smuzhiyun enabled = last_enabled;
358*4882a593Smuzhiyun }
359*4882a593Smuzhiyun
360*4882a593Smuzhiyun if (enabled)
361*4882a593Smuzhiyun schedule_delayed_work(&damon_reclaim_timer,
362*4882a593Smuzhiyun msecs_to_jiffies(ENABLE_CHECK_INTERVAL_MS));
363*4882a593Smuzhiyun }
364*4882a593Smuzhiyun static DECLARE_DELAYED_WORK(damon_reclaim_timer, damon_reclaim_timer_fn);
365*4882a593Smuzhiyun
enabled_store(const char * val,const struct kernel_param * kp)366*4882a593Smuzhiyun static int enabled_store(const char *val,
367*4882a593Smuzhiyun const struct kernel_param *kp)
368*4882a593Smuzhiyun {
369*4882a593Smuzhiyun int rc = param_set_bool(val, kp);
370*4882a593Smuzhiyun
371*4882a593Smuzhiyun if (rc < 0)
372*4882a593Smuzhiyun return rc;
373*4882a593Smuzhiyun
374*4882a593Smuzhiyun if (enabled)
375*4882a593Smuzhiyun schedule_delayed_work(&damon_reclaim_timer, 0);
376*4882a593Smuzhiyun
377*4882a593Smuzhiyun return 0;
378*4882a593Smuzhiyun }
379*4882a593Smuzhiyun
380*4882a593Smuzhiyun static const struct kernel_param_ops enabled_param_ops = {
381*4882a593Smuzhiyun .set = enabled_store,
382*4882a593Smuzhiyun .get = param_get_bool,
383*4882a593Smuzhiyun };
384*4882a593Smuzhiyun
385*4882a593Smuzhiyun module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
386*4882a593Smuzhiyun MODULE_PARM_DESC(enabled,
387*4882a593Smuzhiyun "Enable or disable DAMON_RECLAIM (default: disabled)");
388*4882a593Smuzhiyun
damon_reclaim_after_aggregation(struct damon_ctx * c)389*4882a593Smuzhiyun static int damon_reclaim_after_aggregation(struct damon_ctx *c)
390*4882a593Smuzhiyun {
391*4882a593Smuzhiyun struct damos *s;
392*4882a593Smuzhiyun
393*4882a593Smuzhiyun /* update the stats parameter */
394*4882a593Smuzhiyun damon_for_each_scheme(s, c) {
395*4882a593Smuzhiyun nr_reclaim_tried_regions = s->stat.nr_tried;
396*4882a593Smuzhiyun bytes_reclaim_tried_regions = s->stat.sz_tried;
397*4882a593Smuzhiyun nr_reclaimed_regions = s->stat.nr_applied;
398*4882a593Smuzhiyun bytes_reclaimed_regions = s->stat.sz_applied;
399*4882a593Smuzhiyun nr_quota_exceeds = s->stat.qt_exceeds;
400*4882a593Smuzhiyun }
401*4882a593Smuzhiyun return 0;
402*4882a593Smuzhiyun }
403*4882a593Smuzhiyun
damon_reclaim_init(void)404*4882a593Smuzhiyun static int __init damon_reclaim_init(void)
405*4882a593Smuzhiyun {
406*4882a593Smuzhiyun ctx = damon_new_ctx();
407*4882a593Smuzhiyun if (!ctx)
408*4882a593Smuzhiyun return -ENOMEM;
409*4882a593Smuzhiyun
410*4882a593Smuzhiyun damon_pa_set_primitives(ctx);
411*4882a593Smuzhiyun ctx->callback.after_aggregation = damon_reclaim_after_aggregation;
412*4882a593Smuzhiyun
413*4882a593Smuzhiyun /* 4242 means nothing but fun */
414*4882a593Smuzhiyun target = damon_new_target(4242);
415*4882a593Smuzhiyun if (!target) {
416*4882a593Smuzhiyun damon_destroy_ctx(ctx);
417*4882a593Smuzhiyun return -ENOMEM;
418*4882a593Smuzhiyun }
419*4882a593Smuzhiyun damon_add_target(ctx, target);
420*4882a593Smuzhiyun
421*4882a593Smuzhiyun schedule_delayed_work(&damon_reclaim_timer, 0);
422*4882a593Smuzhiyun return 0;
423*4882a593Smuzhiyun }
424*4882a593Smuzhiyun
425*4882a593Smuzhiyun module_init(damon_reclaim_init);
426