// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON-based page reclamation
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-reclaim: " fmt

#include <linux/damon.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "damon_reclaim."
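/*
 * With this prefix, the parameters below are normally given as
 * 'damon_reclaim.<param>' on the kernel command line and exposed under
 * /sys/module/damon_reclaim/parameters/ at runtime.
 */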

/*
 * Enable or disable DAMON_RECLAIM.
 *
 * You can enable DAMON_RECLAIM by setting the value of this parameter as
 * ``Y``. Setting it as ``N`` disables DAMON_RECLAIM. Note that DAMON_RECLAIM
 * could do no real monitoring and reclamation due to the watermarks-based
 * activation condition. Refer to the descriptions of the watermarks
 * parameters below for details.
 */
static bool enabled __read_mostly;
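/*
 * For example, assuming the usual module parameter sysfs layout,
 * DAMON_RECLAIM can be turned on at runtime with:
 *
 *	# echo Y > /sys/module/damon_reclaim/parameters/enabled
 *
 * and turned off again by writing ``N`` to the same file.
 */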

/*
 * Time threshold for cold memory regions identification in microseconds.
 *
 * If a memory region is not accessed for this or longer time, DAMON_RECLAIM
 * identifies the region as cold, and reclaims it. 120 seconds by default.
 */
static unsigned long min_age __read_mostly = 120000000;
module_param(min_age, ulong, 0600);

/*
 * Limit of time for trying the reclamation in milliseconds.
 *
 * DAMON_RECLAIM tries to use only up to this time within a time window
 * (quota_reset_interval_ms) for trying reclamation of cold pages. This can be
 * used for limiting CPU consumption of DAMON_RECLAIM. If the value is zero,
 * the limit is disabled.
 *
 * 10 ms by default.
 */
static unsigned long quota_ms __read_mostly = 10;
module_param(quota_ms, ulong, 0600);

/*
 * Limit of size of memory for the reclamation in bytes.
 *
 * DAMON_RECLAIM charges the amount of memory which it has tried to reclaim
 * within a time window (quota_reset_interval_ms) and does not try reclamation
 * of more than this limit within the window. This can be used for limiting
 * consumption of CPU and IO. If this value is zero, the limit is disabled.
 *
 * 128 MiB by default.
 */
static unsigned long quota_sz __read_mostly = 128 * 1024 * 1024;
module_param(quota_sz, ulong, 0600);

/*
 * The time/size quota charge reset interval in milliseconds.
 *
 * The charge reset interval for the quota of time (quota_ms) and size
 * (quota_sz). That is, DAMON_RECLAIM does not try reclamation for more than
 * quota_ms milliseconds or quota_sz bytes within quota_reset_interval_ms
 * milliseconds.
 *
 * 1 second by default.
 */
static unsigned long quota_reset_interval_ms __read_mostly = 1000;
module_param(quota_reset_interval_ms, ulong, 0600);
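/*
 * With the default values above, for example, DAMON_RECLAIM tries reclamation
 * for at most 10 ms and against at most 128 MiB within every 1 second window.
 */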

/*
 * The watermarks check time interval in microseconds.
 *
 * Minimal time to wait before checking the watermarks, when DAMON_RECLAIM is
 * enabled but inactive due to its watermarks rule. 5 seconds by default.
 */
static unsigned long wmarks_interval __read_mostly = 5000000;
module_param(wmarks_interval, ulong, 0600);

/*
 * Free memory rate (per thousand) for the high watermark.
 *
 * If free memory of the system in bytes per thousand bytes is higher than
 * this, DAMON_RECLAIM becomes inactive, so it does nothing but periodically
 * check the watermarks. 500 (50%) by default.
 */
static unsigned long wmarks_high __read_mostly = 500;
module_param(wmarks_high, ulong, 0600);

/*
 * Free memory rate (per thousand) for the middle watermark.
 *
 * If free memory of the system in bytes per thousand bytes is between this and
 * the low watermark, DAMON_RECLAIM becomes active, so it starts the monitoring
 * and the reclaiming. 400 (40%) by default.
 */
static unsigned long wmarks_mid __read_mostly = 400;
module_param(wmarks_mid, ulong, 0600);

/*
 * Free memory rate (per thousand) for the low watermark.
 *
 * If free memory of the system in bytes per thousand bytes is lower than this,
 * DAMON_RECLAIM becomes inactive, so it does nothing but periodically check
 * the watermarks. In that case, the system falls back to the LRU-based page
 * granularity reclamation logic. 200 (20%) by default.
 */
static unsigned long wmarks_low __read_mostly = 200;
module_param(wmarks_low, ulong, 0600);
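/*
 * With the default watermarks above, for example, DAMON_RECLAIM starts real
 * monitoring and reclamation once free memory drops below 40% of the system
 * memory, and becomes inactive again once it rises above 50% or drops below
 * 20%.
 */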

/*
 * Sampling interval for the monitoring in microseconds.
 *
 * The sampling interval of DAMON for the cold memory monitoring. Please refer
 * to the DAMON documentation for more detail. 5 ms by default.
 */
static unsigned long sample_interval __read_mostly = 5000;
module_param(sample_interval, ulong, 0600);

/*
 * Aggregation interval for the monitoring in microseconds.
 *
 * The aggregation interval of DAMON for the cold memory monitoring. Please
 * refer to the DAMON documentation for more detail. 100 ms by default.
 */
static unsigned long aggr_interval __read_mostly = 100000;
module_param(aggr_interval, ulong, 0600);

/*
 * Minimum number of monitoring regions.
 *
 * The minimal number of monitoring regions of DAMON for the cold memory
 * monitoring. This can be used to set a lower bound on the monitoring
 * quality. But, setting this too high could result in increased monitoring
 * overhead. Please refer to the DAMON documentation for more detail. 10 by
 * default.
 */
static unsigned long min_nr_regions __read_mostly = 10;
module_param(min_nr_regions, ulong, 0600);

/*
 * Maximum number of monitoring regions.
 *
 * The maximum number of monitoring regions of DAMON for the cold memory
 * monitoring. This can be used to set an upper bound on the monitoring
 * overhead. However, setting this too low could result in bad monitoring
 * quality. Please refer to the DAMON documentation for more detail. 1000 by
 * default.
 */
static unsigned long max_nr_regions __read_mostly = 1000;
module_param(max_nr_regions, ulong, 0600);

/*
 * Start of the target memory region in physical address.
 *
 * The start physical address of the memory region that DAMON_RECLAIM will do
 * work against. By default, the biggest System RAM region is used as the
 * target region.
 */
static unsigned long monitor_region_start __read_mostly;
module_param(monitor_region_start, ulong, 0600);

/*
 * End of the target memory region in physical address.
 *
 * The end physical address of the memory region that DAMON_RECLAIM will do
 * work against. By default, the biggest System RAM region is used as the
 * target region.
 */
static unsigned long monitor_region_end __read_mostly;
module_param(monitor_region_end, ulong, 0600);
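/*
 * For example, setting monitor_region_start=0x100000000 and
 * monitor_region_end=0x200000000 (hypothetical addresses) would make
 * DAMON_RECLAIM work against only the 4 GiB to 8 GiB physical address range.
 */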

/*
 * PID of the DAMON thread
 *
 * If DAMON_RECLAIM is enabled, this becomes the PID of the worker thread.
 * Else, -1.
 */
static int kdamond_pid __read_mostly = -1;
module_param(kdamond_pid, int, 0400);

/*
 * Number of memory regions that reclamation was tried against.
 */
static unsigned long nr_reclaim_tried_regions __read_mostly;
module_param(nr_reclaim_tried_regions, ulong, 0400);

/*
 * Total bytes of memory regions that reclamation was tried against.
 */
static unsigned long bytes_reclaim_tried_regions __read_mostly;
module_param(bytes_reclaim_tried_regions, ulong, 0400);

/*
 * Number of memory regions that were successfully reclaimed.
 */
static unsigned long nr_reclaimed_regions __read_mostly;
module_param(nr_reclaimed_regions, ulong, 0400);

/*
 * Total bytes of memory regions that were successfully reclaimed.
 */
static unsigned long bytes_reclaimed_regions __read_mostly;
module_param(bytes_reclaimed_regions, ulong, 0400);

/*
 * Number of times that the time/space quota limits have been exceeded.
 */
static unsigned long nr_quota_exceeds __read_mostly;
module_param(nr_quota_exceeds, ulong, 0400);
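/*
 * The above statistics are exposed as read-only (0400) parameters; assuming
 * the usual module parameter sysfs layout, they can be read from files such
 * as /sys/module/damon_reclaim/parameters/nr_reclaimed_regions.
 */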

static struct damon_ctx *ctx;
static struct damon_target *target;

struct damon_reclaim_ram_walk_arg {
	unsigned long start;
	unsigned long end;
};

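/*
 * walk_system_ram_res() callback: remember the biggest 'System RAM' resource
 * seen so far in the walk argument.
 */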
static int walk_system_ram(struct resource *res, void *arg)
{
	struct damon_reclaim_ram_walk_arg *a = arg;

	if (a->end - a->start < res->end - res->start) {
		a->start = res->start;
		a->end = res->end;
	}
	return 0;
}

/*
 * Find biggest 'System RAM' resource and store its start and end address in
 * @start and @end, respectively. If no System RAM is found, returns false.
 */
static bool get_monitoring_region(unsigned long *start, unsigned long *end)
{
	struct damon_reclaim_ram_walk_arg arg = {};

	walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
	if (arg.end <= arg.start)
		return false;

	*start = arg.start;
	*end = arg.end;
	return true;
}

static struct damos *damon_reclaim_new_scheme(void)
{
	struct damos_watermarks wmarks = {
		.metric = DAMOS_WMARK_FREE_MEM_RATE,
		.interval = wmarks_interval,
		.high = wmarks_high,
		.mid = wmarks_mid,
		.low = wmarks_low,
	};
	struct damos_quota quota = {
		/*
		 * Do not try reclamation for more than quota_ms milliseconds
		 * or quota_sz bytes within quota_reset_interval_ms.
		 */
		.ms = quota_ms,
		.sz = quota_sz,
		.reset_interval = quota_reset_interval_ms,
		/* Within the quota, page out older regions first. */
		.weight_sz = 0,
		.weight_nr_accesses = 0,
		.weight_age = 1
	};
	struct damos *scheme = damon_new_scheme(
			/* Find regions having PAGE_SIZE or larger size */
			PAGE_SIZE, ULONG_MAX,
			/* and not accessed at all */
			0, 0,
			/* for min_age or more micro-seconds, and */
			min_age / aggr_interval, UINT_MAX,
			/* page out those, as soon as found */
			DAMOS_PAGEOUT,
			/* under the quota. */
			&quota,
			/* (De)activate this according to the watermarks. */
			&wmarks);

	return scheme;
}

static int damon_reclaim_turn(bool on)
{
	struct damon_region *region;
	struct damos *scheme;
	int err;

	if (!on) {
		err = damon_stop(&ctx, 1);
		if (!err)
			kdamond_pid = -1;
		return err;
	}

	err = damon_set_attrs(ctx, sample_interval, aggr_interval, 0,
			min_nr_regions, max_nr_regions);
	if (err)
		return err;

	if (monitor_region_start > monitor_region_end)
		return -EINVAL;
	if (!monitor_region_start && !monitor_region_end &&
			!get_monitoring_region(&monitor_region_start,
				&monitor_region_end))
		return -EINVAL;
	/* DAMON will free this on its own when it finishes monitoring */
	region = damon_new_region(monitor_region_start, monitor_region_end);
	if (!region)
		return -ENOMEM;
	damon_add_region(region, target);

	/* Will be freed by 'damon_set_schemes()' below */
	scheme = damon_reclaim_new_scheme();
	if (!scheme) {
		err = -ENOMEM;
		goto free_region_out;
	}
	err = damon_set_schemes(ctx, &scheme, 1);
	if (err)
		goto free_scheme_out;

	err = damon_start(&ctx, 1);
	if (!err) {
		kdamond_pid = ctx->kdamond->pid;
		return 0;
	}

free_scheme_out:
	damon_destroy_scheme(scheme);
free_region_out:
	damon_destroy_region(region, target);
	return err;
}

#define ENABLE_CHECK_INTERVAL_MS	1000
static struct delayed_work damon_reclaim_timer;
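/*
 * Periodically check whether the 'enabled' parameter has been flipped, and
 * turn DAMON_RECLAIM on or off accordingly. If turning it on or off fails,
 * roll 'enabled' back to the last successfully applied value. The work
 * re-arms itself while DAMON_RECLAIM stays enabled.
 */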
static void damon_reclaim_timer_fn(struct work_struct *work)
{
	static bool last_enabled;
	bool now_enabled;

	now_enabled = enabled;
	if (last_enabled != now_enabled) {
		if (!damon_reclaim_turn(now_enabled))
			last_enabled = now_enabled;
		else
			enabled = last_enabled;
	}

	if (enabled)
		schedule_delayed_work(&damon_reclaim_timer,
			msecs_to_jiffies(ENABLE_CHECK_INTERVAL_MS));
}
static DECLARE_DELAYED_WORK(damon_reclaim_timer, damon_reclaim_timer_fn);

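/*
 * Set the 'enabled' parameter and kick the timer immediately, so that
 * damon_reclaim_timer_fn() above applies the new state from the workqueue
 * context.
 */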
static int enabled_store(const char *val,
		const struct kernel_param *kp)
{
	int rc = param_set_bool(val, kp);

	if (rc < 0)
		return rc;

	if (enabled)
		schedule_delayed_work(&damon_reclaim_timer, 0);

	return 0;
}

static const struct kernel_param_ops enabled_param_ops = {
	.set = enabled_store,
	.get = param_get_bool,
};

module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
MODULE_PARM_DESC(enabled,
	"Enable or disable DAMON_RECLAIM (default: disabled)");

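/*
 * DAMON callback that runs after each aggregation interval: publish the
 * scheme's latest statistics through the read-only module parameters above.
 */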
static int damon_reclaim_after_aggregation(struct damon_ctx *c)
{
	struct damos *s;

	/* update the stats parameters */
	damon_for_each_scheme(s, c) {
		nr_reclaim_tried_regions = s->stat.nr_tried;
		bytes_reclaim_tried_regions = s->stat.sz_tried;
		nr_reclaimed_regions = s->stat.nr_applied;
		bytes_reclaimed_regions = s->stat.sz_applied;
		nr_quota_exceeds = s->stat.qt_exceeds;
	}
	return 0;
}

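/*
 * Module initialization: create a DAMON context for the physical address
 * space, register the statistics callback, add a monitoring target, and
 * schedule the first 'enabled' check.
 */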
static int __init damon_reclaim_init(void)
{
	ctx = damon_new_ctx();
	if (!ctx)
		return -ENOMEM;

	damon_pa_set_primitives(ctx);
	ctx->callback.after_aggregation = damon_reclaim_after_aggregation;

	/* 4242 means nothing but fun */
	target = damon_new_target(4242);
	if (!target) {
		damon_destroy_ctx(ctx);
		return -ENOMEM;
	}
	damon_add_target(ctx, target);

	schedule_delayed_work(&damon_reclaim_timer, 0);
	return 0;
}

module_init(damon_reclaim_init);