xref: /OK3568_Linux_fs/kernel/mm/damon/core-test.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Data Access Monitor Unit Tests
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright 2019 Amazon.com, Inc. or its affiliates.  All rights reserved.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Author: SeongJae Park <sjpark@amazon.de>
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #ifdef CONFIG_DAMON_KUNIT_TEST
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #ifndef _DAMON_CORE_TEST_H
13*4882a593Smuzhiyun #define _DAMON_CORE_TEST_H
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun #include <kunit/test.h>
16*4882a593Smuzhiyun 
damon_test_regions(struct kunit * test)17*4882a593Smuzhiyun static void damon_test_regions(struct kunit *test)
18*4882a593Smuzhiyun {
19*4882a593Smuzhiyun 	struct damon_region *r;
20*4882a593Smuzhiyun 	struct damon_target *t;
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun 	r = damon_new_region(1, 2);
23*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, 1ul, r->ar.start);
24*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, 2ul, r->ar.end);
25*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun 	t = damon_new_target(42);
28*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun 	damon_add_region(r, t);
31*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t));
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun 	damon_del_region(r, t);
34*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun 	damon_free_target(t);
37*4882a593Smuzhiyun }
38*4882a593Smuzhiyun 
nr_damon_targets(struct damon_ctx * ctx)39*4882a593Smuzhiyun static unsigned int nr_damon_targets(struct damon_ctx *ctx)
40*4882a593Smuzhiyun {
41*4882a593Smuzhiyun 	struct damon_target *t;
42*4882a593Smuzhiyun 	unsigned int nr_targets = 0;
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun 	damon_for_each_target(t, ctx)
45*4882a593Smuzhiyun 		nr_targets++;
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun 	return nr_targets;
48*4882a593Smuzhiyun }
49*4882a593Smuzhiyun 
damon_test_target(struct kunit * test)50*4882a593Smuzhiyun static void damon_test_target(struct kunit *test)
51*4882a593Smuzhiyun {
52*4882a593Smuzhiyun 	struct damon_ctx *c = damon_new_ctx();
53*4882a593Smuzhiyun 	struct damon_target *t;
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun 	t = damon_new_target(42);
56*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, 42ul, t->id);
57*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun 	damon_add_target(c, t);
60*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c));
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun 	damon_destroy_target(t);
63*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun 	damon_destroy_ctx(c);
66*4882a593Smuzhiyun }
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun /*
69*4882a593Smuzhiyun  * Test kdamond_reset_aggregated()
70*4882a593Smuzhiyun  *
71*4882a593Smuzhiyun  * DAMON checks access to each region and aggregates this information as the
72*4882a593Smuzhiyun  * access frequency of each region.  In detail, it increases '->nr_accesses' of
73*4882a593Smuzhiyun  * regions that an access has confirmed.  'kdamond_reset_aggregated()' flushes
74*4882a593Smuzhiyun  * the aggregated information ('->nr_accesses' of each regions) to the result
75*4882a593Smuzhiyun  * buffer.  As a result of the flushing, the '->nr_accesses' of regions are
76*4882a593Smuzhiyun  * initialized to zero.
77*4882a593Smuzhiyun  */
damon_test_aggregate(struct kunit * test)78*4882a593Smuzhiyun static void damon_test_aggregate(struct kunit *test)
79*4882a593Smuzhiyun {
80*4882a593Smuzhiyun 	struct damon_ctx *ctx = damon_new_ctx();
81*4882a593Smuzhiyun 	unsigned long target_ids[] = {1, 2, 3};
82*4882a593Smuzhiyun 	unsigned long saddr[][3] = {{10, 20, 30}, {5, 42, 49}, {13, 33, 55} };
83*4882a593Smuzhiyun 	unsigned long eaddr[][3] = {{15, 27, 40}, {31, 45, 55}, {23, 44, 66} };
84*4882a593Smuzhiyun 	unsigned long accesses[][3] = {{42, 95, 84}, {10, 20, 30}, {0, 1, 2} };
85*4882a593Smuzhiyun 	struct damon_target *t;
86*4882a593Smuzhiyun 	struct damon_region *r;
87*4882a593Smuzhiyun 	int it, ir;
88*4882a593Smuzhiyun 
89*4882a593Smuzhiyun 	damon_set_targets(ctx, target_ids, 3);
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun 	it = 0;
92*4882a593Smuzhiyun 	damon_for_each_target(t, ctx) {
93*4882a593Smuzhiyun 		for (ir = 0; ir < 3; ir++) {
94*4882a593Smuzhiyun 			r = damon_new_region(saddr[it][ir], eaddr[it][ir]);
95*4882a593Smuzhiyun 			r->nr_accesses = accesses[it][ir];
96*4882a593Smuzhiyun 			damon_add_region(r, t);
97*4882a593Smuzhiyun 		}
98*4882a593Smuzhiyun 		it++;
99*4882a593Smuzhiyun 	}
100*4882a593Smuzhiyun 	kdamond_reset_aggregated(ctx);
101*4882a593Smuzhiyun 	it = 0;
102*4882a593Smuzhiyun 	damon_for_each_target(t, ctx) {
103*4882a593Smuzhiyun 		ir = 0;
104*4882a593Smuzhiyun 		/* '->nr_accesses' should be zeroed */
105*4882a593Smuzhiyun 		damon_for_each_region(r, t) {
106*4882a593Smuzhiyun 			KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
107*4882a593Smuzhiyun 			ir++;
108*4882a593Smuzhiyun 		}
109*4882a593Smuzhiyun 		/* regions should be preserved */
110*4882a593Smuzhiyun 		KUNIT_EXPECT_EQ(test, 3, ir);
111*4882a593Smuzhiyun 		it++;
112*4882a593Smuzhiyun 	}
113*4882a593Smuzhiyun 	/* targets also should be preserved */
114*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, 3, it);
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun 	damon_destroy_ctx(ctx);
117*4882a593Smuzhiyun }
118*4882a593Smuzhiyun 
damon_test_split_at(struct kunit * test)119*4882a593Smuzhiyun static void damon_test_split_at(struct kunit *test)
120*4882a593Smuzhiyun {
121*4882a593Smuzhiyun 	struct damon_ctx *c = damon_new_ctx();
122*4882a593Smuzhiyun 	struct damon_target *t;
123*4882a593Smuzhiyun 	struct damon_region *r;
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun 	t = damon_new_target(42);
126*4882a593Smuzhiyun 	r = damon_new_region(0, 100);
127*4882a593Smuzhiyun 	damon_add_region(r, t);
128*4882a593Smuzhiyun 	damon_split_region_at(c, t, r, 25);
129*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
130*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, r->ar.end, 25ul);
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun 	r = damon_next_region(r);
133*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, r->ar.start, 25ul);
134*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, r->ar.end, 100ul);
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun 	damon_free_target(t);
137*4882a593Smuzhiyun 	damon_destroy_ctx(c);
138*4882a593Smuzhiyun }
139*4882a593Smuzhiyun 
damon_test_merge_two(struct kunit * test)140*4882a593Smuzhiyun static void damon_test_merge_two(struct kunit *test)
141*4882a593Smuzhiyun {
142*4882a593Smuzhiyun 	struct damon_target *t;
143*4882a593Smuzhiyun 	struct damon_region *r, *r2, *r3;
144*4882a593Smuzhiyun 	int i;
145*4882a593Smuzhiyun 
146*4882a593Smuzhiyun 	t = damon_new_target(42);
147*4882a593Smuzhiyun 	r = damon_new_region(0, 100);
148*4882a593Smuzhiyun 	r->nr_accesses = 10;
149*4882a593Smuzhiyun 	damon_add_region(r, t);
150*4882a593Smuzhiyun 	r2 = damon_new_region(100, 300);
151*4882a593Smuzhiyun 	r2->nr_accesses = 20;
152*4882a593Smuzhiyun 	damon_add_region(r2, t);
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun 	damon_merge_two_regions(t, r, r2);
155*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
156*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, r->ar.end, 300ul);
157*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, r->nr_accesses, 16u);
158*4882a593Smuzhiyun 
159*4882a593Smuzhiyun 	i = 0;
160*4882a593Smuzhiyun 	damon_for_each_region(r3, t) {
161*4882a593Smuzhiyun 		KUNIT_EXPECT_PTR_EQ(test, r, r3);
162*4882a593Smuzhiyun 		i++;
163*4882a593Smuzhiyun 	}
164*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, i, 1);
165*4882a593Smuzhiyun 
166*4882a593Smuzhiyun 	damon_free_target(t);
167*4882a593Smuzhiyun }
168*4882a593Smuzhiyun 
__nth_region_of(struct damon_target * t,int idx)169*4882a593Smuzhiyun static struct damon_region *__nth_region_of(struct damon_target *t, int idx)
170*4882a593Smuzhiyun {
171*4882a593Smuzhiyun 	struct damon_region *r;
172*4882a593Smuzhiyun 	unsigned int i = 0;
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun 	damon_for_each_region(r, t) {
175*4882a593Smuzhiyun 		if (i++ == idx)
176*4882a593Smuzhiyun 			return r;
177*4882a593Smuzhiyun 	}
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun 	return NULL;
180*4882a593Smuzhiyun }
181*4882a593Smuzhiyun 
damon_test_merge_regions_of(struct kunit * test)182*4882a593Smuzhiyun static void damon_test_merge_regions_of(struct kunit *test)
183*4882a593Smuzhiyun {
184*4882a593Smuzhiyun 	struct damon_target *t;
185*4882a593Smuzhiyun 	struct damon_region *r;
186*4882a593Smuzhiyun 	unsigned long sa[] = {0, 100, 114, 122, 130, 156, 170, 184};
187*4882a593Smuzhiyun 	unsigned long ea[] = {100, 112, 122, 130, 156, 170, 184, 230};
188*4882a593Smuzhiyun 	unsigned int nrs[] = {0, 0, 10, 10, 20, 30, 1, 2};
189*4882a593Smuzhiyun 
190*4882a593Smuzhiyun 	unsigned long saddrs[] = {0, 114, 130, 156, 170};
191*4882a593Smuzhiyun 	unsigned long eaddrs[] = {112, 130, 156, 170, 230};
192*4882a593Smuzhiyun 	int i;
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 	t = damon_new_target(42);
195*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(sa); i++) {
196*4882a593Smuzhiyun 		r = damon_new_region(sa[i], ea[i]);
197*4882a593Smuzhiyun 		r->nr_accesses = nrs[i];
198*4882a593Smuzhiyun 		damon_add_region(r, t);
199*4882a593Smuzhiyun 	}
200*4882a593Smuzhiyun 
201*4882a593Smuzhiyun 	damon_merge_regions_of(t, 9, 9999);
202*4882a593Smuzhiyun 	/* 0-112, 114-130, 130-156, 156-170 */
203*4882a593Smuzhiyun 	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
204*4882a593Smuzhiyun 	for (i = 0; i < 5; i++) {
205*4882a593Smuzhiyun 		r = __nth_region_of(t, i);
206*4882a593Smuzhiyun 		KUNIT_EXPECT_EQ(test, r->ar.start, saddrs[i]);
207*4882a593Smuzhiyun 		KUNIT_EXPECT_EQ(test, r->ar.end, eaddrs[i]);
208*4882a593Smuzhiyun 	}
209*4882a593Smuzhiyun 	damon_free_target(t);
210*4882a593Smuzhiyun }
211*4882a593Smuzhiyun 
/*
 * Test damon_split_regions_of(): splitting a single region into at most
 * 'nr_sub' pieces must never produce more than 'nr_sub' regions (the
 * split points are randomized, so fewer regions are acceptable).
 */
static void damon_test_split_regions_of(struct kunit *test)
{
	struct damon_ctx *ctx = damon_new_ctx();
	struct damon_target *target;
	struct damon_region *region;

	/* Two-way split of a small region */
	target = damon_new_target(42);
	region = damon_new_region(0, 22);
	damon_add_region(region, target);
	damon_split_regions_of(ctx, target, 2);
	KUNIT_EXPECT_LE(test, damon_nr_regions(target), 2u);
	damon_free_target(target);

	/* Four-way split of a larger region */
	target = damon_new_target(42);
	region = damon_new_region(0, 220);
	damon_add_region(region, target);
	damon_split_regions_of(ctx, target, 4);
	KUNIT_EXPECT_LE(test, damon_nr_regions(target), 4u);
	damon_free_target(target);

	damon_destroy_ctx(ctx);
}
233*4882a593Smuzhiyun 
/* All DAMON core test cases, registered below as the "damon" suite. */
static struct kunit_case damon_test_cases[] = {
	KUNIT_CASE(damon_test_target),
	KUNIT_CASE(damon_test_regions),
	KUNIT_CASE(damon_test_aggregate),
	KUNIT_CASE(damon_test_split_at),
	KUNIT_CASE(damon_test_merge_two),
	KUNIT_CASE(damon_test_merge_regions_of),
	KUNIT_CASE(damon_test_split_regions_of),
	{},
};
244*4882a593Smuzhiyun 
/* Register the test cases above with the KUnit framework. */
static struct kunit_suite damon_test_suite = {
	.name = "damon",
	.test_cases = damon_test_cases,
};
kunit_test_suite(damon_test_suite);
250*4882a593Smuzhiyun 
251*4882a593Smuzhiyun #endif /* _DAMON_CORE_TEST_H */
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun #endif	/* CONFIG_DAMON_KUNIT_TEST */
254