xref: /OK3568_Linux_fs/kernel/tools/testing/selftests/bpf/prog_tests/mmap.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun #include <test_progs.h>
3*4882a593Smuzhiyun #include <sys/mman.h>
4*4882a593Smuzhiyun #include "test_mmap.skel.h"
5*4882a593Smuzhiyun 
/* Backing value layout for the mmap()'able BPF array map: 512 * 4 __u64s
 * (16 KiB), i.e. four pages at the common 4 KiB page size, so the tests
 * below can exercise multi-page mappings and partial unmaps.
 */
struct map_data {
	__u64 val[512 * 4];
};
9*4882a593Smuzhiyun 
/* Round @sz up to the next multiple of the runtime system page size. */
static size_t roundup_page(size_t sz)
{
	size_t page_size = sysconf(_SC_PAGE_SIZE);
	size_t rem = sz % page_size;

	return rem ? sz + (page_size - rem) : sz;
}
15*4882a593Smuzhiyun 
test_mmap(void)16*4882a593Smuzhiyun void test_mmap(void)
17*4882a593Smuzhiyun {
18*4882a593Smuzhiyun 	const size_t bss_sz = roundup_page(sizeof(struct test_mmap__bss));
19*4882a593Smuzhiyun 	const size_t map_sz = roundup_page(sizeof(struct map_data));
20*4882a593Smuzhiyun 	const int zero = 0, one = 1, two = 2, far = 1500;
21*4882a593Smuzhiyun 	const long page_size = sysconf(_SC_PAGE_SIZE);
22*4882a593Smuzhiyun 	int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd, rdmap_fd;
23*4882a593Smuzhiyun 	struct bpf_map *data_map, *bss_map;
24*4882a593Smuzhiyun 	void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp0, *tmp1, *tmp2;
25*4882a593Smuzhiyun 	struct test_mmap__bss *bss_data;
26*4882a593Smuzhiyun 	struct bpf_map_info map_info;
27*4882a593Smuzhiyun 	__u32 map_info_sz = sizeof(map_info);
28*4882a593Smuzhiyun 	struct map_data *map_data;
29*4882a593Smuzhiyun 	struct test_mmap *skel;
30*4882a593Smuzhiyun 	__u64 val = 0;
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun 	skel = test_mmap__open_and_load();
33*4882a593Smuzhiyun 	if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
34*4882a593Smuzhiyun 		return;
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun 	bss_map = skel->maps.bss;
37*4882a593Smuzhiyun 	data_map = skel->maps.data_map;
38*4882a593Smuzhiyun 	data_map_fd = bpf_map__fd(data_map);
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun 	rdmap_fd = bpf_map__fd(skel->maps.rdonly_map);
41*4882a593Smuzhiyun 	tmp1 = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
42*4882a593Smuzhiyun 	if (CHECK(tmp1 != MAP_FAILED, "rdonly_write_mmap", "unexpected success\n")) {
43*4882a593Smuzhiyun 		munmap(tmp1, 4096);
44*4882a593Smuzhiyun 		goto cleanup;
45*4882a593Smuzhiyun 	}
46*4882a593Smuzhiyun 	/* now double-check if it's mmap()'able at all */
47*4882a593Smuzhiyun 	tmp1 = mmap(NULL, 4096, PROT_READ, MAP_SHARED, rdmap_fd, 0);
48*4882a593Smuzhiyun 	if (CHECK(tmp1 == MAP_FAILED, "rdonly_read_mmap", "failed: %d\n", errno))
49*4882a593Smuzhiyun 		goto cleanup;
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun 	/* get map's ID */
52*4882a593Smuzhiyun 	memset(&map_info, 0, map_info_sz);
53*4882a593Smuzhiyun 	err = bpf_obj_get_info_by_fd(data_map_fd, &map_info, &map_info_sz);
54*4882a593Smuzhiyun 	if (CHECK(err, "map_get_info", "failed %d\n", errno))
55*4882a593Smuzhiyun 		goto cleanup;
56*4882a593Smuzhiyun 	data_map_id = map_info.id;
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun 	/* mmap BSS map */
59*4882a593Smuzhiyun 	bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
60*4882a593Smuzhiyun 			  bpf_map__fd(bss_map), 0);
61*4882a593Smuzhiyun 	if (CHECK(bss_mmaped == MAP_FAILED, "bss_mmap",
62*4882a593Smuzhiyun 		  ".bss mmap failed: %d\n", errno)) {
63*4882a593Smuzhiyun 		bss_mmaped = NULL;
64*4882a593Smuzhiyun 		goto cleanup;
65*4882a593Smuzhiyun 	}
66*4882a593Smuzhiyun 	/* map as R/W first */
67*4882a593Smuzhiyun 	map_mmaped = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
68*4882a593Smuzhiyun 			  data_map_fd, 0);
69*4882a593Smuzhiyun 	if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
70*4882a593Smuzhiyun 		  "data_map mmap failed: %d\n", errno)) {
71*4882a593Smuzhiyun 		map_mmaped = NULL;
72*4882a593Smuzhiyun 		goto cleanup;
73*4882a593Smuzhiyun 	}
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun 	bss_data = bss_mmaped;
76*4882a593Smuzhiyun 	map_data = map_mmaped;
77*4882a593Smuzhiyun 
78*4882a593Smuzhiyun 	CHECK_FAIL(bss_data->in_val);
79*4882a593Smuzhiyun 	CHECK_FAIL(bss_data->out_val);
80*4882a593Smuzhiyun 	CHECK_FAIL(skel->bss->in_val);
81*4882a593Smuzhiyun 	CHECK_FAIL(skel->bss->out_val);
82*4882a593Smuzhiyun 	CHECK_FAIL(map_data->val[0]);
83*4882a593Smuzhiyun 	CHECK_FAIL(map_data->val[1]);
84*4882a593Smuzhiyun 	CHECK_FAIL(map_data->val[2]);
85*4882a593Smuzhiyun 	CHECK_FAIL(map_data->val[far]);
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun 	err = test_mmap__attach(skel);
88*4882a593Smuzhiyun 	if (CHECK(err, "attach_raw_tp", "err %d\n", err))
89*4882a593Smuzhiyun 		goto cleanup;
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun 	bss_data->in_val = 123;
92*4882a593Smuzhiyun 	val = 111;
93*4882a593Smuzhiyun 	CHECK_FAIL(bpf_map_update_elem(data_map_fd, &zero, &val, 0));
94*4882a593Smuzhiyun 
95*4882a593Smuzhiyun 	usleep(1);
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun 	CHECK_FAIL(bss_data->in_val != 123);
98*4882a593Smuzhiyun 	CHECK_FAIL(bss_data->out_val != 123);
99*4882a593Smuzhiyun 	CHECK_FAIL(skel->bss->in_val != 123);
100*4882a593Smuzhiyun 	CHECK_FAIL(skel->bss->out_val != 123);
101*4882a593Smuzhiyun 	CHECK_FAIL(map_data->val[0] != 111);
102*4882a593Smuzhiyun 	CHECK_FAIL(map_data->val[1] != 222);
103*4882a593Smuzhiyun 	CHECK_FAIL(map_data->val[2] != 123);
104*4882a593Smuzhiyun 	CHECK_FAIL(map_data->val[far] != 3 * 123);
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun 	CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &zero, &val));
107*4882a593Smuzhiyun 	CHECK_FAIL(val != 111);
108*4882a593Smuzhiyun 	CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &one, &val));
109*4882a593Smuzhiyun 	CHECK_FAIL(val != 222);
110*4882a593Smuzhiyun 	CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &two, &val));
111*4882a593Smuzhiyun 	CHECK_FAIL(val != 123);
112*4882a593Smuzhiyun 	CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &far, &val));
113*4882a593Smuzhiyun 	CHECK_FAIL(val != 3 * 123);
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun 	/* data_map freeze should fail due to R/W mmap() */
116*4882a593Smuzhiyun 	err = bpf_map_freeze(data_map_fd);
117*4882a593Smuzhiyun 	if (CHECK(!err || errno != EBUSY, "no_freeze",
118*4882a593Smuzhiyun 		  "data_map freeze succeeded: err=%d, errno=%d\n", err, errno))
119*4882a593Smuzhiyun 		goto cleanup;
120*4882a593Smuzhiyun 
121*4882a593Smuzhiyun 	err = mprotect(map_mmaped, map_sz, PROT_READ);
122*4882a593Smuzhiyun 	if (CHECK(err, "mprotect_ro", "mprotect to r/o failed %d\n", errno))
123*4882a593Smuzhiyun 		goto cleanup;
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun 	/* unmap R/W mapping */
126*4882a593Smuzhiyun 	err = munmap(map_mmaped, map_sz);
127*4882a593Smuzhiyun 	map_mmaped = NULL;
128*4882a593Smuzhiyun 	if (CHECK(err, "data_map_munmap", "data_map munmap failed: %d\n", errno))
129*4882a593Smuzhiyun 		goto cleanup;
130*4882a593Smuzhiyun 
131*4882a593Smuzhiyun 	/* re-map as R/O now */
132*4882a593Smuzhiyun 	map_mmaped = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
133*4882a593Smuzhiyun 	if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
134*4882a593Smuzhiyun 		  "data_map R/O mmap failed: %d\n", errno)) {
135*4882a593Smuzhiyun 		map_mmaped = NULL;
136*4882a593Smuzhiyun 		goto cleanup;
137*4882a593Smuzhiyun 	}
138*4882a593Smuzhiyun 	err = mprotect(map_mmaped, map_sz, PROT_WRITE);
139*4882a593Smuzhiyun 	if (CHECK(!err, "mprotect_wr", "mprotect() succeeded unexpectedly!\n"))
140*4882a593Smuzhiyun 		goto cleanup;
141*4882a593Smuzhiyun 	err = mprotect(map_mmaped, map_sz, PROT_EXEC);
142*4882a593Smuzhiyun 	if (CHECK(!err, "mprotect_ex", "mprotect() succeeded unexpectedly!\n"))
143*4882a593Smuzhiyun 		goto cleanup;
144*4882a593Smuzhiyun 	map_data = map_mmaped;
145*4882a593Smuzhiyun 
146*4882a593Smuzhiyun 	/* map/unmap in a loop to test ref counting */
147*4882a593Smuzhiyun 	for (i = 0; i < 10; i++) {
148*4882a593Smuzhiyun 		int flags = i % 2 ? PROT_READ : PROT_WRITE;
149*4882a593Smuzhiyun 		void *p;
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun 		p = mmap(NULL, map_sz, flags, MAP_SHARED, data_map_fd, 0);
152*4882a593Smuzhiyun 		if (CHECK_FAIL(p == MAP_FAILED))
153*4882a593Smuzhiyun 			goto cleanup;
154*4882a593Smuzhiyun 		err = munmap(p, map_sz);
155*4882a593Smuzhiyun 		if (CHECK_FAIL(err))
156*4882a593Smuzhiyun 			goto cleanup;
157*4882a593Smuzhiyun 	}
158*4882a593Smuzhiyun 
159*4882a593Smuzhiyun 	/* data_map freeze should now succeed due to no R/W mapping */
160*4882a593Smuzhiyun 	err = bpf_map_freeze(data_map_fd);
161*4882a593Smuzhiyun 	if (CHECK(err, "freeze", "data_map freeze failed: err=%d, errno=%d\n",
162*4882a593Smuzhiyun 		  err, errno))
163*4882a593Smuzhiyun 		goto cleanup;
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	/* mapping as R/W now should fail */
166*4882a593Smuzhiyun 	tmp1 = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
167*4882a593Smuzhiyun 		    data_map_fd, 0);
168*4882a593Smuzhiyun 	if (CHECK(tmp1 != MAP_FAILED, "data_mmap", "mmap succeeded\n")) {
169*4882a593Smuzhiyun 		munmap(tmp1, map_sz);
170*4882a593Smuzhiyun 		goto cleanup;
171*4882a593Smuzhiyun 	}
172*4882a593Smuzhiyun 
173*4882a593Smuzhiyun 	bss_data->in_val = 321;
174*4882a593Smuzhiyun 	usleep(1);
175*4882a593Smuzhiyun 	CHECK_FAIL(bss_data->in_val != 321);
176*4882a593Smuzhiyun 	CHECK_FAIL(bss_data->out_val != 321);
177*4882a593Smuzhiyun 	CHECK_FAIL(skel->bss->in_val != 321);
178*4882a593Smuzhiyun 	CHECK_FAIL(skel->bss->out_val != 321);
179*4882a593Smuzhiyun 	CHECK_FAIL(map_data->val[0] != 111);
180*4882a593Smuzhiyun 	CHECK_FAIL(map_data->val[1] != 222);
181*4882a593Smuzhiyun 	CHECK_FAIL(map_data->val[2] != 321);
182*4882a593Smuzhiyun 	CHECK_FAIL(map_data->val[far] != 3 * 321);
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun 	/* check some more advanced mmap() manipulations */
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun 	tmp0 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS,
187*4882a593Smuzhiyun 			  -1, 0);
188*4882a593Smuzhiyun 	if (CHECK(tmp0 == MAP_FAILED, "adv_mmap0", "errno %d\n", errno))
189*4882a593Smuzhiyun 		goto cleanup;
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun 	/* map all but last page: pages 1-3 mapped */
192*4882a593Smuzhiyun 	tmp1 = mmap(tmp0, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
193*4882a593Smuzhiyun 			  data_map_fd, 0);
194*4882a593Smuzhiyun 	if (CHECK(tmp0 != tmp1, "adv_mmap1", "tmp0: %p, tmp1: %p\n", tmp0, tmp1)) {
195*4882a593Smuzhiyun 		munmap(tmp0, 4 * page_size);
196*4882a593Smuzhiyun 		goto cleanup;
197*4882a593Smuzhiyun 	}
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun 	/* unmap second page: pages 1, 3 mapped */
200*4882a593Smuzhiyun 	err = munmap(tmp1 + page_size, page_size);
201*4882a593Smuzhiyun 	if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) {
202*4882a593Smuzhiyun 		munmap(tmp1, 4 * page_size);
203*4882a593Smuzhiyun 		goto cleanup;
204*4882a593Smuzhiyun 	}
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun 	/* map page 2 back */
207*4882a593Smuzhiyun 	tmp2 = mmap(tmp1 + page_size, page_size, PROT_READ,
208*4882a593Smuzhiyun 		    MAP_SHARED | MAP_FIXED, data_map_fd, 0);
209*4882a593Smuzhiyun 	if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) {
210*4882a593Smuzhiyun 		munmap(tmp1, page_size);
211*4882a593Smuzhiyun 		munmap(tmp1 + 2*page_size, 2 * page_size);
212*4882a593Smuzhiyun 		goto cleanup;
213*4882a593Smuzhiyun 	}
214*4882a593Smuzhiyun 	CHECK(tmp1 + page_size != tmp2, "adv_mmap4",
215*4882a593Smuzhiyun 	      "tmp1: %p, tmp2: %p\n", tmp1, tmp2);
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun 	/* re-map all 4 pages */
218*4882a593Smuzhiyun 	tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
219*4882a593Smuzhiyun 		    data_map_fd, 0);
220*4882a593Smuzhiyun 	if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) {
221*4882a593Smuzhiyun 		munmap(tmp1, 4 * page_size); /* unmap page 1 */
222*4882a593Smuzhiyun 		goto cleanup;
223*4882a593Smuzhiyun 	}
224*4882a593Smuzhiyun 	CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2);
225*4882a593Smuzhiyun 
226*4882a593Smuzhiyun 	map_data = tmp2;
227*4882a593Smuzhiyun 	CHECK_FAIL(bss_data->in_val != 321);
228*4882a593Smuzhiyun 	CHECK_FAIL(bss_data->out_val != 321);
229*4882a593Smuzhiyun 	CHECK_FAIL(skel->bss->in_val != 321);
230*4882a593Smuzhiyun 	CHECK_FAIL(skel->bss->out_val != 321);
231*4882a593Smuzhiyun 	CHECK_FAIL(map_data->val[0] != 111);
232*4882a593Smuzhiyun 	CHECK_FAIL(map_data->val[1] != 222);
233*4882a593Smuzhiyun 	CHECK_FAIL(map_data->val[2] != 321);
234*4882a593Smuzhiyun 	CHECK_FAIL(map_data->val[far] != 3 * 321);
235*4882a593Smuzhiyun 
236*4882a593Smuzhiyun 	munmap(tmp2, 4 * page_size);
237*4882a593Smuzhiyun 
238*4882a593Smuzhiyun 	/* map all 4 pages, but with pg_off=1 page, should fail */
239*4882a593Smuzhiyun 	tmp1 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
240*4882a593Smuzhiyun 		    data_map_fd, page_size /* initial page shift */);
241*4882a593Smuzhiyun 	if (CHECK(tmp1 != MAP_FAILED, "adv_mmap7", "unexpected success")) {
242*4882a593Smuzhiyun 		munmap(tmp1, 4 * page_size);
243*4882a593Smuzhiyun 		goto cleanup;
244*4882a593Smuzhiyun 	}
245*4882a593Smuzhiyun 
246*4882a593Smuzhiyun 	tmp1 = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
247*4882a593Smuzhiyun 	if (CHECK(tmp1 == MAP_FAILED, "last_mmap", "failed %d\n", errno))
248*4882a593Smuzhiyun 		goto cleanup;
249*4882a593Smuzhiyun 
250*4882a593Smuzhiyun 	test_mmap__destroy(skel);
251*4882a593Smuzhiyun 	skel = NULL;
252*4882a593Smuzhiyun 	CHECK_FAIL(munmap(bss_mmaped, bss_sz));
253*4882a593Smuzhiyun 	bss_mmaped = NULL;
254*4882a593Smuzhiyun 	CHECK_FAIL(munmap(map_mmaped, map_sz));
255*4882a593Smuzhiyun 	map_mmaped = NULL;
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun 	/* map should be still held by active mmap */
258*4882a593Smuzhiyun 	tmp_fd = bpf_map_get_fd_by_id(data_map_id);
259*4882a593Smuzhiyun 	if (CHECK(tmp_fd < 0, "get_map_by_id", "failed %d\n", errno)) {
260*4882a593Smuzhiyun 		munmap(tmp1, map_sz);
261*4882a593Smuzhiyun 		goto cleanup;
262*4882a593Smuzhiyun 	}
263*4882a593Smuzhiyun 	close(tmp_fd);
264*4882a593Smuzhiyun 
265*4882a593Smuzhiyun 	/* this should release data map finally */
266*4882a593Smuzhiyun 	munmap(tmp1, map_sz);
267*4882a593Smuzhiyun 
268*4882a593Smuzhiyun 	/* we need to wait for RCU grace period */
269*4882a593Smuzhiyun 	for (i = 0; i < 10000; i++) {
270*4882a593Smuzhiyun 		__u32 id = data_map_id - 1;
271*4882a593Smuzhiyun 		if (bpf_map_get_next_id(id, &id) || id > data_map_id)
272*4882a593Smuzhiyun 			break;
273*4882a593Smuzhiyun 		usleep(1);
274*4882a593Smuzhiyun 	}
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	/* should fail to get map FD by non-existing ID */
277*4882a593Smuzhiyun 	tmp_fd = bpf_map_get_fd_by_id(data_map_id);
278*4882a593Smuzhiyun 	if (CHECK(tmp_fd >= 0, "get_map_by_id_after",
279*4882a593Smuzhiyun 		  "unexpectedly succeeded %d\n", tmp_fd)) {
280*4882a593Smuzhiyun 		close(tmp_fd);
281*4882a593Smuzhiyun 		goto cleanup;
282*4882a593Smuzhiyun 	}
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun cleanup:
285*4882a593Smuzhiyun 	if (bss_mmaped)
286*4882a593Smuzhiyun 		CHECK_FAIL(munmap(bss_mmaped, bss_sz));
287*4882a593Smuzhiyun 	if (map_mmaped)
288*4882a593Smuzhiyun 		CHECK_FAIL(munmap(map_mmaped, map_sz));
289*4882a593Smuzhiyun 	test_mmap__destroy(skel);
290*4882a593Smuzhiyun }
291