1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun #include <test_progs.h>
3*4882a593Smuzhiyun #include <network_helpers.h>
4*4882a593Smuzhiyun
5*4882a593Smuzhiyun /* test_tailcall_1 checks basic functionality by patching multiple locations
6*4882a593Smuzhiyun * in a single program for a single tail call slot with nop->jmp, jmp->nop
7*4882a593Smuzhiyun * and jmp->jmp rewrites. Also checks for nop->nop.
8*4882a593Smuzhiyun */
test_tailcall_1(void)9*4882a593Smuzhiyun static void test_tailcall_1(void)
10*4882a593Smuzhiyun {
11*4882a593Smuzhiyun int err, map_fd, prog_fd, main_fd, i, j;
12*4882a593Smuzhiyun struct bpf_map *prog_array;
13*4882a593Smuzhiyun struct bpf_program *prog;
14*4882a593Smuzhiyun struct bpf_object *obj;
15*4882a593Smuzhiyun __u32 retval, duration;
16*4882a593Smuzhiyun char prog_name[32];
17*4882a593Smuzhiyun char buff[128] = {};
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun err = bpf_prog_load("tailcall1.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
20*4882a593Smuzhiyun &prog_fd);
21*4882a593Smuzhiyun if (CHECK_FAIL(err))
22*4882a593Smuzhiyun return;
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, "classifier");
25*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
26*4882a593Smuzhiyun goto out;
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun main_fd = bpf_program__fd(prog);
29*4882a593Smuzhiyun if (CHECK_FAIL(main_fd < 0))
30*4882a593Smuzhiyun goto out;
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
33*4882a593Smuzhiyun if (CHECK_FAIL(!prog_array))
34*4882a593Smuzhiyun goto out;
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun map_fd = bpf_map__fd(prog_array);
37*4882a593Smuzhiyun if (CHECK_FAIL(map_fd < 0))
38*4882a593Smuzhiyun goto out;
39*4882a593Smuzhiyun
40*4882a593Smuzhiyun for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
41*4882a593Smuzhiyun snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, prog_name);
44*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
45*4882a593Smuzhiyun goto out;
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun prog_fd = bpf_program__fd(prog);
48*4882a593Smuzhiyun if (CHECK_FAIL(prog_fd < 0))
49*4882a593Smuzhiyun goto out;
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
52*4882a593Smuzhiyun if (CHECK_FAIL(err))
53*4882a593Smuzhiyun goto out;
54*4882a593Smuzhiyun }
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
57*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
58*4882a593Smuzhiyun &duration, &retval, NULL);
59*4882a593Smuzhiyun CHECK(err || retval != i, "tailcall",
60*4882a593Smuzhiyun "err %d errno %d retval %d\n", err, errno, retval);
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun err = bpf_map_delete_elem(map_fd, &i);
63*4882a593Smuzhiyun if (CHECK_FAIL(err))
64*4882a593Smuzhiyun goto out;
65*4882a593Smuzhiyun }
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
68*4882a593Smuzhiyun &duration, &retval, NULL);
69*4882a593Smuzhiyun CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
70*4882a593Smuzhiyun err, errno, retval);
71*4882a593Smuzhiyun
72*4882a593Smuzhiyun for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
73*4882a593Smuzhiyun snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
74*4882a593Smuzhiyun
75*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, prog_name);
76*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
77*4882a593Smuzhiyun goto out;
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun prog_fd = bpf_program__fd(prog);
80*4882a593Smuzhiyun if (CHECK_FAIL(prog_fd < 0))
81*4882a593Smuzhiyun goto out;
82*4882a593Smuzhiyun
83*4882a593Smuzhiyun err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
84*4882a593Smuzhiyun if (CHECK_FAIL(err))
85*4882a593Smuzhiyun goto out;
86*4882a593Smuzhiyun }
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
89*4882a593Smuzhiyun &duration, &retval, NULL);
90*4882a593Smuzhiyun CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
91*4882a593Smuzhiyun err, errno, retval);
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
94*4882a593Smuzhiyun j = bpf_map__def(prog_array)->max_entries - 1 - i;
95*4882a593Smuzhiyun snprintf(prog_name, sizeof(prog_name), "classifier/%i", j);
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, prog_name);
98*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
99*4882a593Smuzhiyun goto out;
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun prog_fd = bpf_program__fd(prog);
102*4882a593Smuzhiyun if (CHECK_FAIL(prog_fd < 0))
103*4882a593Smuzhiyun goto out;
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
106*4882a593Smuzhiyun if (CHECK_FAIL(err))
107*4882a593Smuzhiyun goto out;
108*4882a593Smuzhiyun }
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
111*4882a593Smuzhiyun j = bpf_map__def(prog_array)->max_entries - 1 - i;
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
114*4882a593Smuzhiyun &duration, &retval, NULL);
115*4882a593Smuzhiyun CHECK(err || retval != j, "tailcall",
116*4882a593Smuzhiyun "err %d errno %d retval %d\n", err, errno, retval);
117*4882a593Smuzhiyun
118*4882a593Smuzhiyun err = bpf_map_delete_elem(map_fd, &i);
119*4882a593Smuzhiyun if (CHECK_FAIL(err))
120*4882a593Smuzhiyun goto out;
121*4882a593Smuzhiyun }
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
124*4882a593Smuzhiyun &duration, &retval, NULL);
125*4882a593Smuzhiyun CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
126*4882a593Smuzhiyun err, errno, retval);
127*4882a593Smuzhiyun
128*4882a593Smuzhiyun for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
129*4882a593Smuzhiyun err = bpf_map_delete_elem(map_fd, &i);
130*4882a593Smuzhiyun if (CHECK_FAIL(err >= 0 || errno != ENOENT))
131*4882a593Smuzhiyun goto out;
132*4882a593Smuzhiyun
133*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
134*4882a593Smuzhiyun &duration, &retval, NULL);
135*4882a593Smuzhiyun CHECK(err || retval != 3, "tailcall",
136*4882a593Smuzhiyun "err %d errno %d retval %d\n", err, errno, retval);
137*4882a593Smuzhiyun }
138*4882a593Smuzhiyun
139*4882a593Smuzhiyun out:
140*4882a593Smuzhiyun bpf_object__close(obj);
141*4882a593Smuzhiyun }
142*4882a593Smuzhiyun
143*4882a593Smuzhiyun /* test_tailcall_2 checks that patching multiple programs for a single
144*4882a593Smuzhiyun * tail call slot works. It also jumps through several programs and tests
145*4882a593Smuzhiyun * the tail call limit counter.
146*4882a593Smuzhiyun */
test_tailcall_2(void)147*4882a593Smuzhiyun static void test_tailcall_2(void)
148*4882a593Smuzhiyun {
149*4882a593Smuzhiyun int err, map_fd, prog_fd, main_fd, i;
150*4882a593Smuzhiyun struct bpf_map *prog_array;
151*4882a593Smuzhiyun struct bpf_program *prog;
152*4882a593Smuzhiyun struct bpf_object *obj;
153*4882a593Smuzhiyun __u32 retval, duration;
154*4882a593Smuzhiyun char prog_name[32];
155*4882a593Smuzhiyun char buff[128] = {};
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun err = bpf_prog_load("tailcall2.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
158*4882a593Smuzhiyun &prog_fd);
159*4882a593Smuzhiyun if (CHECK_FAIL(err))
160*4882a593Smuzhiyun return;
161*4882a593Smuzhiyun
162*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, "classifier");
163*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
164*4882a593Smuzhiyun goto out;
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun main_fd = bpf_program__fd(prog);
167*4882a593Smuzhiyun if (CHECK_FAIL(main_fd < 0))
168*4882a593Smuzhiyun goto out;
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
171*4882a593Smuzhiyun if (CHECK_FAIL(!prog_array))
172*4882a593Smuzhiyun goto out;
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun map_fd = bpf_map__fd(prog_array);
175*4882a593Smuzhiyun if (CHECK_FAIL(map_fd < 0))
176*4882a593Smuzhiyun goto out;
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
179*4882a593Smuzhiyun snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
180*4882a593Smuzhiyun
181*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, prog_name);
182*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
183*4882a593Smuzhiyun goto out;
184*4882a593Smuzhiyun
185*4882a593Smuzhiyun prog_fd = bpf_program__fd(prog);
186*4882a593Smuzhiyun if (CHECK_FAIL(prog_fd < 0))
187*4882a593Smuzhiyun goto out;
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
190*4882a593Smuzhiyun if (CHECK_FAIL(err))
191*4882a593Smuzhiyun goto out;
192*4882a593Smuzhiyun }
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
195*4882a593Smuzhiyun &duration, &retval, NULL);
196*4882a593Smuzhiyun CHECK(err || retval != 2, "tailcall", "err %d errno %d retval %d\n",
197*4882a593Smuzhiyun err, errno, retval);
198*4882a593Smuzhiyun
199*4882a593Smuzhiyun i = 2;
200*4882a593Smuzhiyun err = bpf_map_delete_elem(map_fd, &i);
201*4882a593Smuzhiyun if (CHECK_FAIL(err))
202*4882a593Smuzhiyun goto out;
203*4882a593Smuzhiyun
204*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
205*4882a593Smuzhiyun &duration, &retval, NULL);
206*4882a593Smuzhiyun CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
207*4882a593Smuzhiyun err, errno, retval);
208*4882a593Smuzhiyun
209*4882a593Smuzhiyun i = 0;
210*4882a593Smuzhiyun err = bpf_map_delete_elem(map_fd, &i);
211*4882a593Smuzhiyun if (CHECK_FAIL(err))
212*4882a593Smuzhiyun goto out;
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
215*4882a593Smuzhiyun &duration, &retval, NULL);
216*4882a593Smuzhiyun CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
217*4882a593Smuzhiyun err, errno, retval);
218*4882a593Smuzhiyun out:
219*4882a593Smuzhiyun bpf_object__close(obj);
220*4882a593Smuzhiyun }
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun /* test_tailcall_3 checks that the count value of the tail call limit
223*4882a593Smuzhiyun * enforcement matches with expectations.
224*4882a593Smuzhiyun */
test_tailcall_3(void)225*4882a593Smuzhiyun static void test_tailcall_3(void)
226*4882a593Smuzhiyun {
227*4882a593Smuzhiyun int err, map_fd, prog_fd, main_fd, data_fd, i, val;
228*4882a593Smuzhiyun struct bpf_map *prog_array, *data_map;
229*4882a593Smuzhiyun struct bpf_program *prog;
230*4882a593Smuzhiyun struct bpf_object *obj;
231*4882a593Smuzhiyun __u32 retval, duration;
232*4882a593Smuzhiyun char buff[128] = {};
233*4882a593Smuzhiyun
234*4882a593Smuzhiyun err = bpf_prog_load("tailcall3.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
235*4882a593Smuzhiyun &prog_fd);
236*4882a593Smuzhiyun if (CHECK_FAIL(err))
237*4882a593Smuzhiyun return;
238*4882a593Smuzhiyun
239*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, "classifier");
240*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
241*4882a593Smuzhiyun goto out;
242*4882a593Smuzhiyun
243*4882a593Smuzhiyun main_fd = bpf_program__fd(prog);
244*4882a593Smuzhiyun if (CHECK_FAIL(main_fd < 0))
245*4882a593Smuzhiyun goto out;
246*4882a593Smuzhiyun
247*4882a593Smuzhiyun prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
248*4882a593Smuzhiyun if (CHECK_FAIL(!prog_array))
249*4882a593Smuzhiyun goto out;
250*4882a593Smuzhiyun
251*4882a593Smuzhiyun map_fd = bpf_map__fd(prog_array);
252*4882a593Smuzhiyun if (CHECK_FAIL(map_fd < 0))
253*4882a593Smuzhiyun goto out;
254*4882a593Smuzhiyun
255*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, "classifier/0");
256*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
257*4882a593Smuzhiyun goto out;
258*4882a593Smuzhiyun
259*4882a593Smuzhiyun prog_fd = bpf_program__fd(prog);
260*4882a593Smuzhiyun if (CHECK_FAIL(prog_fd < 0))
261*4882a593Smuzhiyun goto out;
262*4882a593Smuzhiyun
263*4882a593Smuzhiyun i = 0;
264*4882a593Smuzhiyun err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
265*4882a593Smuzhiyun if (CHECK_FAIL(err))
266*4882a593Smuzhiyun goto out;
267*4882a593Smuzhiyun
268*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
269*4882a593Smuzhiyun &duration, &retval, NULL);
270*4882a593Smuzhiyun CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
271*4882a593Smuzhiyun err, errno, retval);
272*4882a593Smuzhiyun
273*4882a593Smuzhiyun data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
274*4882a593Smuzhiyun if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
275*4882a593Smuzhiyun return;
276*4882a593Smuzhiyun
277*4882a593Smuzhiyun data_fd = bpf_map__fd(data_map);
278*4882a593Smuzhiyun if (CHECK_FAIL(map_fd < 0))
279*4882a593Smuzhiyun return;
280*4882a593Smuzhiyun
281*4882a593Smuzhiyun i = 0;
282*4882a593Smuzhiyun err = bpf_map_lookup_elem(data_fd, &i, &val);
283*4882a593Smuzhiyun CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n",
284*4882a593Smuzhiyun err, errno, val);
285*4882a593Smuzhiyun
286*4882a593Smuzhiyun i = 0;
287*4882a593Smuzhiyun err = bpf_map_delete_elem(map_fd, &i);
288*4882a593Smuzhiyun if (CHECK_FAIL(err))
289*4882a593Smuzhiyun goto out;
290*4882a593Smuzhiyun
291*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
292*4882a593Smuzhiyun &duration, &retval, NULL);
293*4882a593Smuzhiyun CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
294*4882a593Smuzhiyun err, errno, retval);
295*4882a593Smuzhiyun out:
296*4882a593Smuzhiyun bpf_object__close(obj);
297*4882a593Smuzhiyun }
298*4882a593Smuzhiyun
299*4882a593Smuzhiyun /* test_tailcall_4 checks that the kernel properly selects indirect jump
300*4882a593Smuzhiyun * for the case where the key is not known. Latter is passed via global
301*4882a593Smuzhiyun * data to select different targets we can compare return value of.
302*4882a593Smuzhiyun */
test_tailcall_4(void)303*4882a593Smuzhiyun static void test_tailcall_4(void)
304*4882a593Smuzhiyun {
305*4882a593Smuzhiyun int err, map_fd, prog_fd, main_fd, data_fd, i;
306*4882a593Smuzhiyun struct bpf_map *prog_array, *data_map;
307*4882a593Smuzhiyun struct bpf_program *prog;
308*4882a593Smuzhiyun struct bpf_object *obj;
309*4882a593Smuzhiyun __u32 retval, duration;
310*4882a593Smuzhiyun static const int zero = 0;
311*4882a593Smuzhiyun char buff[128] = {};
312*4882a593Smuzhiyun char prog_name[32];
313*4882a593Smuzhiyun
314*4882a593Smuzhiyun err = bpf_prog_load("tailcall4.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
315*4882a593Smuzhiyun &prog_fd);
316*4882a593Smuzhiyun if (CHECK_FAIL(err))
317*4882a593Smuzhiyun return;
318*4882a593Smuzhiyun
319*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, "classifier");
320*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
321*4882a593Smuzhiyun goto out;
322*4882a593Smuzhiyun
323*4882a593Smuzhiyun main_fd = bpf_program__fd(prog);
324*4882a593Smuzhiyun if (CHECK_FAIL(main_fd < 0))
325*4882a593Smuzhiyun goto out;
326*4882a593Smuzhiyun
327*4882a593Smuzhiyun prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
328*4882a593Smuzhiyun if (CHECK_FAIL(!prog_array))
329*4882a593Smuzhiyun goto out;
330*4882a593Smuzhiyun
331*4882a593Smuzhiyun map_fd = bpf_map__fd(prog_array);
332*4882a593Smuzhiyun if (CHECK_FAIL(map_fd < 0))
333*4882a593Smuzhiyun goto out;
334*4882a593Smuzhiyun
335*4882a593Smuzhiyun data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
336*4882a593Smuzhiyun if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
337*4882a593Smuzhiyun return;
338*4882a593Smuzhiyun
339*4882a593Smuzhiyun data_fd = bpf_map__fd(data_map);
340*4882a593Smuzhiyun if (CHECK_FAIL(map_fd < 0))
341*4882a593Smuzhiyun return;
342*4882a593Smuzhiyun
343*4882a593Smuzhiyun for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
344*4882a593Smuzhiyun snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
345*4882a593Smuzhiyun
346*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, prog_name);
347*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
348*4882a593Smuzhiyun goto out;
349*4882a593Smuzhiyun
350*4882a593Smuzhiyun prog_fd = bpf_program__fd(prog);
351*4882a593Smuzhiyun if (CHECK_FAIL(prog_fd < 0))
352*4882a593Smuzhiyun goto out;
353*4882a593Smuzhiyun
354*4882a593Smuzhiyun err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
355*4882a593Smuzhiyun if (CHECK_FAIL(err))
356*4882a593Smuzhiyun goto out;
357*4882a593Smuzhiyun }
358*4882a593Smuzhiyun
359*4882a593Smuzhiyun for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
360*4882a593Smuzhiyun err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
361*4882a593Smuzhiyun if (CHECK_FAIL(err))
362*4882a593Smuzhiyun goto out;
363*4882a593Smuzhiyun
364*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
365*4882a593Smuzhiyun &duration, &retval, NULL);
366*4882a593Smuzhiyun CHECK(err || retval != i, "tailcall",
367*4882a593Smuzhiyun "err %d errno %d retval %d\n", err, errno, retval);
368*4882a593Smuzhiyun }
369*4882a593Smuzhiyun
370*4882a593Smuzhiyun for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
371*4882a593Smuzhiyun err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
372*4882a593Smuzhiyun if (CHECK_FAIL(err))
373*4882a593Smuzhiyun goto out;
374*4882a593Smuzhiyun
375*4882a593Smuzhiyun err = bpf_map_delete_elem(map_fd, &i);
376*4882a593Smuzhiyun if (CHECK_FAIL(err))
377*4882a593Smuzhiyun goto out;
378*4882a593Smuzhiyun
379*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
380*4882a593Smuzhiyun &duration, &retval, NULL);
381*4882a593Smuzhiyun CHECK(err || retval != 3, "tailcall",
382*4882a593Smuzhiyun "err %d errno %d retval %d\n", err, errno, retval);
383*4882a593Smuzhiyun }
384*4882a593Smuzhiyun out:
385*4882a593Smuzhiyun bpf_object__close(obj);
386*4882a593Smuzhiyun }
387*4882a593Smuzhiyun
388*4882a593Smuzhiyun /* test_tailcall_5 probes similarly to test_tailcall_4 that the kernel generates
389*4882a593Smuzhiyun * an indirect jump when the keys are const but different from different branches.
390*4882a593Smuzhiyun */
test_tailcall_5(void)391*4882a593Smuzhiyun static void test_tailcall_5(void)
392*4882a593Smuzhiyun {
393*4882a593Smuzhiyun int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
394*4882a593Smuzhiyun struct bpf_map *prog_array, *data_map;
395*4882a593Smuzhiyun struct bpf_program *prog;
396*4882a593Smuzhiyun struct bpf_object *obj;
397*4882a593Smuzhiyun __u32 retval, duration;
398*4882a593Smuzhiyun static const int zero = 0;
399*4882a593Smuzhiyun char buff[128] = {};
400*4882a593Smuzhiyun char prog_name[32];
401*4882a593Smuzhiyun
402*4882a593Smuzhiyun err = bpf_prog_load("tailcall5.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
403*4882a593Smuzhiyun &prog_fd);
404*4882a593Smuzhiyun if (CHECK_FAIL(err))
405*4882a593Smuzhiyun return;
406*4882a593Smuzhiyun
407*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, "classifier");
408*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
409*4882a593Smuzhiyun goto out;
410*4882a593Smuzhiyun
411*4882a593Smuzhiyun main_fd = bpf_program__fd(prog);
412*4882a593Smuzhiyun if (CHECK_FAIL(main_fd < 0))
413*4882a593Smuzhiyun goto out;
414*4882a593Smuzhiyun
415*4882a593Smuzhiyun prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
416*4882a593Smuzhiyun if (CHECK_FAIL(!prog_array))
417*4882a593Smuzhiyun goto out;
418*4882a593Smuzhiyun
419*4882a593Smuzhiyun map_fd = bpf_map__fd(prog_array);
420*4882a593Smuzhiyun if (CHECK_FAIL(map_fd < 0))
421*4882a593Smuzhiyun goto out;
422*4882a593Smuzhiyun
423*4882a593Smuzhiyun data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
424*4882a593Smuzhiyun if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
425*4882a593Smuzhiyun return;
426*4882a593Smuzhiyun
427*4882a593Smuzhiyun data_fd = bpf_map__fd(data_map);
428*4882a593Smuzhiyun if (CHECK_FAIL(map_fd < 0))
429*4882a593Smuzhiyun return;
430*4882a593Smuzhiyun
431*4882a593Smuzhiyun for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
432*4882a593Smuzhiyun snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
433*4882a593Smuzhiyun
434*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, prog_name);
435*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
436*4882a593Smuzhiyun goto out;
437*4882a593Smuzhiyun
438*4882a593Smuzhiyun prog_fd = bpf_program__fd(prog);
439*4882a593Smuzhiyun if (CHECK_FAIL(prog_fd < 0))
440*4882a593Smuzhiyun goto out;
441*4882a593Smuzhiyun
442*4882a593Smuzhiyun err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
443*4882a593Smuzhiyun if (CHECK_FAIL(err))
444*4882a593Smuzhiyun goto out;
445*4882a593Smuzhiyun }
446*4882a593Smuzhiyun
447*4882a593Smuzhiyun for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
448*4882a593Smuzhiyun err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
449*4882a593Smuzhiyun if (CHECK_FAIL(err))
450*4882a593Smuzhiyun goto out;
451*4882a593Smuzhiyun
452*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
453*4882a593Smuzhiyun &duration, &retval, NULL);
454*4882a593Smuzhiyun CHECK(err || retval != i, "tailcall",
455*4882a593Smuzhiyun "err %d errno %d retval %d\n", err, errno, retval);
456*4882a593Smuzhiyun }
457*4882a593Smuzhiyun
458*4882a593Smuzhiyun for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
459*4882a593Smuzhiyun err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
460*4882a593Smuzhiyun if (CHECK_FAIL(err))
461*4882a593Smuzhiyun goto out;
462*4882a593Smuzhiyun
463*4882a593Smuzhiyun err = bpf_map_delete_elem(map_fd, &i);
464*4882a593Smuzhiyun if (CHECK_FAIL(err))
465*4882a593Smuzhiyun goto out;
466*4882a593Smuzhiyun
467*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
468*4882a593Smuzhiyun &duration, &retval, NULL);
469*4882a593Smuzhiyun CHECK(err || retval != 3, "tailcall",
470*4882a593Smuzhiyun "err %d errno %d retval %d\n", err, errno, retval);
471*4882a593Smuzhiyun }
472*4882a593Smuzhiyun out:
473*4882a593Smuzhiyun bpf_object__close(obj);
474*4882a593Smuzhiyun }
475*4882a593Smuzhiyun
476*4882a593Smuzhiyun /* test_tailcall_bpf2bpf_1 purpose is to make sure that tailcalls are working
477*4882a593Smuzhiyun * correctly in correlation with BPF subprograms
478*4882a593Smuzhiyun */
test_tailcall_bpf2bpf_1(void)479*4882a593Smuzhiyun static void test_tailcall_bpf2bpf_1(void)
480*4882a593Smuzhiyun {
481*4882a593Smuzhiyun int err, map_fd, prog_fd, main_fd, i;
482*4882a593Smuzhiyun struct bpf_map *prog_array;
483*4882a593Smuzhiyun struct bpf_program *prog;
484*4882a593Smuzhiyun struct bpf_object *obj;
485*4882a593Smuzhiyun __u32 retval, duration;
486*4882a593Smuzhiyun char prog_name[32];
487*4882a593Smuzhiyun
488*4882a593Smuzhiyun err = bpf_prog_load("tailcall_bpf2bpf1.o", BPF_PROG_TYPE_SCHED_CLS,
489*4882a593Smuzhiyun &obj, &prog_fd);
490*4882a593Smuzhiyun if (CHECK_FAIL(err))
491*4882a593Smuzhiyun return;
492*4882a593Smuzhiyun
493*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, "classifier");
494*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
495*4882a593Smuzhiyun goto out;
496*4882a593Smuzhiyun
497*4882a593Smuzhiyun main_fd = bpf_program__fd(prog);
498*4882a593Smuzhiyun if (CHECK_FAIL(main_fd < 0))
499*4882a593Smuzhiyun goto out;
500*4882a593Smuzhiyun
501*4882a593Smuzhiyun prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
502*4882a593Smuzhiyun if (CHECK_FAIL(!prog_array))
503*4882a593Smuzhiyun goto out;
504*4882a593Smuzhiyun
505*4882a593Smuzhiyun map_fd = bpf_map__fd(prog_array);
506*4882a593Smuzhiyun if (CHECK_FAIL(map_fd < 0))
507*4882a593Smuzhiyun goto out;
508*4882a593Smuzhiyun
509*4882a593Smuzhiyun /* nop -> jmp */
510*4882a593Smuzhiyun for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
511*4882a593Smuzhiyun snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
512*4882a593Smuzhiyun
513*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, prog_name);
514*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
515*4882a593Smuzhiyun goto out;
516*4882a593Smuzhiyun
517*4882a593Smuzhiyun prog_fd = bpf_program__fd(prog);
518*4882a593Smuzhiyun if (CHECK_FAIL(prog_fd < 0))
519*4882a593Smuzhiyun goto out;
520*4882a593Smuzhiyun
521*4882a593Smuzhiyun err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
522*4882a593Smuzhiyun if (CHECK_FAIL(err))
523*4882a593Smuzhiyun goto out;
524*4882a593Smuzhiyun }
525*4882a593Smuzhiyun
526*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
527*4882a593Smuzhiyun 0, &retval, &duration);
528*4882a593Smuzhiyun CHECK(err || retval != 1, "tailcall",
529*4882a593Smuzhiyun "err %d errno %d retval %d\n", err, errno, retval);
530*4882a593Smuzhiyun
531*4882a593Smuzhiyun /* jmp -> nop, call subprog that will do tailcall */
532*4882a593Smuzhiyun i = 1;
533*4882a593Smuzhiyun err = bpf_map_delete_elem(map_fd, &i);
534*4882a593Smuzhiyun if (CHECK_FAIL(err))
535*4882a593Smuzhiyun goto out;
536*4882a593Smuzhiyun
537*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
538*4882a593Smuzhiyun 0, &retval, &duration);
539*4882a593Smuzhiyun CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
540*4882a593Smuzhiyun err, errno, retval);
541*4882a593Smuzhiyun
542*4882a593Smuzhiyun /* make sure that subprog can access ctx and entry prog that
543*4882a593Smuzhiyun * called this subprog can properly return
544*4882a593Smuzhiyun */
545*4882a593Smuzhiyun i = 0;
546*4882a593Smuzhiyun err = bpf_map_delete_elem(map_fd, &i);
547*4882a593Smuzhiyun if (CHECK_FAIL(err))
548*4882a593Smuzhiyun goto out;
549*4882a593Smuzhiyun
550*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
551*4882a593Smuzhiyun 0, &retval, &duration);
552*4882a593Smuzhiyun CHECK(err || retval != sizeof(pkt_v4) * 2,
553*4882a593Smuzhiyun "tailcall", "err %d errno %d retval %d\n",
554*4882a593Smuzhiyun err, errno, retval);
555*4882a593Smuzhiyun out:
556*4882a593Smuzhiyun bpf_object__close(obj);
557*4882a593Smuzhiyun }
558*4882a593Smuzhiyun
559*4882a593Smuzhiyun /* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
560*4882a593Smuzhiyun * enforcement matches with expectations when tailcall is preceded with
561*4882a593Smuzhiyun * bpf2bpf call.
562*4882a593Smuzhiyun */
test_tailcall_bpf2bpf_2(void)563*4882a593Smuzhiyun static void test_tailcall_bpf2bpf_2(void)
564*4882a593Smuzhiyun {
565*4882a593Smuzhiyun int err, map_fd, prog_fd, main_fd, data_fd, i, val;
566*4882a593Smuzhiyun struct bpf_map *prog_array, *data_map;
567*4882a593Smuzhiyun struct bpf_program *prog;
568*4882a593Smuzhiyun struct bpf_object *obj;
569*4882a593Smuzhiyun __u32 retval, duration;
570*4882a593Smuzhiyun char buff[128] = {};
571*4882a593Smuzhiyun
572*4882a593Smuzhiyun err = bpf_prog_load("tailcall_bpf2bpf2.o", BPF_PROG_TYPE_SCHED_CLS,
573*4882a593Smuzhiyun &obj, &prog_fd);
574*4882a593Smuzhiyun if (CHECK_FAIL(err))
575*4882a593Smuzhiyun return;
576*4882a593Smuzhiyun
577*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, "classifier");
578*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
579*4882a593Smuzhiyun goto out;
580*4882a593Smuzhiyun
581*4882a593Smuzhiyun main_fd = bpf_program__fd(prog);
582*4882a593Smuzhiyun if (CHECK_FAIL(main_fd < 0))
583*4882a593Smuzhiyun goto out;
584*4882a593Smuzhiyun
585*4882a593Smuzhiyun prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
586*4882a593Smuzhiyun if (CHECK_FAIL(!prog_array))
587*4882a593Smuzhiyun goto out;
588*4882a593Smuzhiyun
589*4882a593Smuzhiyun map_fd = bpf_map__fd(prog_array);
590*4882a593Smuzhiyun if (CHECK_FAIL(map_fd < 0))
591*4882a593Smuzhiyun goto out;
592*4882a593Smuzhiyun
593*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, "classifier/0");
594*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
595*4882a593Smuzhiyun goto out;
596*4882a593Smuzhiyun
597*4882a593Smuzhiyun prog_fd = bpf_program__fd(prog);
598*4882a593Smuzhiyun if (CHECK_FAIL(prog_fd < 0))
599*4882a593Smuzhiyun goto out;
600*4882a593Smuzhiyun
601*4882a593Smuzhiyun i = 0;
602*4882a593Smuzhiyun err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
603*4882a593Smuzhiyun if (CHECK_FAIL(err))
604*4882a593Smuzhiyun goto out;
605*4882a593Smuzhiyun
606*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
607*4882a593Smuzhiyun &duration, &retval, NULL);
608*4882a593Smuzhiyun CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
609*4882a593Smuzhiyun err, errno, retval);
610*4882a593Smuzhiyun
611*4882a593Smuzhiyun data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
612*4882a593Smuzhiyun if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
613*4882a593Smuzhiyun return;
614*4882a593Smuzhiyun
615*4882a593Smuzhiyun data_fd = bpf_map__fd(data_map);
616*4882a593Smuzhiyun if (CHECK_FAIL(map_fd < 0))
617*4882a593Smuzhiyun return;
618*4882a593Smuzhiyun
619*4882a593Smuzhiyun i = 0;
620*4882a593Smuzhiyun err = bpf_map_lookup_elem(data_fd, &i, &val);
621*4882a593Smuzhiyun CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n",
622*4882a593Smuzhiyun err, errno, val);
623*4882a593Smuzhiyun
624*4882a593Smuzhiyun i = 0;
625*4882a593Smuzhiyun err = bpf_map_delete_elem(map_fd, &i);
626*4882a593Smuzhiyun if (CHECK_FAIL(err))
627*4882a593Smuzhiyun goto out;
628*4882a593Smuzhiyun
629*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
630*4882a593Smuzhiyun &duration, &retval, NULL);
631*4882a593Smuzhiyun CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
632*4882a593Smuzhiyun err, errno, retval);
633*4882a593Smuzhiyun out:
634*4882a593Smuzhiyun bpf_object__close(obj);
635*4882a593Smuzhiyun }
636*4882a593Smuzhiyun
637*4882a593Smuzhiyun /* test_tailcall_bpf2bpf_3 checks that non-trivial amount of stack (up to
638*4882a593Smuzhiyun * 256 bytes) can be used within bpf subprograms that have the tailcalls
639*4882a593Smuzhiyun * in them
640*4882a593Smuzhiyun */
test_tailcall_bpf2bpf_3(void)641*4882a593Smuzhiyun static void test_tailcall_bpf2bpf_3(void)
642*4882a593Smuzhiyun {
643*4882a593Smuzhiyun int err, map_fd, prog_fd, main_fd, i;
644*4882a593Smuzhiyun struct bpf_map *prog_array;
645*4882a593Smuzhiyun struct bpf_program *prog;
646*4882a593Smuzhiyun struct bpf_object *obj;
647*4882a593Smuzhiyun __u32 retval, duration;
648*4882a593Smuzhiyun char prog_name[32];
649*4882a593Smuzhiyun
650*4882a593Smuzhiyun err = bpf_prog_load("tailcall_bpf2bpf3.o", BPF_PROG_TYPE_SCHED_CLS,
651*4882a593Smuzhiyun &obj, &prog_fd);
652*4882a593Smuzhiyun if (CHECK_FAIL(err))
653*4882a593Smuzhiyun return;
654*4882a593Smuzhiyun
655*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, "classifier");
656*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
657*4882a593Smuzhiyun goto out;
658*4882a593Smuzhiyun
659*4882a593Smuzhiyun main_fd = bpf_program__fd(prog);
660*4882a593Smuzhiyun if (CHECK_FAIL(main_fd < 0))
661*4882a593Smuzhiyun goto out;
662*4882a593Smuzhiyun
663*4882a593Smuzhiyun prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
664*4882a593Smuzhiyun if (CHECK_FAIL(!prog_array))
665*4882a593Smuzhiyun goto out;
666*4882a593Smuzhiyun
667*4882a593Smuzhiyun map_fd = bpf_map__fd(prog_array);
668*4882a593Smuzhiyun if (CHECK_FAIL(map_fd < 0))
669*4882a593Smuzhiyun goto out;
670*4882a593Smuzhiyun
671*4882a593Smuzhiyun for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
672*4882a593Smuzhiyun snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
673*4882a593Smuzhiyun
674*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, prog_name);
675*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
676*4882a593Smuzhiyun goto out;
677*4882a593Smuzhiyun
678*4882a593Smuzhiyun prog_fd = bpf_program__fd(prog);
679*4882a593Smuzhiyun if (CHECK_FAIL(prog_fd < 0))
680*4882a593Smuzhiyun goto out;
681*4882a593Smuzhiyun
682*4882a593Smuzhiyun err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
683*4882a593Smuzhiyun if (CHECK_FAIL(err))
684*4882a593Smuzhiyun goto out;
685*4882a593Smuzhiyun }
686*4882a593Smuzhiyun
687*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
688*4882a593Smuzhiyun &duration, &retval, NULL);
689*4882a593Smuzhiyun CHECK(err || retval != sizeof(pkt_v4) * 3,
690*4882a593Smuzhiyun "tailcall", "err %d errno %d retval %d\n",
691*4882a593Smuzhiyun err, errno, retval);
692*4882a593Smuzhiyun
693*4882a593Smuzhiyun i = 1;
694*4882a593Smuzhiyun err = bpf_map_delete_elem(map_fd, &i);
695*4882a593Smuzhiyun if (CHECK_FAIL(err))
696*4882a593Smuzhiyun goto out;
697*4882a593Smuzhiyun
698*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
699*4882a593Smuzhiyun &duration, &retval, NULL);
700*4882a593Smuzhiyun CHECK(err || retval != sizeof(pkt_v4),
701*4882a593Smuzhiyun "tailcall", "err %d errno %d retval %d\n",
702*4882a593Smuzhiyun err, errno, retval);
703*4882a593Smuzhiyun
704*4882a593Smuzhiyun i = 0;
705*4882a593Smuzhiyun err = bpf_map_delete_elem(map_fd, &i);
706*4882a593Smuzhiyun if (CHECK_FAIL(err))
707*4882a593Smuzhiyun goto out;
708*4882a593Smuzhiyun
709*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
710*4882a593Smuzhiyun &duration, &retval, NULL);
711*4882a593Smuzhiyun CHECK(err || retval != sizeof(pkt_v4) * 2,
712*4882a593Smuzhiyun "tailcall", "err %d errno %d retval %d\n",
713*4882a593Smuzhiyun err, errno, retval);
714*4882a593Smuzhiyun out:
715*4882a593Smuzhiyun bpf_object__close(obj);
716*4882a593Smuzhiyun }
717*4882a593Smuzhiyun
/* test_tailcall_bpf2bpf_4 checks that the tailcall counter is correctly
 * preserved across tail calls combined with bpf2bpf calls. To make sure the
 * tailcall counter behaves correctly, the bpf program goes through the
 * following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through the first two tail calls and start counting from subprog2,
 * where the loop begins. At the end of the test, make sure the global
 * counter equals 31, because the tailcall counter includes the first two
 * tail calls whereas the global counter is incremented only on the loop
 * shown in the flow above.
 */
test_tailcall_bpf2bpf_4(void)731*4882a593Smuzhiyun static void test_tailcall_bpf2bpf_4(void)
732*4882a593Smuzhiyun {
733*4882a593Smuzhiyun int err, map_fd, prog_fd, main_fd, data_fd, i, val;
734*4882a593Smuzhiyun struct bpf_map *prog_array, *data_map;
735*4882a593Smuzhiyun struct bpf_program *prog;
736*4882a593Smuzhiyun struct bpf_object *obj;
737*4882a593Smuzhiyun __u32 retval, duration;
738*4882a593Smuzhiyun char prog_name[32];
739*4882a593Smuzhiyun
740*4882a593Smuzhiyun err = bpf_prog_load("tailcall_bpf2bpf4.o", BPF_PROG_TYPE_SCHED_CLS,
741*4882a593Smuzhiyun &obj, &prog_fd);
742*4882a593Smuzhiyun if (CHECK_FAIL(err))
743*4882a593Smuzhiyun return;
744*4882a593Smuzhiyun
745*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, "classifier");
746*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
747*4882a593Smuzhiyun goto out;
748*4882a593Smuzhiyun
749*4882a593Smuzhiyun main_fd = bpf_program__fd(prog);
750*4882a593Smuzhiyun if (CHECK_FAIL(main_fd < 0))
751*4882a593Smuzhiyun goto out;
752*4882a593Smuzhiyun
753*4882a593Smuzhiyun prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
754*4882a593Smuzhiyun if (CHECK_FAIL(!prog_array))
755*4882a593Smuzhiyun goto out;
756*4882a593Smuzhiyun
757*4882a593Smuzhiyun map_fd = bpf_map__fd(prog_array);
758*4882a593Smuzhiyun if (CHECK_FAIL(map_fd < 0))
759*4882a593Smuzhiyun goto out;
760*4882a593Smuzhiyun
761*4882a593Smuzhiyun for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
762*4882a593Smuzhiyun snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
763*4882a593Smuzhiyun
764*4882a593Smuzhiyun prog = bpf_object__find_program_by_title(obj, prog_name);
765*4882a593Smuzhiyun if (CHECK_FAIL(!prog))
766*4882a593Smuzhiyun goto out;
767*4882a593Smuzhiyun
768*4882a593Smuzhiyun prog_fd = bpf_program__fd(prog);
769*4882a593Smuzhiyun if (CHECK_FAIL(prog_fd < 0))
770*4882a593Smuzhiyun goto out;
771*4882a593Smuzhiyun
772*4882a593Smuzhiyun err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
773*4882a593Smuzhiyun if (CHECK_FAIL(err))
774*4882a593Smuzhiyun goto out;
775*4882a593Smuzhiyun }
776*4882a593Smuzhiyun
777*4882a593Smuzhiyun err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
778*4882a593Smuzhiyun &duration, &retval, NULL);
779*4882a593Smuzhiyun CHECK(err || retval != sizeof(pkt_v4) * 3, "tailcall", "err %d errno %d retval %d\n",
780*4882a593Smuzhiyun err, errno, retval);
781*4882a593Smuzhiyun
782*4882a593Smuzhiyun data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
783*4882a593Smuzhiyun if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
784*4882a593Smuzhiyun return;
785*4882a593Smuzhiyun
786*4882a593Smuzhiyun data_fd = bpf_map__fd(data_map);
787*4882a593Smuzhiyun if (CHECK_FAIL(map_fd < 0))
788*4882a593Smuzhiyun return;
789*4882a593Smuzhiyun
790*4882a593Smuzhiyun i = 0;
791*4882a593Smuzhiyun err = bpf_map_lookup_elem(data_fd, &i, &val);
792*4882a593Smuzhiyun CHECK(err || val != 31, "tailcall count", "err %d errno %d count %d\n",
793*4882a593Smuzhiyun err, errno, val);
794*4882a593Smuzhiyun
795*4882a593Smuzhiyun out:
796*4882a593Smuzhiyun bpf_object__close(obj);
797*4882a593Smuzhiyun }
798*4882a593Smuzhiyun
test_tailcalls(void)799*4882a593Smuzhiyun void test_tailcalls(void)
800*4882a593Smuzhiyun {
801*4882a593Smuzhiyun if (test__start_subtest("tailcall_1"))
802*4882a593Smuzhiyun test_tailcall_1();
803*4882a593Smuzhiyun if (test__start_subtest("tailcall_2"))
804*4882a593Smuzhiyun test_tailcall_2();
805*4882a593Smuzhiyun if (test__start_subtest("tailcall_3"))
806*4882a593Smuzhiyun test_tailcall_3();
807*4882a593Smuzhiyun if (test__start_subtest("tailcall_4"))
808*4882a593Smuzhiyun test_tailcall_4();
809*4882a593Smuzhiyun if (test__start_subtest("tailcall_5"))
810*4882a593Smuzhiyun test_tailcall_5();
811*4882a593Smuzhiyun if (test__start_subtest("tailcall_bpf2bpf_1"))
812*4882a593Smuzhiyun test_tailcall_bpf2bpf_1();
813*4882a593Smuzhiyun if (test__start_subtest("tailcall_bpf2bpf_2"))
814*4882a593Smuzhiyun test_tailcall_bpf2bpf_2();
815*4882a593Smuzhiyun if (test__start_subtest("tailcall_bpf2bpf_3"))
816*4882a593Smuzhiyun test_tailcall_bpf2bpf_3();
817*4882a593Smuzhiyun if (test__start_subtest("tailcall_bpf2bpf_4"))
818*4882a593Smuzhiyun test_tailcall_bpf2bpf_4();
819*4882a593Smuzhiyun }
820