xref: /OK3568_Linux_fs/kernel/kernel/bpf/map_iter.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /* Copyright (c) 2020 Facebook */
3*4882a593Smuzhiyun #include <linux/bpf.h>
4*4882a593Smuzhiyun #include <linux/fs.h>
5*4882a593Smuzhiyun #include <linux/filter.h>
6*4882a593Smuzhiyun #include <linux/kernel.h>
7*4882a593Smuzhiyun #include <linux/btf_ids.h>
8*4882a593Smuzhiyun 
/* Per-seq_file iterator state: the map id the walk is currently at.
 * Lives in seq->private (see bpf_map_seq_info.seq_priv_size below).
 */
struct bpf_iter_seq_map_info {
	/* cursor advanced by bpf_map_seq_next() on each step */
	u32 map_id;
};
12*4882a593Smuzhiyun 
/* seq_ops->start: fetch the map at (or after) the saved id cursor.
 * Returns a referenced map (the matching put happens in ->next or
 * ->stop), or NULL when there are no further maps.
 */
static void *bpf_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_map_info *info = seq->private;
	struct bpf_map *map = bpf_map_get_curr_or_next(&info->map_id);

	/* seq_file convention: a fresh walk must leave *pos non-zero */
	if (map && *pos == 0)
		(*pos)++;

	return map;
}
26*4882a593Smuzhiyun 
/* seq_ops->next: release the element just shown and advance the id
 * cursor to fetch the following map.
 */
static void *bpf_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_map_info *info = seq->private;
	struct bpf_map *done = v;

	(*pos)++;
	info->map_id++;
	/* drop the reference taken when this element was fetched */
	bpf_map_put(done);
	return bpf_map_get_curr_or_next(&info->map_id);
}
36*4882a593Smuzhiyun 
/* Context passed to a bpf_map iterator program.
 * __bpf_md_ptr keeps the layout stable for BPF-side access;
 * map is NULL on the final (in_stop) invocation.
 */
struct bpf_iter__bpf_map {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
};
41*4882a593Smuzhiyun 
DEFINE_BPF_ITER_FUNC(bpf_map,struct bpf_iter_meta * meta,struct bpf_map * map)42*4882a593Smuzhiyun DEFINE_BPF_ITER_FUNC(bpf_map, struct bpf_iter_meta *meta, struct bpf_map *map)
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun static int __bpf_map_seq_show(struct seq_file *seq, void *v, bool in_stop)
45*4882a593Smuzhiyun {
46*4882a593Smuzhiyun 	struct bpf_iter__bpf_map ctx;
47*4882a593Smuzhiyun 	struct bpf_iter_meta meta;
48*4882a593Smuzhiyun 	struct bpf_prog *prog;
49*4882a593Smuzhiyun 	int ret = 0;
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun 	ctx.meta = &meta;
52*4882a593Smuzhiyun 	ctx.map = v;
53*4882a593Smuzhiyun 	meta.seq = seq;
54*4882a593Smuzhiyun 	prog = bpf_iter_get_info(&meta, in_stop);
55*4882a593Smuzhiyun 	if (prog)
56*4882a593Smuzhiyun 		ret = bpf_iter_run_prog(prog, &ctx);
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun 	return ret;
59*4882a593Smuzhiyun }
60*4882a593Smuzhiyun 
/* seq_ops->show: run the iterator prog for a live element (not stop). */
static int bpf_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_map_seq_show(seq, v, false);
}
65*4882a593Smuzhiyun 
bpf_map_seq_stop(struct seq_file * seq,void * v)66*4882a593Smuzhiyun static void bpf_map_seq_stop(struct seq_file *seq, void *v)
67*4882a593Smuzhiyun {
68*4882a593Smuzhiyun 	if (!v)
69*4882a593Smuzhiyun 		(void)__bpf_map_seq_show(seq, v, true);
70*4882a593Smuzhiyun 	else
71*4882a593Smuzhiyun 		bpf_map_put((struct bpf_map *)v);
72*4882a593Smuzhiyun }
73*4882a593Smuzhiyun 
/* seq_file callbacks backing the bpf_map iterator. */
static const struct seq_operations bpf_map_seq_ops = {
	.start	= bpf_map_seq_start,
	.next	= bpf_map_seq_next,
	.stop	= bpf_map_seq_stop,
	.show	= bpf_map_seq_show,
};
80*4882a593Smuzhiyun 
/* Build-time-resolved BTF type id of struct bpf_map; consumed by
 * bpf_map_iter_init() to fill in ctx_arg_info[0].btf_id.
 */
BTF_ID_LIST(btf_bpf_map_id)
BTF_ID(struct, bpf_map)
83*4882a593Smuzhiyun 
/* seq_file glue for the "bpf_map" target; no private init/fini needed
 * beyond the zeroed bpf_iter_seq_map_info allocated per open.
 */
static const struct bpf_iter_seq_info bpf_map_seq_info = {
	.seq_ops		= &bpf_map_seq_ops,
	.init_seq_private	= NULL,
	.fini_seq_private	= NULL,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_map_info),
};
90*4882a593Smuzhiyun 
/* Registration record for the "bpf_map" iterator target.
 * Deliberately non-const: bpf_map_iter_init() patches in the
 * runtime-resolved btf_id for ctx_arg_info[0].
 */
static struct bpf_iter_reg bpf_map_reg_info = {
	.target			= "bpf_map",
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		/* ctx->map may be NULL on the final stop callback */
		{ offsetof(struct bpf_iter__bpf_map, map),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info		= &bpf_map_seq_info,
};
100*4882a593Smuzhiyun 
bpf_iter_attach_map(struct bpf_prog * prog,union bpf_iter_link_info * linfo,struct bpf_iter_aux_info * aux)101*4882a593Smuzhiyun static int bpf_iter_attach_map(struct bpf_prog *prog,
102*4882a593Smuzhiyun 			       union bpf_iter_link_info *linfo,
103*4882a593Smuzhiyun 			       struct bpf_iter_aux_info *aux)
104*4882a593Smuzhiyun {
105*4882a593Smuzhiyun 	u32 key_acc_size, value_acc_size, key_size, value_size;
106*4882a593Smuzhiyun 	struct bpf_map *map;
107*4882a593Smuzhiyun 	bool is_percpu = false;
108*4882a593Smuzhiyun 	int err = -EINVAL;
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun 	if (!linfo->map.map_fd)
111*4882a593Smuzhiyun 		return -EBADF;
112*4882a593Smuzhiyun 
113*4882a593Smuzhiyun 	map = bpf_map_get_with_uref(linfo->map.map_fd);
114*4882a593Smuzhiyun 	if (IS_ERR(map))
115*4882a593Smuzhiyun 		return PTR_ERR(map);
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
118*4882a593Smuzhiyun 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
119*4882a593Smuzhiyun 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
120*4882a593Smuzhiyun 		is_percpu = true;
121*4882a593Smuzhiyun 	else if (map->map_type != BPF_MAP_TYPE_HASH &&
122*4882a593Smuzhiyun 		 map->map_type != BPF_MAP_TYPE_LRU_HASH &&
123*4882a593Smuzhiyun 		 map->map_type != BPF_MAP_TYPE_ARRAY)
124*4882a593Smuzhiyun 		goto put_map;
125*4882a593Smuzhiyun 
126*4882a593Smuzhiyun 	key_acc_size = prog->aux->max_rdonly_access;
127*4882a593Smuzhiyun 	value_acc_size = prog->aux->max_rdwr_access;
128*4882a593Smuzhiyun 	key_size = map->key_size;
129*4882a593Smuzhiyun 	if (!is_percpu)
130*4882a593Smuzhiyun 		value_size = map->value_size;
131*4882a593Smuzhiyun 	else
132*4882a593Smuzhiyun 		value_size = round_up(map->value_size, 8) * num_possible_cpus();
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun 	if (key_acc_size > key_size || value_acc_size > value_size) {
135*4882a593Smuzhiyun 		err = -EACCES;
136*4882a593Smuzhiyun 		goto put_map;
137*4882a593Smuzhiyun 	}
138*4882a593Smuzhiyun 
139*4882a593Smuzhiyun 	aux->map = map;
140*4882a593Smuzhiyun 	return 0;
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun put_map:
143*4882a593Smuzhiyun 	bpf_map_put_with_uref(map);
144*4882a593Smuzhiyun 	return err;
145*4882a593Smuzhiyun }
146*4882a593Smuzhiyun 
/* Release the map reference taken by bpf_iter_attach_map(). */
static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}
151*4882a593Smuzhiyun 
/* fdinfo hook: report the attached map's id for the iter link. */
void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
			      struct seq_file *seq)
{
	seq_printf(seq, "map_id:\t%u\n", aux->map->id);
}
157*4882a593Smuzhiyun 
/* bpf_link_info hook: expose the attached map's id to userspace. */
int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
				struct bpf_link_info *info)
{
	info->iter.map.map_id = aux->map->id;
	return 0;
}
164*4882a593Smuzhiyun 
/* Declares the attach-point signature for "bpf_map_elem" iterator
 * progs (key/value buffers are provided by the map-specific seq ops).
 */
DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta,
		     struct bpf_map *map, void *key, void *value)
167*4882a593Smuzhiyun 
/* Registration record for the "bpf_map_elem" iterator target.
 * No .seq_info here: each map type supplies its own via the
 * attach_target callback path. key is read-only to the prog,
 * value is read-write; both may be NULL on the stop callback.
 */
static const struct bpf_iter_reg bpf_map_elem_reg_info = {
	.target			= "bpf_map_elem",
	.attach_target		= bpf_iter_attach_map,
	.detach_target		= bpf_iter_detach_map,
	.show_fdinfo		= bpf_iter_map_show_fdinfo,
	.fill_link_info		= bpf_iter_map_fill_link_info,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__bpf_map_elem, key),
		  PTR_TO_RDONLY_BUF_OR_NULL },
		{ offsetof(struct bpf_iter__bpf_map_elem, value),
		  PTR_TO_RDWR_BUF_OR_NULL },
	},
};
182*4882a593Smuzhiyun 
/* Register both iterator targets. The btf_id patch must happen before
 * registering bpf_map_reg_info, which is why that struct is non-const.
 */
static int __init bpf_map_iter_init(void)
{
	int ret;

	/* fill in the runtime-resolved BTF id for struct bpf_map */
	bpf_map_reg_info.ctx_arg_info[0].btf_id = *btf_bpf_map_id;
	ret = bpf_iter_reg_target(&bpf_map_reg_info);
	if (ret)
		return ret;

	return bpf_iter_reg_target(&bpf_map_elem_reg_info);
}

/* late_initcall: BTF and bpf_iter core must be up first */
late_initcall(bpf_map_iter_init);
196