// SPDX-License-Identifier: GPL-2.0
/* XSKMAP used for AF_XDP sockets
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/bpf.h>
#include <linux/capability.h>
#include <net/xdp_sock.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "xsk.h"

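/* Thin wrappers around the generic bpf_map refcounting. Every
 * xsk_map_node allocated below pins the map via xsk_map_inc(), so the
 * map cannot disappear while a socket still has an entry in it.
 */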
int xsk_map_inc(struct xsk_map *map)
{
	bpf_map_inc(&map->map);
	return 0;
}

void xsk_map_put(struct xsk_map *map)
{
	bpf_map_put(&map->map);
}

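/* Each xsk_map_node ties one socket to one slot in one map. Nodes are
 * chained on the socket's map_list (see xsk_map_sock_add()) so that the
 * socket can find and clear every map entry that still points at it
 * when it is released.
 */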
static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
					       struct xdp_sock **map_entry)
{
	struct xsk_map_node *node;
	int err;

	node = kzalloc(sizeof(*node), GFP_ATOMIC | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	err = xsk_map_inc(map);
	if (err) {
		kfree(node);
		return ERR_PTR(err);
	}

	node->map = map;
	node->map_entry = map_entry;
	return node;
}

static void xsk_map_node_free(struct xsk_map_node *node)
{
	xsk_map_put(node->map);
	kfree(node);
}

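/* Add/remove tracking nodes on the socket's map_list. The list is
 * protected by xs->map_list_lock, taken with bottom halves disabled.
 * xsk_map_sock_delete() frees every node that refers to the given map
 * slot, which also drops the map references taken at insert time.
 */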
static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node)
{
	spin_lock_bh(&xs->map_list_lock);
	list_add_tail(&node->node, &xs->map_list);
	spin_unlock_bh(&xs->map_list_lock);
}

static void xsk_map_sock_delete(struct xdp_sock *xs,
				struct xdp_sock **map_entry)
{
	struct xsk_map_node *n, *tmp;

	spin_lock_bh(&xs->map_list_lock);
	list_for_each_entry_safe(n, tmp, &xs->map_list, node) {
		if (map_entry == n->map_entry) {
			list_del(&n->node);
			xsk_map_node_free(n);
		}
	}
	spin_unlock_bh(&xs->map_list_lock);
}

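/* Map creation: keys and values are both 4 bytes (a u32 slot index and
 * an AF_XDP socket fd, respectively), and the only accepted flags are
 * BPF_F_NUMA_NODE, BPF_F_RDONLY and BPF_F_WRONLY. The flat socket
 * array is charged against the user's memlock limit before it is
 * allocated.
 */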
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
	struct bpf_map_memory mem;
	int err, numa_node;
	struct xsk_map *m;
	u64 size;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 ||
	    attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
		return ERR_PTR(-EINVAL);

	numa_node = bpf_map_attr_numa_node(attr);
	size = struct_size(m, xsk_map, attr->max_entries);

	err = bpf_map_charge_init(&mem, size);
	if (err < 0)
		return ERR_PTR(err);

	m = bpf_map_area_alloc(size, numa_node);
	if (!m) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	bpf_map_init_from_attr(&m->map, attr);
	bpf_map_charge_move(&m->map.memory, &mem);
	spin_lock_init(&m->lock);

	return &m->map;
}

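/* Map teardown. bpf_clear_redirect_map() removes any stale per-CPU
 * redirect state that still names this map, and synchronize_net()
 * makes sure no XDP program can still be walking the entries before
 * the array is freed.
 */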
static void xsk_map_free(struct bpf_map *map)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);

	bpf_clear_redirect_map(map);
	synchronize_net();
	bpf_map_area_free(m);
}

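/* Iteration helper for the bpf(BPF_MAP_GET_NEXT_KEY) syscall: a missing
 * or out-of-range key restarts the walk at index 0, and the last valid
 * index reports -ENOENT to end it.
 */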
static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= m->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == m->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

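/* Inline the lookup into the calling BPF program instead of emitting a
 * helper call. The generated sequence bounds-checks the index against
 * max_entries, scales it by the entry (pointer) size, adds the offset
 * of the xsk_map[] array, and loads the socket pointer; out-of-range
 * indices fall through to a NULL result.
 */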
static int xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
	struct bpf_insn *insn = insn_buf;

	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xsk_sock *)));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map));
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp);
	*insn++ = BPF_LDX_MEM(BPF_SIZEOF(struct xsk_sock *), ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __xsk_map_lookup_elem(map, *(u32 *)key);
}

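/* Values in this map are kernel xdp_sock pointers, which must not be
 * handed back to user space, so lookups from syscall context are
 * rejected outright.
 */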
static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

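/* Insert or replace a socket at a given index. The fd in @value is
 * resolved to an AF_XDP socket and a tracking node is pre-allocated
 * before m->lock is taken; under the lock the BPF_NOEXIST/BPF_EXIST
 * flags are honored, the new socket is published with WRITE_ONCE(),
 * and any displaced socket is unlinked from the slot it lost.
 */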
static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs, *old_xs, **map_entry;
	u32 i = *(u32 *)key, fd = *(u32 *)value;
	struct xsk_map_node *node;
	struct socket *sock;
	int err;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= m->map.max_entries))
		return -E2BIG;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return err;

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return -EOPNOTSUPP;
	}

	xs = (struct xdp_sock *)sock->sk;

	map_entry = &m->xsk_map[i];
	node = xsk_map_node_alloc(m, map_entry);
	if (IS_ERR(node)) {
		sockfd_put(sock);
		return PTR_ERR(node);
	}

	spin_lock_bh(&m->lock);
	old_xs = READ_ONCE(*map_entry);
	if (old_xs == xs) {
		err = 0;
		goto out;
	} else if (old_xs && map_flags == BPF_NOEXIST) {
		err = -EEXIST;
		goto out;
	} else if (!old_xs && map_flags == BPF_EXIST) {
		err = -ENOENT;
		goto out;
	}
	xsk_map_sock_add(xs, node);
	WRITE_ONCE(*map_entry, xs);
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	return 0;

out:
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	xsk_map_node_free(node);
	return err;
}

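/* Clear one slot: the entry is swapped to NULL under m->lock and the
 * displaced socket, if any, drops its tracking node for this slot.
 */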
static int xsk_map_delete_elem(struct bpf_map *map, void *key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *old_xs, **map_entry;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	spin_lock_bh(&m->lock);
	map_entry = &m->xsk_map[k];
	old_xs = xchg(map_entry, NULL);
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);

	return 0;
}

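/* Called from the socket side of the binding (presumably on socket
 * release/unbind) to clear a specific slot, but only if it still points
 * at this socket; a concurrent update may already have replaced it.
 */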
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock **map_entry)
{
	spin_lock_bh(&map->lock);
	if (READ_ONCE(*map_entry) == xs) {
		WRITE_ONCE(*map_entry, NULL);
		xsk_map_sock_delete(xs, map_entry);
	}
	spin_unlock_bh(&map->lock);
}

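/* For use as an inner map, an xskmap must match the outer template in
 * max_entries as well as in the generic attributes, likely because
 * xsk_map_gen_lookup() bakes max_entries into the inlined bounds check.
 */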
static bool xsk_map_meta_equal(const struct bpf_map *meta0,
			       const struct bpf_map *meta1)
{
	return meta0->max_entries == meta1->max_entries &&
	       bpf_map_meta_equal(meta0, meta1);
}

static int xsk_map_btf_id;
const struct bpf_map_ops xsk_map_ops = {
	.map_meta_equal = xsk_map_meta_equal,
	.map_alloc = xsk_map_alloc,
	.map_free = xsk_map_free,
	.map_get_next_key = xsk_map_get_next_key,
	.map_lookup_elem = xsk_map_lookup_elem,
	.map_gen_lookup = xsk_map_gen_lookup,
	.map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
	.map_update_elem = xsk_map_update_elem,
	.map_delete_elem = xsk_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "xsk_map",
	.map_btf_id = &xsk_map_btf_id,
};

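/* Usage sketch (illustrative, not part of this file): an XDP program
 * typically pairs one map slot per RX queue and redirects with
 * bpf_redirect_map(), falling back to the stack when the slot is
 * empty. All names below are made up for the example:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_XSKMAP);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, __u32);
 *	} xsks_map SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_sock_prog(struct xdp_md *ctx)
 *	{
 *		__u32 qid = ctx->rx_queue_index;
 *
 *		return bpf_redirect_map(&xsks_map, qid, XDP_PASS);
 *	}
 *
 * User space then stores each AF_XDP socket fd at its queue's index
 * with bpf_map_update_elem(), which ends up in xsk_map_update_elem()
 * above.
 */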