// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/idr.h>
#include <linux/vmalloc.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

static DEFINE_IDA(umem_ida);

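/* Release the pinned backing pages. unpin_user_pages_dirty_lock() drops
 * the FOLL_PIN references taken in xdp_umem_pin_pages() and marks the
 * pages dirty, since packet data may have been written into them.
 */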
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);

	kfree(umem->pgs);
	umem->pgs = NULL;
}

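/* Undo the RLIMIT_MEMLOCK accounting done in xdp_umem_account_pages().
 * umem->user is NULL when the creator had CAP_IPC_LOCK, in which case
 * nothing was charged and there is nothing to return.
 */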
static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

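/* Besides being pinned, the umem pages are mapped into one virtually
 * contiguous kernel address range with vmap(), so that any byte of the
 * umem can be reached as umem->addrs + offset without worrying about
 * page boundaries.
 */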
static void xdp_umem_addr_unmap(struct xdp_umem *umem)
{
	vunmap(umem->addrs);
	umem->addrs = NULL;
}

static int xdp_umem_addr_map(struct xdp_umem *umem, struct page **pages,
			     u32 nr_pages)
{
	umem->addrs = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!umem->addrs)
		return -ENOMEM;
	return 0;
}

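/* Final teardown, run once the last reference is gone: return the id,
 * drop the kernel mapping, unpin the pages and give back the locked_vm
 * charge. The order matters: the address map is built on top of the
 * pinned pages, so it has to be torn down first.
 */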
static void xdp_umem_release(struct xdp_umem *umem)
{
	umem->zc = false;
	ida_simple_remove(&umem_ida, umem->id);

	xdp_umem_addr_unmap(umem);
	xdp_umem_unpin_pages(umem);

	xdp_umem_unaccount_pages(umem);
	kfree(umem);
}

static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

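/* Drop a reference. When the last one goes away the umem is destroyed,
 * either right here or, if defer_cleanup is set, from a workqueue. The
 * deferred variant is for callers in contexts where the teardown work
 * (vunmap() and unpinning may sleep) cannot be done directly.
 */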
void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		if (defer_cleanup) {
			INIT_WORK(&umem->work, xdp_umem_release_deferred);
			schedule_work(&umem->work);
		} else {
			xdp_umem_release(umem);
		}
	}
}

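/* Pin the user memory long term (FOLL_LONGTERM) so the pages backing
 * the umem can neither be swapped out nor migrated while packet buffers
 * live in them. On a partial pin, umem->npgs is trimmed to the number
 * of pages actually pinned so that the unpin path releases exactly
 * those.
 */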
static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
			    GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	mmap_read_lock(current->mm);
	npgs = pin_user_pages(address, umem->npgs,
			      gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
	mmap_read_unlock(current->mm);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	/* xdp_umem_unpin_pages() already freed and cleared pgs on the
	 * out_pin path; kfree(NULL) below is then a harmless no-op.
	 */
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

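/* Charge the pinned pages against the creator's RLIMIT_MEMLOCK, unless
 * it holds CAP_IPC_LOCK. The cmpxchg loop updates user->locked_vm
 * without taking a lock: if another task changed the counter between
 * the read and the cmpxchg, re-read and retry.
 */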
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

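/* Validate a registration request from user space and initialize the
 * umem accordingly. The chunk size must lie in
 * [XDP_UMEM_MIN_CHUNK_SIZE, PAGE_SIZE] and, unless
 * XDP_UMEM_UNALIGNED_CHUNK_FLAG is set, be a power of two that evenly
 * divides the area. The area itself must be page aligned, must not wrap
 * around the address space, and each chunk must have room for the
 * requested headroom plus XDP_PACKET_HEADROOM.
 */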
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	u32 npgs_rem, chunk_size = mr->chunk_size, headroom = mr->headroom;
	bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	u64 npgs, addr = mr->addr, size = mr->len;
	unsigned int chunks, chunks_rem;
	int err;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (mr->flags & ~XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		return -EINVAL;

	if (!unaligned_chunks && !is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* Memory area has to be page size aligned. This is
		 * required for simplicity and might change in the future.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	npgs = div_u64_rem(size, PAGE_SIZE, &npgs_rem);
	if (npgs_rem)
		npgs++;
	if (npgs > U32_MAX)
		return -EINVAL;

	chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem);
	if (chunks == 0)
		return -EINVAL;

	if (!unaligned_chunks && chunks_rem)
		return -EINVAL;

	if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
		return -EINVAL;

	umem->size = size;
	umem->headroom = headroom;
	umem->chunk_size = chunk_size;
	umem->chunks = chunks;
	umem->npgs = (u32)npgs;
	umem->pgs = NULL;
	umem->user = NULL;
	umem->flags = mr->flags;

	INIT_LIST_HEAD(&umem->xsk_dma_list);
	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		return err;

	err = xdp_umem_pin_pages(umem, (unsigned long)addr);
	if (err)
		goto out_account;

	err = xdp_umem_addr_map(umem, umem->pgs, umem->npgs);
	if (err)
		goto out_unpin;

	return 0;

out_unpin:
	xdp_umem_unpin_pages(umem);
out_account:
	xdp_umem_unaccount_pages(umem);
	return err;
}

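/* Allocate a umem, assign it an id from the global IDA and register the
 * user memory described by @mr. Returns the new umem with one reference
 * held, or an ERR_PTR() on failure.
 */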
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
	if (err < 0) {
		kfree(umem);
		return ERR_PTR(err);
	}
	umem->id = err;

	err = xdp_umem_reg(umem, mr);
	if (err) {
		ida_simple_remove(&umem_ida, umem->id);
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}
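
/* Usage sketch (illustrative only; the real caller is the AF_XDP socket
 * code, and the field values below are made up):
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(unsigned long)user_buf,	// page aligned
 *		.len = 16 * PAGE_SIZE,
 *		.chunk_size = 2048,
 *		.headroom = 0,
 *		.flags = 0,
 *	};
 *	struct xdp_umem *umem = xdp_umem_create(&mr);
 *
 *	if (IS_ERR(umem))
 *		return PTR_ERR(umem);
 *	...
 *	xdp_put_umem(umem, false);	// drops the initial reference
 */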