/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */

#include <trace/events/page_pool.h>

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params)
{
	unsigned int ring_qsize = 1024; /* Default */

	memcpy(&pool->p, params, sizeof(pool->p));

	/* Validate only known flags were used */
	if (pool->p.flags & ~(PP_FLAG_ALL))
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL allows the page to also be used for DMA sending,
	 * which is the XDP_TX use-case.
	 */
	if (pool->p.flags & PP_FLAG_DMA_MAP) {
		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
			return -EINVAL;
	}

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
		/* In order to request DMA-sync-for-device the page
		 * needs to be mapped
		 */
		if (!(pool->p.flags & PP_FLAG_DMA_MAP))
			return -EINVAL;

		if (!pool->p.max_len)
			return -EINVAL;

		/* pool->p.offset has to be set according to the address
		 * offset used by the DMA engine to start copying rx data
		 */
	}

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
		return -ENOMEM;

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* A driver that calls page_pool_create() must also call page_pool_destroy() */
	refcount_set(&pool->user_cnt, 1);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		get_device(pool->p.dev);

	return 0;
}

struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params);
	if (err < 0) {
		pr_warn("%s() gave up with errno %d\n", __func__, err);
		kfree(pool);
		return ERR_PTR(err);
	}

	return pool;
}
EXPORT_SYMBOL(page_pool_create);

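/* Illustrative sketch (not part of this file): a driver typically creates one
 * pool per RX queue and tears it down with page_pool_destroy().  The field
 * values and the hypothetical "rxq" context below are assumptions made for
 * the example only.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP,
 *		.order		= 0,
 *		.pool_size	= 1024,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= rxq->dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *	...
 *	page_pool_destroy(pool);	(on queue teardown)
 */
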
static void page_pool_return_page(struct page_pool *pool, struct page *page);

noinline
static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	struct page *page;
	int pref_nid; /* preferred NUMA node */

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r))
		return NULL;

	/* Softirq guarantees the CPU, and thus the NUMA node, is stable.
	 * This assumes the CPU refilling the driver RX-ring also runs RX-NAPI.
	 */
#ifdef CONFIG_NUMA
	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

	/* Slower-path: Get pages from locked ring queue */
	spin_lock(&r->consumer_lock);

	/* Refill alloc array, but only if NUMA match */
	do {
		page = __ptr_ring_consume(r);
		if (unlikely(!page))
			break;

		if (likely(page_to_nid(page) == pref_nid)) {
			pool->alloc.cache[pool->alloc.count++] = page;
		} else {
			/* NUMA mismatch;
			 * (1) release 1 page to page-allocator and
			 * (2) break out to fall through to alloc_pages_node.
			 * This limits stress on the page buddy allocator.
			 */
			page_pool_return_page(pool, page);
			page = NULL;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

	/* Return last page */
	if (likely(pool->alloc.count > 0))
		page = pool->alloc.cache[--pool->alloc.count];

	spin_unlock(&r->consumer_lock);
	return page;
}

/* fast path */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct page *page;

	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
	if (likely(pool->alloc.count)) {
		/* Fast-path */
		page = pool->alloc.cache[--pool->alloc.count];
	} else {
		page = page_pool_refill_alloc_cache(pool);
	}

	return page;
}

static void page_pool_dma_sync_for_device(struct page_pool *pool,
					  struct page *page,
					  unsigned int dma_sync_size)
{
	dma_addr_t dma_addr = page_pool_get_dma_addr(page);

	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
					 pool->p.offset, dma_sync_size,
					 pool->p.dma_dir);
}

/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
						 gfp_t _gfp)
{
	struct page *page;
	gfp_t gfp = _gfp;
	dma_addr_t dma;

	/* We could always set __GFP_COMP, and avoid this branch, as
	 * prep_new_page() can handle order-0 with __GFP_COMP.
	 */
	if (pool->p.order)
		gfp |= __GFP_COMP;

	/* FUTURE development:
	 *
	 * The current slow-path essentially falls back to single page
	 * allocations, which doesn't improve performance.  This code
	 * needs bulk allocation support from the page allocator code.
	 */

	/* Cache was empty, do real allocation */
#ifdef CONFIG_NUMA
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
#else
	page = alloc_pages(gfp, pool->p.order);
#endif
	if (!page)
		return NULL;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		goto skip_dma_map;

	/* Setup DMA mapping: use the 'struct page' area for storing the DMA addr
	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
	 * into page private data (i.e. a 32-bit CPU with 64-bit DMA capabilities).
	 * This mapping is kept for the lifetime of the page, until it leaves the pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, page, 0,
				 (PAGE_SIZE << pool->p.order),
				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(pool->p.dev, dma)) {
		put_page(page);
		return NULL;
	}
	page_pool_set_dma_addr(page, dma);

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);

skip_dma_map:
	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;

	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);

	/* A page that was just allocated should/must have refcnt 1. */
	return page;
}

/* For using page_pool to replace alloc_pages() API calls, while providing
 * a synchronization guarantee for the allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	struct page *page;

	/* Fast-path: Get a page from cache */
	page = __page_pool_get_cached(pool);
	if (page)
		return page;

	/* Slow-path: cache empty, do real allocation */
	page = __page_pool_alloc_pages_slow(pool, gfp);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);

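/* Illustrative sketch (not part of this file): during RX-ring refill a driver
 * allocates pages from the pool and programs the descriptor with the DMA
 * address plus the configured headroom offset.  The "desc" and "rxq" names
 * are assumptions made for the example only.
 *
 *	struct page *page = page_pool_alloc_pages(pool, GFP_ATOMIC);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	desc->addr = page_pool_get_dma_addr(page) + rxq->rx_offset;
 */
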
/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 *  https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b)	(s32)((a) - (b))

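/* Worked example (added for clarity): the subtraction stays correct across
 * u32 wrap-around.  With hold_cnt = 5 after wrapping and release_cnt still at
 * 0xfffffffe, the u32 difference is 7, and the cast to s32 keeps it positive,
 * i.e. 7 pages are in-flight.  The numbers are illustrative only.
 */
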
static s32 page_pool_inflight(struct page_pool *pool)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 inflight;

	inflight = _distance(hold_cnt, release_cnt);

	trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
	WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

	return inflight;
}

/* Disconnects a page (from a page_pool).  API users can have a need
 * to disconnect a page, to allow it to be used as a regular page
 * (that will eventually be returned to the normal page-allocator via
 * put_page).
 */
void page_pool_release_page(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;
	int count;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		/* Always account for inflight pages, even if we didn't
		 * map them
		 */
		goto skip_dma_unmap;

	dma = page_pool_get_dma_addr(page);

	/* When the page is unmapped, it cannot be returned to our pool */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC);
	page_pool_set_dma_addr(page, 0);
skip_dma_unmap:
	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference pool afterwards.
	 */
	count = atomic_inc_return(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, page, count);
}
EXPORT_SYMBOL(page_pool_release_page);

/* Return a page to the page allocator, cleaning up our state */
static void page_pool_return_page(struct page_pool *pool, struct page *page)
{
	page_pool_release_page(pool, page);

	put_page(page);
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}

static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
{
	int ret;
	/* BH protection not needed if current is serving softirq */
	if (in_serving_softirq())
		ret = ptr_ring_produce(&pool->ring, page);
	else
		ret = ptr_ring_produce_bh(&pool->ring, page);

	return (ret == 0) ? true : false;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache.  E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool page_pool_recycle_in_cache(struct page *page,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
		return false;

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = page;
	return true;
}

/* page is NOT reusable when:
 * 1) allocated when system is under some pressure. (page_is_pfmemalloc)
 */
static bool pool_page_reusable(struct page_pool *pool, struct page *page)
{
	return !page_is_pfmemalloc(page);
}

/* If the page refcnt == 1, this will try to recycle the page.
 * If PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->max_len).
 * If the page refcnt != 1, then the page will be returned to the memory
 * subsystem.
 */
void page_pool_put_page(struct page_pool *pool, struct page *page,
			unsigned int dma_sync_size, bool allow_direct)
{
	/* This allocator is optimized for the XDP mode that uses
	 * one-frame-per-page, but has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns the page, and can recycle it.
	 */
	if (likely(page_ref_count(page) == 1 &&
		   pool_page_reusable(pool, page))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page,
						      dma_sync_size);

		if (allow_direct && in_serving_softirq())
			if (page_pool_recycle_in_cache(page, pool))
				return;

		if (!page_pool_recycle_in_ring(pool, page)) {
			/* Cache full, fallback to free pages */
			page_pool_return_page(pool, page);
		}
		return;
	}
	/* Fallback/non-XDP mode: the API user has an elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In case page_pool maintains the DMA mapping, the API user must
	 * call page_pool_put_page() once.  In this elevated refcnt
	 * case, the DMA is unmapped/released, as the driver is likely
	 * doing refcnt based recycle tricks, meaning another process
	 * will be invoking put_page.
	 */
	/* Do not replace this with page_pool_return_page() */
	page_pool_release_page(pool, page);
	put_page(page);
}
EXPORT_SYMBOL(page_pool_put_page);

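/* Illustrative sketch (not part of this file): in the RX-NAPI XDP_DROP path a
 * driver can recycle directly into the alloc cache, while a page handed up the
 * network stack must first be disconnected from the pool.  The "len" variable
 * is an assumption made for the example only.
 *
 *	XDP_DROP, still in NAPI/softirq context:
 *		page_pool_put_page(pool, page, len, true);
 *
 *	Before handing the page to the network stack instead:
 *		page_pool_release_page(pool, page);
 */
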
static void page_pool_empty_ring(struct page_pool *pool)
{
	struct page *page;

	/* Empty recycle ring */
	while ((page = ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(page_ref_count(page) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, page_ref_count(page));

		page_pool_return_page(pool, page);
	}
}

static void page_pool_free(struct page_pool *pool)
{
	if (pool->disconnect)
		pool->disconnect(pool);

	ptr_ring_cleanup(&pool->ring, NULL);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		put_device(pool->p.dev);

	kfree(pool);
}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
	struct page *page;

	if (pool->destroy_cnt)
		return;

	/* Empty alloc cache, assume caller made sure this is
	 * no-longer in use, and page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}

static void page_pool_scrub(struct page_pool *pool)
{
	page_pool_empty_alloc_cache_once(pool);
	pool->destroy_cnt++;

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
	int inflight;

	page_pool_scrub(pool);
	inflight = page_pool_inflight(pool);
	if (!inflight)
		page_pool_free(pool);

	return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
	int inflight;

	inflight = page_pool_release(pool);
	if (!inflight)
		return;

	/* Periodic warning */
	if (time_after_eq(jiffies, pool->defer_warn)) {
		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

		pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
			__func__, inflight, sec);
		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
{
	refcount_inc(&pool->user_cnt);
	pool->disconnect = disconnect;
}

void page_pool_destroy(struct page_pool *pool)
{
	if (!pool)
		return;

	if (!page_pool_put(pool))
		return;

	if (!page_pool_release(pool))
		return;

	pool->defer_start = jiffies;
	pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	struct page *page;

	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;

	/* Flush pool alloc cache, as refill will check NUMA node */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_update_nid);