/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.h
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

/**
 * DOC: page_pool allocator
 *
 * This page_pool allocator is optimized for the XDP mode that
 * uses one-frame-per-page, but has fallbacks that act like the
 * regular page allocator APIs.
 *
 * Basic use involves replacing alloc_pages() calls with the
 * page_pool_alloc_pages() call.  Drivers should likely use
 * page_pool_dev_alloc_pages() replacing dev_alloc_pages().
 *
 * The API keeps track of in-flight pages, in order to let API users know
 * when it is safe to deallocate the page_pool object.  Thus, API users
 * must make sure to call page_pool_release_page() when a page is
 * "leaving" the page_pool, or call page_pool_put_page() where
 * appropriate, in order to maintain correct accounting.
 *
 * API users must only call page_pool_put_page() once on a page, as it
 * will either recycle the page, or in the case of an elevated refcnt,
 * release the DMA mapping and in-flight state accounting.  We
 * hope to lift this requirement in the future.
 */
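/* Example allocation sketch (illustrative only; the mydrv_* names, ring
 * layout and refill scheme are hypothetical and not part of this API):
 *
 *	static bool mydrv_rx_refill(struct mydrv_rx_ring *ring)
 *	{
 *		struct page *page;
 *
 *		page = page_pool_dev_alloc_pages(ring->page_pool);
 *		if (!page)
 *			return false;
 *		mydrv_post_rx_buffer(ring, page);
 *		return true;
 *	}
 *
 * When a received page is handed up the stack (e.g. attached to an SKB),
 * call page_pool_release_page() so the pool drops its DMA mapping and
 * in-flight accounting; pages kept inside the driver are instead returned
 * via page_pool_put_page() or one of the wrappers below.
 */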
#ifndef _NET_PAGE_POOL_H
#define _NET_PAGE_POOL_H

#include <linux/mm.h> /* Needed by ptr_ring */
#include <linux/ptr_ring.h>
#include <linux/dma-direction.h>
#include <linux/android_kabi.h>

#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
					* map/unmap
					*/
#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set, all pages that the driver
					* gets from page_pool will be
					* DMA-synced-for-device according to
					* the length provided by the device
					* driver.
					* Please note DMA-sync-for-CPU is still
					* the device driver's responsibility.
					*/
#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)

/*
 * Fast allocation side cache array/stack
 *
 * The cache size and refill watermark are related to the network
 * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX
 * ring is usually refilled and the max consumed elements will be 64,
 * thus a natural max size of objects needed in the cache.
 *
 * Keeping room for more objects is due to the XDP_DROP use-case:
 * XDP_DROP can recycle objects directly into this array, as it shares
 * the same softirq/NAPI protection. If the cache is already full
 * (or partly full) then the XDP_DROP recycles would have to take a
 * slower code path.
 */
#define PP_ALLOC_CACHE_SIZE	128
#define PP_ALLOC_CACHE_REFILL	64
struct pp_alloc_cache {
	u32 count;
	void *cache[PP_ALLOC_CACHE_SIZE];
};

struct page_pool_params {
	unsigned int	flags;
	unsigned int	order;
	unsigned int	pool_size;
	int		nid;  /* NUMA node id to allocate pages from */
	struct device	*dev; /* device, for DMA pre-mapping purposes */
	enum dma_data_direction dma_dir; /* DMA mapping direction */
	unsigned int	max_len; /* max DMA sync memory size */
	unsigned int	offset;  /* DMA addr offset */
};

struct page_pool {
	struct page_pool_params p;

	struct delayed_work release_dw;
	void (*disconnect)(void *);
	unsigned long defer_start;
	unsigned long defer_warn;

	u32 pages_state_hold_cnt;

	/*
	 * Data structure for the allocation side
	 *
	 * The driver's allocation side usually already performs some kind
	 * of resource protection.  Piggyback on this protection, and
	 * require the driver to protect the allocation side.
	 *
	 * For NIC drivers this means allocating a page_pool per
	 * RX-queue, as the RX-queue is already protected by
	 * softirq/BH scheduling and napi_schedule.  NAPI scheduling
	 * guarantees that a single napi_struct will only be scheduled
	 * on a single CPU (see napi_schedule).
	 */
	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

	/* Data structure for storing recycled pages.
	 *
	 * Returning/freeing pages is more complicated synchronization-wise,
	 * because frees can happen on remote CPUs, with no association
	 * with the allocation resource.
	 *
	 * Use ptr_ring, as it separates consumer and producer
	 * efficiently, in a way that doesn't bounce cache-lines.
	 *
	 * TODO: Implement bulk return of pages into this structure.
	 */
	struct ptr_ring ring;

	atomic_t pages_state_release_cnt;

	/* A page_pool is strictly tied to a single RX-queue being
	 * protected by NAPI, due to the above pp_alloc_cache.  This
	 * refcnt serves to simplify driver error handling.
	 */
	refcount_t user_cnt;

	u64 destroy_cnt;

	ANDROID_KABI_RESERVE(1);
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);

static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}

/* get the stored dma direction. A driver might decide to treat this locally and
 * avoid the extra cache line from page_pool to determine the direction
 */
static
inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
{
	return pool->p.dma_dir;
}

struct page_pool *page_pool_create(const struct page_pool_params *params);
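
/* Creation sketch (illustrative only; the "rxq" context, its ring size and
 * netdev pointer are hypothetical driver state, not part of this API):
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= rxq->ring_size,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= rxq->netdev->dev.parent,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE,
 *		.offset		= 0,
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */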

#ifdef CONFIG_PAGE_POOL
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
void page_pool_release_page(struct page_pool *pool, struct page *page);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
}

static inline void page_pool_use_xdp_mem(struct page_pool *pool,
					 void (*disconnect)(void *))
{
}
static inline void page_pool_release_page(struct page_pool *pool,
					  struct page *page)
{
}
#endif

void page_pool_put_page(struct page_pool *pool, struct page *page,
			unsigned int dma_sync_size, bool allow_direct);

/* Same as above but will try to sync the entire area pool->max_len */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page, bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
	 */
#ifdef CONFIG_PAGE_POOL
	page_pool_put_page(pool, page, -1, allow_direct);
#endif
}

/* Same as above but the caller must guarantee safe context, e.g. NAPI */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	page_pool_put_full_page(pool, page, true);
}
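
/* Recycling sketch (illustrative only; assumes the call happens in the NAPI
 * poll / XDP handler that owns this pool, and that "act", "rxq" and "page"
 * come from a hypothetical XDP verdict switch in the driver):
 *
 *	case XDP_DROP:
 *		page_pool_recycle_direct(rxq->page_pool, page);
 *		break;
 */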

static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
	dma_addr_t ret = page->dma_addr[0];

	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
	return ret;
}
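
/* Sync-for-CPU sketch (illustrative only; "dev", "offset" and "len" stand
 * for a hypothetical driver's DMA device, headroom offset and received
 * length).  page_pool only syncs pages for-device (PP_FLAG_DMA_SYNC_DEV);
 * syncing for the CPU before reading the payload remains the driver's job:
 *
 *	dma_addr_t dma = page_pool_get_dma_addr(page);
 *
 *	dma_sync_single_for_cpu(dev, dma + offset, len,
 *				page_pool_get_dma_dir(rxq->page_pool));
 */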

static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
	page->dma_addr[0] = addr;
	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		page->dma_addr[1] = upper_32_bits(addr);
}

static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
	return true;
#else
	return false;
#endif
}

static inline bool page_pool_put(struct page_pool *pool)
{
	return refcount_dec_and_test(&pool->user_cnt);
}

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid);
static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}
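
/* NUMA hint sketch (illustrative only; the mydrv_* names are hypothetical,
 * and the call is made from the pool-owning NAPI poll routine so the safe
 * context requirement above is met):
 *
 *	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mydrv_rx_ring *ring =
 *			container_of(napi, struct mydrv_rx_ring, napi);
 *
 *		page_pool_nid_changed(ring->page_pool, numa_mem_id());
 *		...
 *	}
 */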
#endif /* _NET_PAGE_POOL_H */