/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. */

#ifndef XSK_H_
#define XSK_H_

/* Masks for xdp_umem_page flags.
 * The low 12-bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)

struct xdp_ring_offset_v1 {
	__u64 producer;
	__u64 consumer;
	__u64 desc;
};

struct xdp_mmap_offsets_v1 {
	struct xdp_ring_offset_v1 rx;
	struct xdp_ring_offset_v1 tx;
	struct xdp_ring_offset_v1 fr;
	struct xdp_ring_offset_v1 cr;
};

/* Nodes are linked in the struct xdp_sock map_list field, and used to
 * track which maps a certain socket resides in.
 */

struct xsk_map_node {
	struct list_head node;
	struct xsk_map *map;
	struct xdp_sock **map_entry;
};

static inline struct xdp_sock *xdp_sk(struct sock *sk)
{
	return (struct xdp_sock *)sk;
}

void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);
void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id);
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id);

#endif /* XSK_H_ */
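
/* Illustrative sketch (not from the original header): the comment above
 * XSK_NEXT_PG_CONTIG_MASK describes stashing a flag in the low, always-zero
 * bits of a page-aligned address. A minimal example of that technique,
 * assuming a hypothetical page-aligned u64 variable page_addr and a
 * hypothetical helper handle_contig_page():
 *
 *	page_addr |= XSK_NEXT_PG_CONTIG_MASK;        (mark: next page is contiguous)
 *	if (page_addr & XSK_NEXT_PG_CONTIG_MASK)     (test the flag)
 *		handle_contig_page(page_addr);
 *	page_addr &= ~XSK_NEXT_PG_CONTIG_MASK;       (recover the plain page address)
 */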
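
/* Illustrative sketch (not from the original header): how an xsk_map_node
 * ties one XDP socket to one XSKMAP slot, per the comment above
 * struct xsk_map_node. This is a simplified assumption of the linking flow,
 * not a copy of the actual map code:
 *
 *	struct xsk_map_node *node = kzalloc(sizeof(*node), GFP_ATOMIC);
 *	if (!node)
 *		return -ENOMEM;
 *	node->map = map;                  (the XSKMAP holding the socket)
 *	node->map_entry = map_entry;      (the exact slot within that map)
 *	list_add_tail(&node->node, &xs->map_list);   (track it on the socket)
 */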