/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PACKET_INTERNAL_H__
#define __PACKET_INTERNAL_H__

#include <linux/refcount.h>

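/*
 * One entry in a packet socket's membership list: a multicast, promiscuous,
 * allmulti or unicast address the socket requested on the device identified
 * by @ifindex via setsockopt(PACKET_ADD_MEMBERSHIP).  @count tracks how many
 * times the same membership was requested.
 */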
struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};

/*
 * kbdq - kernel block descriptor queue: per-ring state for the TPACKET_V3
 * block-based ring.  One instance is embedded in packet_ring_buffer::prb_bdqc;
 * retire_blk_timer retires (closes) the currently open block after
 * retire_blk_tov milliseconds so user space never waits indefinitely on a
 * partially filled block.
 */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char   delete_blk_timer;
	unsigned short	kactive_blk_num;
	unsigned short	blk_sizeof_priv;

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;
	char		*pkblk_end;
	int		kblk_size;
	unsigned int	max_frame_len;
	unsigned int	knum_blocks;
	uint64_t	knxt_seq_num;
	char		*prev;
	char		*nxt_offset;
	struct sk_buff	*skb;

	rwlock_t	blk_fill_in_prog_lock;

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short  retire_blk_tov;
	unsigned short  version;
	unsigned long	tov_in_jiffies;

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};

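/*
 * One element of a ring's page vector: each pgv points to one contiguous
 * block of kernel memory that backs part of the user-mmapped ring.
 */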
struct pgv {
	char *buffer;
};

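/*
 * Per-direction (RX or TX) state of a PACKET_MMAP ring: pg_vec holds the
 * blocks backing the ring, the frame_ and pg_vec_ fields describe its
 * geometry, and pending_refcnt counts TX frames still in flight.  The union
 * carries either the TPACKET_V1/V2 receive frame-ownership bitmap or the
 * TPACKET_V3 block descriptor queue.
 */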
struct packet_ring_buffer {
	struct pgv		*pg_vec;

	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	unsigned int __percpu	*pending_refcnt;

	union {
		unsigned long			*rx_owner_map;
		struct tpacket_kbdq_core	prb_bdqc;
	};
};

extern struct mutex fanout_mutex;
#define PACKET_FANOUT_MAX	(1 << 16)

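/*
 * A PACKET_FANOUT group: up to max_num_members packet sockets that share a
 * single prot_hook and distribute incoming packets among themselves according
 * to @type (hash, round-robin, CPU, BPF, ...).  arr[] holds the member
 * sockets and sk_ref counts them.
 */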
struct packet_fanout {
	possible_net_t		net;
	unsigned int		num_members;
	u32			max_num_members;
	u16			id;
	u8			type;
	u8			flags;
	union {
		atomic_t		rr_cur;
		struct bpf_prog __rcu	*bpf_prog;
	};
	struct list_head	list;
	spinlock_t		lock;
	refcount_t		sk_ref;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
	struct sock	__rcu	*arr[];
};

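/*
 * Per-socket rollover state (PACKET_FANOUT_FLAG_ROLLOVER): @sock remembers
 * the fanout member to try first on the next rollover, the atomic counters
 * are exported through PACKET_ROLLOVER_STATS, and @history records recent
 * flow hashes so that oversized flows can be detected.
 */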
struct packet_rollover {
	int			sock;
	atomic_long_t		num;
	atomic_long_t		num_huge;
	atomic_long_t		num_failed;
#define ROLLOVER_HLEN	(L1_CACHE_BYTES / sizeof(u32))
	u32			history[ROLLOVER_HLEN] ____cacheline_aligned;
} ____cacheline_aligned_in_smp;

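/*
 * Protocol-private state of an AF_PACKET socket.  prot_hook is the
 * packet_type entry registered with the networking core while the socket is
 * bound and running; rx_ring/tx_ring hold the optional PACKET_RX_RING and
 * PACKET_TX_RING memory-mapped rings.
 */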
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;
	union  tpacket_stats_u	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running;	/* bind_lock must be held */
	unsigned int		auxdata:1,	/* writer must hold sock lock */
				origdev:1,
				has_vnet_hdr:1,
				tp_loss:1,
				tp_tx_has_off:1;
	int			pressure;
	int			ifindex;	/* bound device		*/
	__be16			num;
	struct packet_rollover	*rollover;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_tstamp;
	struct completion	skb_completion;
	struct net_device __rcu	*cached_dev;
	int			(*xmit)(struct sk_buff *skb);
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
	atomic_t		tp_drops ____cacheline_aligned_in_smp;
};

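/*
 * Every AF_PACKET socket embeds struct sock as its first member, so the
 * cast below is the canonical way to get from a generic struct sock to the
 * protocol-private state, e.g.:
 *
 *	struct packet_sock *po = pkt_sk(sk);
 */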
static struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}

#endif /* __PACKET_INTERNAL_H__ */