/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QEDE_H_
#define _QEDE_H_
#include <linux/compiler.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/qed/qede_rdma.h>
#include <linux/io.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/qed/common_hsi.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_eth_if.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>

#define QEDE_MAJOR_VERSION		8
#define QEDE_MINOR_VERSION		37
#define QEDE_REVISION_VERSION		0
#define QEDE_ENGINEERING_VERSION	20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "."	\
		__stringify(QEDE_MINOR_VERSION) "."		\
		__stringify(QEDE_REVISION_VERSION) "."		\
		__stringify(QEDE_ENGINEERING_VERSION)
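/* Example: the preprocessor stringifies and concatenates the four components
 * above, so DRV_MODULE_VERSION expands to the single string literal
 * "8.37.0.20", e.g.:
 *
 *	pr_info("qede version %s\n", DRV_MODULE_VERSION);
 */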

#define DRV_MODULE_SYM		qede

struct qede_stats_common {
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 gft_filter_drop;
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;
	u64 coalesced_pkts;
	u64 coalesced_events;
	u64 coalesced_aborts_num;
	u64 non_coalesced_pkts;
	u64 coalesced_bytes;
	u64 link_change_count;
	u64 ptp_skip_txts;

	/* port */
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames;
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	u64 brb_truncates;
	u64 brb_discards;
	u64 tx_mac_ctrl_frames;
};

struct qede_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
};

struct qede_stats_ah {
	u64 rx_1519_to_max_byte_packets;
	u64 tx_1519_to_max_byte_packets;
};

struct qede_stats {
	struct qede_stats_common common;

	union {
		struct qede_stats_bb bb;
		struct qede_stats_ah ah;
	};
};

struct qede_vlan {
	struct list_head list;
	u16 vid;
	bool configured;
};

struct qede_rdma_dev {
	struct qedr_dev *qedr_dev;
	struct list_head entry;
	struct list_head rdma_event_list;
	struct workqueue_struct *rdma_wq;
	struct kref refcnt;
	struct completion event_comp;
	bool exp_recovery;
};

struct qede_ptp;

#define QEDE_RFS_MAX_FLTR	256

enum qede_flags_bit {
	QEDE_FLAGS_IS_VF = 0,
	QEDE_FLAGS_LINK_REQUESTED,
	QEDE_FLAGS_PTP_TX_IN_PRORGESS,
	QEDE_FLAGS_TX_TIMESTAMPING_EN
};

#define QEDE_DUMP_MAX_ARGS 4
enum qede_dump_cmd {
	QEDE_DUMP_CMD_NONE = 0,
	QEDE_DUMP_CMD_NVM_CFG,
	QEDE_DUMP_CMD_GRCDUMP,
	QEDE_DUMP_CMD_MAX
};

struct qede_dump_info {
	enum qede_dump_cmd cmd;
	u8 num_args;
	u32 args[QEDE_DUMP_MAX_ARGS];
};

struct qede_dev {
	struct qed_dev			*cdev;
	struct net_device		*ndev;
	struct pci_dev			*pdev;
	struct devlink			*devlink;

	u32				dp_module;
	u8				dp_level;

	unsigned long			flags;
#define IS_VF(edev)			test_bit(QEDE_FLAGS_IS_VF, \
						 &(edev)->flags)

	const struct qed_eth_ops	*ops;
	struct qede_ptp			*ptp;
	u64				ptp_skip_txts;

	struct qed_dev_eth_info		dev_info;
#define QEDE_MAX_RSS_CNT(edev)		((edev)->dev_info.num_queues)
#define QEDE_MAX_TSS_CNT(edev)		((edev)->dev_info.num_queues)
#define QEDE_IS_BB(edev) \
	((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB)
#define QEDE_IS_AH(edev) \
	((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)

	struct qede_fastpath		*fp_array;
	u8				req_num_tx;
	u8				fp_num_tx;
	u8				req_num_rx;
	u8				fp_num_rx;
	u16				req_queues;
	u16				num_queues;
	u16				total_xdp_queues;

#define QEDE_QUEUE_CNT(edev)		((edev)->num_queues)
#define QEDE_RSS_COUNT(edev)		((edev)->num_queues - (edev)->fp_num_tx)
#define QEDE_RX_QUEUE_IDX(edev, i)	(i)
#define QEDE_TSS_COUNT(edev)		((edev)->num_queues - (edev)->fp_num_rx)
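	/* Worked example (hypothetical numbers): with num_queues = 8,
	 * fp_num_tx = 2 Tx-only fastpaths and fp_num_rx = 2 Rx-only
	 * fastpaths, QEDE_QUEUE_CNT() is 8, QEDE_RSS_COUNT() is 8 - 2 = 6
	 * Rx-capable queues and QEDE_TSS_COUNT() is 8 - 2 = 6 Tx-capable
	 * queues; the remaining four fastpaths serve both Rx and Tx.
	 */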

	struct qed_int_info		int_info;

	/* Smaller private variant of the RTNL lock */
	struct mutex			qede_lock;
	u32				state; /* Protected by qede_lock */
	u16				rx_buf_size;
	u32				rx_copybreak;

	/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVERHEAD			(ETH_HLEN + 8 + 8)
	/* Max supported alignment is 256 bytes (a shift of 8);
	 * the minimal alignment shift of 6 is optimal for 57xxx HW
	 * performance.
	 */
#define QEDE_RX_ALIGN_SHIFT		max(6, min(8, L1_CACHE_SHIFT))
	/* We assume build_skb() uses sizeof(struct skb_shared_info) bytes
	 * at the end of skb->data, to avoid wasting a full cache line.
	 * This reduces memory use (skb->truesize).
	 */
#define QEDE_FW_RX_ALIGN_END					\
	max_t(u64, 1UL << QEDE_RX_ALIGN_SHIFT,			\
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
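	/* Worked example (assuming a 64-byte cache line, L1_CACHE_SHIFT = 6):
	 * QEDE_RX_ALIGN_SHIFT is max(6, min(8, 6)) = 6, i.e. 64-byte aligned
	 * Rx buffers, and QEDE_FW_RX_ALIGN_END is the larger of that 64-byte
	 * alignment and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), the
	 * tail room build_skb() needs for the shared-info block.
	 */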

	struct qede_stats		stats;

	/* Bitfield to track initialized RSS params */
	u32				rss_params_inited;
#define QEDE_RSS_INDIR_INITED		BIT(0)
#define QEDE_RSS_KEY_INITED		BIT(1)
#define QEDE_RSS_CAPS_INITED		BIT(2)

	u16				rss_ind_table[128];
	u32				rss_key[10];
	u8				rss_caps;

	/* Both must be a power of two */
	u16				q_num_rx_buffers;
	u16				q_num_tx_buffers;

	bool				gro_disable;

	struct list_head		vlan_list;
	u16				configured_vlans;
	u16				non_configured_vlans;
	bool				accept_any_vlan;

	struct delayed_work		sp_task;
	unsigned long			sp_flags;
	u16				vxlan_dst_port;
	u16				geneve_dst_port;

	struct qede_arfs		*arfs;
	bool				wol_enabled;

	struct qede_rdma_dev		rdma_info;

	struct bpf_prog			*xdp_prog;

	enum qed_hw_err_type		last_err_type;
	unsigned long			err_flags;
#define QEDE_ERR_IS_HANDLED		31
#define QEDE_ERR_ATTN_CLR_EN		0
#define QEDE_ERR_GET_DBG_INFO		1
#define QEDE_ERR_IS_RECOVERABLE		2
#define QEDE_ERR_WARN			3

	struct qede_dump_info		dump_info;
};

enum QEDE_STATE {
	QEDE_STATE_CLOSED,
	QEDE_STATE_OPEN,
	QEDE_STATE_RECOVERY,
};

#define HILO_U64(hi, lo)		((((u64)(hi)) << 32) + (lo))
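/* Example: HILO_U64(0x00000001, 0x00000002) yields the 64-bit value
 * 0x0000000100000002; it is used below to reassemble DMA addresses that
 * HW descriptors store as separate hi/lo 32-bit words.
 */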

#define	MAX_NUM_TC	8
#define	MAX_NUM_PRI	8

/* The driver supports the new build_skb() API:
 * the RX ring buffer contains only a pointer to the kmalloc() data;
 * skbs are built only after the frame has been DMA-ed.
 */
struct sw_rx_data {
	struct page *data;
	dma_addr_t mapping;
	unsigned int page_offset;
};

enum qede_agg_state {
	QEDE_AGG_STATE_NONE  = 0,
	QEDE_AGG_STATE_START = 1,
	QEDE_AGG_STATE_ERROR = 2
};

struct qede_agg_info {
	/* rx_buf is a data buffer that can be placed / consumed from rx bd
	 * chain. It has two purposes: We will preallocate the data buffer
	 * for each aggregation when we open the interface and will place this
	 * buffer on the rx-bd-ring when we receive TPA_START. We don't want
	 * to be in a state where allocation fails, as we can't reuse the
	 * consumer buffer in the rx-chain since FW may still be writing to it
	 * (since header needs to be modified for TPA).
	 * The second purpose is to keep a pointer to the bd buffer during
	 * aggregation.
	 */
	struct sw_rx_data buffer;
	struct sk_buff *skb;

	/* We need some structs from the start cookie until termination */
	u16 vlan_tag;

	bool tpa_start_fail;
	u8 state;
	u8 frag_id;

	u8 tunnel_type;
};

struct qede_rx_queue {
	__le16 *hw_cons_ptr;
	void __iomem *hw_rxq_prod_addr;

	/* Required for the allocation of replacement buffers */
	struct device *dev;

	struct bpf_prog *xdp_prog;

	u16 sw_rx_cons;
	u16 sw_rx_prod;

	u16 filled_buffers;
	u8 data_direction;
	u8 rxq_id;

	/* Used once per NAPI run */
	u16 num_rx_buffers;

	u16 rx_headroom;

	u32 rx_buf_size;
	u32 rx_buf_seg_size;

	struct sw_rx_data *sw_rx_ring;
	struct qed_chain rx_bd_ring;
	struct qed_chain rx_comp_ring ____cacheline_aligned;

	/* GRO */
	struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];

	/* Used once per NAPI run */
	u64 rcv_pkts;

	u64 rx_hw_errors;
	u64 rx_alloc_errors;
	u64 rx_ip_frags;

	u64 xdp_no_pass;

	void *handle;
	struct xdp_rxq_info xdp_rxq;
};

union db_prod {
	struct eth_db_data data;
	u32		raw;
};

struct sw_tx_bd {
	struct sk_buff *skb;
	u8 flags;
/* Set on the first BD descriptor when there is a split BD */
#define QEDE_TSO_SPLIT_BD		BIT(0)
};

struct sw_tx_xdp {
	struct page			*page;
	struct xdp_frame		*xdpf;
	dma_addr_t			mapping;
};

struct qede_tx_queue {
	u8				is_xdp;
	bool				is_legacy;
	u16				sw_tx_cons;
	u16				sw_tx_prod;
	u16				num_tx_buffers; /* Slowpath only */

	u64				xmit_pkts;
	u64				stopped_cnt;
	u64				tx_mem_alloc_err;

	__le16				*hw_cons_ptr;

	/* Needed for the mapping of packets */
	struct device			*dev;

	void __iomem			*doorbell_addr;
	union db_prod			tx_db;

	/* Spinlock for XDP queues in case of XDP_REDIRECT */
	spinlock_t			xdp_tx_lock;

	int				index; /* Slowpath only */
#define QEDE_TXQ_XDP_TO_IDX(edev, txq)	((txq)->index - \
					 QEDE_MAX_TSS_CNT(edev))
#define QEDE_TXQ_IDX_TO_XDP(edev, idx)	((idx) + QEDE_MAX_TSS_CNT(edev))
#define QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)	((edev)->fp_num_rx + \
						 ((idx) % QEDE_TSS_COUNT(edev)))
#define QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)	((idx) / QEDE_TSS_COUNT(edev))
#define QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq)	((QEDE_TSS_COUNT(edev) * \
						 (txq)->cos) + (txq)->index)
#define QEDE_NDEV_TXQ_ID_TO_TXQ(edev, idx)	\
	(&((edev)->fp_array[QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)].txq \
	[QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)]))
#define QEDE_FP_TC0_TXQ(fp)		(&((fp)->txq[0]))
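	/* Worked example (hypothetical numbers): with fp_num_rx = 2,
	 * QEDE_TSS_COUNT() = 6 and two traffic classes, ndev Tx queue id 7
	 * maps to fastpath 2 + (7 % 6) = 3 and CoS 7 / 6 = 1; the reverse
	 * mapping for that txq (cos = 1, index = 1) is 6 * 1 + 1 = 7 again.
	 */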

	/* Regular Tx requires skb + metadata for release purposes,
	 * while XDP requires the pages and the mapped address.
	 */
	union {
		struct sw_tx_bd		*skbs;
		struct sw_tx_xdp	*xdp;
	}				sw_tx_ring;

	struct qed_chain		tx_pbl;

	/* Slowpath; should be kept at the end [unless missing padding] */
	void				*handle;
	u16				cos;
	u16				ndev_txq_id;
};

#define BD_UNMAP_ADDR(bd)		HILO_U64(le32_to_cpu((bd)->addr.hi), \
						 le32_to_cpu((bd)->addr.lo))
#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len)				\
	do {								\
		(bd)->addr.hi = cpu_to_le32(upper_32_bits(maddr));	\
		(bd)->addr.lo = cpu_to_le32(lower_32_bits(maddr));	\
		(bd)->nbytes = cpu_to_le16(len);			\
	} while (0)
#define BD_UNMAP_LEN(bd)		(le16_to_cpu((bd)->nbytes))
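/* Usage sketch (illustrative, not a complete Tx path): the producer records
 * the DMA mapping in the BD in little-endian form, and the completion path
 * later retrieves it for unmapping:
 *
 *	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
 *	...
 *	dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
 *			 BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
 */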

struct qede_fastpath {
	struct qede_dev			*edev;

	u8				type;
#define QEDE_FASTPATH_TX		BIT(0)
#define QEDE_FASTPATH_RX		BIT(1)
#define QEDE_FASTPATH_XDP		BIT(2)
#define QEDE_FASTPATH_COMBINED		(QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)

	u8				id;

	u8				xdp_xmit;
#define QEDE_XDP_TX			BIT(0)
#define QEDE_XDP_REDIRECT		BIT(1)

	struct napi_struct		napi;
	struct qed_sb_info		*sb_info;
	struct qede_rx_queue		*rxq;
	struct qede_tx_queue		*txq;
	struct qede_tx_queue		*xdp_tx;

	char				name[IFNAMSIZ + 8];
};

/* Debug print definitions */
#define DP_NAME(edev)			netdev_name((edev)->ndev)

#define XMIT_PLAIN			0
#define XMIT_L4_CSUM			BIT(0)
#define XMIT_LSO			BIT(1)
#define XMIT_ENC			BIT(2)
#define XMIT_ENC_GSO_L4_CSUM		BIT(3)

#define QEDE_CSUM_ERROR			BIT(0)
#define QEDE_CSUM_UNNECESSARY		BIT(1)
#define QEDE_TUNN_CSUM_UNNECESSARY	BIT(2)

#define QEDE_SP_RECOVERY		0
#define QEDE_SP_RX_MODE			1
#define QEDE_SP_RSVD1			2
#define QEDE_SP_RSVD2			3
#define QEDE_SP_HW_ERR			4
#define QEDE_SP_ARFS_CONFIG		5
#define QEDE_SP_AER			7
#define QEDE_SP_DISABLE			8

#ifdef CONFIG_RFS_ACCEL
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq_index, u32 flow_id);
#define QEDE_SP_TASK_POLL_DELAY	(5 * HZ)
#endif

void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr);
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev);
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc);
void qede_free_arfs(struct qede_dev *edev);
int qede_alloc_arfs(struct qede_dev *edev);
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie);
int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd);
int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
			  u32 *rule_locs);
int qede_get_arfs_filter_count(struct qede_dev *edev);

struct qede_reload_args {
	void (*func)(struct qede_dev *edev, struct qede_reload_args *args);
	union {
		netdev_features_t features;
		struct bpf_prog *new_prog;
		u16 mtu;
	} u;
};
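/* Usage sketch (illustrative; the callback name is hypothetical): callers
 * bundle the new setting with a function that applies it while qede_reload()
 * brings the device down and back up:
 *
 *	struct qede_reload_args args;
 *
 *	args.u.mtu = new_mtu;
 *	args.func = my_update_mtu_cb;
 *	qede_reload(edev, &args, false);
 */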

/* Datapath function declarations */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int qede_xdp_transmit(struct net_device *dev, int n_frames,
		      struct xdp_frame **frames, u32 flags);
u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
		      struct net_device *sb_dev);
netdev_features_t qede_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features);
int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy);
int qede_free_tx_pkt(struct qede_dev *edev,
		     struct qede_tx_queue *txq, int *len);
int qede_poll(struct napi_struct *napi, int budget);
irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie);

/* Filtering function declarations */
void qede_force_mac(void *dev, u8 *mac, bool forced);
void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port);
int qede_set_mac_addr(struct net_device *ndev, void *p);

int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
void qede_vlan_mark_nonconfigured(struct qede_dev *edev);
int qede_configure_vlan_filters(struct qede_dev *edev);

netdev_features_t qede_fix_features(struct net_device *dev,
				    netdev_features_t features);
int qede_set_features(struct net_device *dev, netdev_features_t features);
void qede_set_rx_mode(struct net_device *ndev);
void qede_config_rx_mode(struct net_device *ndev);
void qede_fill_rss_params(struct qede_dev *edev,
			  struct qed_update_vport_rss_params *rss, u8 *update);

void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti);
void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti);

int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp);

#ifdef CONFIG_DCB
void qede_set_dcbnl_ops(struct net_device *ndev);
#endif

void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
void qede_set_ethtool_ops(struct net_device *netdev);
void qede_set_udp_tunnels(struct qede_dev *edev);
void qede_reload(struct qede_dev *edev,
		 struct qede_reload_args *args, bool is_locked);
int qede_change_mtu(struct net_device *dev, int new_mtu);
void qede_fill_by_demand_stats(struct qede_dev *edev);
void __qede_lock(struct qede_dev *edev);
void __qede_unlock(struct qede_dev *edev);
bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
			    struct flow_cls_offload *f);

void qede_forced_speed_maps_init(void);

#define RX_RING_SIZE_POW	13
#define RX_RING_SIZE		((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX		(RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN		128
#define NUM_RX_BDS_KDUMP_MIN	63
#define NUM_RX_BDS_DEF		((u16)BIT(10) - 1)

#define TX_RING_SIZE_POW	13
#define TX_RING_SIZE		((u16)BIT(TX_RING_SIZE_POW))
#define NUM_TX_BDS_MAX		(TX_RING_SIZE - 1)
#define NUM_TX_BDS_MIN		128
#define NUM_TX_BDS_KDUMP_MIN	63
#define NUM_TX_BDS_DEF		NUM_TX_BDS_MAX
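/* With the values above, RX_RING_SIZE is BIT(13) = 8192 BDs (NUM_RX_BDS_MAX =
 * 8191, default NUM_RX_BDS_DEF = BIT(10) - 1 = 1023); the Tx ring is sized
 * identically, with NUM_TX_BDS_DEF = NUM_TX_BDS_MAX = 8191.
 */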

#define QEDE_MIN_PKT_LEN		64
#define QEDE_RX_HDR_SIZE		256
#define QEDE_MAX_JUMBO_PACKET_SIZE	9600
#define	for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
#define for_each_cos_in_txq(edev, var) \
	for ((var) = 0; (var) < (edev)->dev_info.num_tc; (var)++)
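/* Usage sketch (illustrative; do_something() is a hypothetical helper):
 * iterate over all fastpaths and over each traffic-class Tx queue of a
 * Tx-capable fastpath:
 *
 *	int i, cos;
 *
 *	for_each_queue(i) {
 *		struct qede_fastpath *fp = &edev->fp_array[i];
 *
 *		if (fp->type & QEDE_FASTPATH_TX)
 *			for_each_cos_in_txq(edev, cos)
 *				do_something(&fp->txq[cos]);
 *	}
 */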

#endif /* _QEDE_H_ */