xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/amazon/ena/ena_netdev.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef ENA_H
#define ENA_H

#include <linux/bitops.h>
#include <linux/dim.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#include "ena_com.h"
#include "ena_eth_com.h"

#define DRV_MODULE_GEN_MAJOR	2
#define DRV_MODULE_GEN_MINOR	1
#define DRV_MODULE_GEN_SUBMINOR 0

#define DRV_MODULE_NAME		"ena"

#define DEVICE_NAME	"Elastic Network Adapter (ENA)"

/* 1 for AENQ + ADMIN */
#define ENA_ADMIN_MSIX_VEC		1
#define ENA_MAX_MSIX_VEC(io_queues)	(ENA_ADMIN_MSIX_VEC + (io_queues))
/* The ENA buffer length field is 16 bits long, so when PAGE_SIZE == 64kB the
 * driver would pass 0.
 * Since the max packet size ENA handles is ~9kB, limit the buffer length to
 * 16kB.
 */
#if PAGE_SIZE > SZ_16K
#define ENA_PAGE_SIZE (_AC(SZ_16K, UL))
#else
#define ENA_PAGE_SIZE PAGE_SIZE
#endif
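
/* Worked example (illustrative only): the descriptor length field is a u16,
 * so a full 64kB page (0x10000) would truncate to 0 if passed as-is.
 *
 *	4kB-page kernel:  ENA_PAGE_SIZE == PAGE_SIZE == 4096
 *	64kB-page kernel: ENA_PAGE_SIZE == SZ_16K   == 16384
 */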

#define ENA_MIN_MSIX_VEC		2

#define ENA_REG_BAR			0
#define ENA_MEM_BAR			2
#define ENA_BAR_MASK (BIT(ENA_REG_BAR) | BIT(ENA_MEM_BAR))

#define ENA_DEFAULT_RING_SIZE	(1024)
#define ENA_MIN_RING_SIZE	(256)

#define ENA_MIN_NUM_IO_QUEUES	(1)

#define ENA_TX_WAKEUP_THRESH		(MAX_SKB_FRAGS + 2)
#define ENA_DEFAULT_RX_COPYBREAK	(256 - NET_IP_ALIGN)

/* Limit the buffer size to 600 bytes to handle MTU changes from very
 * small to very large, in which case the number of buffers per packet
 * could exceed ENA_PKT_MAX_BUFS.
 */
#define ENA_DEFAULT_MIN_RX_BUFF_ALLOC_SIZE 600

#define ENA_MIN_MTU		128

#define ENA_NAME_MAX_LEN	20
#define ENA_IRQNAME_SIZE	40

#define ENA_PKT_MAX_BUFS	19

#define ENA_RX_RSS_TABLE_LOG_SIZE  7
#define ENA_RX_RSS_TABLE_SIZE	(1 << ENA_RX_RSS_TABLE_LOG_SIZE)

/* The number of tx packet completions that will be handled each NAPI poll
 * cycle is ring_size / ENA_TX_POLL_BUDGET_DIVIDER.
 */
#define ENA_TX_POLL_BUDGET_DIVIDER	4

/* Refill the Rx queue when the number of required descriptors exceeds
 * QUEUE_SIZE / ENA_RX_REFILL_THRESH_DIVIDER or ENA_RX_REFILL_THRESH_PACKET.
 */
#define ENA_RX_REFILL_THRESH_DIVIDER	8
#define ENA_RX_REFILL_THRESH_PACKET	256
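
/* Sketch of how the two thresholds are expected to combine on the .c side
 * (an assumption for illustration, not a quote of the driver):
 *
 *	refill_threshold = min(ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
 *			       ENA_RX_REFILL_THRESH_PACKET);
 *
 * e.g. a 1024-entry Rx ring is refilled once 128 descriptors are needed.
 */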

/* Number of TX queues to check for missing TX completions per run of the
 * timer service
 */
#define ENA_MONITORED_TX_QUEUES	4
/* Max number of timed-out TX packets before triggering a device reset */
#define MAX_NUM_OF_TIMEOUTED_PACKETS 128

#define ENA_TX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))

#define ENA_RX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))
#define ENA_RX_RING_IDX_ADD(idx, n, ring_size) \
	(((idx) + (n)) & ((ring_size) - 1))
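
/* The mask-based advance assumes ring_size is a power of two (the default
 * and minimum sizes above are); e.g. ENA_TX_RING_IDX_NEXT(1023, 1024) == 0.
 */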

#define ENA_IO_TXQ_IDX(q)	(2 * (q))
#define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)
#define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q)	((q) / 2)
#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q)	(((q) - 1) / 2)
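
/* TX and RX rings of the same channel interleave in IO queue space:
 * qid 0 -> TXQ 0 / RXQ 1, qid 1 -> TXQ 2 / RXQ 3, and so on. The
 * *_TO_COMBINED_IDX macros invert the mapping, e.g. RXQ 3 -> channel 1.
 */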

#define ENA_MGMNT_IRQ_IDX		0
#define ENA_IO_IRQ_FIRST_IDX		1
#define ENA_IO_IRQ_IDX(q)		(ENA_IO_IRQ_FIRST_IDX + (q))

#define ENA_ADMIN_POLL_DELAY_US 100

/* The ENA device should send a keep-alive message every second.
 * We wait for 6 seconds just to be on the safe side.
 */
#define ENA_DEVICE_KALIVE_TIMEOUT	(6 * HZ)
#define ENA_MAX_NO_INTERRUPT_ITERATIONS 3
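
/* Watchdog sketch (hypothetical check for illustration, using the jiffies
 * bookkeeping fields of struct ena_adapter below):
 *
 *	if (time_is_before_jiffies(adapter->last_keep_alive_jiffies +
 *				   adapter->keep_alive_timeout))
 *		trigger_device_reset(adapter);	// hypothetical helper
 */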

#define ENA_MMIO_DISABLE_REG_READ	BIT(0)

/* The max MTU is configured to be the Ethernet frame size minus the
 * overhead of the Ethernet header (which may include a VLAN tag) and the
 * frame check sequence (FCS).
 * The buffer size we share with the device is defined to be ENA_PAGE_SIZE.
 */

#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN -	\
			 VLAN_HLEN - XDP_PACKET_HEADROOM -		\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
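
/* Budget intuition: a single ENA_PAGE_SIZE buffer must hold the XDP
 * headroom, the whole L2 frame and the trailing struct skb_shared_info,
 * so the MTU ceiling is whatever remains of the page after subtracting
 * those.
 */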

#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
	((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
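
/* Example (assuming XDP TX rings are placed right after the regular IO
 * rings): with 8 IO queues and 8 XDP queues, xdp_first_ring == 8 and ring
 * indices 8..15 satisfy ENA_IS_XDP_INDEX().
 */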

struct ena_irq {
	irq_handler_t handler;
	void *data;
	int cpu;
	u32 vector;
	cpumask_t affinity_hint_mask;
	char name[ENA_IRQNAME_SIZE];
};

struct ena_napi {
	struct napi_struct napi ____cacheline_aligned;
	struct ena_ring *tx_ring;
	struct ena_ring *rx_ring;
	struct ena_ring *xdp_ring;
	bool first_interrupt;
	bool interrupts_masked;
	u32 qid;
	struct dim dim;
};

struct ena_calc_queue_size_ctx {
	struct ena_com_dev_get_features_ctx *get_feat_ctx;
	struct ena_com_dev *ena_dev;
	struct pci_dev *pdev;
	u32 tx_queue_size;
	u32 rx_queue_size;
	u32 max_tx_queue_size;
	u32 max_rx_queue_size;
	u16 max_tx_sgl_size;
	u16 max_rx_sgl_size;
};

struct ena_tx_buffer {
	struct sk_buff *skb;
	/* num of ena descs for this specific skb
	 * (includes data descs and metadata descs)
	 */
	u32 tx_descs;
	/* num of buffers used by this skb */
	u32 num_of_bufs;

	/* XDP buffer structure which is used for sending packets in
	 * the xdp queues
	 */
	struct xdp_frame *xdpf;
	/* The rx page for the rx buffer that was received in rx and
	 * retransmitted on the xdp tx queues as a result of an XDP_TX
	 * action. We need to free the page once we have finished cleaning
	 * the buffer in clean_xdp_irq()
	 */
	struct page *xdp_rx_page;

	/* Indicates whether bufs[0] maps the linear data of the skb. */
	u8 map_linear_data;

	/* Used when detecting missing tx packets, to limit the number of
	 * prints
	 */
	u32 print_once;
	/* Save the last jiffies to detect missing tx packets
	 *
	 * Set to a non-zero value on ena_start_xmit and set to zero by
	 * napi and the timer service routine.
	 *
	 * While this value is not protected by a lock, a given packet is
	 * not expected to be handled by ena_start_xmit and by
	 * napi/timer_service at the same time.
	 */
	unsigned long last_jiffies;
	struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
} ____cacheline_aligned;

struct ena_rx_buffer {
	struct sk_buff *skb;
	struct page *page;
	u32 page_offset;
	struct ena_com_buf ena_buf;
} ____cacheline_aligned;

struct ena_stats_tx {
	u64 cnt;
	u64 bytes;
	u64 queue_stop;
	u64 prepare_ctx_err;
	u64 queue_wakeup;
	u64 dma_mapping_err;
	u64 linearize;
	u64 linearize_failed;
	u64 napi_comp;
	u64 tx_poll;
	u64 doorbells;
	u64 bad_req_id;
	u64 llq_buffer_copy;
	u64 missed_tx;
	u64 unmask_interrupt;
};

struct ena_stats_rx {
	u64 cnt;
	u64 bytes;
	u64 rx_copybreak_pkt;
	u64 csum_good;
	u64 refil_partial;
	u64 bad_csum;
	u64 page_alloc_fail;
	u64 skb_alloc_fail;
	u64 dma_mapping_err;
	u64 bad_desc_num;
	u64 bad_req_id;
	u64 empty_rx_ring;
	u64 csum_unchecked;
	u64 xdp_aborted;
	u64 xdp_drop;
	u64 xdp_pass;
	u64 xdp_tx;
	u64 xdp_invalid;
};

struct ena_ring {
	/* Holds the free request ids to support TX/RX
	 * out-of-order completions
	 */
	u16 *free_ids;

	union {
		struct ena_tx_buffer *tx_buffer_info;
		struct ena_rx_buffer *rx_buffer_info;
	};

	/* cached pointers to avoid going through the adapter */
	struct device *dev;
	struct pci_dev *pdev;
	struct napi_struct *napi;
	struct net_device *netdev;
	struct ena_com_dev *ena_dev;
	struct ena_adapter *adapter;
	struct ena_com_io_cq *ena_com_io_cq;
	struct ena_com_io_sq *ena_com_io_sq;
	struct bpf_prog *xdp_bpf_prog;
	struct xdp_rxq_info xdp_rxq;

	u16 next_to_use;
	u16 next_to_clean;
	u16 rx_copybreak;
	u16 rx_headroom;
	u16 qid;
	u16 mtu;
	u16 sgl_size;

	/* The maximum header length the device can handle */
	u8 tx_max_header_size;

	bool first_interrupt;
	bool disable_meta_caching;
	u16 no_interrupt_event_cnt;

	/* cpu for TPH */
	int cpu;
	/* number of entries in tx/rx_buffer_info */
	int ring_size;

	enum ena_admin_placement_policy_type tx_mem_queue_type;

	struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS];
	u32 smoothed_interval;
	u32 per_napi_packets;
	u16 non_empty_napi_events;
	struct u64_stats_sync syncp;
	union {
		struct ena_stats_tx tx_stats;
		struct ena_stats_rx rx_stats;
	};

	u8 *push_buf_intermediate_buf;
	int empty_rx_queue;
} ____cacheline_aligned;

struct ena_stats_dev {
	u64 tx_timeout;
	u64 suspend;
	u64 resume;
	u64 wd_expired;
	u64 interface_up;
	u64 interface_down;
	u64 admin_q_pause;
	u64 rx_drops;
	u64 tx_drops;
};

enum ena_flags_t {
	ENA_FLAG_DEVICE_RUNNING,
	ENA_FLAG_DEV_UP,
	ENA_FLAG_LINK_UP,
	ENA_FLAG_MSIX_ENABLED,
	ENA_FLAG_TRIGGER_RESET,
	ENA_FLAG_ONGOING_RESET
};

/* adapter specific private data structure */
struct ena_adapter {
	struct ena_com_dev *ena_dev;
	/* OS defined structs */
	struct net_device *netdev;
	struct pci_dev *pdev;

	/* rx packets shorter than this length will be copied into the skb
	 * header
	 */
	u32 rx_copybreak;
	u32 max_mtu;

	u32 num_io_queues;
	u32 max_num_io_queues;

	int msix_vecs;

	u32 missing_tx_completion_threshold;

	u32 requested_tx_ring_size;
	u32 requested_rx_ring_size;

	u32 max_tx_ring_size;
	u32 max_rx_ring_size;

	u32 msg_enable;

	u16 max_tx_sgl_size;
	u16 max_rx_sgl_size;

	u8 mac_addr[ETH_ALEN];

	unsigned long keep_alive_timeout;
	unsigned long missing_tx_completion_to;

	char name[ENA_NAME_MAX_LEN];

	unsigned long flags;
	/* TX */
	struct ena_ring tx_ring[ENA_MAX_NUM_IO_QUEUES]
		____cacheline_aligned_in_smp;

	/* RX */
	struct ena_ring rx_ring[ENA_MAX_NUM_IO_QUEUES]
		____cacheline_aligned_in_smp;

	struct ena_napi ena_napi[ENA_MAX_NUM_IO_QUEUES];

	struct ena_irq irq_tbl[ENA_MAX_MSIX_VEC(ENA_MAX_NUM_IO_QUEUES)];

	/* timer service */
	struct work_struct reset_task;
	struct timer_list timer_service;

	bool wd_state;
	bool dev_up_before_reset;
	bool disable_meta_caching;
	unsigned long last_keep_alive_jiffies;

	struct u64_stats_sync syncp;
	struct ena_stats_dev dev_stats;
	struct ena_admin_eni_stats eni_stats;
	bool eni_stats_supported;

	/* last queue index that was checked for uncompleted tx packets */
	u32 last_monitored_tx_qid;

	enum ena_regs_reset_reason_types reset_reason;

	struct bpf_prog *xdp_bpf_prog;
	u32 xdp_first_ring;
	u32 xdp_num_queues;
};

void ena_set_ethtool_ops(struct net_device *netdev);

void ena_dump_stats_to_dmesg(struct ena_adapter *adapter);

void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);

int ena_update_hw_stats(struct ena_adapter *adapter);

int ena_update_queue_sizes(struct ena_adapter *adapter,
			   u32 new_tx_size,
			   u32 new_rx_size);

int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);

int ena_get_sset_count(struct net_device *netdev, int sset);

enum ena_xdp_errors_t {
	ENA_XDP_ALLOWED = 0,
	ENA_XDP_CURRENT_MTU_TOO_LARGE,
	ENA_XDP_NO_ENOUGH_QUEUES,
};

static inline bool ena_xdp_queues_present(struct ena_adapter *adapter)
{
	return adapter->xdp_first_ring != 0;
}

static inline bool ena_xdp_present(struct ena_adapter *adapter)
{
	return !!adapter->xdp_bpf_prog;
}

static inline bool ena_xdp_present_ring(struct ena_ring *ring)
{
	return !!ring->xdp_bpf_prog;
}

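/* Each XDP-enabled channel needs a dedicated XDP TX ring on top of its
 * regular TX/RX pair, so a channel count is only legal while twice that
 * count still fits in the device's IO queue budget.
 */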
static inline int ena_xdp_legal_queue_count(struct ena_adapter *adapter,
					    u32 queues)
{
	return 2 * queues <= adapter->max_num_io_queues;
}

static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
{
	enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;

	if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
		rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
	else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
		rc = ENA_XDP_NO_ENOUGH_QUEUES;

	return rc;
}
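
/* Usage sketch (hypothetical caller, not part of this header): gate XDP
 * program attach on the checks above.
 *
 *	switch (ena_xdp_allowed(adapter)) {
 *	case ENA_XDP_ALLOWED:
 *		break;
 *	case ENA_XDP_CURRENT_MTU_TOO_LARGE:
 *		return -EINVAL;	// lower the MTU to <= ENA_XDP_MAX_MTU first
 *	case ENA_XDP_NO_ENOUGH_QUEUES:
 *		return -EINVAL;	// reduce the channel count first
 *	}
 */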

#endif /* !(ENA_H) */