xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/intel/igbvf/igbvf.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun /* Copyright(c) 2009 - 2018 Intel Corporation. */
3*4882a593Smuzhiyun 
4*4882a593Smuzhiyun /* Linux PRO/1000 Ethernet Driver main header file */
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #ifndef _IGBVF_H_
7*4882a593Smuzhiyun #define _IGBVF_H_
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/types.h>
10*4882a593Smuzhiyun #include <linux/timer.h>
11*4882a593Smuzhiyun #include <linux/io.h>
12*4882a593Smuzhiyun #include <linux/netdevice.h>
13*4882a593Smuzhiyun #include <linux/if_vlan.h>
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun #include "vf.h"
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun /* Forward declarations */
18*4882a593Smuzhiyun struct igbvf_info;
19*4882a593Smuzhiyun struct igbvf_adapter;
20*4882a593Smuzhiyun 
/* Interrupt defines - ITR register values; approximate resulting rates below
 * follow from the 488 => ~8000 ints/sec scaling documented on START_ITR.
 */
#define IGBVF_START_ITR		488 /* ~8000 ints/sec */
#define IGBVF_4K_ITR		980 /* ~4000 ints/sec */
#define IGBVF_20K_ITR		196 /* ~20000 ints/sec */
#define IGBVF_70K_ITR		56 /* ~70000 ints/sec */
26*4882a593Smuzhiyun 
/* Traffic classification buckets for adaptive interrupt moderation;
 * stored per ring in igbvf_ring.itr_range to pick the next ITR value.
 */
enum latency_range {
	lowest_latency = 0,	/* latency-sensitive, low-volume traffic */
	low_latency = 1,	/* moderate traffic */
	bulk_latency = 2,	/* throughput-oriented bulk traffic */
	latency_invalid = 255	/* sentinel: range not yet determined */
};
33*4882a593Smuzhiyun 
/* Interrupt modes, as used by the IntMode parameter */
#define IGBVF_INT_MODE_LEGACY	0
#define IGBVF_INT_MODE_MSI	1
#define IGBVF_INT_MODE_MSIX	2

/* Tx/Rx descriptor defines - default and user-configurable ring sizes */
#define IGBVF_DEFAULT_TXD	256
#define IGBVF_MAX_TXD		4096
#define IGBVF_MIN_TXD		80

#define IGBVF_DEFAULT_RXD	256
#define IGBVF_MAX_RXD		4096
#define IGBVF_MIN_RXD		80

/* Bounds for the user-requested interrupt throttle interval */
#define IGBVF_MIN_ITR_USECS	10 /* 100000 irq/sec */
#define IGBVF_MAX_ITR_USECS	10000 /* 100    irq/sec */
50*4882a593Smuzhiyun 
/* RX descriptor control thresholds.
 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
 *	   descriptors available in its onboard memory.
 *	   Setting this to 0 disables RX descriptor prefetch.
 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
 *	   available in host memory.
 *	   If PTHRESH is 0, this should also be 0.
 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
 *	   descriptors until either it has this many to write back, or the
 *	   ITR timer expires.
 */
#define IGBVF_RX_PTHRESH	16
#define IGBVF_RX_HTHRESH	8
#define IGBVF_RX_WTHRESH	1

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE	1522

#define IGBVF_FC_PAUSE_TIME	0x0680 /* 858 usec */

/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define IGBVF_TX_QUEUE_WAKE	32
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IGBVF_RX_BUFFER_WRITE	16 /* Must be power of 2 */

#define AUTO_ALL_MODES		0 /* autonegotiate all supported speed/duplex modes */
#define IGBVF_EEPROM_APME	0x0400 /* presumably the APM wake-enable bit in the EEPROM - confirm vs. datasheet */

#define IGBVF_MNG_VLAN_NONE	(-1) /* no management VLAN configured */

#define IGBVF_MAX_MAC_FILTERS	3

/* Number of packet split data buffers (not including the header buffer) */
#define PS_PAGE_BUFFERS		(MAX_PS_BUFFERS - 1)
85*4882a593Smuzhiyun 
/* Identifies the supported VF device variants (index into the board
 * description table used at probe time).
 */
enum igbvf_boards {
	board_vf,	/* 82576 virtual function */
	board_i350_vf,	/* i350 virtual function */
};
90*4882a593Smuzhiyun 
/* Per-queue packet and byte counters */
struct igbvf_queue_stats {
	u64 packets;	/* total packets processed on this queue */
	u64 bytes;	/* total bytes processed on this queue */
};
95*4882a593Smuzhiyun 
/* wrappers around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
struct igbvf_buffer {
	dma_addr_t dma;		/* DMA address of the mapped skb data */
	struct sk_buff *skb;	/* associated socket buffer, if any */
	union {
		/* Tx */
		struct {
			unsigned long time_stamp;	/* jiffies when queued (for timeout detection) */
			union e1000_adv_tx_desc *next_to_watch;	/* descriptor whose writeback completes this skb */
			u16 length;		/* length of the mapped data */
			u16 mapped_as_page;	/* nonzero if mapped with dma_map_page() */
		};
		/* Rx */
		struct {
			struct page *page;		/* half-page receive buffer */
			u64 page_dma;			/* DMA address of the page buffer */
			unsigned int page_offset;	/* offset into the page for this buffer */
		};
	};
};
118*4882a593Smuzhiyun 
/* One ring slot, overlaid with each advanced descriptor format so a
 * single ring allocation can serve Rx, Tx data, and Tx context uses.
 */
union igbvf_desc {
	union e1000_adv_rx_desc rx_desc;	/* advanced Rx descriptor */
	union e1000_adv_tx_desc tx_desc;	/* advanced Tx data descriptor */
	struct e1000_adv_tx_context_desc tx_context_desc; /* Tx context (offload) descriptor */
};
124*4882a593Smuzhiyun 
/* State for one Tx or Rx descriptor ring */
struct igbvf_ring {
	struct igbvf_adapter *adapter;  /* backlink */
	union igbvf_desc *desc;	/* pointer to ring memory  */
	dma_addr_t dma;		/* phys address of ring    */
	unsigned int size;	/* length of ring in bytes */
	unsigned int count;	/* number of desc. in ring */

	u16 next_to_use;	/* next descriptor the driver will fill */
	u16 next_to_clean;	/* next descriptor to check for completion */

	u16 head;		/* offset of the ring's head register */
	u16 tail;		/* offset of the ring's tail register */

	/* array of buffer information structs */
	struct igbvf_buffer *buffer_info;
	struct napi_struct napi;	/* NAPI context for this ring's interrupt */

	char name[IFNAMSIZ + 5];	/* IRQ name ("<ifname>-" + suffix) */
	u32 eims_value;			/* EIMS bit owned by this ring's vector */
	u32 itr_val;			/* current ITR setting for this vector */
	enum latency_range itr_range;	/* adaptive-moderation traffic class */
	u16 itr_register;		/* offset of this vector's ITR register */
	int set_itr;			/* flag: ITR needs to be written to hw */

	struct sk_buff *rx_skb_top;	/* partially assembled multi-buffer Rx skb */

	struct igbvf_queue_stats stats;	/* packet/byte counters for this ring */
};
153*4882a593Smuzhiyun 
/* board specific private data structure */
struct igbvf_adapter {
	struct timer_list watchdog_timer;	/* periodic link/stats check */
	struct timer_list blink_timer;		/* LED blink for ethtool identify */

	struct work_struct reset_task;
	struct work_struct watchdog_task;

	const struct igbvf_info *ei;	/* board description selected at probe */

	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];	/* bitmap of VLAN ids in use */
	u32 bd_number;		/* board number (instance index) */
	u32 rx_buffer_len;
	u32 polling_interval;
	u16 mng_vlan_id;
	u16 link_speed;		/* current link speed in Mbps */
	u16 link_duplex;

	spinlock_t tx_queue_lock; /* prevent concurrent tail updates */

	/* track device up/down/testing state */
	unsigned long state;	/* bitmask of enum igbvf_state_t bits */

	/* Interrupt Throttle Rate */
	u32 requested_itr; /* ints/sec or adaptive */
	u32 current_itr; /* Actual ITR register value, not ints/sec */

	/* Tx */
	struct igbvf_ring *tx_ring /* One per active queue */
	____cacheline_aligned_in_smp;

	unsigned int restart_queue;	/* times the Tx queue was restarted */
	u32 txd_cmd;			/* default Tx descriptor command bits */

	u32 tx_int_delay;
	u32 tx_abs_int_delay;

	unsigned int total_tx_bytes;
	unsigned int total_tx_packets;
	unsigned int total_rx_bytes;
	unsigned int total_rx_packets;

	/* Tx stats */
	u32 tx_timeout_count;
	u32 tx_fifo_head;
	u32 tx_head_addr;
	u32 tx_fifo_size;
	u32 tx_dma_failed;	/* Tx DMA mapping failures */

	/* Rx */
	struct igbvf_ring *rx_ring;

	u32 rx_int_delay;
	u32 rx_abs_int_delay;

	/* Rx stats */
	u64 hw_csum_err;
	u64 hw_csum_good;
	u64 rx_hdr_split;
	u32 alloc_rx_buff_failed;
	u32 rx_dma_failed;	/* Rx DMA mapping failures */

	unsigned int rx_ps_hdr_size;
	u32 max_frame_size;
	u32 min_frame_size;

	/* OS defined structs */
	struct net_device *netdev;
	struct pci_dev *pdev;
	spinlock_t stats_lock; /* prevent concurrent stats updates */

	/* structs defined in e1000_hw.h */
	struct e1000_hw hw;

	/* The VF counters don't clear on read so we have to get a base
	 * count on driver start up and always subtract that base on
	 * the first update, thus the flag..
	 */
	struct e1000_vf_stats stats;
	u64 zero_base;

	/* rings used only by the ethtool loopback self-test */
	struct igbvf_ring test_tx_ring;
	struct igbvf_ring test_rx_ring;
	u32 test_icr;

	u32 msg_enable;			/* netif message level bitmask */
	struct msix_entry *msix_entries;
	int int_mode;			/* one of IGBVF_INT_MODE_* */
	u32 eims_enable_mask;
	u32 eims_other;
	u32 int_counter0;
	u32 int_counter1;

	u32 eeprom_wol;
	u32 wol;
	u32 pba;

	bool fc_autoneg;

	unsigned long led_status;

	unsigned int flags;		/* IGBVF_FLAG_* capability bits */
	unsigned long last_reset;	/* jiffies of the most recent reset */
};
258*4882a593Smuzhiyun 
/* Static per-board description, selected by enum igbvf_boards at probe */
struct igbvf_info {
	enum e1000_mac_type	mac;	/* MAC type of this board */
	unsigned int		flags;	/* initial adapter->flags value */
	u32			pba;	/* packet buffer allocation */
	void			(*init_ops)(struct e1000_hw *);		/* install hw function pointers */
	s32			(*get_variants)(struct igbvf_adapter *);	/* board-specific setup hook */
};
266*4882a593Smuzhiyun 
/* hardware capability, feature, and workaround flags */
#define IGBVF_FLAG_RX_CSUM_DISABLED	BIT(0)
#define IGBVF_FLAG_RX_LB_VLAN_BSWAP	BIT(1)
/* Typed accessors returning a pointer to descriptor slot i of ring R
 * (R is a struct igbvf_ring passed by value, hence (R).desc).
 */
#define IGBVF_RX_DESC_ADV(R, i)     \
	(&((((R).desc))[i].rx_desc))
#define IGBVF_TX_DESC_ADV(R, i)     \
	(&((((R).desc))[i].tx_desc))
#define IGBVF_TX_CTXTDESC_ADV(R, i) \
	(&((((R).desc))[i].tx_context_desc))
276*4882a593Smuzhiyun 
/* Bit numbers for adapter->state (device up/down/testing tracking) */
enum igbvf_state_t {
	__IGBVF_TESTING,	/* ethtool self-test in progress */
	__IGBVF_RESETTING,	/* reset in progress */
	__IGBVF_DOWN		/* interface is down */
};
282*4882a593Smuzhiyun 
extern char igbvf_driver_name[];

/* module parameter handling */
void igbvf_check_options(struct igbvf_adapter *);
/* ethtool support */
void igbvf_set_ethtool_ops(struct net_device *);

/* bring the interface up/down and reinitialize under lock */
int igbvf_up(struct igbvf_adapter *);
void igbvf_down(struct igbvf_adapter *);
void igbvf_reinit_locked(struct igbvf_adapter *);
/* allocate/free descriptor ring resources */
int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
void igbvf_free_rx_resources(struct igbvf_ring *);
void igbvf_free_tx_resources(struct igbvf_ring *);
void igbvf_update_stats(struct igbvf_adapter *);

/* copybreak: packets at or below this size are copied into a fresh skb */
extern unsigned int copybreak;
298*4882a593Smuzhiyun 
299*4882a593Smuzhiyun #endif /* _IGBVF_H_ */
300