xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/neterion/vxge/vxge-main.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /******************************************************************************
2*4882a593Smuzhiyun  * This software may be used and distributed according to the terms of
3*4882a593Smuzhiyun  * the GNU General Public License (GPL), incorporated herein by reference.
4*4882a593Smuzhiyun  * Drivers based on or derived from this code fall under the GPL and must
5*4882a593Smuzhiyun  * retain the authorship, copyright and license notice.  This file is not
6*4882a593Smuzhiyun  * a complete program and may only be used when the entire operating
7*4882a593Smuzhiyun  * system is licensed under the GPL.
8*4882a593Smuzhiyun  * See the file COPYING in this distribution for more information.
9*4882a593Smuzhiyun  *
10*4882a593Smuzhiyun  * vxge-main.h: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11*4882a593Smuzhiyun  *              Virtualized Server Adapter.
12*4882a593Smuzhiyun  * Copyright(c) 2002-2010 Exar Corp.
13*4882a593Smuzhiyun  ******************************************************************************/
14*4882a593Smuzhiyun #ifndef VXGE_MAIN_H
15*4882a593Smuzhiyun #define VXGE_MAIN_H
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun #include "vxge-traffic.h"
18*4882a593Smuzhiyun #include "vxge-config.h"
19*4882a593Smuzhiyun #include "vxge-version.h"
20*4882a593Smuzhiyun #include <linux/list.h>
21*4882a593Smuzhiyun #include <linux/bitops.h>
22*4882a593Smuzhiyun #include <linux/if_vlan.h>
23*4882a593Smuzhiyun 
/* Driver identity strings and firmware major version. */
#define VXGE_DRIVER_NAME		"vxge"
#define VXGE_DRIVER_VENDOR		"Neterion, Inc"
#define VXGE_DRIVER_FW_VERSION_MAJOR	1

/* Full driver version string assembled from the vxge-version.h pieces. */
#define DRV_VERSION	VXGE_VERSION_MAJOR"."VXGE_VERSION_MINOR"."\
	VXGE_VERSION_FIX"."VXGE_VERSION_BUILD"-"\
	VXGE_VERSION_FOR

/* PCI device IDs and silicon revisions of the Titan (X3100) family. */
#define PCI_DEVICE_ID_TITAN_WIN		0x5733
#define PCI_DEVICE_ID_TITAN_UNI		0x5833
#define VXGE_HW_TITAN1_PCI_REVISION	1
#define VXGE_HW_TITAN1A_PCI_REVISION	2

#define	VXGE_USE_DEFAULT		0xffffffff	/* "take the default" sentinel */
#define VXGE_HW_VPATH_MSIX_ACTIVE	4
#define VXGE_ALARM_MSIX_ID		2
#define VXGE_HW_RXSYNC_FREQ_CNT		4
#define VXGE_LL_WATCH_DOG_TIMEOUT	(15 * HZ)	/* 15 seconds, in jiffies */
#define VXGE_LL_RX_COPY_THRESHOLD	256		/* bytes */
#define VXGE_DEF_FIFO_LENGTH		84

/* Rx steering modes */
#define NO_STEERING		0
#define PORT_STEERING		0x1
#define RTH_STEERING		0x2	/* Receive Traffic Hashing */
#define RX_TOS_STEERING		0x3
#define RX_VLAN_STEERING	0x4
#define RTH_BUCKET_SIZE		4

/* Tx steering modes */
#define	TX_PRIORITY_STEERING	1
#define	TX_VLAN_STEERING	2
#define	TX_PORT_STEERING	3
#define	TX_MULTIQ_STEERING	4

/* MAC address learning is disabled unless explicitly enabled. */
#define VXGE_HW_MAC_ADDR_LEARN_DEFAULT VXGE_HW_RTS_MAC_DISABLE
58*4882a593Smuzhiyun 
/*
 * Tx Traffic Interrupt (TTI) and Rx Traffic Interrupt (RTI) moderation
 * parameters.  The VXGE_T1A_* / *_T1A_* variants apply to Titan 1A
 * silicon; the plain variants apply to Titan 1.
 */
#define VXGE_TTI_BTIMER_VAL 250000

#define VXGE_TTI_LTIMER_VAL	1000
#define VXGE_T1A_TTI_LTIMER_VAL	80
#define VXGE_TTI_RTIMER_VAL	0
#define VXGE_TTI_RTIMER_ADAPT_VAL	10
#define VXGE_T1A_TTI_RTIMER_VAL	400
#define VXGE_RTI_BTIMER_VAL	250
#define VXGE_RTI_LTIMER_VAL	100
#define VXGE_RTI_RTIMER_VAL	0
#define VXGE_RTI_RTIMER_ADAPT_VAL	15
#define VXGE_FIFO_INDICATE_MAX_PKTS	VXGE_DEF_FIFO_LENGTH
#define VXGE_ISR_POLLING_CNT	8
#define VXGE_MAX_CONFIG_DEV	0xFF
#define VXGE_EXEC_MODE_DISABLE	0
#define VXGE_EXEC_MODE_ENABLE	1
#define VXGE_MAX_CONFIG_PORT	1
#define VXGE_ALL_VID_DISABLE	0
#define VXGE_ALL_VID_ENABLE	1
#define VXGE_PAUSE_CTRL_DISABLE	0
#define VXGE_PAUSE_CTRL_ENABLE	1

/* Tx utilization ranges and the frame counts (UFC) used in each range. */
#define TTI_TX_URANGE_A	5
#define TTI_TX_URANGE_B	15
#define TTI_TX_URANGE_C	40
#define TTI_TX_UFC_A	5
#define TTI_TX_UFC_B	40
#define TTI_TX_UFC_C	60
#define TTI_TX_UFC_D	100
#define TTI_T1A_TX_UFC_A	30
#define TTI_T1A_TX_UFC_B	80
/* Slope - (max_mtu - min_mtu)/(max_mtu_ufc - min_mtu_ufc) */
/* Slope - 93 */
/* 60 - 9k Mtu, 140 - 1.5k mtu */
/* Argument parenthesized so callers may pass arbitrary expressions. */
#define TTI_T1A_TX_UFC_C(mtu)	(60 + ((VXGE_HW_MAX_MTU - (mtu)) / 93))

/* Slope - 37 */
/* 100 - 9k Mtu, 300 - 1.5k mtu */
#define TTI_T1A_TX_UFC_D(mtu)	(100 + ((VXGE_HW_MAX_MTU - (mtu)) / 37))


/* Rx utilization ranges and frame counts, analogous to the Tx set above. */
#define RTI_RX_URANGE_A		5
#define RTI_RX_URANGE_B		15
#define RTI_RX_URANGE_C		40
#define RTI_T1A_RX_URANGE_A	1
#define RTI_T1A_RX_URANGE_B	20
#define RTI_T1A_RX_URANGE_C	50
#define RTI_RX_UFC_A		1
#define RTI_RX_UFC_B		5
#define RTI_RX_UFC_C		10
#define RTI_RX_UFC_D		15
#define RTI_T1A_RX_UFC_B	20
#define RTI_T1A_RX_UFC_C	50
#define RTI_T1A_RX_UFC_D	60

/*
 * The interrupt rate is maintained at 3k per second with the moderation
 * parameters for most traffic but not all. This is the maximum interrupt
 * count allowed per function with INTA or per vector in the case of
 * MSI-X in a 10 millisecond time period. Enabled only for Titan 1A.
 */
#define VXGE_T1A_MAX_INTERRUPT_COUNT	100
#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT	200

/* Milli secs timer period */
#define VXGE_TIMER_DELAY		10000

/* Largest on-wire frame for a netdev: its MTU plus the worst-case L2 header. */
#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE)

/*
 * True when the firmware function mode is any of the SR-IOV variants.
 * Argument is parenthesized against precedence surprises; note it is
 * still evaluated three times, so pass only side-effect-free expressions.
 */
#define is_sriov(function_mode) \
	(((function_mode) == VXGE_HW_FUNCTION_MODE_SRIOV) || \
	((function_mode) == VXGE_HW_FUNCTION_MODE_SRIOV_8) || \
	((function_mode) == VXGE_HW_FUNCTION_MODE_SRIOV_4))
132*4882a593Smuzhiyun 
/* Reset event codes passed around the driver's reset handling paths. */
enum vxge_reset_event {
	/* reset events */
	VXGE_LL_VPATH_RESET	= 0,
	VXGE_LL_DEVICE_RESET	= 1,
	VXGE_LL_FULL_RESET	= 2,
	VXGE_LL_START_RESET	= 3,
	VXGE_LL_COMPL_RESET	= 4
};
/* These flags represent the device's temporary state: bit positions for
 * the bitmask kept in struct vxgedev::state. */
enum vxge_device_state_t {
	__VXGE_STATE_RESET_CARD = 0,
	__VXGE_STATE_CARD_UP
};
146*4882a593Smuzhiyun 
/* Where a MAC address entry currently lives. */
enum vxge_mac_addr_state {
	/* mac address states */
	VXGE_LL_MAC_ADDR_IN_LIST        = 0,	/* only in the driver's list */
	VXGE_LL_MAC_ADDR_IN_DA_TABLE    = 1	/* also in the hardware DA table */
};
152*4882a593Smuzhiyun 
/* Driver-global bookkeeping shared across all probed adapters. */
struct vxge_drv_config {
	int config_dev_cnt;		/* devices configured so far */
	int total_dev_cnt;		/* devices discovered */
	int g_no_cpus;
	unsigned int vpath_per_dev;	/* vpaths allotted to each device */
};
159*4882a593Smuzhiyun 
/* A MAC address/mask pair together with the vpath it belongs to. */
struct macInfo {
	unsigned char macaddr[ETH_ALEN];
	unsigned char macmask[ETH_ALEN];
	unsigned int vpath_no;
	enum vxge_mac_addr_state state;	/* driver list only, or also DA table */
};
166*4882a593Smuzhiyun 
/*
 * Per-device configuration, assembled from module parameters and
 * firmware-provided capabilities.
 */
struct vxge_config {
	int		tx_pause_enable;	/* Tx flow control on/off */
	int		rx_pause_enable;	/* Rx flow control on/off */

#define	NEW_NAPI_WEIGHT	64
	int		napi_weight;		/* NAPI poll budget */
	int		intr_type;		/* one of INTA/MSI/MSI_X below */
#define INTA	0
#define MSI	1
#define MSI_X	2

	int		addr_learn_en;		/* MAC address learning enable */

	/* Receive Traffic Hashing (RTH) steering configuration bitfields. */
	u32		rth_steering:2,
			rth_algorithm:2,
			rth_hash_type_tcpipv4:1,
			rth_hash_type_ipv4:1,
			rth_hash_type_tcpipv6:1,
			rth_hash_type_ipv6:1,
			rth_hash_type_tcpipv6ex:1,
			rth_hash_type_ipv6ex:1,
			rth_bkt_sz:8;		/* RTH bucket size */
	int		rth_jhash_golden_ratio;
	int		tx_steering_type;	/* one of the TX_*_STEERING values */
	int 	fifo_indicate_max_pkts;
	struct vxge_hw_device_hw_info device_hw_info;
};
194*4882a593Smuzhiyun 
/* Driver-side mirror of the kernel's struct msix_entry, plus bookkeeping. */
struct vxge_msix_entry {
	/* Mimicking the msix_entry struct of the kernel. */
	u16 vector;	/* IRQ vector number */
	u16 entry;	/* index into the MSI-X table */
	u16 in_use;	/* non-zero while this vector is allocated */
	void *arg;	/* opaque per-vector context */
};
202*4882a593Smuzhiyun 
/* Software Statistics kept per device (not read from hardware). */

struct vxge_sw_stats {

	/* Virtual Path */
	unsigned long vpaths_open;	/* successful vpath opens */
	unsigned long vpath_open_fail;	/* failed vpath opens */

	/* Misc. */
	unsigned long link_up;		/* link-up transitions seen */
	unsigned long link_down;	/* link-down transitions seen */
};
215*4882a593Smuzhiyun 
/* Linked-list node for a tracked MAC address; address and mask packed
 * into u64 values. */
struct vxge_mac_addrs {
	struct list_head item;
	u64 macaddr;
	u64 macmask;
	enum vxge_mac_addr_state state;
};
222*4882a593Smuzhiyun 
struct vxgedev;		/* forward declaration; defined later in this header */

/* Per-fifo (Tx queue) software statistics. */
struct vxge_fifo_stats {
	struct u64_stats_sync	syncp;	/* guards the u64 counters below */
	u64 tx_frms;
	u64 tx_bytes;

	unsigned long tx_errors;
	unsigned long txd_not_free;
	unsigned long txd_out_of_desc;
	unsigned long pci_map_fail;	/* DMA mapping failures */
};
235*4882a593Smuzhiyun 
/*
 * Per-vpath Tx queue state.  Cacheline-aligned so each fifo's hot data
 * sits on its own cache line.
 */
struct vxge_fifo {
	struct net_device *ndev;
	struct pci_dev *pdev;
	struct __vxge_hw_fifo *handle;	/* HW-layer fifo object */
	struct netdev_queue *txq;	/* matching netdev Tx queue */

	int tx_steering_type;
	int indicate_max_pkts;

	/* Adaptive interrupt moderation parameters used in T1A */
	unsigned long interrupt_count;
	unsigned long jiffies;		/* jiffies snapshot for the moderation window */

	u32 tx_vector_no;		/* MSI-X vector serving this fifo */
	/* Tx stats */
	struct vxge_fifo_stats stats;
} ____cacheline_aligned;
253*4882a593Smuzhiyun 
/* Per-ring (Rx queue) software statistics. */
struct vxge_ring_stats {
	struct u64_stats_sync syncp;	/* guards the u64 counters below */
	u64 rx_frms;
	u64 rx_mcast;
	u64 rx_bytes;

	unsigned long rx_errors;
	unsigned long rx_dropped;
	unsigned long prev_rx_frms;	/* rx_frms at the previous sample point */
	unsigned long pci_map_fail;	/* DMA mapping failures */
	unsigned long skb_alloc_fail;	/* skb allocation failures */
};
266*4882a593Smuzhiyun 
/* Per-vpath Rx ring state; cacheline-aligned like struct vxge_fifo. */
struct vxge_ring {
	struct net_device	*ndev;
	struct pci_dev		*pdev;
	struct __vxge_hw_ring	*handle;	/* HW-layer ring object */
	/* The vpath id maintained in the driver -
	 * 0 to 'maximum_vpaths_in_function - 1'
	 */
	int driver_id;

	/* Adaptive interrupt moderation parameters used in T1A */
	unsigned long interrupt_count;
	unsigned long jiffies;		/* jiffies snapshot for the moderation window */

	/* copy of the flag indicating whether rx_hwts is to be used */
	u32 rx_hwts:1;

	int pkts_processed;	/* packets handled in the current NAPI poll */
	int budget;		/* NAPI budget for this ring */

	struct napi_struct napi;	/* per-ring NAPI context */
	struct napi_struct *napi_p;	/* NAPI context actually polled */

#define VXGE_MAX_MAC_ADDR_COUNT		30

	int vlan_tag_strip;
	u32 rx_vector_no;	/* MSI-X vector serving this ring */
	enum vxge_hw_status last_status;

	/* Rx stats */
	struct vxge_ring_stats stats;
} ____cacheline_aligned;
298*4882a593Smuzhiyun 
/* Aggregates one virtual path: its Tx fifo, Rx ring and MAC state. */
struct vxge_vpath {
	struct vxge_fifo fifo;
	struct vxge_ring ring;

	struct __vxge_hw_vpath_handle *handle;

	/* Actual vpath id for this vpath in the device - 0 to 16 */
	int device_id;
	int max_mac_addr_cnt;
	int is_configured;
	int is_open;
	struct vxgedev *vdev;	/* back-pointer to the owning device */
	u8 macaddr[ETH_ALEN];
	u8 macmask[ETH_ALEN];

#define VXGE_MAX_LEARN_MAC_ADDR_CNT	2048
	/* mac addresses currently programmed into NIC */
	u16 mac_addr_cnt;
	u16 mcast_addr_cnt;
	struct list_head mac_addr_list;	/* presumably vxge_mac_addrs nodes - confirm in vxge-main.c */

	u32 level_err;		/* per-vpath error debug level */
	u32 level_trace;	/* per-vpath trace debug level */
};
/*
 * Propagate the error/trace debug levels to every vpath and to the
 * device itself.
 * NB: expands to a loop over a variable 'i' that must be declared in the
 * calling scope.  Wrapped in do { } while (0) so the macro behaves as a
 * single statement (safe inside an unbraced if/else).
 */
#define VXGE_COPY_DEBUG_INFO_TO_LL(vdev, err, trace) do {	\
	for (i = 0; i < (vdev)->no_of_vpath; i++) {		\
		(vdev)->vpaths[i].level_err = (err);		\
		(vdev)->vpaths[i].level_trace = (trace);	\
	}							\
	(vdev)->level_err = (err);				\
	(vdev)->level_trace = (trace);				\
} while (0)
331*4882a593Smuzhiyun 
/* Per-adapter (per-PCI-function) driver state. */
struct vxgedev {
	struct net_device	*ndev;
	struct pci_dev		*pdev;
	struct __vxge_hw_device *devh;	/* HW-layer device handle */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];	/* active VLAN id bitmap */
	int vlan_tag_strip;
	struct vxge_config	config;
	unsigned long	state;		/* __VXGE_STATE_* bits */

	/* Indicates which vpath to reset */
	unsigned long  vp_reset;

	/* Timer used for polling vpath resets */
	struct timer_list vp_reset_timer;

	/* Timer used for polling vpath lockup */
	struct timer_list vp_lockup_timer;

	/*
	 * Flags to track whether device is in All Multicast
	 * or in promiscuous mode.
	 */
	u16		all_multi_flg;

	/* A flag indicating whether rx_hwts is to be used or not. */
	u32	rx_hwts:1,
		titan1:1;	/* set on Titan 1 (as opposed to Titan 1A) */

	struct vxge_msix_entry *vxge_entries;
	struct msix_entry *entries;
	/*
	 * 4 for each vpath * 17;
	 * total is 68
	 */
#define	VXGE_MAX_REQUESTED_MSIX	68
#define VXGE_INTR_STRLEN 80
	char desc[VXGE_MAX_REQUESTED_MSIX][VXGE_INTR_STRLEN];	/* IRQ name strings */

	enum vxge_hw_event cric_err_event;	/* last critical error event */

	int max_vpath_supported;
	int no_of_vpath;	/* number of vpaths actually in use */

	struct napi_struct napi;	/* device-wide NAPI context */
	/* A debug option, when enabled and if error condition occurs,
	 * the driver will do following steps:
	 * - mask all interrupts
	 * - Not clear the source of the alarm
	 * - gracefully stop all I/O
	 * A diagnostic dump of register and stats at this point
	 * reveals very useful information.
	 */
	int exec_mode;
	int max_config_port;
	struct vxge_vpath	*vpaths;	/* indexed 0..no_of_vpath-1 */

	struct __vxge_hw_vpath_handle *vp_handles[VXGE_HW_MAX_VIRTUAL_PATHS];
	void __iomem *bar0;		/* mapped PCI BAR 0 registers */
	struct vxge_sw_stats	stats;
	int		mtu;
	/* Below variables are used for vpath selection to transmit a packet */
	u8 		vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS];
	u64		vpaths_deployed;

	u32 		intr_cnt;
	u32 		level_err;	/* current error debug level */
	u32 		level_trace;	/* current trace debug level */
	char		fw_version[VXGE_HW_FW_STRLEN];
	struct work_struct reset_task;	/* deferred reset work item */
};
402*4882a593Smuzhiyun 
/* Per-Rx-descriptor private data. */
struct vxge_rx_priv {
	struct sk_buff		*skb;
	unsigned char		*skb_data;	/* start of the skb data buffer */
	dma_addr_t		data_dma;	/* DMA address of the mapped buffer */
	/* NOTE(review): declared dma_addr_t although the name suggests it
	 * holds a size; matches the historical upstream definition -
	 * confirm against vxge-main.c before changing the type. */
	dma_addr_t		data_size;
};
409*4882a593Smuzhiyun 
/* Per-Tx-descriptor private data: the skb and its mapped buffers
 * (head plus up to MAX_SKB_FRAGS fragments). */
struct vxge_tx_priv {
	struct sk_buff		*skb;
	dma_addr_t		dma_buffers[MAX_SKB_FRAGS+1];
};
414*4882a593Smuzhiyun 
/* Declare an int module parameter with a default value; permission 0
 * keeps it out of sysfs. */
#define VXGE_MODULE_PARAM_INT(p, val) \
	static int p = val; \
	module_param(p, int, 0)
418*4882a593Smuzhiyun 
/*
 * vxge_os_timer - initialize a timer and arm it to fire later.
 * @timer:	timer to initialize
 * @func:	callback invoked on expiry
 * @timeout:	delay from now, in jiffies
 */
static inline
void vxge_os_timer(struct timer_list *timer, void (*func)(struct timer_list *),
		   unsigned long timeout)
{
	timer_setup(timer, func, 0);
	mod_timer(timer, jiffies + timeout);
}
426*4882a593Smuzhiyun 
/* Entry points implemented elsewhere in the vxge driver. */
void vxge_initialize_ethtool_ops(struct net_device *ndev);
int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
429*4882a593Smuzhiyun 
/* #define VXGE_DEBUG_INIT: debug for initialization functions
 * #define VXGE_DEBUG_TX	 : debug transmit related functions
 * #define VXGE_DEBUG_RX  : debug receive related functions
 * #define VXGE_DEBUG_MEM : debug memory module
 * #define VXGE_DEBUG_LOCK: debug locks
 * #define VXGE_DEBUG_SEM : debug semaphore
 * #define VXGE_DEBUG_ENTRYEXIT: debug functions by adding entry exit statements
*/
#define VXGE_DEBUG_INIT		0x00000001
#define VXGE_DEBUG_TX		0x00000002
#define VXGE_DEBUG_RX		0x00000004
#define VXGE_DEBUG_MEM		0x00000008
#define VXGE_DEBUG_LOCK		0x00000010
#define VXGE_DEBUG_SEM		0x00000020
#define VXGE_DEBUG_ENTRYEXIT	0x00000040
#define VXGE_DEBUG_INTR		0x00000080
#define VXGE_DEBUG_LL_CONFIG	0x00000100

/* Debug tracing for VXGE driver.  Each vxge_debug_* macro below expands
 * to a real vxge_debug_ll() call only when its flag is present in the
 * compile-time VXGE_DEBUG_MASK; otherwise it expands to nothing. */
#ifndef VXGE_DEBUG_MASK
#define VXGE_DEBUG_MASK	0x0
#endif

#if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK)
#define vxge_debug_ll_config(level, fmt, ...) \
	vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_ll_config(level, fmt, ...)
#endif

#if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK)
#define vxge_debug_init(level, fmt, ...) \
	vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_init(level, fmt, ...)
#endif

#if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK)
#define vxge_debug_tx(level, fmt, ...) \
	vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_tx(level, fmt, ...)
#endif

#if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK)
#define vxge_debug_rx(level, fmt, ...) \
	vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_rx(level, fmt, ...)
#endif

#if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK)
#define vxge_debug_mem(level, fmt, ...) \
	vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_mem(level, fmt, ...)
#endif

#if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)
#define vxge_debug_entryexit(level, fmt, ...) \
	vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_entryexit(level, fmt, ...)
#endif

#if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK)
#define vxge_debug_intr(level, fmt, ...) \
	vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_intr(level, fmt, ...)
#endif
501*4882a593Smuzhiyun 
/*
 * Set the HW-layer debug level/mask and mirror the resulting error and
 * trace levels back into the driver structures.  Uses
 * VXGE_COPY_DEBUG_INFO_TO_LL, so a local 'i' must exist in the calling
 * scope.  Wrapped in do { } while (0) so the macro behaves as a single
 * statement (safe inside an unbraced if/else).
 */
#define VXGE_DEVICE_DEBUG_LEVEL_SET(level, mask, vdev) do {\
	vxge_hw_device_debug_set((struct __vxge_hw_device  *)(vdev)->devh, \
		level, mask);\
	VXGE_COPY_DEBUG_INFO_TO_LL(vdev, \
		vxge_hw_device_error_level_get((struct __vxge_hw_device  *) \
			(vdev)->devh), \
		vxge_hw_device_trace_level_get((struct __vxge_hw_device  *) \
			(vdev)->devh));\
} while (0)
511*4882a593Smuzhiyun 
/* GSO helpers: MSS and offload type taken straight from the skb's
 * shared info. */
#ifdef NETIF_F_GSO
#define vxge_tcp_mss(skb) (skb_shinfo(skb)->gso_size)
#define vxge_udp_mss(skb) (skb_shinfo(skb)->gso_size)
#define vxge_offload_type(skb) (skb_shinfo(skb)->gso_type)
#endif
517*4882a593Smuzhiyun 
518*4882a593Smuzhiyun #endif
519