1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Linux driver for VMware's vmxnet3 ethernet NIC.
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * This program is free software; you can redistribute it and/or modify it
7*4882a593Smuzhiyun * under the terms of the GNU General Public License as published by the
8*4882a593Smuzhiyun * Free Software Foundation; version 2 of the License and no later version.
9*4882a593Smuzhiyun *
10*4882a593Smuzhiyun * This program is distributed in the hope that it will be useful, but
11*4882a593Smuzhiyun * WITHOUT ANY WARRANTY; without even the implied warranty of
12*4882a593Smuzhiyun * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13*4882a593Smuzhiyun * NON INFRINGEMENT. See the GNU General Public License for more
14*4882a593Smuzhiyun * details.
15*4882a593Smuzhiyun *
16*4882a593Smuzhiyun * You should have received a copy of the GNU General Public License
17*4882a593Smuzhiyun * along with this program; if not, write to the Free Software
18*4882a593Smuzhiyun * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19*4882a593Smuzhiyun *
20*4882a593Smuzhiyun * The full GNU General Public License is included in this distribution in
21*4882a593Smuzhiyun * the file called "COPYING".
22*4882a593Smuzhiyun *
23*4882a593Smuzhiyun * Maintained by: pv-drivers@vmware.com
24*4882a593Smuzhiyun *
25*4882a593Smuzhiyun */
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun #ifndef _VMXNET3_INT_H
28*4882a593Smuzhiyun #define _VMXNET3_INT_H
29*4882a593Smuzhiyun
30*4882a593Smuzhiyun #include <linux/bitops.h>
31*4882a593Smuzhiyun #include <linux/ethtool.h>
32*4882a593Smuzhiyun #include <linux/delay.h>
33*4882a593Smuzhiyun #include <linux/netdevice.h>
34*4882a593Smuzhiyun #include <linux/pci.h>
35*4882a593Smuzhiyun #include <linux/compiler.h>
36*4882a593Smuzhiyun #include <linux/slab.h>
37*4882a593Smuzhiyun #include <linux/spinlock.h>
38*4882a593Smuzhiyun #include <linux/ioport.h>
39*4882a593Smuzhiyun #include <linux/highmem.h>
40*4882a593Smuzhiyun #include <linux/timer.h>
41*4882a593Smuzhiyun #include <linux/skbuff.h>
42*4882a593Smuzhiyun #include <linux/interrupt.h>
43*4882a593Smuzhiyun #include <linux/workqueue.h>
44*4882a593Smuzhiyun #include <linux/uaccess.h>
45*4882a593Smuzhiyun #include <asm/dma.h>
46*4882a593Smuzhiyun #include <asm/page.h>
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun #include <linux/tcp.h>
49*4882a593Smuzhiyun #include <linux/udp.h>
50*4882a593Smuzhiyun #include <linux/ip.h>
51*4882a593Smuzhiyun #include <linux/ipv6.h>
52*4882a593Smuzhiyun #include <linux/in.h>
53*4882a593Smuzhiyun #include <linux/etherdevice.h>
54*4882a593Smuzhiyun #include <asm/checksum.h>
55*4882a593Smuzhiyun #include <linux/if_vlan.h>
56*4882a593Smuzhiyun #include <linux/if_arp.h>
57*4882a593Smuzhiyun #include <linux/inetdevice.h>
58*4882a593Smuzhiyun #include <linux/log2.h>
59*4882a593Smuzhiyun
60*4882a593Smuzhiyun #include "vmxnet3_defs.h"
61*4882a593Smuzhiyun
/* Version string reported to userspace; always tagged "-NAPI", with an
 * extra "(debug)" suffix on DEBUG builds.
 */
#ifdef DEBUG
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI(debug)"
#else
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI"
#endif
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun
/*
 * Version numbers
 */
#define VMXNET3_DRIVER_VERSION_STRING "1.5.0.0-k"

/* Each byte of this 32-bit integer encodes a version number in
 * VMXNET3_DRIVER_VERSION_STRING.
 */
#define VMXNET3_DRIVER_VERSION_NUM 0x01050000

#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
#define VMXNET3_RSS
#endif

/* Device revisions, zero-based: hardware revision N has value N - 1.
 * The VMXNET3_VERSION_GE_x macros below add the 1 back when comparing
 * against adapter->version, which is one-based.
 */
#define VMXNET3_REV_4 3 /* Vmxnet3 Rev. 4 */
#define VMXNET3_REV_3 2 /* Vmxnet3 Rev. 3 */
#define VMXNET3_REV_2 1 /* Vmxnet3 Rev. 2 */
#define VMXNET3_REV_1 0 /* Vmxnet3 Rev. 1 */
88*4882a593Smuzhiyun
/*
 * Capabilities
 *
 * Bitmask of features the virtual NIC may advertise; each VMNET_CAP_*
 * value occupies a distinct bit.
 */

enum {
	VMNET_CAP_SG	        = 0x0001, /* Can do scatter-gather transmits. */
	VMNET_CAP_IP4_CSUM      = 0x0002, /* Can checksum only TCP/UDP over
					   * IPv4 */
	VMNET_CAP_HW_CSUM       = 0x0004, /* Can checksum all packets. */
	VMNET_CAP_HIGH_DMA      = 0x0008, /* Can DMA to high memory. */
	VMNET_CAP_TOE	        = 0x0010, /* Supports TCP/IP offload. */
	VMNET_CAP_TSO	        = 0x0020, /* Supports TCP Segmentation
					   * offload */
	VMNET_CAP_SW_TSO        = 0x0040, /* Supports SW TCP Segmentation */
	VMNET_CAP_VMXNET_APROM  = 0x0080, /* Vmxnet APROM support */
	VMNET_CAP_HW_TX_VLAN    = 0x0100, /* Can we do VLAN tagging in HW */
	VMNET_CAP_HW_RX_VLAN    = 0x0200, /* Can we do VLAN untagging in HW */
	VMNET_CAP_SW_VLAN       = 0x0400, /* VLAN tagging/untagging in SW */
	VMNET_CAP_WAKE_PCKT_RCV = 0x0800, /* Can wake on network packet recv? */
	VMNET_CAP_ENABLE_INT_INLINE = 0x1000,  /* Enable Interrupt Inline */
	VMNET_CAP_ENABLE_HEADER_COPY = 0x2000,  /* copy header for vmkernel */
	VMNET_CAP_TX_CHAIN      = 0x4000, /* Guest can use multiple tx entries
					   * for a pkt */
	VMNET_CAP_RX_CHAIN      = 0x8000, /* pkt can span multiple rx entries */
	VMNET_CAP_LPD           = 0x10000, /* large pkt delivery */
	VMNET_CAP_BPF           = 0x20000, /* BPF Support in VMXNET Virtual HW*/
	VMNET_CAP_SG_SPAN_PAGES = 0x40000, /* Scatter-gather can span multiple*/
					   /* pages transmits */
	VMNET_CAP_IP6_CSUM      = 0x80000, /* Can do IPv6 csum offload. */
	VMNET_CAP_TSO6          = 0x100000, /* TSO seg. offload for IPv6 pkts. */
	VMNET_CAP_TSO256k       = 0x200000, /* Can do TSO seg offload for */
					    /* pkts up to 256kB. */
	VMNET_CAP_UPT           = 0x400000  /* Support UPT */
};
123*4882a593Smuzhiyun
/*
 * Maximum devices supported.
 */
#define MAX_ETHERNET_CARDS		10	/* upper bound on vmxnet3 NICs */
#define MAX_PCI_PASSTHRU_DEVICE		6	/* upper bound on passthrough devices */
129*4882a593Smuzhiyun
/* A DMA-mapped descriptor ring produced by the driver.  New entries are
 * written at next2fill and reclaimed at next2comp; a generation bit,
 * toggled on every wrap (see vmxnet3_cmd_ring_adv_next2fill()), marks
 * descriptor ownership.
 */
struct vmxnet3_cmd_ring {
	union Vmxnet3_GenericDesc *base;	/* virtual address of the ring */
	u32 size;				/* number of descriptors */
	u32 next2fill;				/* next entry the driver fills */
	u32 next2comp;				/* next entry to be reclaimed */
	u8 gen;					/* current generation bit */
	dma_addr_t basePA;			/* DMA address of the ring */
};
138*4882a593Smuzhiyun
139*4882a593Smuzhiyun static inline void
vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring * ring)140*4882a593Smuzhiyun vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
141*4882a593Smuzhiyun {
142*4882a593Smuzhiyun ring->next2fill++;
143*4882a593Smuzhiyun if (unlikely(ring->next2fill == ring->size)) {
144*4882a593Smuzhiyun ring->next2fill = 0;
145*4882a593Smuzhiyun VMXNET3_FLIP_RING_GEN(ring->gen);
146*4882a593Smuzhiyun }
147*4882a593Smuzhiyun }
148*4882a593Smuzhiyun
/*
 * Retire one reclaimed descriptor: advance the consumer index, wrapping
 * at ring->size.  Unlike next2fill, this does not touch the generation
 * bit (hence the "_ONLY" macro; defined in vmxnet3_defs.h).
 */
static inline void
vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
{
	VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
}
154*4882a593Smuzhiyun
155*4882a593Smuzhiyun static inline int
vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring * ring)156*4882a593Smuzhiyun vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
157*4882a593Smuzhiyun {
158*4882a593Smuzhiyun return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
159*4882a593Smuzhiyun ring->next2comp - ring->next2fill - 1;
160*4882a593Smuzhiyun }
161*4882a593Smuzhiyun
/* Completion ring consumed by the driver starting at next2proc.  The gen
 * bit tracks ownership across wrap-around; intr_idx identifies the
 * interrupt vector associated with this ring.
 */
struct vmxnet3_comp_ring {
	union Vmxnet3_GenericDesc *base;	/* virtual address of the ring */
	u32 size;				/* number of descriptors */
	u32 next2proc;				/* next entry to process */
	u8 gen;					/* expected generation bit */
	u8 intr_idx;				/* interrupt vector index */
	dma_addr_t basePA;			/* DMA address of the ring */
};
170*4882a593Smuzhiyun
171*4882a593Smuzhiyun static inline void
vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring * ring)172*4882a593Smuzhiyun vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
173*4882a593Smuzhiyun {
174*4882a593Smuzhiyun ring->next2proc++;
175*4882a593Smuzhiyun if (unlikely(ring->next2proc == ring->size)) {
176*4882a593Smuzhiyun ring->next2proc = 0;
177*4882a593Smuzhiyun VMXNET3_FLIP_RING_GEN(ring->gen);
178*4882a593Smuzhiyun }
179*4882a593Smuzhiyun }
180*4882a593Smuzhiyun
/* Ring of small fixed-size buffers (Vmxnet3_TxDataDesc) into which packet
 * bytes can be copied (see vmxnet3_tx_ctx.copy_size) so the device reads
 * them from one contiguous DMA area.
 */
struct vmxnet3_tx_data_ring {
	struct Vmxnet3_TxDataDesc *base;	/* virtual address of the ring */
	u32 size;				/* number of data descriptors */
	dma_addr_t basePA;			/* DMA address of the ring */
};
186*4882a593Smuzhiyun
/* How a tx buffer was DMA-mapped, so teardown can unmap it the same way. */
enum vmxnet3_buf_map_type {
	VMXNET3_MAP_INVALID = 0,	/* slot holds no mapping */
	VMXNET3_MAP_NONE,		/* no DMA mapping (e.g. data-ring copy) */
	VMXNET3_MAP_SINGLE,		/* mapped as a single buffer */
	VMXNET3_MAP_PAGE,		/* mapped as a page fragment */
};
193*4882a593Smuzhiyun
/* Per-descriptor bookkeeping for one tx buffer fragment. */
struct vmxnet3_tx_buf_info {
	u32 map_type;		/* enum vmxnet3_buf_map_type of dma_addr */
	u16 len;		/* length of this fragment */
	u16 sop_idx;		/* ring index of the packet's first descriptor */
	dma_addr_t dma_addr;	/* DMA address of the fragment */
	struct sk_buff *skb;	/* owning skb, if any */
};
201*4882a593Smuzhiyun
/* Driver-side (not device) transmit statistics, kept per tx queue. */
struct vmxnet3_tq_driver_stats {
	u64 drop_total;     /* # of pkts dropped by the driver, the
			     * counters below track droppings due to
			     * different reasons
			     */
	u64 drop_too_many_frags;	/* more frags than descriptors allow */
	u64 drop_oversized_hdr;		/* headers too large to handle */
	u64 drop_hdr_inspect_err;	/* header parsing failed */
	u64 drop_tso;			/* TSO-related drop */

	u64 tx_ring_full;		/* # of times the tx ring was full */
	u64 linearized;         /* # of pkts linearized */
	u64 copy_skb_header;    /* # of times we have to copy skb header */
	u64 oversized_hdr;	/* # of pkts seen with an oversized header */
};
217*4882a593Smuzhiyun
/* Parse state for the packet currently being transmitted: filled while
 * inspecting the skb headers, consumed when its descriptors are written.
 */
struct vmxnet3_tx_ctx {
	bool ipv4;	/* pkt is IPv4 */
	bool ipv6;	/* pkt is IPv6 */
	u16 mss;	/* TSO segment size; 0 when not doing TSO */
	u32 l4_offset;	/* only valid for pkts requesting tso or csum
			 * offloading. For encap offload, it refers to
			 * inner L4 offset i.e. it includes outer header
			 * encap header and inner eth and ip header size
			 */

	u32 l4_hdr_size;	/* only valid if mss != 0
				 * Refers to inner L4 hdr size for encap
				 * offload
				 */
	u32 copy_size;	/* # of bytes copied into the data ring */
	union Vmxnet3_GenericDesc *sop_txd;	/* first descriptor of the pkt */
	union Vmxnet3_GenericDesc *eop_txd;	/* last descriptor of the pkt */
};
236*4882a593Smuzhiyun
/* One transmit queue with its command, data and completion rings plus the
 * driver-side bookkeeping.  Cache-line aligned so queues served on
 * different CPUs do not false-share.
 */
struct vmxnet3_tx_queue {
	char name[IFNAMSIZ+8];		/* To identify interrupt */
	struct vmxnet3_adapter *adapter;	/* owning adapter */
	spinlock_t tx_lock;			/* protects this queue's state */
	struct vmxnet3_cmd_ring tx_ring;	/* descriptor ring */
	struct vmxnet3_tx_buf_info *buf_info;	/* per-descriptor buffer info */
	dma_addr_t buf_info_pa;			/* DMA address of buf_info */
	struct vmxnet3_tx_data_ring data_ring;	/* copy-through data ring */
	struct vmxnet3_comp_ring comp_ring;	/* completion ring */
	struct Vmxnet3_TxQueueCtrl *shared;	/* ctrl block shared w/ device */
	struct vmxnet3_tq_driver_stats stats;	/* driver-side tx stats */
	bool stopped;				/* netif queue stopped? */
	int num_stop;			/* # of times the queue is
					 * stopped */
	int qid;				/* queue id */
	u16 txdata_desc_size;			/* size of one data-ring desc */
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
254*4882a593Smuzhiyun
/* What kind of memory backs an rx buffer slot (selects the union member
 * in struct vmxnet3_rx_buf_info).
 */
enum vmxnet3_rx_buf_type {
	VMXNET3_RX_BUF_NONE = 0,	/* slot empty */
	VMXNET3_RX_BUF_SKB = 1,		/* backed by an sk_buff */
	VMXNET3_RX_BUF_PAGE = 2		/* backed by a page */
};
260*4882a593Smuzhiyun
/* Bookkeeping for one rx buffer slot; buf_type selects the union member. */
struct vmxnet3_rx_buf_info {
	enum vmxnet3_rx_buf_type buf_type;	/* selects skb vs page below */
	u16 len;				/* buffer length */
	union {
		struct sk_buff *skb;	/* valid when VMXNET3_RX_BUF_SKB */
		struct page *page;	/* valid when VMXNET3_RX_BUF_PAGE */
	};
	dma_addr_t dma_addr;	/* DMA mapping of the buffer */
};
270*4882a593Smuzhiyun
/* Per-queue state for a received packet that spans multiple descriptors. */
struct vmxnet3_rx_ctx {
	struct sk_buff *skb;	/* skb being assembled for the current pkt */
	u32 sop_idx;		/* ring index of the pkt's first descriptor */
};
275*4882a593Smuzhiyun
/* Driver-side (not device) receive statistics, kept per rx queue. */
struct vmxnet3_rq_driver_stats {
	u64 drop_total;			/* total pkts dropped by the driver */
	u64 drop_err;			/* dropped due to receive error */
	u64 drop_fcs;			/* dropped due to bad frame checksum */
	u64 rx_buf_alloc_failure;	/* rx buffer allocation failures */
};
282*4882a593Smuzhiyun
/* Optional ring of small fixed-size rx buffers associated with a queue;
 * desc_size is the per-descriptor buffer size (see rxdata_desc_size).
 */
struct vmxnet3_rx_data_ring {
	Vmxnet3_RxDataDesc *base;	/* virtual address of the ring */
	dma_addr_t basePA;		/* DMA address of the ring */
	u16 desc_size;			/* bytes per data descriptor */
};
288*4882a593Smuzhiyun
/* One receive queue: two buffer rings, an optional data ring and the
 * completion ring, plus NAPI context and stats.  Cache-line aligned to
 * avoid false sharing between queues.
 */
struct vmxnet3_rx_queue {
	char name[IFNAMSIZ + 8];	/* To identify interrupt */
	struct vmxnet3_adapter *adapter;	/* owning adapter */
	struct napi_struct napi;		/* per-queue NAPI context */
	struct vmxnet3_cmd_ring rx_ring[2];	/* the two buffer rings */
	struct vmxnet3_rx_data_ring data_ring;	/* optional small-buffer ring */
	struct vmxnet3_comp_ring comp_ring;	/* completion ring */
	struct vmxnet3_rx_ctx rx_ctx;		/* in-progress multi-desc pkt */
	u32 qid;            /* rqID in RCD for buffer from 1st ring */
	u32 qid2;           /* rqID in RCD for buffer from 2nd ring */
	u32 dataRingQid;    /* rqID in RCD for buffer from data ring */
	struct vmxnet3_rx_buf_info *buf_info[2];	/* per-ring buffer info */
	dma_addr_t buf_info_pa;			/* DMA address of buf_info */
	struct Vmxnet3_RxQueueCtrl *shared;	/* ctrl block shared w/ device */
	struct vmxnet3_rq_driver_stats stats;	/* driver-side rx stats */
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
305*4882a593Smuzhiyun
#define VMXNET3_DEVICE_MAX_TX_QUEUES 8
#define VMXNET3_DEVICE_MAX_RX_QUEUES 8   /* Keep this value as a power of 2 */

/* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */
#define VMXNET3_RSS_IND_TABLE_SIZE (VMXNET3_DEVICE_MAX_RX_QUEUES * 4)

/* Worst case: one MSI-X vector per tx and rx queue, plus one for events. */
#define VMXNET3_LINUX_MAX_MSIX_VECT     (VMXNET3_DEVICE_MAX_TX_QUEUES + \
					 VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
#define VMXNET3_LINUX_MIN_MSIX_VECT     2 /* 1 for tx-rx pair and 1 for event */
315*4882a593Smuzhiyun
316*4882a593Smuzhiyun
/* Interrupt bookkeeping: vector type and count, per-vector moderation
 * levels and (for MSI-X) the allocated msix_entry table.
 */
struct vmxnet3_intr {
	enum vmxnet3_intr_mask_mode  mask_mode;	/* how vectors are masked */
	enum vmxnet3_intr_type       type;	/* MSI-X, MSI, or INTx? */
	u8  num_intrs;			/* # of intr vectors */
	u8  event_intr_idx;		/* idx of the intr vector for event */
	u8  mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
	char	event_msi_vector_name[IFNAMSIZ+17];  /* irq name for event vector */
#ifdef CONFIG_PCI_MSI
	struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
#endif
};
328*4882a593Smuzhiyun
/* Interrupt sharing schemes, share_intr */
#define VMXNET3_INTR_BUDDYSHARE 0    /* Corresponding tx,rx queues share irq */
#define VMXNET3_INTR_TXSHARE 1	     /* All tx queues share one irq */
#define VMXNET3_INTR_DONTSHARE 2     /* each queue has its own irq */

/* Bit numbers used in vmxnet3_adapter.state. */
#define VMXNET3_STATE_BIT_RESETTING   0	/* a device reset is in progress */
#define VMXNET3_STATE_BIT_QUIESCED    1	/* device activity is quiesced */
/*
 * Per-device driver state: one instance per vmxnet3 PCI function, shared
 * between the netdev callbacks, the interrupt handlers and deferred work.
 */
struct vmxnet3_adapter {
	struct vmxnet3_tx_queue		tx_queue[VMXNET3_DEVICE_MAX_TX_QUEUES];
	struct vmxnet3_rx_queue		rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
	unsigned long			active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; /* VLAN filter bitmap */
	struct vmxnet3_intr		intr;		/* interrupt bookkeeping */
	spinlock_t			cmd_lock;	/* serializes device commands */
	struct Vmxnet3_DriverShared	*shared;	/* area shared with the device */
	struct Vmxnet3_PMConf		*pm_conf;	/* power-management config */
	struct Vmxnet3_TxQueueDesc	*tqd_start;     /* all tx queue desc */
	struct Vmxnet3_RxQueueDesc	*rqd_start;     /* all rx queue desc */
	struct net_device		*netdev;
	struct pci_dev			*pdev;

	u8			__iomem *hw_addr0; /* for BAR 0 */
	u8			__iomem *hw_addr1; /* for BAR 1 */
	u8                              version;   /* device revision in use; one-based
						    * (see VMXNET3_VERSION_GE_x) */

#ifdef VMXNET3_RSS
	struct UPT1_RSSConf		*rss_conf;	/* RSS configuration */
	bool				rss;		/* is RSS enabled? */
#endif
	u32				num_rx_queues;
	u32				num_tx_queues;

	/* rx buffer related */
	unsigned			skb_buf_size;	/* size of skb-backed rx buffers */
	int		rx_buf_per_pkt;  /* only apply to the 1st ring */
	dma_addr_t			shared_pa;	/* DMA address of *shared */
	dma_addr_t queue_desc_pa;	/* DMA address of tqd_start/rqd_start */
	dma_addr_t coal_conf_pa;	/* DMA address of *coal_conf */

	/* Wake-on-LAN */
	u32     wol;

	/* Link speed */
	u32     link_speed; /* in mbps */

	u64     tx_timeout_count;	/* # of tx watchdog timeouts observed */

	/* Ring sizes */
	u32 tx_ring_size;
	u32 rx_ring_size;
	u32 rx_ring2_size;

	/* Size of buffer in the data ring */
	u16 txdata_desc_size;
	u16 rxdata_desc_size;

	bool rxdataring_enabled;	/* is the rx data ring in use? */
	bool default_rss_fields;	/* rss_fields still at default? */
	enum Vmxnet3_RSSField rss_fields;	/* RSS hash input field selection */

	struct work_struct work;	/* deferred work item */

	unsigned long  state;    /* VMXNET3_STATE_BIT_xxx */

	int share_intr;			/* VMXNET3_INTR_xxx sharing scheme */

	struct Vmxnet3_CoalesceScheme *coal_conf;	/* interrupt coalescing config */
	bool   default_coal_mode;	/* coalescing still at default? */

	dma_addr_t adapter_pa;		/* DMA address of this struct */
	dma_addr_t pm_conf_pa;		/* DMA address of *pm_conf */
	dma_addr_t rss_conf_pa;		/* DMA address of *rss_conf */
};
402*4882a593Smuzhiyun
/* MMIO register accessors: hw_addr0 maps BAR 0, hw_addr1 maps BAR 1. */
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val)  \
	writel((val), (adapter)->hw_addr0 + (reg))
#define VMXNET3_READ_BAR0_REG(adapter, reg)        \
	readl((adapter)->hw_addr0 + (reg))

#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val)  \
	writel((val), (adapter)->hw_addr1 + (reg))
#define VMXNET3_READ_BAR1_REG(adapter, reg)        \
	readl((adapter)->hw_addr1 + (reg))

/* # of free tx descriptors above which a stopped queue may be woken. */
#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq)  (5)
/* refill an rx ring once this many (1/8 of the ring) buffers are missing */
#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
	((rq)->rx_ring[ring_idx].size >> 3)

/* Split a DMA address into the 32-bit halves used by the descriptors. */
#define VMXNET3_GET_ADDR_LO(dma)   ((u32)(dma))
#define VMXNET3_GET_ADDR_HI(dma)   ((u32)(((u64)(dma)) >> 32))

/* adapter->version is one-based, VMXNET3_REV_x is zero-based: hence +1. */
#define VMXNET3_VERSION_GE_2(adapter) \
	(adapter->version >= VMXNET3_REV_2 + 1)
#define VMXNET3_VERSION_GE_3(adapter) \
	(adapter->version >= VMXNET3_REV_3 + 1)
#define VMXNET3_VERSION_GE_4(adapter) \
	(adapter->version >= VMXNET3_REV_4 + 1)

/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE    512
#define VMXNET3_DEF_RX_RING_SIZE    1024
#define VMXNET3_DEF_RX_RING2_SIZE   256

/* default per-descriptor buffer size of the rx data ring */
#define VMXNET3_DEF_RXDATA_DESC_SIZE 128

/* largest L2 header the driver handles (eth + VLAN tag etc.) */
#define VMXNET3_MAX_ETH_HDR_SIZE    22
/* max size of a buffer allocated for an skb-backed rx buffer */
#define VMXNET3_MAX_SKB_BUF_SIZE    (3*1024)
436*4882a593Smuzhiyun
/* Map an RCD rqID to the rx ring it refers to: IDs in
 * [num_rx_queues, 2 * num_rx_queues) come from ring 1, all others from
 * ring 0.  Arguments are parenthesized for macro hygiene; the stray
 * trailing line-continuations of the original (which silently spliced the
 * following blank line into each macro) are removed.
 */
#define VMXNET3_GET_RING_IDX(adapter, rqID)		\
	(((rqID) >= (adapter)->num_rx_queues &&		\
	 (rqID) < 2 * (adapter)->num_rx_queues) ? 1 : 0)

/* True iff rqID refers to a buffer from the rx data ring, i.e. it lies in
 * [2 * num_rx_queues, 3 * num_rx_queues).
 */
#define VMXNET3_RX_DATA_RING(adapter, rqID)		\
	((rqID) >= 2 * (adapter)->num_rx_queues &&	\
	 (rqID) < 3 * (adapter)->num_rx_queues)
/* Default depth for the static interrupt-coalescing scheme. */
#define VMXNET3_COAL_STATIC_DEFAULT_DEPTH	64

/* Convert between an interrupt rate (events/sec) and a per-event delay in
 * microseconds.  The argument is parenthesized so that expression
 * arguments (e.g. `a + b`) divide correctly — the previous expansion
 * `1000000 / usecs` computed `1000000 / a + b` in that case.  Callers
 * must not pass 0.
 */
#define VMXNET3_COAL_RBC_RATE(usecs)		(1000000 / (usecs))
#define VMXNET3_COAL_RBC_USECS(rbc_rate)	(1000000 / (rbc_rate))

/* RSS hash input fields enabled by default: TCP over IPv4 and IPv6. */
#define VMXNET3_RSS_FIELDS_DEFAULT (VMXNET3_RSS_FIELDS_TCPIP4 | \
				    VMXNET3_RSS_FIELDS_TCPIP6)
451*4882a593Smuzhiyun
/* Quiesce device activity; counterpart of vmxnet3_activate_dev(). */
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);

/* Activate the device and (re)start rx/tx processing. */
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter);

/* Forcibly close the device (error recovery path). */
void
vmxnet3_force_close(struct vmxnet3_adapter *adapter);

/* Issue a device reset. */
void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);

/* Tear down all tx queues and release their resources. */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);

/* Tear down all rx queues and release their resources. */
void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);

/* ndo_fix_features: sanitize a requested netdev feature set. */
netdev_features_t
vmxnet3_fix_features(struct net_device *netdev, netdev_features_t features);

/* ndo_features_check: per-skb feature restriction. */
netdev_features_t
vmxnet3_features_check(struct sk_buff *skb,
		       struct net_device *netdev, netdev_features_t features);

/* ndo_set_features: apply a new netdev feature set. */
int
vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);

/* Allocate and set up all tx/rx queues with the given ring sizes. */
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
		      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size,
		      u16 txdata_desc_size, u16 rxdata_desc_size);

/* Install this driver's ethtool_ops on @netdev. */
void vmxnet3_set_ethtool_ops(struct net_device *netdev);

/* ndo_get_stats64: aggregate per-queue statistics into @stats. */
void vmxnet3_get_stats64(struct net_device *dev,
			 struct rtnl_link_stats64 *stats);

/* Canonical driver name string (defined in the driver's main C file). */
extern char vmxnet3_driver_name[];
#endif
492