1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
3*4882a593Smuzhiyun * driver for Linux.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * This software is available to you under a choice of one of two
8*4882a593Smuzhiyun * licenses. You may choose to be licensed under the terms of the GNU
9*4882a593Smuzhiyun * General Public License (GPL) Version 2, available from the file
10*4882a593Smuzhiyun * COPYING in the main directory of this source tree, or the
11*4882a593Smuzhiyun * OpenIB.org BSD license below:
12*4882a593Smuzhiyun *
13*4882a593Smuzhiyun * Redistribution and use in source and binary forms, with or
14*4882a593Smuzhiyun * without modification, are permitted provided that the following
15*4882a593Smuzhiyun * conditions are met:
16*4882a593Smuzhiyun *
17*4882a593Smuzhiyun * - Redistributions of source code must retain the above
18*4882a593Smuzhiyun * copyright notice, this list of conditions and the following
19*4882a593Smuzhiyun * disclaimer.
20*4882a593Smuzhiyun *
21*4882a593Smuzhiyun * - Redistributions in binary form must reproduce the above
22*4882a593Smuzhiyun * copyright notice, this list of conditions and the following
23*4882a593Smuzhiyun * disclaimer in the documentation and/or other materials
24*4882a593Smuzhiyun * provided with the distribution.
25*4882a593Smuzhiyun *
26*4882a593Smuzhiyun * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27*4882a593Smuzhiyun * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28*4882a593Smuzhiyun * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29*4882a593Smuzhiyun * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30*4882a593Smuzhiyun * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31*4882a593Smuzhiyun * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32*4882a593Smuzhiyun * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33*4882a593Smuzhiyun * SOFTWARE.
34*4882a593Smuzhiyun */
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun /*
37*4882a593Smuzhiyun * This file should not be included directly. Include t4vf_common.h instead.
38*4882a593Smuzhiyun */
39*4882a593Smuzhiyun
40*4882a593Smuzhiyun #ifndef __CXGB4VF_ADAPTER_H__
41*4882a593Smuzhiyun #define __CXGB4VF_ADAPTER_H__
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun #include <linux/interrupt.h>
44*4882a593Smuzhiyun #include <linux/pci.h>
45*4882a593Smuzhiyun #include <linux/spinlock.h>
46*4882a593Smuzhiyun #include <linux/skbuff.h>
47*4882a593Smuzhiyun #include <linux/if_ether.h>
48*4882a593Smuzhiyun #include <linux/netdevice.h>
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun #include "../cxgb4/t4_hw.h"
51*4882a593Smuzhiyun
52*4882a593Smuzhiyun /*
53*4882a593Smuzhiyun * Constants of the implementation.
54*4882a593Smuzhiyun */
enum {
	MAX_NPORTS = 1,		/* max # of "ports" (Virtual Interfaces) */
	MAX_PORT_QSETS = 8,	/* max # of Queue Sets / "port" */
	MAX_ETH_QSETS = MAX_NPORTS*MAX_PORT_QSETS,

	/*
	 * MSI-X interrupt index usage.
	 */
	MSIX_FW = 0,		/* MSI-X index for firmware Q */
	MSIX_IQFLINT = 1,	/* MSI-X index base for Ingress Qs */
	MSIX_EXTRAS = 1,	/* # of non-Queue-Set vectors (firmware Q only) */
	MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS,

	/*
	 * The maximum number of Ingress and Egress Queues is determined by
	 * the maximum number of "Queue Sets" which we support plus any
	 * ancillary queues.  Each "Queue Set" requires one Ingress Queue
	 * for RX Packet Ingress Event notifications and two Egress Queues for
	 * a Free List and an Ethernet TX list.
	 */
	INGQ_EXTRAS = 2,	/* firmware event queue and */
				/*   forwarded interrupts */
	MAX_INGQ = MAX_ETH_QSETS+INGQ_EXTRAS,
	MAX_EGRQ = MAX_ETH_QSETS*2,	/* one Free List + one TX Q per set */
};
80*4882a593Smuzhiyun
81*4882a593Smuzhiyun /*
82*4882a593Smuzhiyun * Forward structure definition references.
83*4882a593Smuzhiyun */
84*4882a593Smuzhiyun struct adapter;
85*4882a593Smuzhiyun struct sge_eth_rxq;
86*4882a593Smuzhiyun struct sge_rspq;
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun /*
89*4882a593Smuzhiyun * Per-"port" information. This is really per-Virtual Interface information
 * but the use of the "port" nomenclature makes it easier to go back and forth
91*4882a593Smuzhiyun * between the PF and VF drivers ...
92*4882a593Smuzhiyun */
struct port_info {
	struct adapter *adapter;	/* our adapter */
	u32 vlan_id;			/* vlan id for VST */
	u16 viid;			/* virtual interface ID */
	int xact_addr_filt;		/* index of our MAC address filter */
	u16 rss_size;			/* size of VI's RSS table slice */
	u8 pidx;			/* index into adapter port[] */
	s8 mdio_addr;			/* MDIO address, if any */
	u8 port_type;			/* firmware port type */
	u8 mod_type;			/* firmware module type */
	u8 port_id;			/* physical port ID */
	u8 nqsets;			/* # of "Queue Sets" */
	u8 first_qset;			/* index of first "Queue Set" */
	struct link_config link_cfg;	/* physical port configuration */
};
108*4882a593Smuzhiyun
109*4882a593Smuzhiyun /*
110*4882a593Smuzhiyun * Scatter Gather Engine resources for the "adapter". Our ingress and egress
111*4882a593Smuzhiyun * queues are organized into "Queue Sets" with one ingress and one egress
 * queue per Queue Set.  These Queue Sets are apportionable between the "ports"
113*4882a593Smuzhiyun * (Virtual Interfaces). One extra ingress queue is used to receive
114*4882a593Smuzhiyun * asynchronous messages from the firmware. Note that the "Queue IDs" that we
115*4882a593Smuzhiyun * use here are really "Relative Queue IDs" which are returned as part of the
116*4882a593Smuzhiyun * firmware command to allocate queues. These queue IDs are relative to the
117*4882a593Smuzhiyun * absolute Queue ID base of the section of the Queue ID space allocated to
118*4882a593Smuzhiyun * the PF/VF.
119*4882a593Smuzhiyun */
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun /*
122*4882a593Smuzhiyun * SGE free-list queue state.
123*4882a593Smuzhiyun */
struct rx_sw_desc;			/* opaque software RX buffer descriptor */
struct sge_fl {
	unsigned int avail;		/* # of available RX buffers */
	unsigned int pend_cred;		/* new buffers since last FL DB ring */
	unsigned int cidx;		/* consumer index */
	unsigned int pidx;		/* producer index */
	unsigned long alloc_failed;	/* # of buffer allocation failures */
	unsigned long large_alloc_failed; /* # of large buffer alloc failures */
	unsigned long starving;		/* # of times FL was found starving */

	/*
	 * Write-once/infrequently fields.
	 * -------------------------------
	 */

	unsigned int cntxt_id;		/* SGE relative QID for the free list */
	unsigned int abs_id;		/* SGE absolute QID for the free list */
	unsigned int size;		/* capacity of free list */
	struct rx_sw_desc *sdesc;	/* address of SW RX descriptor ring */
	__be64 *desc;			/* address of HW RX descriptor ring */
	dma_addr_t addr;		/* PCI bus address of hardware ring */
	void __iomem *bar2_addr;	/* address of BAR2 Queue registers */
	unsigned int bar2_qid;		/* Queue ID for BAR2 Queue registers */
};
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun /*
150*4882a593Smuzhiyun * An ingress packet gather list.
151*4882a593Smuzhiyun */
struct pkt_gl {
	struct page_frag frags[MAX_SKB_FRAGS]; /* page fragments of the packet */
	void *va;			/* virtual address of first byte */
	unsigned int nfrags;		/* # of fragments */
	unsigned int tot_len;		/* total length of fragments */
};
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun typedef int (*rspq_handler_t)(struct sge_rspq *, const __be64 *,
160*4882a593Smuzhiyun const struct pkt_gl *);
161*4882a593Smuzhiyun
162*4882a593Smuzhiyun /*
163*4882a593Smuzhiyun * State for an SGE Response Queue.
164*4882a593Smuzhiyun */
struct sge_rspq {
	struct napi_struct napi;	/* NAPI scheduling control */
	const __be64 *cur_desc;		/* current descriptor in queue */
	unsigned int cidx;		/* consumer index */
	u8 gen;				/* current generation bit */
	u8 next_intr_params;		/* holdoff params for next interrupt */
	int offset;			/* offset into current FL buffer */

	unsigned int unhandled_irqs;	/* bogus interrupts */

	/*
	 * Write-once/infrequently fields.
	 * -------------------------------
	 */

	u8 intr_params;			/* interrupt holdoff parameters */
	u8 pktcnt_idx;			/* interrupt packet threshold */
	u8 idx;				/* queue index within its group */
	u16 cntxt_id;			/* SGE rel QID for the response Q */
	u16 abs_id;			/* SGE abs QID for the response Q */
	__be64 *desc;			/* address of hardware response ring */
	dma_addr_t phys_addr;		/* PCI bus address of ring */
	void __iomem *bar2_addr;	/* address of BAR2 Queue registers */
	unsigned int bar2_qid;		/* Queue ID for BAR2 Queue registers */
	unsigned int iqe_len;		/* entry size */
	unsigned int size;		/* capacity of response Q */
	struct adapter *adapter;	/* our adapter */
	struct net_device *netdev;	/* associated net device */
	rspq_handler_t handler;		/* the handler for this response Q */
};
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun /*
197*4882a593Smuzhiyun * Ethernet queue statistics
198*4882a593Smuzhiyun */
struct sge_eth_stats {
	unsigned long pkts;		/* # of ethernet packets */
	unsigned long lro_pkts;		/* # of LRO super packets */
	unsigned long lro_merged;	/* # of wire packets merged by LRO */
	unsigned long rx_cso;		/* # of Rx checksum offloads */
	unsigned long vlan_ex;		/* # of Rx VLAN extractions */
	unsigned long rx_drops;		/* # of packets dropped due to no mem */
};
207*4882a593Smuzhiyun
208*4882a593Smuzhiyun /*
209*4882a593Smuzhiyun * State for an Ethernet Receive Queue.
210*4882a593Smuzhiyun */
struct sge_eth_rxq {
	struct sge_rspq rspq;		/* Response Queue */
	struct sge_fl fl;		/* Free List */
	struct sge_eth_stats stats;	/* receive statistics */
};
216*4882a593Smuzhiyun
217*4882a593Smuzhiyun /*
218*4882a593Smuzhiyun * SGE Transmit Queue state. This contains all of the resources associated
219*4882a593Smuzhiyun * with the hardware status of a TX Queue which is a circular ring of hardware
220*4882a593Smuzhiyun * TX Descriptors. For convenience, it also contains a pointer to a parallel
221*4882a593Smuzhiyun * "Software Descriptor" array but we don't know anything about it here other
222*4882a593Smuzhiyun * than its type name.
223*4882a593Smuzhiyun */
struct tx_desc {
	/*
	 * Egress Queues are measured in units of SGE_EQ_IDXSIZE by the
	 * hardware: Sizes, Producer and Consumer indices, etc.
	 */
	__be64 flit[SGE_EQ_IDXSIZE/sizeof(__be64)];	/* one HW descriptor */
};
struct tx_sw_desc;		/* opaque software TX descriptor */
struct sge_txq {
	unsigned int in_use;		/* # of in-use TX descriptors */
	unsigned int size;		/* # of descriptors */
	unsigned int cidx;		/* SW consumer index */
	unsigned int pidx;		/* producer index */
	unsigned long stops;		/* # of times queue has been stopped */
	unsigned long restarts;		/* # of queue restarts */

	/*
	 * Write-once/infrequently fields.
	 * -------------------------------
	 */

	unsigned int cntxt_id;		/* SGE relative QID for the TX Q */
	unsigned int abs_id;		/* SGE absolute QID for the TX Q */
	struct tx_desc *desc;		/* address of HW TX descriptor ring */
	struct tx_sw_desc *sdesc;	/* address of SW TX descriptor ring */
	struct sge_qstat *stat;		/* queue status entry */
	dma_addr_t phys_addr;		/* PCI bus address of hardware ring */
	void __iomem *bar2_addr;	/* address of BAR2 Queue registers */
	unsigned int bar2_qid;		/* Queue ID for BAR2 Queue registers */
};
254*4882a593Smuzhiyun
255*4882a593Smuzhiyun /*
256*4882a593Smuzhiyun * State for an Ethernet Transmit Queue.
257*4882a593Smuzhiyun */
struct sge_eth_txq {
	struct sge_txq q;		/* SGE TX Queue */
	struct netdev_queue *txq;	/* associated netdev TX queue */
	unsigned long tso;		/* # of TSO requests */
	unsigned long tx_cso;		/* # of TX checksum offloads */
	unsigned long vlan_ins;		/* # of TX VLAN insertions */
	unsigned long mapping_err;	/* # of I/O MMU packet mapping errors */
};
266*4882a593Smuzhiyun
267*4882a593Smuzhiyun /*
268*4882a593Smuzhiyun * The complete set of Scatter/Gather Engine resources.
269*4882a593Smuzhiyun */
struct sge {
	/*
	 * Our "Queue Sets" ...
	 */
	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];

	/*
	 * Extra ingress queues for asynchronous firmware events and
	 * forwarded interrupts (when in MSI mode).
	 */
	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;

	struct sge_rspq intrq ____cacheline_aligned_in_smp;
	spinlock_t intrq_lock;		/* protects the forwarded intr queue */

	/*
	 * State for managing "starving Free Lists" -- Free Lists which have
	 * fallen below a certain threshold of buffers available to the
	 * hardware and attempts to refill them up to that threshold have
	 * failed.  We have a regular "slow tick" timer process which will
	 * make periodic attempts to refill these starving Free Lists ...
	 */
	DECLARE_BITMAP(starving_fl, MAX_EGRQ);	/* one bit per Egress Queue */
	struct timer_list rx_timer;	/* slow tick to refill starving FLs */

	/*
	 * State for cleaning up completed TX descriptors.
	 */
	struct timer_list tx_timer;

	/*
	 * Write-once/infrequently fields.
	 * -------------------------------
	 */

	u16 max_ethqsets;		/* # of available Ethernet queue sets */
	u16 ethqsets;			/* # of active Ethernet queue sets */
	u16 ethtxq_rover;		/* Tx queue to clean up next */
	u16 timer_val[SGE_NTIMERS];	/* interrupt holdoff timer array */
	u8 counter_val[SGE_NCOUNTERS];	/* interrupt RX threshold array */

	/* Decoded Adapter Parameters.
	 */
	u32 fl_pg_order;		/* large page allocation size */
	u32 stat_len;			/* length of status page at ring end */
	u32 pktshift;			/* padding between CPL & packet data */
	u32 fl_align;			/* response queue message alignment */
	u32 fl_starve_thres;		/* Free List starvation threshold */

	/*
	 * Reverse maps from Absolute Queue IDs to associated queue pointers.
	 * The absolute Queue IDs are in a compact range which start at a
	 * [potentially large] Base Queue ID.  We perform the reverse map by
	 * first converting the Absolute Queue ID into a Relative Queue ID by
	 * subtracting off the Base Queue ID and then use a Relative Queue ID
	 * indexed table to get the pointer to the corresponding software
	 * queue structure.
	 */
	unsigned int egr_base;		/* Absolute Egress Queue ID base */
	unsigned int ingr_base;		/* Absolute Ingress Queue ID base */
	void *egr_map[MAX_EGRQ];	/* relative QID -> egress queue */
	struct sge_rspq *ingr_map[MAX_INGQ]; /* relative QID -> ingress queue */
};
334*4882a593Smuzhiyun
335*4882a593Smuzhiyun /*
336*4882a593Smuzhiyun * Utility macros to convert Absolute- to Relative-Queue indices and Egress-
337*4882a593Smuzhiyun * and Ingress-Queues. The EQ_MAP() and IQ_MAP() macros which provide
338*4882a593Smuzhiyun * pointers to Ingress- and Egress-Queues can be used as both L- and R-values
339*4882a593Smuzhiyun */
/* Convert an Absolute Egress/Ingress Queue ID to its Relative Queue ID. */
#define EQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->egr_base))
#define IQ_IDX(s, abs_id) ((unsigned int)((abs_id) - (s)->ingr_base))

/* Map an Absolute Queue ID to its software queue pointer (L- or R-value). */
#define EQ_MAP(s, abs_id) ((s)->egr_map[EQ_IDX(s, abs_id)])
#define IQ_MAP(s, abs_id) ((s)->ingr_map[IQ_IDX(s, abs_id)])

/*
 * Macro to iterate across Queue Sets ("rxq" is a historic misnomer).
 */
#define for_each_ethrxq(sge, iter) \
	for (iter = 0; iter < (sge)->ethqsets; iter++)
351*4882a593Smuzhiyun
/* An entry on the adapter's list of MAC addresses kept in the MPS Hash
 * (see adapter->mac_hlist below).
 */
struct hash_mac_addr {
	struct list_head list;		/* linkage on adapter->mac_hlist */
	u8 addr[ETH_ALEN];		/* the MAC address itself */
	unsigned int iface_mac;		/* NOTE(review): presumably flags the
					 * interface's own MAC -- confirm
					 * against list users */
};
357*4882a593Smuzhiyun
/* List node used to queue mailbox command issuers (see adapter->mlist,
 * protected by adapter->mbox_lock).
 */
struct mbox_list {
	struct list_head list;
};
361*4882a593Smuzhiyun
362*4882a593Smuzhiyun /*
363*4882a593Smuzhiyun * Per-"adapter" (Virtual Function) information.
364*4882a593Smuzhiyun */
struct adapter {
	/* PCI resources */
	void __iomem *regs;		/* mapped adapter register space */
	void __iomem *bar2;		/* mapped BAR2 region (queue regs) */
	struct pci_dev *pdev;
	struct device *pdev_dev;	/* generic device, for DMA/messages */

	/* "adapter" resources */
	unsigned long registered_device_map;	/* net devices registered */
	unsigned long open_device_map;		/* net devices opened */
	unsigned long flags;			/* CXGB4VF_* flags (below) */
	struct adapter_params params;

	/* queue and interrupt resources */
	struct {
		unsigned short vec;	/* interrupt vector number */
		char desc[22];		/* interrupt description string */
	} msix_info[MSIX_ENTRIES];
	struct sge sge;

	/* Linux network device resources */
	struct net_device *port[MAX_NPORTS];
	const char *name;		/* adapter name */
	unsigned int msg_enable;	/* netif message level bitmap */

	/* debugfs resources */
	struct dentry *debugfs_root;

	/* various locks */
	spinlock_t stats_lock;

	/* lock for mailbox cmd list */
	spinlock_t mbox_lock;
	struct mbox_list mlist;

	/* support for mailbox command/reply logging */
#define T4VF_OS_LOG_MBOX_CMDS 256
	struct mbox_cmd_log *mbox_log;

	/* list of MAC addresses in MPS Hash */
	struct list_head mac_hlist;
};
407*4882a593Smuzhiyun
enum { /* adapter flags */
	CXGB4VF_FULL_INIT_DONE = (1UL << 0),	/* adapter init completed */
	CXGB4VF_USING_MSI = (1UL << 1),		/* MSI interrupt mode */
	CXGB4VF_USING_MSIX = (1UL << 2),	/* MSI-X interrupt mode */
	CXGB4VF_QUEUES_BOUND = (1UL << 3),	/* queues bound to interrupts */
	CXGB4VF_ROOT_NO_RELAXED_ORDERING = (1UL << 4), /* no PCIe relaxed
							* ordering at root */
	CXGB4VF_FW_OK = (1UL << 5),		/* firmware is operational */
};
416*4882a593Smuzhiyun
417*4882a593Smuzhiyun /*
418*4882a593Smuzhiyun * The following register read/write routine definitions are required by
419*4882a593Smuzhiyun * the common code.
420*4882a593Smuzhiyun */
421*4882a593Smuzhiyun
422*4882a593Smuzhiyun /**
423*4882a593Smuzhiyun * t4_read_reg - read a HW register
424*4882a593Smuzhiyun * @adapter: the adapter
425*4882a593Smuzhiyun * @reg_addr: the register address
426*4882a593Smuzhiyun *
427*4882a593Smuzhiyun * Returns the 32-bit value of the given HW register.
428*4882a593Smuzhiyun */
static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
{
	/* Register offsets are relative to the mapped register space. */
	void __iomem *addr = adapter->regs + reg_addr;

	return readl(addr);
}
433*4882a593Smuzhiyun
434*4882a593Smuzhiyun /**
435*4882a593Smuzhiyun * t4_write_reg - write a HW register
436*4882a593Smuzhiyun * @adapter: the adapter
437*4882a593Smuzhiyun * @reg_addr: the register address
438*4882a593Smuzhiyun * @val: the value to write
439*4882a593Smuzhiyun *
440*4882a593Smuzhiyun * Write a 32-bit value into the given HW register.
441*4882a593Smuzhiyun */
static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
{
	/* Register offsets are relative to the mapped register space. */
	void __iomem *addr = adapter->regs + reg_addr;

	writel(val, addr);
}
446*4882a593Smuzhiyun
447*4882a593Smuzhiyun #ifndef readq
readq(const volatile void __iomem * addr)448*4882a593Smuzhiyun static inline u64 readq(const volatile void __iomem *addr)
449*4882a593Smuzhiyun {
450*4882a593Smuzhiyun return readl(addr) + ((u64)readl(addr + 4) << 32);
451*4882a593Smuzhiyun }
452*4882a593Smuzhiyun
writeq(u64 val,volatile void __iomem * addr)453*4882a593Smuzhiyun static inline void writeq(u64 val, volatile void __iomem *addr)
454*4882a593Smuzhiyun {
455*4882a593Smuzhiyun writel(val, addr);
456*4882a593Smuzhiyun writel(val >> 32, addr + 4);
457*4882a593Smuzhiyun }
458*4882a593Smuzhiyun #endif
459*4882a593Smuzhiyun
460*4882a593Smuzhiyun /**
461*4882a593Smuzhiyun * t4_read_reg64 - read a 64-bit HW register
462*4882a593Smuzhiyun * @adapter: the adapter
463*4882a593Smuzhiyun * @reg_addr: the register address
464*4882a593Smuzhiyun *
465*4882a593Smuzhiyun * Returns the 64-bit value of the given HW register.
466*4882a593Smuzhiyun */
t4_read_reg64(struct adapter * adapter,u32 reg_addr)467*4882a593Smuzhiyun static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
468*4882a593Smuzhiyun {
469*4882a593Smuzhiyun return readq(adapter->regs + reg_addr);
470*4882a593Smuzhiyun }
471*4882a593Smuzhiyun
472*4882a593Smuzhiyun /**
473*4882a593Smuzhiyun * t4_write_reg64 - write a 64-bit HW register
474*4882a593Smuzhiyun * @adapter: the adapter
475*4882a593Smuzhiyun * @reg_addr: the register address
476*4882a593Smuzhiyun * @val: the value to write
477*4882a593Smuzhiyun *
478*4882a593Smuzhiyun * Write a 64-bit value into the given HW register.
479*4882a593Smuzhiyun */
static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
				  u64 val)
{
	/* Delegate to the (possibly fallback) 64-bit MMIO write above. */
	void __iomem *addr = adapter->regs + reg_addr;

	writeq(val, addr);
}
485*4882a593Smuzhiyun
486*4882a593Smuzhiyun /**
487*4882a593Smuzhiyun * port_name - return the string name of a port
488*4882a593Smuzhiyun * @adapter: the adapter
489*4882a593Smuzhiyun * @pidx: the port index
490*4882a593Smuzhiyun *
491*4882a593Smuzhiyun * Return the string name of the selected port.
492*4882a593Smuzhiyun */
port_name(struct adapter * adapter,int pidx)493*4882a593Smuzhiyun static inline const char *port_name(struct adapter *adapter, int pidx)
494*4882a593Smuzhiyun {
495*4882a593Smuzhiyun return adapter->port[pidx]->name;
496*4882a593Smuzhiyun }
497*4882a593Smuzhiyun
498*4882a593Smuzhiyun /**
499*4882a593Smuzhiyun * t4_os_set_hw_addr - store a port's MAC address in SW
500*4882a593Smuzhiyun * @adapter: the adapter
501*4882a593Smuzhiyun * @pidx: the port index
502*4882a593Smuzhiyun * @hw_addr: the Ethernet address
503*4882a593Smuzhiyun *
504*4882a593Smuzhiyun * Store the Ethernet address of the given port in SW. Called by the common
505*4882a593Smuzhiyun * code when it retrieves a port's Ethernet address from EEPROM.
506*4882a593Smuzhiyun */
static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
				     u8 hw_addr[])
{
	struct net_device *dev = adapter->port[pidx];

	/* Record the Ethernet address on the port's net device. */
	memcpy(dev->dev_addr, hw_addr, ETH_ALEN);
}
512*4882a593Smuzhiyun
513*4882a593Smuzhiyun /**
514*4882a593Smuzhiyun * netdev2pinfo - return the port_info structure associated with a net_device
515*4882a593Smuzhiyun * @dev: the netdev
516*4882a593Smuzhiyun *
517*4882a593Smuzhiyun * Return the struct port_info associated with a net_device
518*4882a593Smuzhiyun */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
	/* Our per-port state lives in the net device's private area. */
	struct port_info *pi = netdev_priv(dev);

	return pi;
}
523*4882a593Smuzhiyun
524*4882a593Smuzhiyun /**
525*4882a593Smuzhiyun * adap2pinfo - return the port_info of a port
526*4882a593Smuzhiyun * @adap: the adapter
527*4882a593Smuzhiyun * @pidx: the port index
528*4882a593Smuzhiyun *
529*4882a593Smuzhiyun * Return the port_info structure for the adapter.
530*4882a593Smuzhiyun */
adap2pinfo(struct adapter * adapter,int pidx)531*4882a593Smuzhiyun static inline struct port_info *adap2pinfo(struct adapter *adapter, int pidx)
532*4882a593Smuzhiyun {
533*4882a593Smuzhiyun return netdev_priv(adapter->port[pidx]);
534*4882a593Smuzhiyun }
535*4882a593Smuzhiyun
536*4882a593Smuzhiyun /**
537*4882a593Smuzhiyun * netdev2adap - return the adapter structure associated with a net_device
538*4882a593Smuzhiyun * @dev: the netdev
539*4882a593Smuzhiyun *
540*4882a593Smuzhiyun * Return the struct adapter associated with a net_device
541*4882a593Smuzhiyun */
netdev2adap(const struct net_device * dev)542*4882a593Smuzhiyun static inline struct adapter *netdev2adap(const struct net_device *dev)
543*4882a593Smuzhiyun {
544*4882a593Smuzhiyun return netdev2pinfo(dev)->adapter;
545*4882a593Smuzhiyun }
546*4882a593Smuzhiyun
547*4882a593Smuzhiyun /*
548*4882a593Smuzhiyun * OS "Callback" function declarations. These are functions that the OS code
549*4882a593Smuzhiyun * is "contracted" to provide for the common code.
550*4882a593Smuzhiyun */
551*4882a593Smuzhiyun void t4vf_os_link_changed(struct adapter *, int, int);
552*4882a593Smuzhiyun void t4vf_os_portmod_changed(struct adapter *, int);
553*4882a593Smuzhiyun
554*4882a593Smuzhiyun /*
555*4882a593Smuzhiyun * SGE function prototype declarations.
556*4882a593Smuzhiyun */
557*4882a593Smuzhiyun int t4vf_sge_alloc_rxq(struct adapter *, struct sge_rspq *, bool,
558*4882a593Smuzhiyun struct net_device *, int,
559*4882a593Smuzhiyun struct sge_fl *, rspq_handler_t);
560*4882a593Smuzhiyun int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *,
561*4882a593Smuzhiyun struct net_device *, struct netdev_queue *,
562*4882a593Smuzhiyun unsigned int);
563*4882a593Smuzhiyun void t4vf_free_sge_resources(struct adapter *);
564*4882a593Smuzhiyun
565*4882a593Smuzhiyun netdev_tx_t t4vf_eth_xmit(struct sk_buff *, struct net_device *);
566*4882a593Smuzhiyun int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *,
567*4882a593Smuzhiyun const struct pkt_gl *);
568*4882a593Smuzhiyun
569*4882a593Smuzhiyun irq_handler_t t4vf_intr_handler(struct adapter *);
570*4882a593Smuzhiyun irqreturn_t t4vf_sge_intr_msix(int, void *);
571*4882a593Smuzhiyun
572*4882a593Smuzhiyun int t4vf_sge_init(struct adapter *);
573*4882a593Smuzhiyun void t4vf_sge_start(struct adapter *);
574*4882a593Smuzhiyun void t4vf_sge_stop(struct adapter *);
575*4882a593Smuzhiyun
576*4882a593Smuzhiyun #endif /* __CXGB4VF_ADAPTER_H__ */
577