1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Driver for Gigabit Ethernet adapters based on the Session Layer
4*4882a593Smuzhiyun * Interface (SLIC) technology by Alacritech. The driver does not
5*4882a593Smuzhiyun * support the hardware acceleration features provided by these cards.
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Copyright (C) 2016 Lino Sanfilippo <LinoSanfilippo@gmx.de>
8*4882a593Smuzhiyun */
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun #include <linux/kernel.h>
11*4882a593Smuzhiyun #include <linux/module.h>
12*4882a593Smuzhiyun #include <linux/pci.h>
13*4882a593Smuzhiyun #include <linux/netdevice.h>
14*4882a593Smuzhiyun #include <linux/etherdevice.h>
15*4882a593Smuzhiyun #include <linux/if_ether.h>
16*4882a593Smuzhiyun #include <linux/crc32.h>
17*4882a593Smuzhiyun #include <linux/dma-mapping.h>
18*4882a593Smuzhiyun #include <linux/ethtool.h>
19*4882a593Smuzhiyun #include <linux/mii.h>
20*4882a593Smuzhiyun #include <linux/interrupt.h>
21*4882a593Smuzhiyun #include <linux/delay.h>
22*4882a593Smuzhiyun #include <linux/firmware.h>
23*4882a593Smuzhiyun #include <linux/list.h>
24*4882a593Smuzhiyun #include <linux/u64_stats_sync.h>
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun #include "slic.h"
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun #define DRV_NAME "slicoss"
29*4882a593Smuzhiyun
/* PCI IDs of the supported Mojave and Oasis based adapters */
static const struct pci_device_id slic_id_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH,
		     PCI_DEVICE_ID_ALACRITECH_MOJAVE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH,
		     PCI_DEVICE_ID_ALACRITECH_OASIS) },
	{ 0 }
};
37*4882a593Smuzhiyun
/* Counter names reported via ethtool (ethtool -S).
 * NOTE(review): presumably this must stay in the same order as the values
 * filled in by the ethtool get_ethtool_stats hook, which is outside this
 * chunk — verify before reordering.
 */
static const char slic_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"rx_bytes",
	"rx_multicasts",
	"rx_errors",
	"rx_buff_miss",
	"rx_tp_csum",
	"rx_tp_oflow",
	"rx_tp_hlen",
	"rx_ip_csum",
	"rx_ip_len",
	"rx_ip_hdr_len",
	"rx_early",
	"rx_buff_oflow",
	"rx_lcode",
	"rx_drbl",
	"rx_crc",
	"rx_oflow_802",
	"rx_uflow_802",
	"tx_packets",
	"tx_bytes",
	"tx_carrier",
	"tx_dropped",
	"irq_errs",
};
63*4882a593Smuzhiyun
/* Advance a queue index by one, wrapping at the queue length.
 * Relies on qlen being a power of two so the wrap can be done with a mask.
 */
static inline int slic_next_queue_idx(unsigned int idx, unsigned int qlen)
{
	unsigned int next = idx + 1;

	return next & (qlen - 1);
}
68*4882a593Smuzhiyun
/* Number of descriptors that can still be filled in a ring of length qlen,
 * given the producer (put_idx) and consumer (done_idx) positions. One slot
 * is always kept unused so a full ring can be told apart from an empty one.
 */
static inline int slic_get_free_queue_descs(unsigned int put_idx,
					    unsigned int done_idx,
					    unsigned int qlen)
{
	unsigned int used;

	if (put_idx >= done_idx)
		used = put_idx - done_idx;
	else
		used = qlen - (done_idx - put_idx);

	return qlen - used - 1;
}
77*4882a593Smuzhiyun
/* Fetch the index of the next completed descriptor from the status queue,
 * or SLIC_INVALID_STAT_DESC_IDX if the hardware has not reported any new
 * completion. Consumed status descriptors are cleared and, when an array
 * is exhausted, it is handed back to the hardware and the next array in
 * the rotation becomes active.
 */
static unsigned int slic_next_compl_idx(struct slic_device *sdev)
{
	struct slic_stat_queue *stq = &sdev->stq;
	unsigned int active = stq->active_array;
	struct slic_stat_desc *descs;
	struct slic_stat_desc *stat;
	unsigned int idx;

	descs = stq->descs[active];
	stat = &descs[stq->done_idx];

	/* a zero status means the HW has not written this descriptor yet */
	if (!stat->status)
		return SLIC_INVALID_STAT_DESC_IDX;

	/* the low 16 bits of the handle hold a 1-based index; make it 0-based */
	idx = (le32_to_cpu(stat->hnd) & 0xffff) - 1;
	/* reset desc */
	stat->hnd = 0;
	stat->status = 0;

	stq->done_idx = slic_next_queue_idx(stq->done_idx, stq->len);
	/* check for wraparound */
	if (!stq->done_idx) {
		dma_addr_t paddr = stq->paddr[active];

		/* return the exhausted array to the HW and rotate to the
		 * next one
		 */
		slic_write(sdev, SLIC_REG_RBAR, lower_32_bits(paddr) |
			   stq->len);
		/* make sure new status descriptors are immediately available */
		slic_flush_write(sdev);
		active++;
		active &= (SLIC_NUM_STAT_DESC_ARRAYS - 1);
		stq->active_array = active;
	}
	return idx;
}
112*4882a593Smuzhiyun
/* Number of free descriptors in the tx queue. */
static unsigned int slic_get_free_tx_descs(struct slic_tx_queue *txq)
{
	/* ensure tail idx is updated */
	smp_mb();
	return slic_get_free_queue_descs(txq->put_idx, txq->done_idx, txq->len);
}
119*4882a593Smuzhiyun
/* Number of free descriptors in the rx queue. */
static unsigned int slic_get_free_rx_descs(struct slic_rx_queue *rxq)
{
	return slic_get_free_queue_descs(rxq->put_idx, rxq->done_idx, rxq->len);
}
124*4882a593Smuzhiyun
/* Free all queued UPRs and mark the list as idle. Any responses the
 * hardware may still deliver for them are no longer waited for.
 */
static void slic_clear_upr_list(struct slic_upr_list *upr_list)
{
	struct slic_upr *upr;
	struct slic_upr *tmp;

	spin_lock_bh(&upr_list->lock);
	list_for_each_entry_safe(upr, tmp, &upr_list->list, list) {
		list_del(&upr->list);
		kfree(upr);
	}
	upr_list->pending = false;
	spin_unlock_bh(&upr_list->lock);
}
138*4882a593Smuzhiyun
slic_start_upr(struct slic_device * sdev,struct slic_upr * upr)139*4882a593Smuzhiyun static void slic_start_upr(struct slic_device *sdev, struct slic_upr *upr)
140*4882a593Smuzhiyun {
141*4882a593Smuzhiyun u32 reg;
142*4882a593Smuzhiyun
143*4882a593Smuzhiyun reg = (upr->type == SLIC_UPR_CONFIG) ? SLIC_REG_RCONFIG :
144*4882a593Smuzhiyun SLIC_REG_LSTAT;
145*4882a593Smuzhiyun slic_write(sdev, reg, lower_32_bits(upr->paddr));
146*4882a593Smuzhiyun slic_flush_write(sdev);
147*4882a593Smuzhiyun }
148*4882a593Smuzhiyun
/* Append a UPR to the device's request list. If nothing was pending the
 * request is started on the hardware right away; otherwise it is started
 * later by slic_dequeue_upr() once its predecessors completed.
 */
static void slic_queue_upr(struct slic_device *sdev, struct slic_upr *upr)
{
	struct slic_upr_list *upr_list = &sdev->upr_list;
	bool pending;

	spin_lock_bh(&upr_list->lock);
	pending = upr_list->pending;
	INIT_LIST_HEAD(&upr->list);
	list_add_tail(&upr->list, &upr_list->list);
	upr_list->pending = true;
	spin_unlock_bh(&upr_list->lock);

	/* started outside the lock; only one request is in flight at a time */
	if (!pending)
		slic_start_upr(sdev, upr);
}
164*4882a593Smuzhiyun
/* Remove and return the head of the UPR list, or NULL if it is empty.
 * If further requests are queued, the next one is started on the hardware
 * so the list keeps draining.
 */
static struct slic_upr *slic_dequeue_upr(struct slic_device *sdev)
{
	struct slic_upr_list *upr_list = &sdev->upr_list;
	struct slic_upr *next_upr = NULL;
	struct slic_upr *upr = NULL;

	spin_lock_bh(&upr_list->lock);
	if (!list_empty(&upr_list->list)) {
		upr = list_first_entry(&upr_list->list, struct slic_upr, list);
		list_del(&upr->list);

		if (list_empty(&upr_list->list))
			upr_list->pending = false;
		else
			next_upr = list_first_entry(&upr_list->list,
						    struct slic_upr, list);
	}
	spin_unlock_bh(&upr_list->lock);
	/* trigger processing of the next upr in list */
	if (next_upr)
		slic_start_upr(sdev, next_upr);

	return upr;
}
189*4882a593Smuzhiyun
/* Allocate a UPR of the given type for the DMA address @paddr and queue it
 * for processing. Returns 0 on success or -ENOMEM if allocation fails.
 * May be called from atomic context (GFP_ATOMIC).
 */
static int slic_new_upr(struct slic_device *sdev, unsigned int type,
			dma_addr_t paddr)
{
	struct slic_upr *new_upr = kmalloc(sizeof(*new_upr), GFP_ATOMIC);

	if (!new_upr)
		return -ENOMEM;

	new_upr->type = type;
	new_upr->paddr = paddr;
	slic_queue_upr(sdev, new_upr);

	return 0;
}
205*4882a593Smuzhiyun
/* Set the hash-table bit for one multicast address in *mcmask.
 * The bit index is derived from the ethernet CRC of the address: bits 1-8
 * (lsb), bitwise reversed — the msb (lsb bit 0 before bitrev) is dropped
 * automatically by the u8 truncation.
 */
static void slic_set_mcast_bit(u64 *mcmask, unsigned char const *addr)
{
	u8 bit;

	bit = ether_crc(ETH_ALEN, addr) >> 23;
	/* the SLIC hash only has room for 64 entries */
	bit &= 0x3F;
	*mcmask |= (u64)1 << bit;
}
220*4882a593Smuzhiyun
221*4882a593Smuzhiyun /* must be called with link_lock held */
slic_configure_rcv(struct slic_device * sdev)222*4882a593Smuzhiyun static void slic_configure_rcv(struct slic_device *sdev)
223*4882a593Smuzhiyun {
224*4882a593Smuzhiyun u32 val;
225*4882a593Smuzhiyun
226*4882a593Smuzhiyun val = SLIC_GRCR_RESET | SLIC_GRCR_ADDRAEN | SLIC_GRCR_RCVEN |
227*4882a593Smuzhiyun SLIC_GRCR_HASHSIZE << SLIC_GRCR_HASHSIZE_SHIFT | SLIC_GRCR_RCVBAD;
228*4882a593Smuzhiyun
229*4882a593Smuzhiyun if (sdev->duplex == DUPLEX_FULL)
230*4882a593Smuzhiyun val |= SLIC_GRCR_CTLEN;
231*4882a593Smuzhiyun
232*4882a593Smuzhiyun if (sdev->promisc)
233*4882a593Smuzhiyun val |= SLIC_GRCR_RCVALL;
234*4882a593Smuzhiyun
235*4882a593Smuzhiyun slic_write(sdev, SLIC_REG_WRCFG, val);
236*4882a593Smuzhiyun }
237*4882a593Smuzhiyun
238*4882a593Smuzhiyun /* must be called with link_lock held */
slic_configure_xmt(struct slic_device * sdev)239*4882a593Smuzhiyun static void slic_configure_xmt(struct slic_device *sdev)
240*4882a593Smuzhiyun {
241*4882a593Smuzhiyun u32 val;
242*4882a593Smuzhiyun
243*4882a593Smuzhiyun val = SLIC_GXCR_RESET | SLIC_GXCR_XMTEN;
244*4882a593Smuzhiyun
245*4882a593Smuzhiyun if (sdev->duplex == DUPLEX_FULL)
246*4882a593Smuzhiyun val |= SLIC_GXCR_PAUSEEN;
247*4882a593Smuzhiyun
248*4882a593Smuzhiyun slic_write(sdev, SLIC_REG_WXCFG, val);
249*4882a593Smuzhiyun }
250*4882a593Smuzhiyun
/* Program the MAC configuration register with the gap timings
 * (GAPBB/GAPR1/GAPR2, presumably inter-frame gaps — confirm against the
 * hardware spec) and the duplex setting matching the current link speed.
 * GMII mode is only enabled for gigabit links.
 * Must be called with link_lock held.
 */
static void slic_configure_mac(struct slic_device *sdev)
{
	u32 val;

	if (sdev->speed == SPEED_1000) {
		val = SLIC_GMCR_GAPBB_1000 << SLIC_GMCR_GAPBB_SHIFT |
		      SLIC_GMCR_GAPR1_1000 << SLIC_GMCR_GAPR1_SHIFT |
		      SLIC_GMCR_GAPR2_1000 << SLIC_GMCR_GAPR2_SHIFT |
		      SLIC_GMCR_GBIT; /* enable GMII */
	} else {
		/* 10/100 Mbit timings */
		val = SLIC_GMCR_GAPBB_100 << SLIC_GMCR_GAPBB_SHIFT |
		      SLIC_GMCR_GAPR1_100 << SLIC_GMCR_GAPR1_SHIFT |
		      SLIC_GMCR_GAPR2_100 << SLIC_GMCR_GAPR2_SHIFT;
	}

	if (sdev->duplex == DUPLEX_FULL)
		val |= SLIC_GMCR_FULLD;

	slic_write(sdev, SLIC_REG_WMCFG, val);
}
272*4882a593Smuzhiyun
/* Apply new link parameters: reprogram MAC/xmt/rcv and update the carrier
 * state. A no-op when speed and duplex are unchanged. An unknown speed is
 * treated as link down.
 * Must be called with link_lock held.
 */
static void slic_configure_link_locked(struct slic_device *sdev, int speed,
				       unsigned int duplex)
{
	struct net_device *dev = sdev->netdev;

	if (sdev->speed == speed && sdev->duplex == duplex)
		return;

	sdev->speed = speed;
	sdev->duplex = duplex;

	if (sdev->speed == SPEED_UNKNOWN) {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	} else {
		/* (re)configure link settings */
		slic_configure_mac(sdev);
		slic_configure_xmt(sdev);
		slic_configure_rcv(sdev);
		slic_flush_write(sdev);

		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	}
}
298*4882a593Smuzhiyun
/* Locked wrapper around slic_configure_link_locked(). */
static void slic_configure_link(struct slic_device *sdev, int speed,
				unsigned int duplex)
{
	spin_lock_bh(&sdev->link_lock);
	slic_configure_link_locked(sdev, speed, duplex);
	spin_unlock_bh(&sdev->link_lock);
}
306*4882a593Smuzhiyun
/* Program the multicast hash filter and promiscuous mode according to the
 * netdev flags (presumably the ndo_set_rx_mode callback — hooked up
 * outside this chunk).
 */
static void slic_set_rx_mode(struct net_device *dev)
{
	struct slic_device *sdev = netdev_priv(dev);
	struct netdev_hw_addr *hwaddr;
	bool set_promisc;
	u64 mcmask;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		/* Turn on all multicast addresses. We have to do this for
		 * promiscuous mode as well as ALLMCAST mode (it saves the
		 * microcode from having to keep state about the MAC
		 * configuration).
		 */
		mcmask = ~(u64)0;
	} else {
		mcmask = 0;

		/* build the 64 bit hash mask from the mc address list */
		netdev_for_each_mc_addr(hwaddr, dev) {
			slic_set_mcast_bit(&mcmask, hwaddr->addr);
		}
	}

	slic_write(sdev, SLIC_REG_MCASTLOW, lower_32_bits(mcmask));
	slic_write(sdev, SLIC_REG_MCASTHIGH, upper_32_bits(mcmask));

	set_promisc = !!(dev->flags & IFF_PROMISC);

	/* only touch the receiver config when the promisc state changed */
	spin_lock_bh(&sdev->link_lock);
	if (sdev->promisc != set_promisc) {
		sdev->promisc = set_promisc;
		slic_configure_rcv(sdev);
	}
	spin_unlock_bh(&sdev->link_lock);
}
341*4882a593Smuzhiyun
/* Reap completed tx descriptors: unmap and free the transmitted skbs,
 * account tx statistics and wake the tx queue once enough descriptors
 * became available again.
 */
static void slic_xmit_complete(struct slic_device *sdev)
{
	struct slic_tx_queue *txq = &sdev->txq;
	struct net_device *dev = sdev->netdev;
	struct slic_tx_buffer *buff;
	unsigned int frames = 0;
	unsigned int bytes = 0;
	unsigned int idx;

	/* Limit processing to SLIC_MAX_TX_COMPLETIONS frames to avoid that new
	 * completions during processing keeps the loop running endlessly.
	 */
	do {
		idx = slic_next_compl_idx(sdev);
		if (idx == SLIC_INVALID_STAT_DESC_IDX)
			break;

		txq->done_idx = idx;
		buff = &txq->txbuffs[idx];

		if (unlikely(!buff->skb)) {
			netdev_warn(dev,
				    "no skb found for desc idx %i\n", idx);
			continue;
		}
		dma_unmap_single(&sdev->pdev->dev,
				 dma_unmap_addr(buff, map_addr),
				 dma_unmap_len(buff, map_len), DMA_TO_DEVICE);

		bytes += buff->skb->len;
		frames++;

		dev_kfree_skb_any(buff->skb);
		buff->skb = NULL;
	} while (frames < SLIC_MAX_TX_COMPLETIONS);
	/* make sure xmit sees the new value for done_idx */
	smp_wmb();

	u64_stats_update_begin(&sdev->stats.syncp);
	sdev->stats.tx_bytes += bytes;
	sdev->stats.tx_packets += frames;
	u64_stats_update_end(&sdev->stats.syncp);

	/* take the tx lock so the stopped/free-descs check and the wakeup
	 * are atomic with respect to the transmit path
	 */
	netif_tx_lock(dev);
	if (netif_queue_stopped(dev) &&
	    (slic_get_free_tx_descs(txq) >= SLIC_MIN_TX_WAKEUP_DESCS))
		netif_wake_queue(dev);
	netif_tx_unlock(dev);
}
391*4882a593Smuzhiyun
/* Allocate fresh rx buffers and hand them to the hardware until the queue
 * is sufficiently filled. Each buffer starts with a SLIC_RX_BUFF_ALIGN
 * aligned slic_rx_desc header, followed by the frame data, and both are
 * covered by a single DMA mapping.
 */
static void slic_refill_rx_queue(struct slic_device *sdev, gfp_t gfp)
{
	const unsigned int ALIGN_MASK = SLIC_RX_BUFF_ALIGN - 1;
	unsigned int maplen = SLIC_RX_BUFF_SIZE;
	struct slic_rx_queue *rxq = &sdev->rxq;
	struct net_device *dev = sdev->netdev;
	struct slic_rx_buffer *buff;
	struct slic_rx_desc *desc;
	unsigned int misalign;
	unsigned int offset;
	struct sk_buff *skb;
	dma_addr_t paddr;

	while (slic_get_free_rx_descs(rxq) > SLIC_MAX_REQ_RX_DESCS) {
		/* extra ALIGN_MASK bytes allow aligning the mapped address */
		skb = alloc_skb(maplen + ALIGN_MASK, gfp);
		if (!skb)
			break;

		paddr = dma_map_single(&sdev->pdev->dev, skb->data, maplen,
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(&sdev->pdev->dev, paddr)) {
			netdev_err(dev, "mapping rx packet failed\n");
			/* drop skb */
			dev_kfree_skb_any(skb);
			break;
		}
		/* ensure head buffer descriptors are 256 byte aligned */
		offset = 0;
		misalign = paddr & ALIGN_MASK;
		if (misalign) {
			offset = SLIC_RX_BUFF_ALIGN - misalign;
			skb_reserve(skb, offset);
		}
		/* the HW expects dma chunks for descriptor + frame data */
		desc = (struct slic_rx_desc *)skb->data;
		/* temporarily sync descriptor for CPU to clear status */
		dma_sync_single_for_cpu(&sdev->pdev->dev, paddr,
					offset + sizeof(*desc),
					DMA_FROM_DEVICE);
		desc->status = 0;
		/* return it to HW again */
		dma_sync_single_for_device(&sdev->pdev->dev, paddr,
					   offset + sizeof(*desc),
					   DMA_FROM_DEVICE);

		buff = &rxq->rxbuffs[rxq->put_idx];
		buff->skb = skb;
		dma_unmap_addr_set(buff, map_addr, paddr);
		dma_unmap_len_set(buff, map_len, maplen);
		buff->addr_offset = offset;
		/* complete write to descriptor before it is handed to HW */
		wmb();
		/* head buffer descriptors are placed immediately before skb */
		slic_write(sdev, SLIC_REG_HBAR, lower_32_bits(paddr) + offset);
		rxq->put_idx = slic_next_queue_idx(rxq->put_idx, rxq->len);
	}
}
449*4882a593Smuzhiyun
/* Decode the per-frame error bits in the rx header at skb->data and bump
 * the matching statistics counters. The header layout differs between the
 * Oasis and Mojave chip models; in either case rx_errors is incremented
 * once for the frame.
 */
static void slic_handle_frame_error(struct slic_device *sdev,
				    struct sk_buff *skb)
{
	struct slic_stats *stats = &sdev->stats;

	if (sdev->model == SLIC_MODEL_OASIS) {
		/* Oasis reports two separate status words with one flag
		 * per error condition
		 */
		struct slic_rx_info_oasis *info;
		u32 status_b;
		u32 status;

		info = (struct slic_rx_info_oasis *)skb->data;
		status = le32_to_cpu(info->frame_status);
		status_b = le32_to_cpu(info->frame_status_b);
		/* transport layer */
		if (status_b & SLIC_VRHSTATB_TPCSUM)
			SLIC_INC_STATS_COUNTER(stats, rx_tpcsum);
		if (status & SLIC_VRHSTAT_TPOFLO)
			SLIC_INC_STATS_COUNTER(stats, rx_tpoflow);
		if (status_b & SLIC_VRHSTATB_TPHLEN)
			SLIC_INC_STATS_COUNTER(stats, rx_tphlen);
		/* ip layer */
		if (status_b & SLIC_VRHSTATB_IPCSUM)
			SLIC_INC_STATS_COUNTER(stats, rx_ipcsum);
		if (status_b & SLIC_VRHSTATB_IPLERR)
			SLIC_INC_STATS_COUNTER(stats, rx_iplen);
		if (status_b & SLIC_VRHSTATB_IPHERR)
			SLIC_INC_STATS_COUNTER(stats, rx_iphlen);
		/* link layer */
		if (status_b & SLIC_VRHSTATB_RCVE)
			SLIC_INC_STATS_COUNTER(stats, rx_early);
		if (status_b & SLIC_VRHSTATB_BUFF)
			SLIC_INC_STATS_COUNTER(stats, rx_buffoflow);
		if (status_b & SLIC_VRHSTATB_CODE)
			SLIC_INC_STATS_COUNTER(stats, rx_lcode);
		if (status_b & SLIC_VRHSTATB_DRBL)
			SLIC_INC_STATS_COUNTER(stats, rx_drbl);
		if (status_b & SLIC_VRHSTATB_CRC)
			SLIC_INC_STATS_COUNTER(stats, rx_crc);
		if (status & SLIC_VRHSTAT_802OE)
			SLIC_INC_STATS_COUNTER(stats, rx_oflow802);
		if (status_b & SLIC_VRHSTATB_802UE)
			SLIC_INC_STATS_COUNTER(stats, rx_uflow802);
		if (status_b & SLIC_VRHSTATB_CARRE)
			SLIC_INC_STATS_COUNTER(stats, tx_carrier);
	} else { /* mojave */
		/* Mojave encodes the error cause in bit fields of a single
		 * status word, one field per layer
		 */
		struct slic_rx_info_mojave *info;
		u32 status;

		info = (struct slic_rx_info_mojave *)skb->data;
		status = le32_to_cpu(info->frame_status);
		/* transport layer */
		if (status & SLIC_VGBSTAT_XPERR) {
			u32 xerr = status >> SLIC_VGBSTAT_XERRSHFT;

			if (xerr == SLIC_VGBSTAT_XCSERR)
				SLIC_INC_STATS_COUNTER(stats, rx_tpcsum);
			if (xerr == SLIC_VGBSTAT_XUFLOW)
				SLIC_INC_STATS_COUNTER(stats, rx_tpoflow);
			if (xerr == SLIC_VGBSTAT_XHLEN)
				SLIC_INC_STATS_COUNTER(stats, rx_tphlen);
		}
		/* ip layer */
		if (status & SLIC_VGBSTAT_NETERR) {
			u32 nerr = status >> SLIC_VGBSTAT_NERRSHFT &
				   SLIC_VGBSTAT_NERRMSK;

			if (nerr == SLIC_VGBSTAT_NCSERR)
				SLIC_INC_STATS_COUNTER(stats, rx_ipcsum);
			if (nerr == SLIC_VGBSTAT_NUFLOW)
				SLIC_INC_STATS_COUNTER(stats, rx_iplen);
			if (nerr == SLIC_VGBSTAT_NHLEN)
				SLIC_INC_STATS_COUNTER(stats, rx_iphlen);
		}
		/* link layer */
		if (status & SLIC_VGBSTAT_LNKERR) {
			u32 lerr = status & SLIC_VGBSTAT_LERRMSK;

			if (lerr == SLIC_VGBSTAT_LDEARLY)
				SLIC_INC_STATS_COUNTER(stats, rx_early);
			if (lerr == SLIC_VGBSTAT_LBOFLO)
				SLIC_INC_STATS_COUNTER(stats, rx_buffoflow);
			if (lerr == SLIC_VGBSTAT_LCODERR)
				SLIC_INC_STATS_COUNTER(stats, rx_lcode);
			if (lerr == SLIC_VGBSTAT_LDBLNBL)
				SLIC_INC_STATS_COUNTER(stats, rx_drbl);
			if (lerr == SLIC_VGBSTAT_LCRCERR)
				SLIC_INC_STATS_COUNTER(stats, rx_crc);
			if (lerr == SLIC_VGBSTAT_LOFLO)
				SLIC_INC_STATS_COUNTER(stats, rx_oflow802);
			if (lerr == SLIC_VGBSTAT_LUFLO)
				SLIC_INC_STATS_COUNTER(stats, rx_uflow802);
		}
	}
	SLIC_INC_STATS_COUNTER(stats, rx_errors);
}
545*4882a593Smuzhiyun
/* Process up to @todo received frames: check the per-buffer status written
 * by the hardware, hand good frames to the network stack via GRO, count
 * bad ones, and refill the rx queue afterwards. The number of frames
 * actually processed is NOT written to *done here (presumably done by the
 * caller in the chunk past this view — verify).
 */
static void slic_handle_receive(struct slic_device *sdev, unsigned int todo,
				unsigned int *done)
{
	struct slic_rx_queue *rxq = &sdev->rxq;
	struct net_device *dev = sdev->netdev;
	struct slic_rx_buffer *buff;
	struct slic_rx_desc *desc;
	unsigned int frames = 0;
	unsigned int bytes = 0;
	struct sk_buff *skb;
	u32 status;
	u32 len;

	while (todo && (rxq->done_idx != rxq->put_idx)) {
		buff = &rxq->rxbuffs[rxq->done_idx];

		skb = buff->skb;
		if (!skb)
			break;

		desc = (struct slic_rx_desc *)skb->data;

		/* sync only the descriptor header to peek at the status */
		dma_sync_single_for_cpu(&sdev->pdev->dev,
					dma_unmap_addr(buff, map_addr),
					buff->addr_offset + sizeof(*desc),
					DMA_FROM_DEVICE);

		status = le32_to_cpu(desc->status);
		if (!(status & SLIC_IRHDDR_SVALID)) {
			/* not completed yet: give the header back to the HW
			 * and stop processing
			 */
			dma_sync_single_for_device(&sdev->pdev->dev,
						   dma_unmap_addr(buff,
								  map_addr),
						   buff->addr_offset +
						   sizeof(*desc),
						   DMA_FROM_DEVICE);
			break;
		}

		buff->skb = NULL;

		dma_unmap_single(&sdev->pdev->dev,
				 dma_unmap_addr(buff, map_addr),
				 dma_unmap_len(buff, map_len),
				 DMA_FROM_DEVICE);

		/* skip rx descriptor that is placed before the frame data */
		skb_reserve(skb, SLIC_RX_BUFF_HDR_SIZE);

		if (unlikely(status & SLIC_IRHDDR_ERR)) {
			slic_handle_frame_error(sdev, skb);
			dev_kfree_skb_any(skb);
		} else {
			struct ethhdr *eh = (struct ethhdr *)skb->data;

			if (is_multicast_ether_addr(eh->h_dest))
				SLIC_INC_STATS_COUNTER(&sdev->stats, rx_mcasts);

			len = le32_to_cpu(desc->length) & SLIC_IRHDDR_FLEN_MSK;
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);
			/* HW validated the checksums already */
			skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&sdev->napi, skb);

			bytes += len;
			frames++;
		}
		rxq->done_idx = slic_next_queue_idx(rxq->done_idx, rxq->len);
		todo--;
	}

	u64_stats_update_begin(&sdev->stats.syncp);
	sdev->stats.rx_bytes += bytes;
	sdev->stats.rx_packets += frames;
	u64_stats_update_end(&sdev->stats.syncp);

	slic_refill_rx_queue(sdev, GFP_ATOMIC);
}
624*4882a593Smuzhiyun
/* Read the link status word the hardware deposited in shared memory,
 * translate it into speed/duplex and apply the result. No link bit means
 * the settings are marked unknown, which takes the carrier down.
 */
static void slic_handle_link_irq(struct slic_device *sdev)
{
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data = sm->shmem_data;
	unsigned int duplex;
	int speed;
	u32 link;

	link = le32_to_cpu(sm_data->link);

	if (link & SLIC_GIG_LINKUP) {
		if (link & SLIC_GIG_SPEED_1000)
			speed = SPEED_1000;
		else if (link & SLIC_GIG_SPEED_100)
			speed = SPEED_100;
		else
			speed = SPEED_10;

		duplex = (link & SLIC_GIG_FULLDUPLEX) ? DUPLEX_FULL :
							DUPLEX_HALF;
	} else {
		duplex = DUPLEX_UNKNOWN;
		speed = SPEED_UNKNOWN;
	}
	slic_configure_link(sdev, speed, duplex);
}
651*4882a593Smuzhiyun
/* Handle a UPR completion interrupt: dequeue the request that caused it,
 * re-queue it on error (for link status requests), otherwise process the
 * result and free the request.
 */
static void slic_handle_upr_irq(struct slic_device *sdev, u32 irqs)
{
	struct slic_upr *upr;

	/* remove upr that caused this irq (always the first entry in list) */
	upr = slic_dequeue_upr(sdev);
	if (!upr) {
		netdev_warn(sdev->netdev, "no upr found on list\n");
		return;
	}

	if (upr->type == SLIC_UPR_LSTAT) {
		if (unlikely(irqs & SLIC_ISR_UPCERR_MASK)) {
			/* try again */
			slic_queue_upr(sdev, upr);
			return;
		}
		slic_handle_link_irq(sdev);
	}
	kfree(upr);
}
673*4882a593Smuzhiyun
/* Request an asynchronous link status update: queue an LSTAT UPR whose
 * result lands at shmem.link_paddr. Completion is signalled by an UPC
 * interrupt and processed in slic_handle_upr_irq()/slic_handle_link_irq().
 * Returns 0 or a negative error code from slic_new_upr().
 */
static int slic_handle_link_change(struct slic_device *sdev)
{
	return slic_new_upr(sdev, SLIC_UPR_LSTAT, sdev->shmem.link_paddr);
}
678*4882a593Smuzhiyun
/* Account the error condition(s) reported in @isr in the device stats. */
static void slic_handle_err_irq(struct slic_device *sdev, u32 isr)
{
	struct slic_stats *stats = &sdev->stats;
	bool counted = false;

	if (isr & SLIC_ISR_RMISS) {
		SLIC_INC_STATS_COUNTER(stats, rx_buff_miss);
		counted = true;
	}
	if (isr & SLIC_ISR_XDROP) {
		SLIC_INC_STATS_COUNTER(stats, tx_dropped);
		counted = true;
	}
	/* anything else is an unspecified irq error */
	if (!counted)
		SLIC_INC_STATS_COUNTER(stats, irq_errs);
}
690*4882a593Smuzhiyun
/* Dispatch every interrupt cause reported in @isr to its handler.
 *
 * @todo and @done carry the NAPI budget: they are passed through to the
 * receive handler, which advances *done (compared against the budget by
 * the caller, slic_poll()).
 */
static void slic_handle_irq(struct slic_device *sdev, u32 isr,
			    unsigned int todo, unsigned int *done)
{
	if (isr & SLIC_ISR_ERR)
		slic_handle_err_irq(sdev, isr);

	if (isr & SLIC_ISR_LEVENT)
		slic_handle_link_change(sdev);

	if (isr & SLIC_ISR_UPC_MASK)
		slic_handle_upr_irq(sdev, isr);

	if (isr & SLIC_ISR_RCV)
		slic_handle_receive(sdev, todo, done);

	if (isr & SLIC_ISR_CMD)
		slic_xmit_complete(sdev);
}
709*4882a593Smuzhiyun
/* NAPI poll callback: process the events indicated by the shared-memory
 * isr word and, if the budget was not exhausted, clear the word and
 * re-enable card interrupts by writing the ISR register.
 */
static int slic_poll(struct napi_struct *napi, int todo)
{
	struct slic_device *sdev = container_of(napi, struct slic_device, napi);
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data = sm->shmem_data;
	u32 isr = le32_to_cpu(sm_data->isr);
	int done = 0;

	slic_handle_irq(sdev, isr, todo, &done);

	if (done < todo) {
		napi_complete_done(napi, done);
		/* reenable irqs */
		sm_data->isr = 0;
		/* make sure sm_data->isr is cleared before irqs are reenabled */
		wmb();
		slic_write(sdev, SLIC_REG_ISR, 0);
		slic_flush_write(sdev);
	}

	return done;
}
732*4882a593Smuzhiyun
/* Hard irq handler. Masks further card interrupts via ICR, then checks
 * the shared-memory isr word: zero means the irq was not ours (shared
 * line), otherwise NAPI is scheduled and the word is handled (and the
 * interrupt re-enabled) in slic_poll().
 */
static irqreturn_t slic_irq(int irq, void *dev_id)
{
	struct slic_device *sdev = dev_id;
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data = sm->shmem_data;

	slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_MASK);
	slic_flush_write(sdev);
	/* make sure sm_data->isr is read after ICR_INT_MASK is set
	 * (NOTE(review): the comment describes read ordering but a write
	 * barrier is used — confirm wmb() is the intended barrier here)
	 */
	wmb();

	if (!sm_data->isr) {
		dma_rmb();
		/* spurious interrupt */
		slic_write(sdev, SLIC_REG_ISR, 0);
		slic_flush_write(sdev);
		return IRQ_NONE;
	}

	napi_schedule_irqoff(&sdev->napi);

	return IRQ_HANDLED;
}
756*4882a593Smuzhiyun
/* Soft-reset the card and give it 1 ms to settle. The PCI config read
 * has no other purpose than posting the reset write.
 */
static void slic_card_reset(struct slic_device *sdev)
{
	u16 cmd;

	slic_write(sdev, SLIC_REG_RESET, SLIC_RESET_MAGIC);
	/* flush write by means of config space */
	pci_read_config_word(sdev->pdev, PCI_COMMAND, &cmd);
	mdelay(1);
}
766*4882a593Smuzhiyun
slic_init_stat_queue(struct slic_device * sdev)767*4882a593Smuzhiyun static int slic_init_stat_queue(struct slic_device *sdev)
768*4882a593Smuzhiyun {
769*4882a593Smuzhiyun const unsigned int DESC_ALIGN_MASK = SLIC_STATS_DESC_ALIGN - 1;
770*4882a593Smuzhiyun struct slic_stat_queue *stq = &sdev->stq;
771*4882a593Smuzhiyun struct slic_stat_desc *descs;
772*4882a593Smuzhiyun unsigned int misalign;
773*4882a593Smuzhiyun unsigned int offset;
774*4882a593Smuzhiyun dma_addr_t paddr;
775*4882a593Smuzhiyun size_t size;
776*4882a593Smuzhiyun int err;
777*4882a593Smuzhiyun int i;
778*4882a593Smuzhiyun
779*4882a593Smuzhiyun stq->len = SLIC_NUM_STAT_DESCS;
780*4882a593Smuzhiyun stq->active_array = 0;
781*4882a593Smuzhiyun stq->done_idx = 0;
782*4882a593Smuzhiyun
783*4882a593Smuzhiyun size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK;
784*4882a593Smuzhiyun
785*4882a593Smuzhiyun for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
786*4882a593Smuzhiyun descs = dma_alloc_coherent(&sdev->pdev->dev, size, &paddr,
787*4882a593Smuzhiyun GFP_KERNEL);
788*4882a593Smuzhiyun if (!descs) {
789*4882a593Smuzhiyun netdev_err(sdev->netdev,
790*4882a593Smuzhiyun "failed to allocate status descriptors\n");
791*4882a593Smuzhiyun err = -ENOMEM;
792*4882a593Smuzhiyun goto free_descs;
793*4882a593Smuzhiyun }
794*4882a593Smuzhiyun /* ensure correct alignment */
795*4882a593Smuzhiyun offset = 0;
796*4882a593Smuzhiyun misalign = paddr & DESC_ALIGN_MASK;
797*4882a593Smuzhiyun if (misalign) {
798*4882a593Smuzhiyun offset = SLIC_STATS_DESC_ALIGN - misalign;
799*4882a593Smuzhiyun descs += offset;
800*4882a593Smuzhiyun paddr += offset;
801*4882a593Smuzhiyun }
802*4882a593Smuzhiyun
803*4882a593Smuzhiyun slic_write(sdev, SLIC_REG_RBAR, lower_32_bits(paddr) |
804*4882a593Smuzhiyun stq->len);
805*4882a593Smuzhiyun stq->descs[i] = descs;
806*4882a593Smuzhiyun stq->paddr[i] = paddr;
807*4882a593Smuzhiyun stq->addr_offset[i] = offset;
808*4882a593Smuzhiyun }
809*4882a593Smuzhiyun
810*4882a593Smuzhiyun stq->mem_size = size;
811*4882a593Smuzhiyun
812*4882a593Smuzhiyun return 0;
813*4882a593Smuzhiyun
814*4882a593Smuzhiyun free_descs:
815*4882a593Smuzhiyun while (i--) {
816*4882a593Smuzhiyun dma_free_coherent(&sdev->pdev->dev, stq->mem_size,
817*4882a593Smuzhiyun stq->descs[i] - stq->addr_offset[i],
818*4882a593Smuzhiyun stq->paddr[i] - stq->addr_offset[i]);
819*4882a593Smuzhiyun }
820*4882a593Smuzhiyun
821*4882a593Smuzhiyun return err;
822*4882a593Smuzhiyun }
823*4882a593Smuzhiyun
/* Free all status descriptor arrays. Subtracting the stored offset
 * undoes the manual alignment applied in slic_init_stat_queue().
 */
static void slic_free_stat_queue(struct slic_device *sdev)
{
	struct slic_stat_queue *stq = &sdev->stq;
	int i;

	for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
		dma_free_coherent(&sdev->pdev->dev, stq->mem_size,
				  stq->descs[i] - stq->addr_offset[i],
				  stq->paddr[i] - stq->addr_offset[i]);
	}
}
835*4882a593Smuzhiyun
slic_init_tx_queue(struct slic_device * sdev)836*4882a593Smuzhiyun static int slic_init_tx_queue(struct slic_device *sdev)
837*4882a593Smuzhiyun {
838*4882a593Smuzhiyun struct slic_tx_queue *txq = &sdev->txq;
839*4882a593Smuzhiyun struct slic_tx_buffer *buff;
840*4882a593Smuzhiyun struct slic_tx_desc *desc;
841*4882a593Smuzhiyun unsigned int i;
842*4882a593Smuzhiyun int err;
843*4882a593Smuzhiyun
844*4882a593Smuzhiyun txq->len = SLIC_NUM_TX_DESCS;
845*4882a593Smuzhiyun txq->put_idx = 0;
846*4882a593Smuzhiyun txq->done_idx = 0;
847*4882a593Smuzhiyun
848*4882a593Smuzhiyun txq->txbuffs = kcalloc(txq->len, sizeof(*buff), GFP_KERNEL);
849*4882a593Smuzhiyun if (!txq->txbuffs)
850*4882a593Smuzhiyun return -ENOMEM;
851*4882a593Smuzhiyun
852*4882a593Smuzhiyun txq->dma_pool = dma_pool_create("slic_pool", &sdev->pdev->dev,
853*4882a593Smuzhiyun sizeof(*desc), SLIC_TX_DESC_ALIGN,
854*4882a593Smuzhiyun 4096);
855*4882a593Smuzhiyun if (!txq->dma_pool) {
856*4882a593Smuzhiyun err = -ENOMEM;
857*4882a593Smuzhiyun netdev_err(sdev->netdev, "failed to create dma pool\n");
858*4882a593Smuzhiyun goto free_buffs;
859*4882a593Smuzhiyun }
860*4882a593Smuzhiyun
861*4882a593Smuzhiyun for (i = 0; i < txq->len; i++) {
862*4882a593Smuzhiyun buff = &txq->txbuffs[i];
863*4882a593Smuzhiyun desc = dma_pool_zalloc(txq->dma_pool, GFP_KERNEL,
864*4882a593Smuzhiyun &buff->desc_paddr);
865*4882a593Smuzhiyun if (!desc) {
866*4882a593Smuzhiyun netdev_err(sdev->netdev,
867*4882a593Smuzhiyun "failed to alloc pool chunk (%i)\n", i);
868*4882a593Smuzhiyun err = -ENOMEM;
869*4882a593Smuzhiyun goto free_descs;
870*4882a593Smuzhiyun }
871*4882a593Smuzhiyun
872*4882a593Smuzhiyun desc->hnd = cpu_to_le32((u32)(i + 1));
873*4882a593Smuzhiyun desc->cmd = SLIC_CMD_XMT_REQ;
874*4882a593Smuzhiyun desc->flags = 0;
875*4882a593Smuzhiyun desc->type = cpu_to_le32(SLIC_CMD_TYPE_DUMB);
876*4882a593Smuzhiyun buff->desc = desc;
877*4882a593Smuzhiyun }
878*4882a593Smuzhiyun
879*4882a593Smuzhiyun return 0;
880*4882a593Smuzhiyun
881*4882a593Smuzhiyun free_descs:
882*4882a593Smuzhiyun while (i--) {
883*4882a593Smuzhiyun buff = &txq->txbuffs[i];
884*4882a593Smuzhiyun dma_pool_free(txq->dma_pool, buff->desc, buff->desc_paddr);
885*4882a593Smuzhiyun }
886*4882a593Smuzhiyun dma_pool_destroy(txq->dma_pool);
887*4882a593Smuzhiyun
888*4882a593Smuzhiyun free_buffs:
889*4882a593Smuzhiyun kfree(txq->txbuffs);
890*4882a593Smuzhiyun
891*4882a593Smuzhiyun return err;
892*4882a593Smuzhiyun }
893*4882a593Smuzhiyun
slic_free_tx_queue(struct slic_device * sdev)894*4882a593Smuzhiyun static void slic_free_tx_queue(struct slic_device *sdev)
895*4882a593Smuzhiyun {
896*4882a593Smuzhiyun struct slic_tx_queue *txq = &sdev->txq;
897*4882a593Smuzhiyun struct slic_tx_buffer *buff;
898*4882a593Smuzhiyun unsigned int i;
899*4882a593Smuzhiyun
900*4882a593Smuzhiyun for (i = 0; i < txq->len; i++) {
901*4882a593Smuzhiyun buff = &txq->txbuffs[i];
902*4882a593Smuzhiyun dma_pool_free(txq->dma_pool, buff->desc, buff->desc_paddr);
903*4882a593Smuzhiyun if (!buff->skb)
904*4882a593Smuzhiyun continue;
905*4882a593Smuzhiyun
906*4882a593Smuzhiyun dma_unmap_single(&sdev->pdev->dev,
907*4882a593Smuzhiyun dma_unmap_addr(buff, map_addr),
908*4882a593Smuzhiyun dma_unmap_len(buff, map_len), DMA_TO_DEVICE);
909*4882a593Smuzhiyun consume_skb(buff->skb);
910*4882a593Smuzhiyun }
911*4882a593Smuzhiyun dma_pool_destroy(txq->dma_pool);
912*4882a593Smuzhiyun
913*4882a593Smuzhiyun kfree(txq->txbuffs);
914*4882a593Smuzhiyun }
915*4882a593Smuzhiyun
slic_init_rx_queue(struct slic_device * sdev)916*4882a593Smuzhiyun static int slic_init_rx_queue(struct slic_device *sdev)
917*4882a593Smuzhiyun {
918*4882a593Smuzhiyun struct slic_rx_queue *rxq = &sdev->rxq;
919*4882a593Smuzhiyun struct slic_rx_buffer *buff;
920*4882a593Smuzhiyun
921*4882a593Smuzhiyun rxq->len = SLIC_NUM_RX_LES;
922*4882a593Smuzhiyun rxq->done_idx = 0;
923*4882a593Smuzhiyun rxq->put_idx = 0;
924*4882a593Smuzhiyun
925*4882a593Smuzhiyun buff = kcalloc(rxq->len, sizeof(*buff), GFP_KERNEL);
926*4882a593Smuzhiyun if (!buff)
927*4882a593Smuzhiyun return -ENOMEM;
928*4882a593Smuzhiyun
929*4882a593Smuzhiyun rxq->rxbuffs = buff;
930*4882a593Smuzhiyun slic_refill_rx_queue(sdev, GFP_KERNEL);
931*4882a593Smuzhiyun
932*4882a593Smuzhiyun return 0;
933*4882a593Smuzhiyun }
934*4882a593Smuzhiyun
slic_free_rx_queue(struct slic_device * sdev)935*4882a593Smuzhiyun static void slic_free_rx_queue(struct slic_device *sdev)
936*4882a593Smuzhiyun {
937*4882a593Smuzhiyun struct slic_rx_queue *rxq = &sdev->rxq;
938*4882a593Smuzhiyun struct slic_rx_buffer *buff;
939*4882a593Smuzhiyun unsigned int i;
940*4882a593Smuzhiyun
941*4882a593Smuzhiyun /* free rx buffers */
942*4882a593Smuzhiyun for (i = 0; i < rxq->len; i++) {
943*4882a593Smuzhiyun buff = &rxq->rxbuffs[i];
944*4882a593Smuzhiyun
945*4882a593Smuzhiyun if (!buff->skb)
946*4882a593Smuzhiyun continue;
947*4882a593Smuzhiyun
948*4882a593Smuzhiyun dma_unmap_single(&sdev->pdev->dev,
949*4882a593Smuzhiyun dma_unmap_addr(buff, map_addr),
950*4882a593Smuzhiyun dma_unmap_len(buff, map_len),
951*4882a593Smuzhiyun DMA_FROM_DEVICE);
952*4882a593Smuzhiyun consume_skb(buff->skb);
953*4882a593Smuzhiyun }
954*4882a593Smuzhiyun kfree(rxq->rxbuffs);
955*4882a593Smuzhiyun }
956*4882a593Smuzhiyun
/* Program the PHY (via the WPHY mailbox register) to advertise the
 * supported link modes and (re)start autonegotiation. Fiber and copper
 * parts interpret MII register 4 differently, hence the two paths; on
 * copper, Marvell and Cicada PHYs need slightly different reset
 * handling.
 */
static void slic_set_link_autoneg(struct slic_device *sdev)
{
	unsigned int subid = sdev->pdev->subsystem_device;
	u32 val;

	if (sdev->is_fiber) {
		/* We've got a fiber gigabit interface, and register 4 is
		 * different in fiber mode than in copper mode.
		 */
		/* advertise FD only @1000 Mb */
		val = MII_ADVERTISE << 16 | ADVERTISE_1000XFULL |
		      ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		/* enable PAUSE frames */
		slic_write(sdev, SLIC_REG_WPHY, val);
		/* reset phy, enable auto-neg */
		val = MII_BMCR << 16 | BMCR_RESET | BMCR_ANENABLE |
		      BMCR_ANRESTART;
		slic_write(sdev, SLIC_REG_WPHY, val);
	} else {	/* copper gigabit */
		/* We've got a copper gigabit interface, and register 4 is
		 * different in copper mode than in fiber mode.
		 */
		/* advertise 10/100 Mb modes */
		val = MII_ADVERTISE << 16 | ADVERTISE_100FULL |
		      ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF;
		/* enable PAUSE frames */
		val |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		/* required by the Cicada PHY */
		val |= ADVERTISE_CSMA;
		slic_write(sdev, SLIC_REG_WPHY, val);

		/* advertise FD only @1000 Mb */
		val = MII_CTRL1000 << 16 | ADVERTISE_1000FULL;
		slic_write(sdev, SLIC_REG_WPHY, val);

		if (subid != PCI_SUBDEVICE_ID_ALACRITECH_CICADA) {
			/* if a Marvell PHY enable auto crossover */
			val = SLIC_MIICR_REG_16 | SLIC_MRV_REG16_XOVERON;
			slic_write(sdev, SLIC_REG_WPHY, val);

			/* reset phy, enable auto-neg */
			val = MII_BMCR << 16 | BMCR_RESET | BMCR_ANENABLE |
			      BMCR_ANRESTART;
			slic_write(sdev, SLIC_REG_WPHY, val);
		} else {
			/* enable and restart auto-neg (don't reset) */
			val = MII_BMCR << 16 | BMCR_ANENABLE | BMCR_ANRESTART;
			slic_write(sdev, SLIC_REG_WPHY, val);
		}
	}
}
1008*4882a593Smuzhiyun
slic_set_mac_address(struct slic_device * sdev)1009*4882a593Smuzhiyun static void slic_set_mac_address(struct slic_device *sdev)
1010*4882a593Smuzhiyun {
1011*4882a593Smuzhiyun u8 *addr = sdev->netdev->dev_addr;
1012*4882a593Smuzhiyun u32 val;
1013*4882a593Smuzhiyun
1014*4882a593Smuzhiyun val = addr[5] | addr[4] << 8 | addr[3] << 16 | addr[2] << 24;
1015*4882a593Smuzhiyun
1016*4882a593Smuzhiyun slic_write(sdev, SLIC_REG_WRADDRAL, val);
1017*4882a593Smuzhiyun slic_write(sdev, SLIC_REG_WRADDRBL, val);
1018*4882a593Smuzhiyun
1019*4882a593Smuzhiyun val = addr[0] << 8 | addr[1];
1020*4882a593Smuzhiyun
1021*4882a593Smuzhiyun slic_write(sdev, SLIC_REG_WRADDRAH, val);
1022*4882a593Smuzhiyun slic_write(sdev, SLIC_REG_WRADDRBH, val);
1023*4882a593Smuzhiyun slic_flush_write(sdev);
1024*4882a593Smuzhiyun }
1025*4882a593Smuzhiyun
slic_read_dword_from_firmware(const struct firmware * fw,int * offset)1026*4882a593Smuzhiyun static u32 slic_read_dword_from_firmware(const struct firmware *fw, int *offset)
1027*4882a593Smuzhiyun {
1028*4882a593Smuzhiyun int idx = *offset;
1029*4882a593Smuzhiyun __le32 val;
1030*4882a593Smuzhiyun
1031*4882a593Smuzhiyun memcpy(&val, fw->data + *offset, sizeof(val));
1032*4882a593Smuzhiyun idx += 4;
1033*4882a593Smuzhiyun *offset = idx;
1034*4882a593Smuzhiyun
1035*4882a593Smuzhiyun return le32_to_cpu(val);
1036*4882a593Smuzhiyun }
1037*4882a593Smuzhiyun
/* receive sequencer firmware images referenced by this driver */
MODULE_FIRMWARE(SLIC_RCV_FIRMWARE_MOJAVE);
MODULE_FIRMWARE(SLIC_RCV_FIRMWARE_OASIS);
1040*4882a593Smuzhiyun
slic_load_rcvseq_firmware(struct slic_device * sdev)1041*4882a593Smuzhiyun static int slic_load_rcvseq_firmware(struct slic_device *sdev)
1042*4882a593Smuzhiyun {
1043*4882a593Smuzhiyun const struct firmware *fw;
1044*4882a593Smuzhiyun const char *file;
1045*4882a593Smuzhiyun u32 codelen;
1046*4882a593Smuzhiyun int idx = 0;
1047*4882a593Smuzhiyun u32 instr;
1048*4882a593Smuzhiyun u32 addr;
1049*4882a593Smuzhiyun int err;
1050*4882a593Smuzhiyun
1051*4882a593Smuzhiyun file = (sdev->model == SLIC_MODEL_OASIS) ? SLIC_RCV_FIRMWARE_OASIS :
1052*4882a593Smuzhiyun SLIC_RCV_FIRMWARE_MOJAVE;
1053*4882a593Smuzhiyun err = request_firmware(&fw, file, &sdev->pdev->dev);
1054*4882a593Smuzhiyun if (err) {
1055*4882a593Smuzhiyun dev_err(&sdev->pdev->dev,
1056*4882a593Smuzhiyun "failed to load receive sequencer firmware %s\n", file);
1057*4882a593Smuzhiyun return err;
1058*4882a593Smuzhiyun }
1059*4882a593Smuzhiyun /* Do an initial sanity check concerning firmware size now. A further
1060*4882a593Smuzhiyun * check follows below.
1061*4882a593Smuzhiyun */
1062*4882a593Smuzhiyun if (fw->size < SLIC_FIRMWARE_MIN_SIZE) {
1063*4882a593Smuzhiyun dev_err(&sdev->pdev->dev,
1064*4882a593Smuzhiyun "invalid firmware size %zu (min %u expected)\n",
1065*4882a593Smuzhiyun fw->size, SLIC_FIRMWARE_MIN_SIZE);
1066*4882a593Smuzhiyun err = -EINVAL;
1067*4882a593Smuzhiyun goto release;
1068*4882a593Smuzhiyun }
1069*4882a593Smuzhiyun
1070*4882a593Smuzhiyun codelen = slic_read_dword_from_firmware(fw, &idx);
1071*4882a593Smuzhiyun
1072*4882a593Smuzhiyun /* do another sanity check against firmware size */
1073*4882a593Smuzhiyun if ((codelen + 4) > fw->size) {
1074*4882a593Smuzhiyun dev_err(&sdev->pdev->dev,
1075*4882a593Smuzhiyun "invalid rcv-sequencer firmware size %zu\n", fw->size);
1076*4882a593Smuzhiyun err = -EINVAL;
1077*4882a593Smuzhiyun goto release;
1078*4882a593Smuzhiyun }
1079*4882a593Smuzhiyun
1080*4882a593Smuzhiyun /* download sequencer code to card */
1081*4882a593Smuzhiyun slic_write(sdev, SLIC_REG_RCV_WCS, SLIC_RCVWCS_BEGIN);
1082*4882a593Smuzhiyun for (addr = 0; addr < codelen; addr++) {
1083*4882a593Smuzhiyun __le32 val;
1084*4882a593Smuzhiyun /* write out instruction address */
1085*4882a593Smuzhiyun slic_write(sdev, SLIC_REG_RCV_WCS, addr);
1086*4882a593Smuzhiyun
1087*4882a593Smuzhiyun instr = slic_read_dword_from_firmware(fw, &idx);
1088*4882a593Smuzhiyun /* write out the instruction data low addr */
1089*4882a593Smuzhiyun slic_write(sdev, SLIC_REG_RCV_WCS, instr);
1090*4882a593Smuzhiyun
1091*4882a593Smuzhiyun val = (__le32)fw->data[idx];
1092*4882a593Smuzhiyun instr = le32_to_cpu(val);
1093*4882a593Smuzhiyun idx++;
1094*4882a593Smuzhiyun /* write out the instruction data high addr */
1095*4882a593Smuzhiyun slic_write(sdev, SLIC_REG_RCV_WCS, instr);
1096*4882a593Smuzhiyun }
1097*4882a593Smuzhiyun /* finish download */
1098*4882a593Smuzhiyun slic_write(sdev, SLIC_REG_RCV_WCS, SLIC_RCVWCS_FINISH);
1099*4882a593Smuzhiyun slic_flush_write(sdev);
1100*4882a593Smuzhiyun release:
1101*4882a593Smuzhiyun release_firmware(fw);
1102*4882a593Smuzhiyun
1103*4882a593Smuzhiyun return err;
1104*4882a593Smuzhiyun }
1105*4882a593Smuzhiyun
/* main sequencer (ucode) firmware images referenced by this driver */
MODULE_FIRMWARE(SLIC_FIRMWARE_MOJAVE);
MODULE_FIRMWARE(SLIC_FIRMWARE_OASIS);
1108*4882a593Smuzhiyun
/* Download the main sequencer ucode to the card and start it.
 *
 * Image layout (little-endian dwords): section count, per-section sizes,
 * per-section start addresses, then the instruction stream. Instructions
 * are written to the WCS register as address / low-dword / high-dword
 * triples. Sections at addresses >= 0x8000 are replayed a second time
 * with SLIC_WCS_COMPARE set (presumably a hardware-side verification
 * pass — confirm against the device documentation).
 */
static int slic_load_firmware(struct slic_device *sdev)
{
	u32 sectstart[SLIC_FIRMWARE_MAX_SECTIONS];
	u32 sectsize[SLIC_FIRMWARE_MAX_SECTIONS];
	const struct firmware *fw;
	unsigned int datalen;
	const char *file;
	int code_start;
	unsigned int i;
	u32 numsects;
	int idx = 0;
	u32 sect;
	u32 instr;
	u32 addr;
	u32 base;
	int err;

	file = (sdev->model == SLIC_MODEL_OASIS) ? SLIC_FIRMWARE_OASIS :
	       SLIC_FIRMWARE_MOJAVE;
	err = request_firmware(&fw, file, &sdev->pdev->dev);
	if (err) {
		dev_err(&sdev->pdev->dev, "failed to load firmware %s\n", file);
		return err;
	}
	/* Do an initial sanity check concerning firmware size now. A further
	 * check follows below.
	 */
	if (fw->size < SLIC_FIRMWARE_MIN_SIZE) {
		dev_err(&sdev->pdev->dev,
			"invalid firmware size %zu (min is %u)\n", fw->size,
			SLIC_FIRMWARE_MIN_SIZE);
		err = -EINVAL;
		goto release;
	}

	numsects = slic_read_dword_from_firmware(fw, &idx);
	if (numsects == 0 || numsects > SLIC_FIRMWARE_MAX_SECTIONS) {
		dev_err(&sdev->pdev->dev,
			"invalid number of sections in firmware: %u", numsects);
		err = -EINVAL;
		goto release;
	}

	/* NOTE(review): datalen is a 32-bit sum of firmware-supplied
	 * section sizes; an overflow here would defeat the size check
	 * below — acceptable only if the image is trusted; confirm.
	 */
	datalen = numsects * 8 + 4;
	for (i = 0; i < numsects; i++) {
		sectsize[i] = slic_read_dword_from_firmware(fw, &idx);
		datalen += sectsize[i];
	}

	/* do another sanity check against firmware size */
	if (datalen > fw->size) {
		dev_err(&sdev->pdev->dev,
			"invalid firmware size %zu (expected >= %u)\n",
			fw->size, datalen);
		err = -EINVAL;
		goto release;
	}
	/* get sections */
	for (i = 0; i < numsects; i++)
		sectstart[i] = slic_read_dword_from_firmware(fw, &idx);

	/* remember where the instruction stream begins: it is walked twice */
	code_start = idx;
	instr = slic_read_dword_from_firmware(fw, &idx);

	/* first pass: download all sections */
	for (sect = 0; sect < numsects; sect++) {
		unsigned int ssize = sectsize[sect] >> 3;

		base = sectstart[sect];

		for (addr = 0; addr < ssize; addr++) {
			/* write out instruction address */
			slic_write(sdev, SLIC_REG_WCS, base + addr);
			/* write out instruction to low addr */
			slic_write(sdev, SLIC_REG_WCS, instr);
			instr = slic_read_dword_from_firmware(fw, &idx);
			/* write out instruction to high addr */
			slic_write(sdev, SLIC_REG_WCS, instr);
			instr = slic_read_dword_from_firmware(fw, &idx);
		}
	}

	idx = code_start;

	/* second pass: replay sections at >= 0x8000 in compare mode */
	for (sect = 0; sect < numsects; sect++) {
		unsigned int ssize = sectsize[sect] >> 3;

		instr = slic_read_dword_from_firmware(fw, &idx);
		base = sectstart[sect];
		if (base < 0x8000)
			continue;

		for (addr = 0; addr < ssize; addr++) {
			/* write out instruction address */
			slic_write(sdev, SLIC_REG_WCS,
				   SLIC_WCS_COMPARE | (base + addr));
			/* write out instruction to low addr */
			slic_write(sdev, SLIC_REG_WCS, instr);
			instr = slic_read_dword_from_firmware(fw, &idx);
			/* write out instruction to high addr */
			slic_write(sdev, SLIC_REG_WCS, instr);
			instr = slic_read_dword_from_firmware(fw, &idx);
		}
	}
	slic_flush_write(sdev);
	mdelay(10);
	/* everything OK, kick off the card */
	slic_write(sdev, SLIC_REG_WCS, SLIC_WCS_START);
	slic_flush_write(sdev);
	/* wait long enough for ucode to init card and reach the mainloop */
	mdelay(20);
release:
	release_firmware(fw);

	return err;
}
1224*4882a593Smuzhiyun
/* Allocate the coherent shared-memory block through which the card
 * reports the interrupt status word (isr) and the link state. Both
 * stored dma addresses point into this single allocation.
 */
static int slic_init_shmem(struct slic_device *sdev)
{
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data;
	dma_addr_t paddr;

	sm_data = dma_alloc_coherent(&sdev->pdev->dev, sizeof(*sm_data),
				     &paddr, GFP_KERNEL);
	if (!sm_data) {
		dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n");
		return -ENOMEM;
	}

	sm->shmem_data = sm_data;
	sm->isr_paddr = paddr;
	sm->link_paddr = paddr + offsetof(struct slic_shmem_data, link);

	return 0;
}
1244*4882a593Smuzhiyun
slic_free_shmem(struct slic_device * sdev)1245*4882a593Smuzhiyun static void slic_free_shmem(struct slic_device *sdev)
1246*4882a593Smuzhiyun {
1247*4882a593Smuzhiyun struct slic_shmem *sm = &sdev->shmem;
1248*4882a593Smuzhiyun struct slic_shmem_data *sm_data = sm->shmem_data;
1249*4882a593Smuzhiyun
1250*4882a593Smuzhiyun dma_free_coherent(&sdev->pdev->dev, sizeof(*sm_data), sm_data,
1251*4882a593Smuzhiyun sm->isr_paddr);
1252*4882a593Smuzhiyun }
1253*4882a593Smuzhiyun
slic_init_iface(struct slic_device * sdev)1254*4882a593Smuzhiyun static int slic_init_iface(struct slic_device *sdev)
1255*4882a593Smuzhiyun {
1256*4882a593Smuzhiyun struct slic_shmem *sm = &sdev->shmem;
1257*4882a593Smuzhiyun int err;
1258*4882a593Smuzhiyun
1259*4882a593Smuzhiyun sdev->upr_list.pending = false;
1260*4882a593Smuzhiyun
1261*4882a593Smuzhiyun err = slic_init_shmem(sdev);
1262*4882a593Smuzhiyun if (err) {
1263*4882a593Smuzhiyun netdev_err(sdev->netdev, "failed to init shared memory\n");
1264*4882a593Smuzhiyun return err;
1265*4882a593Smuzhiyun }
1266*4882a593Smuzhiyun
1267*4882a593Smuzhiyun err = slic_load_firmware(sdev);
1268*4882a593Smuzhiyun if (err) {
1269*4882a593Smuzhiyun netdev_err(sdev->netdev, "failed to load firmware\n");
1270*4882a593Smuzhiyun goto free_sm;
1271*4882a593Smuzhiyun }
1272*4882a593Smuzhiyun
1273*4882a593Smuzhiyun err = slic_load_rcvseq_firmware(sdev);
1274*4882a593Smuzhiyun if (err) {
1275*4882a593Smuzhiyun netdev_err(sdev->netdev,
1276*4882a593Smuzhiyun "failed to load firmware for receive sequencer\n");
1277*4882a593Smuzhiyun goto free_sm;
1278*4882a593Smuzhiyun }
1279*4882a593Smuzhiyun
1280*4882a593Smuzhiyun slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_OFF);
1281*4882a593Smuzhiyun slic_flush_write(sdev);
1282*4882a593Smuzhiyun mdelay(1);
1283*4882a593Smuzhiyun
1284*4882a593Smuzhiyun err = slic_init_rx_queue(sdev);
1285*4882a593Smuzhiyun if (err) {
1286*4882a593Smuzhiyun netdev_err(sdev->netdev, "failed to init rx queue: %u\n", err);
1287*4882a593Smuzhiyun goto free_sm;
1288*4882a593Smuzhiyun }
1289*4882a593Smuzhiyun
1290*4882a593Smuzhiyun err = slic_init_tx_queue(sdev);
1291*4882a593Smuzhiyun if (err) {
1292*4882a593Smuzhiyun netdev_err(sdev->netdev, "failed to init tx queue: %u\n", err);
1293*4882a593Smuzhiyun goto free_rxq;
1294*4882a593Smuzhiyun }
1295*4882a593Smuzhiyun
1296*4882a593Smuzhiyun err = slic_init_stat_queue(sdev);
1297*4882a593Smuzhiyun if (err) {
1298*4882a593Smuzhiyun netdev_err(sdev->netdev, "failed to init status queue: %u\n",
1299*4882a593Smuzhiyun err);
1300*4882a593Smuzhiyun goto free_txq;
1301*4882a593Smuzhiyun }
1302*4882a593Smuzhiyun
1303*4882a593Smuzhiyun slic_write(sdev, SLIC_REG_ISP, lower_32_bits(sm->isr_paddr));
1304*4882a593Smuzhiyun napi_enable(&sdev->napi);
1305*4882a593Smuzhiyun /* disable irq mitigation */
1306*4882a593Smuzhiyun slic_write(sdev, SLIC_REG_INTAGG, 0);
1307*4882a593Smuzhiyun slic_write(sdev, SLIC_REG_ISR, 0);
1308*4882a593Smuzhiyun slic_flush_write(sdev);
1309*4882a593Smuzhiyun
1310*4882a593Smuzhiyun slic_set_mac_address(sdev);
1311*4882a593Smuzhiyun
1312*4882a593Smuzhiyun spin_lock_bh(&sdev->link_lock);
1313*4882a593Smuzhiyun sdev->duplex = DUPLEX_UNKNOWN;
1314*4882a593Smuzhiyun sdev->speed = SPEED_UNKNOWN;
1315*4882a593Smuzhiyun spin_unlock_bh(&sdev->link_lock);
1316*4882a593Smuzhiyun
1317*4882a593Smuzhiyun slic_set_link_autoneg(sdev);
1318*4882a593Smuzhiyun
1319*4882a593Smuzhiyun err = request_irq(sdev->pdev->irq, slic_irq, IRQF_SHARED, DRV_NAME,
1320*4882a593Smuzhiyun sdev);
1321*4882a593Smuzhiyun if (err) {
1322*4882a593Smuzhiyun netdev_err(sdev->netdev, "failed to request irq: %u\n", err);
1323*4882a593Smuzhiyun goto disable_napi;
1324*4882a593Smuzhiyun }
1325*4882a593Smuzhiyun
1326*4882a593Smuzhiyun slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_ON);
1327*4882a593Smuzhiyun slic_flush_write(sdev);
1328*4882a593Smuzhiyun /* request initial link status */
1329*4882a593Smuzhiyun err = slic_handle_link_change(sdev);
1330*4882a593Smuzhiyun if (err)
1331*4882a593Smuzhiyun netdev_warn(sdev->netdev,
1332*4882a593Smuzhiyun "failed to set initial link state: %u\n", err);
1333*4882a593Smuzhiyun return 0;
1334*4882a593Smuzhiyun
1335*4882a593Smuzhiyun disable_napi:
1336*4882a593Smuzhiyun napi_disable(&sdev->napi);
1337*4882a593Smuzhiyun slic_free_stat_queue(sdev);
1338*4882a593Smuzhiyun free_txq:
1339*4882a593Smuzhiyun slic_free_tx_queue(sdev);
1340*4882a593Smuzhiyun free_rxq:
1341*4882a593Smuzhiyun slic_free_rx_queue(sdev);
1342*4882a593Smuzhiyun free_sm:
1343*4882a593Smuzhiyun slic_free_shmem(sdev);
1344*4882a593Smuzhiyun slic_card_reset(sdev);
1345*4882a593Smuzhiyun
1346*4882a593Smuzhiyun return err;
1347*4882a593Smuzhiyun }
1348*4882a593Smuzhiyun
/* net_device .ndo_open: bring the interface up and start the tx queue.
 * Carrier is forced off first; it is asserted later once a link event
 * is processed.
 */
static int slic_open(struct net_device *dev)
{
	struct slic_device *sdev = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);

	err = slic_init_iface(sdev);
	if (!err) {
		netif_start_queue(dev);
		return 0;
	}

	netdev_err(dev, "failed to initialize interface: %i\n", err);
	return err;
}
1366*4882a593Smuzhiyun
/* net_device .ndo_stop: quiesce the adapter and release everything that
 * slic_open()/slic_init_iface() set up. The teardown order (stop queue ->
 * mask interrupts -> free irq -> reset MAC/PHY -> free queues -> card
 * reset) is deliberate and must be preserved.
 */
static int slic_close(struct net_device *dev)
{
	struct slic_device *sdev = netdev_priv(dev);
	u32 val;

	netif_stop_queue(dev);

	/* stop irq handling */
	napi_disable(&sdev->napi);
	slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_OFF);
	slic_write(sdev, SLIC_REG_ISR, 0);
	slic_flush_write(sdev);

	free_irq(sdev->pdev->irq, sdev);
	/* turn off RCV and XMT and power down PHY */
	val = SLIC_GXCR_RESET | SLIC_GXCR_PAUSEEN;
	slic_write(sdev, SLIC_REG_WXCFG, val);

	val = SLIC_GRCR_RESET | SLIC_GRCR_CTLEN | SLIC_GRCR_ADDRAEN |
	      SLIC_GRCR_HASHSIZE << SLIC_GRCR_HASHSIZE_SHIFT;
	slic_write(sdev, SLIC_REG_WRCFG, val);

	/* PHY register writes go through SLIC_REG_WPHY: regnum in the high
	 * half, value in the low half; power the PHY down via BMCR
	 */
	val = MII_BMCR << 16 | BMCR_PDOWN;
	slic_write(sdev, SLIC_REG_WPHY, val);
	slic_flush_write(sdev);

	/* drop any utility processor requests still pending */
	slic_clear_upr_list(&sdev->upr_list);
	slic_write(sdev, SLIC_REG_QUIESCE, 0);

	slic_free_stat_queue(sdev);
	slic_free_tx_queue(sdev);
	slic_free_rx_queue(sdev);
	slic_free_shmem(sdev);

	slic_card_reset(sdev);
	netif_carrier_off(dev);

	return 0;
}
1406*4882a593Smuzhiyun
/* net_device .ndo_start_xmit: post one tx descriptor per packet. Only the
 * linear skb data is mapped (skb_headlen()); the device does not advertise
 * NETIF_F_SG (see slic_probe()), so fragmented skbs are not expected here.
 */
static netdev_tx_t slic_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct slic_device *sdev = netdev_priv(dev);
	struct slic_tx_queue *txq = &sdev->txq;
	struct slic_tx_buffer *buff;
	struct slic_tx_desc *desc;
	dma_addr_t paddr;
	u32 cbar_val;
	u32 maplen;

	/* the queue is stopped below whenever descriptors run low, so
	 * reaching this path means flow control failed somewhere
	 */
	if (unlikely(slic_get_free_tx_descs(txq) < SLIC_MAX_REQ_TX_DESCS)) {
		netdev_err(dev, "BUG! not enough tx LEs left: %u\n",
			   slic_get_free_tx_descs(txq));
		return NETDEV_TX_BUSY;
	}

	maplen = skb_headlen(skb);
	paddr = dma_map_single(&sdev->pdev->dev, skb->data, maplen,
			       DMA_TO_DEVICE);
	if (dma_mapping_error(&sdev->pdev->dev, paddr)) {
		netdev_err(dev, "failed to map tx buffer\n");
		goto drop_skb;
	}

	/* stash skb and dma mapping so the completion path can unmap/free */
	buff = &txq->txbuffs[txq->put_idx];
	buff->skb = skb;
	dma_unmap_addr_set(buff, map_addr, paddr);
	dma_unmap_len_set(buff, map_len, maplen);

	/* single fragment, so total length and fragment length are equal */
	desc = buff->desc;
	desc->totlen = cpu_to_le32(maplen);
	desc->paddrl = cpu_to_le32(lower_32_bits(paddr));
	desc->paddrh = cpu_to_le32(upper_32_bits(paddr));
	desc->len = cpu_to_le32(maplen);

	txq->put_idx = slic_next_queue_idx(txq->put_idx, txq->len);

	/* low bit presumably flags a single posted descriptor to the HW —
	 * TODO confirm against hardware documentation
	 */
	cbar_val = lower_32_bits(buff->desc_paddr) | 1;
	/* complete writes to RAM and DMA before hardware is informed */
	wmb();

	slic_write(sdev, SLIC_REG_CBAR, cbar_val);

	/* throttle while descriptors are scarce; presumably re-woken from
	 * the tx completion path (not visible in this file section)
	 */
	if (slic_get_free_tx_descs(txq) < SLIC_MAX_REQ_TX_DESCS)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
drop_skb:
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}
1459*4882a593Smuzhiyun
/* net_device .ndo_get_stats64: copy the driver-maintained counters into
 * @lst. Fields of @lst not assigned here are left untouched.
 */
static void slic_get_stats(struct net_device *dev,
			   struct rtnl_link_stats64 *lst)
{
	struct slic_device *sdev = netdev_priv(dev);
	struct slic_stats *stats = &sdev->stats;

	SLIC_GET_STATS_COUNTER(lst->rx_packets, stats, rx_packets);
	SLIC_GET_STATS_COUNTER(lst->tx_packets, stats, tx_packets);
	SLIC_GET_STATS_COUNTER(lst->rx_bytes, stats, rx_bytes);
	SLIC_GET_STATS_COUNTER(lst->tx_bytes, stats, tx_bytes);
	SLIC_GET_STATS_COUNTER(lst->rx_errors, stats, rx_errors);
	/* missed-buffer events are reported as generic rx drops */
	SLIC_GET_STATS_COUNTER(lst->rx_dropped, stats, rx_buff_miss);
	SLIC_GET_STATS_COUNTER(lst->tx_dropped, stats, tx_dropped);
	SLIC_GET_STATS_COUNTER(lst->multicast, stats, rx_mcasts);
	SLIC_GET_STATS_COUNTER(lst->rx_over_errors, stats, rx_buffoflow);
	SLIC_GET_STATS_COUNTER(lst->rx_crc_errors, stats, rx_crc);
	SLIC_GET_STATS_COUNTER(lst->rx_fifo_errors, stats, rx_oflow802);
	SLIC_GET_STATS_COUNTER(lst->tx_carrier_errors, stats, tx_carrier);
}
1479*4882a593Smuzhiyun
slic_get_sset_count(struct net_device * dev,int sset)1480*4882a593Smuzhiyun static int slic_get_sset_count(struct net_device *dev, int sset)
1481*4882a593Smuzhiyun {
1482*4882a593Smuzhiyun switch (sset) {
1483*4882a593Smuzhiyun case ETH_SS_STATS:
1484*4882a593Smuzhiyun return ARRAY_SIZE(slic_stats_strings);
1485*4882a593Smuzhiyun default:
1486*4882a593Smuzhiyun return -EOPNOTSUPP;
1487*4882a593Smuzhiyun }
1488*4882a593Smuzhiyun }
1489*4882a593Smuzhiyun
/* ethtool .get_ethtool_stats: fill @data with the counters in exactly the
 * order of slic_stats_strings[] — the index of each entry must match the
 * position of its name in that array.
 */
static void slic_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *eth_stats, u64 *data)
{
	struct slic_device *sdev = netdev_priv(dev);
	struct slic_stats *stats = &sdev->stats;

	SLIC_GET_STATS_COUNTER(data[0], stats, rx_packets);
	SLIC_GET_STATS_COUNTER(data[1], stats, rx_bytes);
	SLIC_GET_STATS_COUNTER(data[2], stats, rx_mcasts);
	SLIC_GET_STATS_COUNTER(data[3], stats, rx_errors);
	SLIC_GET_STATS_COUNTER(data[4], stats, rx_buff_miss);
	SLIC_GET_STATS_COUNTER(data[5], stats, rx_tpcsum);
	SLIC_GET_STATS_COUNTER(data[6], stats, rx_tpoflow);
	SLIC_GET_STATS_COUNTER(data[7], stats, rx_tphlen);
	SLIC_GET_STATS_COUNTER(data[8], stats, rx_ipcsum);
	SLIC_GET_STATS_COUNTER(data[9], stats, rx_iplen);
	SLIC_GET_STATS_COUNTER(data[10], stats, rx_iphlen);
	SLIC_GET_STATS_COUNTER(data[11], stats, rx_early);
	SLIC_GET_STATS_COUNTER(data[12], stats, rx_buffoflow);
	SLIC_GET_STATS_COUNTER(data[13], stats, rx_lcode);
	SLIC_GET_STATS_COUNTER(data[14], stats, rx_drbl);
	SLIC_GET_STATS_COUNTER(data[15], stats, rx_crc);
	SLIC_GET_STATS_COUNTER(data[16], stats, rx_oflow802);
	SLIC_GET_STATS_COUNTER(data[17], stats, rx_uflow802);
	SLIC_GET_STATS_COUNTER(data[18], stats, tx_packets);
	SLIC_GET_STATS_COUNTER(data[19], stats, tx_bytes);
	SLIC_GET_STATS_COUNTER(data[20], stats, tx_carrier);
	SLIC_GET_STATS_COUNTER(data[21], stats, tx_dropped);
	SLIC_GET_STATS_COUNTER(data[22], stats, irq_errs);
}
1520*4882a593Smuzhiyun
/* ethtool .get_strings: export the statistics names for ETH_SS_STATS.
 * Fix: the original incremented the by-value parameter 'data' after the
 * final use — a dead statement with no effect; removed.
 */
static void slic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset != ETH_SS_STATS)
		return;

	memcpy(data, slic_stats_strings, sizeof(slic_stats_strings));
}
1528*4882a593Smuzhiyun
/* ethtool .get_drvinfo: report the driver name and the PCI bus location */
static void slic_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct slic_device *sdev = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
}
1537*4882a593Smuzhiyun
1538*4882a593Smuzhiyun static const struct ethtool_ops slic_ethtool_ops = {
1539*4882a593Smuzhiyun .get_drvinfo = slic_get_drvinfo,
1540*4882a593Smuzhiyun .get_link = ethtool_op_get_link,
1541*4882a593Smuzhiyun .get_strings = slic_get_strings,
1542*4882a593Smuzhiyun .get_ethtool_stats = slic_get_ethtool_stats,
1543*4882a593Smuzhiyun .get_sset_count = slic_get_sset_count,
1544*4882a593Smuzhiyun };
1545*4882a593Smuzhiyun
1546*4882a593Smuzhiyun static const struct net_device_ops slic_netdev_ops = {
1547*4882a593Smuzhiyun .ndo_open = slic_open,
1548*4882a593Smuzhiyun .ndo_stop = slic_close,
1549*4882a593Smuzhiyun .ndo_start_xmit = slic_xmit,
1550*4882a593Smuzhiyun .ndo_set_mac_address = eth_mac_addr,
1551*4882a593Smuzhiyun .ndo_get_stats64 = slic_get_stats,
1552*4882a593Smuzhiyun .ndo_set_rx_mode = slic_set_rx_mode,
1553*4882a593Smuzhiyun .ndo_validate_addr = eth_validate_addr,
1554*4882a593Smuzhiyun };
1555*4882a593Smuzhiyun
slic_eeprom_csum(unsigned char * eeprom,unsigned int len)1556*4882a593Smuzhiyun static u16 slic_eeprom_csum(unsigned char *eeprom, unsigned int len)
1557*4882a593Smuzhiyun {
1558*4882a593Smuzhiyun unsigned char *ptr = eeprom;
1559*4882a593Smuzhiyun u32 csum = 0;
1560*4882a593Smuzhiyun __le16 data;
1561*4882a593Smuzhiyun
1562*4882a593Smuzhiyun while (len > 1) {
1563*4882a593Smuzhiyun memcpy(&data, ptr, sizeof(data));
1564*4882a593Smuzhiyun csum += le16_to_cpu(data);
1565*4882a593Smuzhiyun ptr += 2;
1566*4882a593Smuzhiyun len -= 2;
1567*4882a593Smuzhiyun }
1568*4882a593Smuzhiyun if (len > 0)
1569*4882a593Smuzhiyun csum += *(u8 *)ptr;
1570*4882a593Smuzhiyun while (csum >> 16)
1571*4882a593Smuzhiyun csum = (csum & 0xFFFF) + ((csum >> 16) & 0xFFFF);
1572*4882a593Smuzhiyun return ~csum;
1573*4882a593Smuzhiyun }
1574*4882a593Smuzhiyun
1575*4882a593Smuzhiyun /* check eeprom size, magic and checksum */
slic_eeprom_valid(unsigned char * eeprom,unsigned int size)1576*4882a593Smuzhiyun static bool slic_eeprom_valid(unsigned char *eeprom, unsigned int size)
1577*4882a593Smuzhiyun {
1578*4882a593Smuzhiyun const unsigned int MAX_SIZE = 128;
1579*4882a593Smuzhiyun const unsigned int MIN_SIZE = 98;
1580*4882a593Smuzhiyun __le16 magic;
1581*4882a593Smuzhiyun __le16 csum;
1582*4882a593Smuzhiyun
1583*4882a593Smuzhiyun if (size < MIN_SIZE || size > MAX_SIZE)
1584*4882a593Smuzhiyun return false;
1585*4882a593Smuzhiyun memcpy(&magic, eeprom, sizeof(magic));
1586*4882a593Smuzhiyun if (le16_to_cpu(magic) != SLIC_EEPROM_MAGIC)
1587*4882a593Smuzhiyun return false;
1588*4882a593Smuzhiyun /* cut checksum bytes */
1589*4882a593Smuzhiyun size -= 2;
1590*4882a593Smuzhiyun memcpy(&csum, eeprom + size, sizeof(csum));
1591*4882a593Smuzhiyun
1592*4882a593Smuzhiyun return (le16_to_cpu(csum) == slic_eeprom_csum(eeprom, size));
1593*4882a593Smuzhiyun }
1594*4882a593Smuzhiyun
/* Read the adapter eeprom via a utility processor request (UPR) and set
 * the netdev MAC address from it.
 *
 * The eeprom is transferred by the card into a DMA buffer: interrupts are
 * masked, the interrupt status pointer (ISP) is set up temporarily, a
 * SLIC_UPR_CONFIG request is issued and the shared-memory ISR word is
 * polled (up to MAX_LOOPS ms) for the UPR-complete bit. The layout of the
 * eeprom differs between Oasis and Mojave models.
 *
 * Returns 0 on success or a negative error code.
 */
static int slic_read_eeprom(struct slic_device *sdev)
{
	unsigned int devfn = PCI_FUNC(sdev->pdev->devfn);
	struct slic_shmem *sm = &sdev->shmem;
	struct slic_shmem_data *sm_data = sm->shmem_data;
	const unsigned int MAX_LOOPS = 5000;
	unsigned int codesize;
	unsigned char *eeprom;
	struct slic_upr *upr;
	unsigned int i = 0;
	dma_addr_t paddr;
	int err = 0;
	u8 *mac[2];

	eeprom = dma_alloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE,
				    &paddr, GFP_KERNEL);
	if (!eeprom)
		return -ENOMEM;

	slic_write(sdev, SLIC_REG_ICR, SLIC_ICR_INT_OFF);
	/* setup ISP temporarily */
	slic_write(sdev, SLIC_REG_ISP, lower_32_bits(sm->isr_paddr));

	err = slic_new_upr(sdev, SLIC_UPR_CONFIG, paddr);
	if (!err) {
		/* busy-poll for completion; interrupts are off at this point */
		for (i = 0; i < MAX_LOOPS; i++) {
			if (le32_to_cpu(sm_data->isr) & SLIC_ISR_UPC)
				break;
			mdelay(1);
		}
		if (i == MAX_LOOPS) {
			dev_err(&sdev->pdev->dev,
				"timed out while waiting for eeprom data\n");
			err = -ETIMEDOUT;
		}
		/* the request is dequeued and freed even on timeout */
		upr = slic_dequeue_upr(sdev);
		kfree(upr);
	}

	/* tear the temporary ISP setup down again */
	slic_write(sdev, SLIC_REG_ISP, 0);
	slic_write(sdev, SLIC_REG_ISR, 0);
	slic_flush_write(sdev);

	if (err)
		goto free_eeprom;

	/* pick the model-specific eeprom layout */
	if (sdev->model == SLIC_MODEL_OASIS) {
		struct slic_oasis_eeprom *oee;

		oee = (struct slic_oasis_eeprom *)eeprom;
		mac[0] = oee->mac;
		mac[1] = oee->mac2;
		codesize = le16_to_cpu(oee->eeprom_code_size);
	} else {
		struct slic_mojave_eeprom *mee;

		mee = (struct slic_mojave_eeprom *)eeprom;
		mac[0] = mee->mac;
		mac[1] = mee->mac2;
		codesize = le16_to_cpu(mee->eeprom_code_size);
	}

	if (!slic_eeprom_valid(eeprom, codesize)) {
		dev_err(&sdev->pdev->dev, "invalid checksum in eeprom\n");
		err = -EINVAL;
		goto free_eeprom;
	}
	/* set mac address; assumes PCI function number is 0 or 1 on this
	 * hardware (mac[] has two entries) — NOTE(review): confirm
	 */
	ether_addr_copy(sdev->netdev->dev_addr, mac[devfn]);
free_eeprom:
	dma_free_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, eeprom, paddr);

	return err;
}
1669*4882a593Smuzhiyun
/* One-time initialization at probe time: set up locks and lists, reset the
 * card, load its firmware and read the MAC address from the eeprom. The
 * shared memory region is only needed temporarily for the eeprom transfer
 * and is freed again before returning; the card is left in reset state
 * until slic_open().
 *
 * Returns 0 on success or a negative error code.
 */
static int slic_init(struct slic_device *sdev)
{
	int err;

	spin_lock_init(&sdev->upper_lock);
	spin_lock_init(&sdev->link_lock);
	INIT_LIST_HEAD(&sdev->upr_list.list);
	spin_lock_init(&sdev->upr_list.lock);
	u64_stats_init(&sdev->stats.syncp);

	slic_card_reset(sdev);

	err = slic_load_firmware(sdev);
	if (err) {
		dev_err(&sdev->pdev->dev, "failed to load firmware\n");
		return err;
	}

	/* we need the shared memory to read EEPROM so set it up temporarily */
	err = slic_init_shmem(sdev);
	if (err) {
		dev_err(&sdev->pdev->dev, "failed to init shared memory\n");
		return err;
	}

	err = slic_read_eeprom(sdev);
	if (err) {
		dev_err(&sdev->pdev->dev, "failed to read eeprom\n");
		goto free_sm;
	}

	slic_card_reset(sdev);
	slic_free_shmem(sdev);

	return 0;
free_sm:
	slic_free_shmem(sdev);

	return err;
}
1710*4882a593Smuzhiyun
slic_is_fiber(unsigned short subdev)1711*4882a593Smuzhiyun static bool slic_is_fiber(unsigned short subdev)
1712*4882a593Smuzhiyun {
1713*4882a593Smuzhiyun switch (subdev) {
1714*4882a593Smuzhiyun /* Mojave */
1715*4882a593Smuzhiyun case PCI_SUBDEVICE_ID_ALACRITECH_1000X1F:
1716*4882a593Smuzhiyun case PCI_SUBDEVICE_ID_ALACRITECH_SES1001F: fallthrough;
1717*4882a593Smuzhiyun /* Oasis */
1718*4882a593Smuzhiyun case PCI_SUBDEVICE_ID_ALACRITECH_SEN2002XF:
1719*4882a593Smuzhiyun case PCI_SUBDEVICE_ID_ALACRITECH_SEN2001XF:
1720*4882a593Smuzhiyun case PCI_SUBDEVICE_ID_ALACRITECH_SEN2104EF:
1721*4882a593Smuzhiyun case PCI_SUBDEVICE_ID_ALACRITECH_SEN2102EF:
1722*4882a593Smuzhiyun return true;
1723*4882a593Smuzhiyun }
1724*4882a593Smuzhiyun return false;
1725*4882a593Smuzhiyun }
1726*4882a593Smuzhiyun
slic_configure_pci(struct pci_dev * pdev)1727*4882a593Smuzhiyun static void slic_configure_pci(struct pci_dev *pdev)
1728*4882a593Smuzhiyun {
1729*4882a593Smuzhiyun u16 old;
1730*4882a593Smuzhiyun u16 cmd;
1731*4882a593Smuzhiyun
1732*4882a593Smuzhiyun pci_read_config_word(pdev, PCI_COMMAND, &old);
1733*4882a593Smuzhiyun
1734*4882a593Smuzhiyun cmd = old | PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
1735*4882a593Smuzhiyun if (old != cmd)
1736*4882a593Smuzhiyun pci_write_config_word(pdev, PCI_COMMAND, cmd);
1737*4882a593Smuzhiyun }
1738*4882a593Smuzhiyun
slic_probe(struct pci_dev * pdev,const struct pci_device_id * ent)1739*4882a593Smuzhiyun static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1740*4882a593Smuzhiyun {
1741*4882a593Smuzhiyun struct slic_device *sdev;
1742*4882a593Smuzhiyun struct net_device *dev;
1743*4882a593Smuzhiyun int err;
1744*4882a593Smuzhiyun
1745*4882a593Smuzhiyun err = pci_enable_device(pdev);
1746*4882a593Smuzhiyun if (err) {
1747*4882a593Smuzhiyun dev_err(&pdev->dev, "failed to enable PCI device\n");
1748*4882a593Smuzhiyun return err;
1749*4882a593Smuzhiyun }
1750*4882a593Smuzhiyun
1751*4882a593Smuzhiyun pci_set_master(pdev);
1752*4882a593Smuzhiyun pci_try_set_mwi(pdev);
1753*4882a593Smuzhiyun
1754*4882a593Smuzhiyun slic_configure_pci(pdev);
1755*4882a593Smuzhiyun
1756*4882a593Smuzhiyun err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1757*4882a593Smuzhiyun if (err) {
1758*4882a593Smuzhiyun dev_err(&pdev->dev, "failed to setup DMA\n");
1759*4882a593Smuzhiyun goto disable;
1760*4882a593Smuzhiyun }
1761*4882a593Smuzhiyun
1762*4882a593Smuzhiyun dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1763*4882a593Smuzhiyun
1764*4882a593Smuzhiyun err = pci_request_regions(pdev, DRV_NAME);
1765*4882a593Smuzhiyun if (err) {
1766*4882a593Smuzhiyun dev_err(&pdev->dev, "failed to obtain PCI regions\n");
1767*4882a593Smuzhiyun goto disable;
1768*4882a593Smuzhiyun }
1769*4882a593Smuzhiyun
1770*4882a593Smuzhiyun dev = alloc_etherdev(sizeof(*sdev));
1771*4882a593Smuzhiyun if (!dev) {
1772*4882a593Smuzhiyun dev_err(&pdev->dev, "failed to alloc ethernet device\n");
1773*4882a593Smuzhiyun err = -ENOMEM;
1774*4882a593Smuzhiyun goto free_regions;
1775*4882a593Smuzhiyun }
1776*4882a593Smuzhiyun
1777*4882a593Smuzhiyun SET_NETDEV_DEV(dev, &pdev->dev);
1778*4882a593Smuzhiyun pci_set_drvdata(pdev, dev);
1779*4882a593Smuzhiyun dev->irq = pdev->irq;
1780*4882a593Smuzhiyun dev->netdev_ops = &slic_netdev_ops;
1781*4882a593Smuzhiyun dev->hw_features = NETIF_F_RXCSUM;
1782*4882a593Smuzhiyun dev->features |= dev->hw_features;
1783*4882a593Smuzhiyun
1784*4882a593Smuzhiyun dev->ethtool_ops = &slic_ethtool_ops;
1785*4882a593Smuzhiyun
1786*4882a593Smuzhiyun sdev = netdev_priv(dev);
1787*4882a593Smuzhiyun sdev->model = (pdev->device == PCI_DEVICE_ID_ALACRITECH_OASIS) ?
1788*4882a593Smuzhiyun SLIC_MODEL_OASIS : SLIC_MODEL_MOJAVE;
1789*4882a593Smuzhiyun sdev->is_fiber = slic_is_fiber(pdev->subsystem_device);
1790*4882a593Smuzhiyun sdev->pdev = pdev;
1791*4882a593Smuzhiyun sdev->netdev = dev;
1792*4882a593Smuzhiyun sdev->regs = ioremap(pci_resource_start(pdev, 0),
1793*4882a593Smuzhiyun pci_resource_len(pdev, 0));
1794*4882a593Smuzhiyun if (!sdev->regs) {
1795*4882a593Smuzhiyun dev_err(&pdev->dev, "failed to map registers\n");
1796*4882a593Smuzhiyun err = -ENOMEM;
1797*4882a593Smuzhiyun goto free_netdev;
1798*4882a593Smuzhiyun }
1799*4882a593Smuzhiyun
1800*4882a593Smuzhiyun err = slic_init(sdev);
1801*4882a593Smuzhiyun if (err) {
1802*4882a593Smuzhiyun dev_err(&pdev->dev, "failed to initialize driver\n");
1803*4882a593Smuzhiyun goto unmap;
1804*4882a593Smuzhiyun }
1805*4882a593Smuzhiyun
1806*4882a593Smuzhiyun netif_napi_add(dev, &sdev->napi, slic_poll, SLIC_NAPI_WEIGHT);
1807*4882a593Smuzhiyun netif_carrier_off(dev);
1808*4882a593Smuzhiyun
1809*4882a593Smuzhiyun err = register_netdev(dev);
1810*4882a593Smuzhiyun if (err) {
1811*4882a593Smuzhiyun dev_err(&pdev->dev, "failed to register net device: %i\n", err);
1812*4882a593Smuzhiyun goto unmap;
1813*4882a593Smuzhiyun }
1814*4882a593Smuzhiyun
1815*4882a593Smuzhiyun return 0;
1816*4882a593Smuzhiyun
1817*4882a593Smuzhiyun unmap:
1818*4882a593Smuzhiyun iounmap(sdev->regs);
1819*4882a593Smuzhiyun free_netdev:
1820*4882a593Smuzhiyun free_netdev(dev);
1821*4882a593Smuzhiyun free_regions:
1822*4882a593Smuzhiyun pci_release_regions(pdev);
1823*4882a593Smuzhiyun disable:
1824*4882a593Smuzhiyun pci_disable_device(pdev);
1825*4882a593Smuzhiyun
1826*4882a593Smuzhiyun return err;
1827*4882a593Smuzhiyun }
1828*4882a593Smuzhiyun
slic_remove(struct pci_dev * pdev)1829*4882a593Smuzhiyun static void slic_remove(struct pci_dev *pdev)
1830*4882a593Smuzhiyun {
1831*4882a593Smuzhiyun struct net_device *dev = pci_get_drvdata(pdev);
1832*4882a593Smuzhiyun struct slic_device *sdev = netdev_priv(dev);
1833*4882a593Smuzhiyun
1834*4882a593Smuzhiyun unregister_netdev(dev);
1835*4882a593Smuzhiyun iounmap(sdev->regs);
1836*4882a593Smuzhiyun free_netdev(dev);
1837*4882a593Smuzhiyun pci_release_regions(pdev);
1838*4882a593Smuzhiyun pci_disable_device(pdev);
1839*4882a593Smuzhiyun }
1840*4882a593Smuzhiyun
1841*4882a593Smuzhiyun static struct pci_driver slic_driver = {
1842*4882a593Smuzhiyun .name = DRV_NAME,
1843*4882a593Smuzhiyun .id_table = slic_id_tbl,
1844*4882a593Smuzhiyun .probe = slic_probe,
1845*4882a593Smuzhiyun .remove = slic_remove,
1846*4882a593Smuzhiyun };
1847*4882a593Smuzhiyun
1848*4882a593Smuzhiyun module_pci_driver(slic_driver);
1849*4882a593Smuzhiyun
1850*4882a593Smuzhiyun MODULE_DESCRIPTION("Alacritech non-accelerated SLIC driver");
1851*4882a593Smuzhiyun MODULE_AUTHOR("Lino Sanfilippo <LinoSanfilippo@gmx.de>");
1852*4882a593Smuzhiyun MODULE_LICENSE("GPL");
1853