1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Network device driver for Cell Processor-Based Blade and Celleb platform
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * (C) Copyright IBM Corp. 2005
6*4882a593Smuzhiyun * (C) Copyright 2006 TOSHIBA CORPORATION
7*4882a593Smuzhiyun *
8*4882a593Smuzhiyun * Authors : Utz Bacher <utz.bacher@de.ibm.com>
9*4882a593Smuzhiyun * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
10*4882a593Smuzhiyun */
11*4882a593Smuzhiyun
12*4882a593Smuzhiyun #include <linux/compiler.h>
13*4882a593Smuzhiyun #include <linux/crc32.h>
14*4882a593Smuzhiyun #include <linux/delay.h>
15*4882a593Smuzhiyun #include <linux/etherdevice.h>
16*4882a593Smuzhiyun #include <linux/ethtool.h>
17*4882a593Smuzhiyun #include <linux/firmware.h>
18*4882a593Smuzhiyun #include <linux/if_vlan.h>
19*4882a593Smuzhiyun #include <linux/in.h>
20*4882a593Smuzhiyun #include <linux/init.h>
21*4882a593Smuzhiyun #include <linux/interrupt.h>
22*4882a593Smuzhiyun #include <linux/gfp.h>
23*4882a593Smuzhiyun #include <linux/ioport.h>
24*4882a593Smuzhiyun #include <linux/ip.h>
25*4882a593Smuzhiyun #include <linux/kernel.h>
26*4882a593Smuzhiyun #include <linux/mii.h>
27*4882a593Smuzhiyun #include <linux/module.h>
28*4882a593Smuzhiyun #include <linux/netdevice.h>
29*4882a593Smuzhiyun #include <linux/device.h>
30*4882a593Smuzhiyun #include <linux/pci.h>
31*4882a593Smuzhiyun #include <linux/skbuff.h>
32*4882a593Smuzhiyun #include <linux/tcp.h>
33*4882a593Smuzhiyun #include <linux/types.h>
34*4882a593Smuzhiyun #include <linux/vmalloc.h>
35*4882a593Smuzhiyun #include <linux/wait.h>
36*4882a593Smuzhiyun #include <linux/workqueue.h>
37*4882a593Smuzhiyun #include <linux/bitops.h>
38*4882a593Smuzhiyun #include <net/checksum.h>
39*4882a593Smuzhiyun
40*4882a593Smuzhiyun #include "spider_net.h"
41*4882a593Smuzhiyun
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
	      "<Jens.Osterkamp@de.ibm.com>");
MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(VERSION);
MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME);

/* RX/TX descriptor ring sizes. Permissions 0444 make these visible but
 * read-only in sysfs, i.e. settable only at module load time. */
static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;

module_param(rx_descriptors, int, 0444);
module_param(tx_descriptors, int, 0444);

MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
		 "in rx chains");
MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
		 "in tx chain");

char spider_net_driver_name[] = "spidernet";

/* PCI IDs this driver binds to; exported via MODULE_DEVICE_TABLE so
 * userspace can autoload the module on device discovery. */
static const struct pci_device_id spider_net_pci_tbl[] = {
	{ PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
69*4882a593Smuzhiyun
/**
 * spider_net_read_reg - reads an SMMIO register of a card
 * @card: device structure
 * @reg: register to read from
 *
 * Return: the content of the specified SMMIO register.
 */
static inline u32
spider_net_read_reg(struct spider_net_card *card, u32 reg)
{
	/* We use the powerpc specific variants instead of readl_be() because
	 * we know spidernet is not a real PCI device and we can thus avoid the
	 * performance hit caused by the PCI workarounds.
	 */
	return in_be32(card->regs + reg);
}
86*4882a593Smuzhiyun
/**
 * spider_net_write_reg - writes to an SMMIO register of a card
 * @card: device structure
 * @reg: register to write to
 * @value: value to write into the specified SMMIO register
 */
static inline void
spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
{
	/* We use the powerpc specific variants instead of writel_be() because
	 * we know spidernet is not a real PCI device and we can thus avoid the
	 * performance hit caused by the PCI workarounds.
	 */
	out_be32(card->regs + reg, value);
}
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun /**
104*4882a593Smuzhiyun * spider_net_write_phy - write to phy register
105*4882a593Smuzhiyun * @netdev: adapter to be written to
106*4882a593Smuzhiyun * @mii_id: id of MII
107*4882a593Smuzhiyun * @reg: PHY register
108*4882a593Smuzhiyun * @val: value to be written to phy register
109*4882a593Smuzhiyun *
110*4882a593Smuzhiyun * spider_net_write_phy_register writes to an arbitrary PHY
111*4882a593Smuzhiyun * register via the spider GPCWOPCMD register. We assume the queue does
112*4882a593Smuzhiyun * not run full (not more than 15 commands outstanding).
113*4882a593Smuzhiyun **/
114*4882a593Smuzhiyun static void
spider_net_write_phy(struct net_device * netdev,int mii_id,int reg,int val)115*4882a593Smuzhiyun spider_net_write_phy(struct net_device *netdev, int mii_id,
116*4882a593Smuzhiyun int reg, int val)
117*4882a593Smuzhiyun {
118*4882a593Smuzhiyun struct spider_net_card *card = netdev_priv(netdev);
119*4882a593Smuzhiyun u32 writevalue;
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun writevalue = ((u32)mii_id << 21) |
122*4882a593Smuzhiyun ((u32)reg << 16) | ((u32)val);
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
125*4882a593Smuzhiyun }
126*4882a593Smuzhiyun
127*4882a593Smuzhiyun /**
128*4882a593Smuzhiyun * spider_net_read_phy - read from phy register
129*4882a593Smuzhiyun * @netdev: network device to be read from
130*4882a593Smuzhiyun * @mii_id: id of MII
131*4882a593Smuzhiyun * @reg: PHY register
132*4882a593Smuzhiyun *
133*4882a593Smuzhiyun * Returns value read from PHY register
134*4882a593Smuzhiyun *
135*4882a593Smuzhiyun * spider_net_write_phy reads from an arbitrary PHY
136*4882a593Smuzhiyun * register via the spider GPCROPCMD register
137*4882a593Smuzhiyun **/
138*4882a593Smuzhiyun static int
spider_net_read_phy(struct net_device * netdev,int mii_id,int reg)139*4882a593Smuzhiyun spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
140*4882a593Smuzhiyun {
141*4882a593Smuzhiyun struct spider_net_card *card = netdev_priv(netdev);
142*4882a593Smuzhiyun u32 readvalue;
143*4882a593Smuzhiyun
144*4882a593Smuzhiyun readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
145*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
148*4882a593Smuzhiyun * interrupt, as we poll for the completion of the read operation
149*4882a593Smuzhiyun * in spider_net_read_phy. Should take about 50 us */
150*4882a593Smuzhiyun do {
151*4882a593Smuzhiyun readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
152*4882a593Smuzhiyun } while (readvalue & SPIDER_NET_GPREXEC);
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun readvalue &= SPIDER_NET_GPRDAT_MASK;
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun return readvalue;
157*4882a593Smuzhiyun }
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun /**
160*4882a593Smuzhiyun * spider_net_setup_aneg - initial auto-negotiation setup
161*4882a593Smuzhiyun * @card: device structure
162*4882a593Smuzhiyun **/
163*4882a593Smuzhiyun static void
spider_net_setup_aneg(struct spider_net_card * card)164*4882a593Smuzhiyun spider_net_setup_aneg(struct spider_net_card *card)
165*4882a593Smuzhiyun {
166*4882a593Smuzhiyun struct mii_phy *phy = &card->phy;
167*4882a593Smuzhiyun u32 advertise = 0;
168*4882a593Smuzhiyun u16 bmsr, estat;
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
171*4882a593Smuzhiyun estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);
172*4882a593Smuzhiyun
173*4882a593Smuzhiyun if (bmsr & BMSR_10HALF)
174*4882a593Smuzhiyun advertise |= ADVERTISED_10baseT_Half;
175*4882a593Smuzhiyun if (bmsr & BMSR_10FULL)
176*4882a593Smuzhiyun advertise |= ADVERTISED_10baseT_Full;
177*4882a593Smuzhiyun if (bmsr & BMSR_100HALF)
178*4882a593Smuzhiyun advertise |= ADVERTISED_100baseT_Half;
179*4882a593Smuzhiyun if (bmsr & BMSR_100FULL)
180*4882a593Smuzhiyun advertise |= ADVERTISED_100baseT_Full;
181*4882a593Smuzhiyun
182*4882a593Smuzhiyun if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
183*4882a593Smuzhiyun advertise |= SUPPORTED_1000baseT_Full;
184*4882a593Smuzhiyun if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
185*4882a593Smuzhiyun advertise |= SUPPORTED_1000baseT_Half;
186*4882a593Smuzhiyun
187*4882a593Smuzhiyun sungem_phy_probe(phy, phy->mii_id);
188*4882a593Smuzhiyun phy->def->ops->setup_aneg(phy, advertise);
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun }
191*4882a593Smuzhiyun
192*4882a593Smuzhiyun /**
193*4882a593Smuzhiyun * spider_net_rx_irq_off - switch off rx irq on this spider card
194*4882a593Smuzhiyun * @card: device structure
195*4882a593Smuzhiyun *
196*4882a593Smuzhiyun * switches off rx irq by masking them out in the GHIINTnMSK register
197*4882a593Smuzhiyun */
198*4882a593Smuzhiyun static void
spider_net_rx_irq_off(struct spider_net_card * card)199*4882a593Smuzhiyun spider_net_rx_irq_off(struct spider_net_card *card)
200*4882a593Smuzhiyun {
201*4882a593Smuzhiyun u32 regvalue;
202*4882a593Smuzhiyun
203*4882a593Smuzhiyun regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
204*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
205*4882a593Smuzhiyun }
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun /**
208*4882a593Smuzhiyun * spider_net_rx_irq_on - switch on rx irq on this spider card
209*4882a593Smuzhiyun * @card: device structure
210*4882a593Smuzhiyun *
211*4882a593Smuzhiyun * switches on rx irq by enabling them in the GHIINTnMSK register
212*4882a593Smuzhiyun */
213*4882a593Smuzhiyun static void
spider_net_rx_irq_on(struct spider_net_card * card)214*4882a593Smuzhiyun spider_net_rx_irq_on(struct spider_net_card *card)
215*4882a593Smuzhiyun {
216*4882a593Smuzhiyun u32 regvalue;
217*4882a593Smuzhiyun
218*4882a593Smuzhiyun regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
219*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
220*4882a593Smuzhiyun }
221*4882a593Smuzhiyun
/**
 * spider_net_set_promisc - sets the unicast address or the promiscuous mode
 * @card: card structure
 *
 * spider_net_set_promisc sets the unicast destination address filter and
 * thus either allows for non-promisc mode or promisc mode.
 */
static void
spider_net_set_promisc(struct spider_net_card *card)
{
	u32 macu, macl;
	struct net_device *netdev = card->netdev;

	if (netdev->flags & IFF_PROMISC) {
		/* clear destination entry 0, then mark the filter entry
		 * as promiscuous so all unicast frames pass */
		spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0);
		spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0);
		spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
				     SPIDER_NET_PROMISC_VALUE);
	} else {
		/* upper register: MAC bytes 0-1 in the low 16 bits,
		 * plus the descriptor valid flags */
		macu = netdev->dev_addr[0];
		macu <<= 8;
		macu |= netdev->dev_addr[1];
		/* lower register: MAC bytes 2-5 copied in host byte
		 * order (big-endian on this platform — NOTE(review):
		 * presumably the register expects exactly that layout;
		 * confirm against the Spider register spec) */
		memcpy(&macl, &netdev->dev_addr[2], sizeof(macl));

		macu |= SPIDER_NET_UA_DESCR_VALUE;
		spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu);
		spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl);
		spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
				     SPIDER_NET_NONPROMISC_VALUE);
	}
}
254*4882a593Smuzhiyun
/**
 * spider_net_get_descr_status -- returns the status of a descriptor
 * @hwdescr: hardware descriptor to look at
 *
 * Return: the status as in the dmac_cmd_status field of the descriptor.
 */
static inline int
spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
{
	return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
}
266*4882a593Smuzhiyun
267*4882a593Smuzhiyun /**
268*4882a593Smuzhiyun * spider_net_free_chain - free descriptor chain
269*4882a593Smuzhiyun * @card: card structure
270*4882a593Smuzhiyun * @chain: address of chain
271*4882a593Smuzhiyun *
272*4882a593Smuzhiyun */
273*4882a593Smuzhiyun static void
spider_net_free_chain(struct spider_net_card * card,struct spider_net_descr_chain * chain)274*4882a593Smuzhiyun spider_net_free_chain(struct spider_net_card *card,
275*4882a593Smuzhiyun struct spider_net_descr_chain *chain)
276*4882a593Smuzhiyun {
277*4882a593Smuzhiyun struct spider_net_descr *descr;
278*4882a593Smuzhiyun
279*4882a593Smuzhiyun descr = chain->ring;
280*4882a593Smuzhiyun do {
281*4882a593Smuzhiyun descr->bus_addr = 0;
282*4882a593Smuzhiyun descr->hwdescr->next_descr_addr = 0;
283*4882a593Smuzhiyun descr = descr->next;
284*4882a593Smuzhiyun } while (descr != chain->ring);
285*4882a593Smuzhiyun
286*4882a593Smuzhiyun dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr),
287*4882a593Smuzhiyun chain->hwring, chain->dma_addr);
288*4882a593Smuzhiyun }
289*4882a593Smuzhiyun
/**
 * spider_net_init_chain - alloc and link descriptor chain
 * @card: card structure
 * @chain: address of chain
 *
 * We manage a circular list that mirrors the hardware structure,
 * except that the hardware uses bus addresses.
 *
 * Return: 0 on success, <0 on failure (-ENOMEM if the coherent DMA
 * allocation for the hardware ring fails).
 */
static int
spider_net_init_chain(struct spider_net_card *card,
		      struct spider_net_descr_chain *chain)
{
	int i;
	struct spider_net_descr *descr;
	struct spider_net_hw_descr *hwdescr;
	dma_addr_t buf;
	size_t alloc_size;

	alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);

	/* one contiguous coherent block holds all hardware descriptors */
	chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
					   &chain->dma_addr, GFP_KERNEL);
	if (!chain->hwring)
		return -ENOMEM;

	/* Set up the hardware pointers in each descriptor: the i-th
	 * software descriptor mirrors the i-th hardware descriptor,
	 * whose bus address is dma_addr + i * sizeof(hw descr). */
	descr = chain->ring;
	hwdescr = chain->hwring;
	buf = chain->dma_addr;
	for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
		hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
		hwdescr->next_descr_addr = 0;

		descr->hwdescr = hwdescr;
		descr->bus_addr = buf;
		descr->next = descr + 1;
		descr->prev = descr - 1;

		buf += sizeof(struct spider_net_hw_descr);
	}
	/* do actual circular list: fix up the first prev and last next,
	 * which the loop above left pointing outside the array */
	(descr-1)->next = chain->ring;
	chain->ring->prev = descr-1;

	spin_lock_init(&chain->lock);
	chain->head = chain->ring;
	chain->tail = chain->ring;
	return 0;
}
341*4882a593Smuzhiyun
342*4882a593Smuzhiyun /**
343*4882a593Smuzhiyun * spider_net_free_rx_chain_contents - frees descr contents in rx chain
344*4882a593Smuzhiyun * @card: card structure
345*4882a593Smuzhiyun *
346*4882a593Smuzhiyun * returns 0 on success, <0 on failure
347*4882a593Smuzhiyun */
348*4882a593Smuzhiyun static void
spider_net_free_rx_chain_contents(struct spider_net_card * card)349*4882a593Smuzhiyun spider_net_free_rx_chain_contents(struct spider_net_card *card)
350*4882a593Smuzhiyun {
351*4882a593Smuzhiyun struct spider_net_descr *descr;
352*4882a593Smuzhiyun
353*4882a593Smuzhiyun descr = card->rx_chain.head;
354*4882a593Smuzhiyun do {
355*4882a593Smuzhiyun if (descr->skb) {
356*4882a593Smuzhiyun pci_unmap_single(card->pdev, descr->hwdescr->buf_addr,
357*4882a593Smuzhiyun SPIDER_NET_MAX_FRAME,
358*4882a593Smuzhiyun PCI_DMA_BIDIRECTIONAL);
359*4882a593Smuzhiyun dev_kfree_skb(descr->skb);
360*4882a593Smuzhiyun descr->skb = NULL;
361*4882a593Smuzhiyun }
362*4882a593Smuzhiyun descr = descr->next;
363*4882a593Smuzhiyun } while (descr != card->rx_chain.head);
364*4882a593Smuzhiyun }
365*4882a593Smuzhiyun
/**
 * spider_net_prepare_rx_descr - Reinitialize RX descriptor
 * @card: card structure
 * @descr: descriptor to re-init
 *
 * Return: 0 on success, -ENOMEM if the skb allocation fails.
 *
 * Allocates a new rx skb, iommu-maps it and attaches it to the
 * descriptor. Mark the descriptor as activated, ready-to-use.
 *
 * NOTE(review): on DMA-mapping failure the descriptor is left
 * SPIDER_NET_DESCR_NOT_IN_USE with no skb, yet the function still
 * returns 0 — callers advance past it; confirm this is intended.
 */
static int
spider_net_prepare_rx_descr(struct spider_net_card *card,
			    struct spider_net_descr *descr)
{
	struct spider_net_hw_descr *hwdescr = descr->hwdescr;
	dma_addr_t buf;
	int offset;
	int bufsize;

	/* we need to round up the buffer size to a multiple of 128 */
	bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
		(~(SPIDER_NET_RXBUF_ALIGN - 1));

	/* and we need to have it 128 byte aligned, therefore we allocate a
	 * bit more */
	/* allocate an skb */
	descr->skb = netdev_alloc_skb(card->netdev,
				      bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
	if (!descr->skb) {
		if (netif_msg_rx_err(card) && net_ratelimit())
			dev_err(&card->netdev->dev,
				"Not enough memory to allocate rx buffer\n");
		card->spider_stats.alloc_rx_skb_error++;
		return -ENOMEM;
	}
	hwdescr->buf_size = bufsize;
	hwdescr->result_size = 0;
	hwdescr->valid_size = 0;
	hwdescr->data_status = 0;
	hwdescr->data_error = 0;

	/* advance skb->data to the next 128-byte boundary */
	offset = ((unsigned long)descr->skb->data) &
		(SPIDER_NET_RXBUF_ALIGN - 1);
	if (offset)
		skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
	/* iommu-map the skb */
	buf = pci_map_single(card->pdev, descr->skb->data,
			     SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(card->pdev, buf)) {
		dev_kfree_skb_any(descr->skb);
		descr->skb = NULL;
		if (netif_msg_rx_err(card) && net_ratelimit())
			dev_err(&card->netdev->dev, "Could not iommu-map rx buffer\n");
		card->spider_stats.rx_iommu_map_error++;
		hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
	} else {
		hwdescr->buf_addr = buf;
		/* make sure all descriptor fields above are visible to the
		 * device before handing ownership to the card */
		wmb();
		hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
					 SPIDER_NET_DMAC_NOINTR_COMPLETE;
	}

	return 0;
}
430*4882a593Smuzhiyun
/**
 * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
 * @card: card structure
 *
 * spider_net_enable_rxchtails sets the RX DMAC chain tail address in the
 * chip by writing to the appropriate register. DMA is enabled in
 * spider_net_enable_rxdmac.
 */
static inline void
spider_net_enable_rxchtails(struct spider_net_card *card)
{
	/* assume chain is aligned correctly */
	spider_net_write_reg(card, SPIDER_NET_GDADCHA ,
			     card->rx_chain.tail->bus_addr);
}
446*4882a593Smuzhiyun
/**
 * spider_net_enable_rxdmac - enables a receive DMA controller
 * @card: card structure
 *
 * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
 * in the GDADMACCNTR register.
 */
static inline void
spider_net_enable_rxdmac(struct spider_net_card *card)
{
	/* make sure any pending descriptor writes reach memory before
	 * the controller starts fetching them */
	wmb();
	spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
			     SPIDER_NET_DMA_RX_VALUE);
}
461*4882a593Smuzhiyun
/**
 * spider_net_disable_rxdmac - disables the receive DMA controller
 * @card: card structure
 *
 * spider_net_disable_rxdmac terminates processing on the DMA controller
 * by turning off the DMA controller, with the force-end flag set.
 */
static inline void
spider_net_disable_rxdmac(struct spider_net_card *card)
{
	spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
			     SPIDER_NET_DMA_RX_FEND_VALUE);
}
475*4882a593Smuzhiyun
/**
 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
 * @card: card structure
 *
 * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
 */
static void
spider_net_refill_rx_chain(struct spider_net_card *card)
{
	struct spider_net_descr_chain *chain = &card->rx_chain;
	unsigned long flags;

	/* one context doing the refill (and a second context seeing that
	 * and omitting it) is ok. If called by NAPI, we'll be called again
	 * as spider_net_decode_one_descr is called several times. If some
	 * interrupt calls us, the NAPI is about to clean up anyway. */
	if (!spin_trylock_irqsave(&chain->lock, flags))
		return;

	/* keep preparing descriptors until we hit one that is still in
	 * use, or until an skb allocation fails */
	while (spider_net_get_descr_status(chain->head->hwdescr) ==
			SPIDER_NET_DESCR_NOT_IN_USE) {
		if (spider_net_prepare_rx_descr(card, chain->head))
			break;
		chain->head = chain->head->next;
	}

	spin_unlock_irqrestore(&chain->lock, flags);
}
504*4882a593Smuzhiyun
/**
 * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
 * @card: card structure
 *
 * Return: 0 on success, -ENOMEM if not even one rx buffer could be
 * prepared.
 */
static int
spider_net_alloc_rx_skbs(struct spider_net_card *card)
{
	struct spider_net_descr_chain *chain = &card->rx_chain;
	struct spider_net_descr *start = chain->tail;
	struct spider_net_descr *descr = start;

	/* Link up the hardware chain pointers: each hw descriptor gets
	 * the bus address of its successor */
	do {
		descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
		descr = descr->next;
	} while (descr != start);

	/* Put at least one buffer into the chain. if this fails,
	 * we've got a problem. If not, spider_net_refill_rx_chain
	 * will do the rest at the end of this function. */
	if (spider_net_prepare_rx_descr(card, chain->head))
		goto error;
	else
		chain->head = chain->head->next;

	/* This will allocate the rest of the rx buffers;
	 * if not, it's business as usual later on. */
	spider_net_refill_rx_chain(card);
	spider_net_enable_rxdmac(card);
	return 0;

error:
	spider_net_free_rx_chain_contents(card);
	return -ENOMEM;
}
542*4882a593Smuzhiyun
543*4882a593Smuzhiyun /**
544*4882a593Smuzhiyun * spider_net_get_multicast_hash - generates hash for multicast filter table
545*4882a593Smuzhiyun * @addr: multicast address
546*4882a593Smuzhiyun *
547*4882a593Smuzhiyun * returns the hash value.
548*4882a593Smuzhiyun *
549*4882a593Smuzhiyun * spider_net_get_multicast_hash calculates a hash value for a given multicast
550*4882a593Smuzhiyun * address, that is used to set the multicast filter tables
551*4882a593Smuzhiyun */
552*4882a593Smuzhiyun static u8
spider_net_get_multicast_hash(struct net_device * netdev,__u8 * addr)553*4882a593Smuzhiyun spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
554*4882a593Smuzhiyun {
555*4882a593Smuzhiyun u32 crc;
556*4882a593Smuzhiyun u8 hash;
557*4882a593Smuzhiyun char addr_for_crc[ETH_ALEN] = { 0, };
558*4882a593Smuzhiyun int i, bit;
559*4882a593Smuzhiyun
560*4882a593Smuzhiyun for (i = 0; i < ETH_ALEN * 8; i++) {
561*4882a593Smuzhiyun bit = (addr[i / 8] >> (i % 8)) & 1;
562*4882a593Smuzhiyun addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
563*4882a593Smuzhiyun }
564*4882a593Smuzhiyun
565*4882a593Smuzhiyun crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
566*4882a593Smuzhiyun
567*4882a593Smuzhiyun hash = (crc >> 27);
568*4882a593Smuzhiyun hash <<= 3;
569*4882a593Smuzhiyun hash |= crc & 7;
570*4882a593Smuzhiyun hash &= 0xff;
571*4882a593Smuzhiyun
572*4882a593Smuzhiyun return hash;
573*4882a593Smuzhiyun }
574*4882a593Smuzhiyun
/**
 * spider_net_set_multi - sets multicast addresses and promisc flags
 * @netdev: interface device structure
 *
 * spider_net_set_multi configures multicast addresses as needed for the
 * netdev interface. It also sets up multicast, allmulti and promisc
 * flags appropriately.
 */
static void
spider_net_set_multi(struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u8 hash;
	int i;
	u32 reg;
	struct spider_net_card *card = netdev_priv(netdev);
	DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES) = {};

	spider_net_set_promisc(card);

	/* allmulti: enable every hash bucket and skip per-address hashing */
	if (netdev->flags & IFF_ALLMULTI) {
		for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) {
			set_bit(i, bitmask);
		}
		goto write_hash;
	}

	/* well, we know, what the broadcast hash value is: it's xfd
	   hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
	set_bit(0xfd, bitmask);

	netdev_for_each_mc_addr(ha, netdev) {
		hash = spider_net_get_multicast_hash(netdev, ha->addr);
		set_bit(hash, bitmask);
	}

write_hash:
	/* Pack four hash-table bits per 32-bit register: each enabled
	 * bucket contributes 0x08 in its own byte lane (bits i*4..i*4+3
	 * map to bytes 3..0 of register i). */
	for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
		reg = 0;
		if (test_bit(i * 4, bitmask))
			reg += 0x08;
		reg <<= 8;
		if (test_bit(i * 4 + 1, bitmask))
			reg += 0x08;
		reg <<= 8;
		if (test_bit(i * 4 + 2, bitmask))
			reg += 0x08;
		reg <<= 8;
		if (test_bit(i * 4 + 3, bitmask))
			reg += 0x08;

		spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
	}
}
629*4882a593Smuzhiyun
/**
 * spider_net_prepare_tx_descr - fill tx descriptor with skb data
 * @card: card structure
 * @skb: packet to use
 *
 * returns 0 on success, <0 on failure.
 *
 * fills out the descriptor structure with skb data and len. Copies data,
 * if needed (32bit DMA!)
 */
static int
spider_net_prepare_tx_descr(struct spider_net_card *card,
			    struct sk_buff *skb)
{
	struct spider_net_descr_chain *chain = &card->tx_chain;
	struct spider_net_descr *descr;
	struct spider_net_hw_descr *hwdescr;
	dma_addr_t buf;
	unsigned long flags;

	/* Map the packet for DMA before taking the chain lock; on failure
	 * the caller is expected to drop the packet. */
	buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(card->pdev, buf)) {
		if (netif_msg_tx_err(card) && net_ratelimit())
			dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
				  "Dropping packet\n", skb->data, skb->len);
		card->spider_stats.tx_iommu_map_error++;
		return -ENOMEM;
	}

	spin_lock_irqsave(&chain->lock, flags);
	descr = card->tx_chain.head;
	/* Ring full: don't let head catch up with tail. Undo the DMA
	 * mapping and report no room. */
	if (descr->next == chain->tail->prev) {
		spin_unlock_irqrestore(&chain->lock, flags);
		pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE);
		return -ENOMEM;
	}
	hwdescr = descr->hwdescr;
	chain->head = descr->next;

	descr->skb = skb;
	hwdescr->buf_addr = buf;
	hwdescr->buf_size = skb->len;
	hwdescr->next_descr_addr = 0;	/* not linked into the hw chain yet */
	hwdescr->data_status = 0;

	/* Hand the descriptor to the card before dropping the lock. */
	hwdescr->dmac_cmd_status =
			SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_TXFRMTL;
	spin_unlock_irqrestore(&chain->lock, flags);

	/* Ask the hardware to insert the TCP/UDP checksum when the stack
	 * requested partial checksumming. */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_TCP:
			hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
			break;
		case IPPROTO_UDP:
			hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
			break;
		}

	/* Chain the bus address, so that the DMA engine finds this descr.
	 * The wmb() makes sure all descriptor fields above are visible
	 * before the link is published. */
	wmb();
	descr->prev->hwdescr->next_descr_addr = descr->bus_addr;

	netif_trans_update(card->netdev); /* set netdev watchdog timer */
	return 0;
}
696*4882a593Smuzhiyun
/**
 * spider_net_set_low_watermark - place the TX low-watermark descriptor
 * @card: card structure
 *
 * Returns the (approximate) number of descriptors pending in the TX ring.
 *
 * Walks the in-flight part of the TX chain and, if the queue is at least
 * a quarter full, sets the TXDESFLG interrupt flag on the descriptor
 * located 3/4th's of the way in, clearing the flag from the previously
 * marked descriptor. This throttles the TX-completion interrupt rate.
 */
static int
spider_net_set_low_watermark(struct spider_net_card *card)
{
	struct spider_net_descr *descr = card->tx_chain.tail;
	struct spider_net_hw_descr *hwdescr;
	unsigned long flags;
	int status;
	int cnt=0;
	int i;

	/* Measure the length of the queue. Measurement does not
	 * need to be precise -- does not need a lock. */
	while (descr != card->tx_chain.head) {
		status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
		if (status == SPIDER_NET_DESCR_NOT_IN_USE)
			break;
		descr = descr->next;
		cnt++;
	}

	/* If TX queue is short, don't even bother with interrupts */
	if (cnt < card->tx_chain.num_desc/4)
		return cnt;

	/* Set low-watermark 3/4th's of the way into the queue. */
	descr = card->tx_chain.tail;
	cnt = (cnt*3)/4;
	for (i=0;i<cnt; i++)
		descr = descr->next;

	/* Set the new watermark, clear the old watermark */
	spin_lock_irqsave(&card->tx_chain.lock, flags);
	descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
	if (card->low_watermark && card->low_watermark != descr) {
		hwdescr = card->low_watermark->hwdescr;
		hwdescr->dmac_cmd_status =
			hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
	}
	card->low_watermark = descr;
	spin_unlock_irqrestore(&card->tx_chain.lock, flags);
	return cnt;
}
739*4882a593Smuzhiyun
/**
 * spider_net_release_tx_chain - processes sent tx descriptors
 * @card: adapter structure
 * @brutal: if set, don't care about whether descriptor seems to be in use
 *
 * returns 0 if the tx ring is empty, otherwise 1.
 *
 * spider_net_release_tx_chain releases the tx descriptors that spider has
 * finished with (if non-brutal) or simply release tx descriptors (if brutal).
 * If some other context is calling this function, we return 1 so that we're
 * scheduled again (if we were scheduled) and will not lose initiative.
 */
static int
spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
{
	struct net_device *dev = card->netdev;
	struct spider_net_descr_chain *chain = &card->tx_chain;
	struct spider_net_descr *descr;
	struct spider_net_hw_descr *hwdescr;
	struct sk_buff *skb;
	u32 buf_addr;
	unsigned long flags;
	int status;

	while (1) {
		spin_lock_irqsave(&chain->lock, flags);
		/* Ring fully reaped. */
		if (chain->tail == chain->head) {
			spin_unlock_irqrestore(&chain->lock, flags);
			return 0;
		}
		descr = chain->tail;
		hwdescr = descr->hwdescr;

		status = spider_net_get_descr_status(hwdescr);
		switch (status) {
		case SPIDER_NET_DESCR_COMPLETE:
			/* Transmitted successfully: account the packet. */
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += descr->skb->len;
			break;

		case SPIDER_NET_DESCR_CARDOWNED:
			/* Hardware still owns it: stop here unless we are
			 * tearing the chain down brutally. */
			if (!brutal) {
				spin_unlock_irqrestore(&chain->lock, flags);
				return 1;
			}

			/* fallthrough, if we release the descriptors
			 * brutally (then we don't care about
			 * SPIDER_NET_DESCR_CARDOWNED) */
			fallthrough;

		case SPIDER_NET_DESCR_RESPONSE_ERROR:
		case SPIDER_NET_DESCR_PROTECTION_ERROR:
		case SPIDER_NET_DESCR_FORCE_END:
			if (netif_msg_tx_err(card))
				dev_err(&card->netdev->dev, "forcing end of tx descriptor "
					"with status x%02x\n", status);
			dev->stats.tx_errors++;
			break;

		default:
			dev->stats.tx_dropped++;
			if (!brutal) {
				spin_unlock_irqrestore(&chain->lock, flags);
				return 1;
			}
		}

		/* Reap the descriptor: advance the tail, mark the slot
		 * reusable, and remember what to unmap/free below. */
		chain->tail = descr->next;
		hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
		skb = descr->skb;
		descr->skb = NULL;
		buf_addr = hwdescr->buf_addr;
		spin_unlock_irqrestore(&chain->lock, flags);

		/* unmap the skb (done outside the chain lock) */
		if (skb) {
			pci_unmap_single(card->pdev, buf_addr, skb->len,
					PCI_DMA_TODEVICE);
			dev_consume_skb_any(skb);
		}
	}
	return 0;
}
824*4882a593Smuzhiyun
/**
 * spider_net_kick_tx_dma - enables TX DMA processing
 * @card: card structure
 *
 * This routine will start the transmit DMA running if
 * it is not already running. This routine need only be
 * called when queueing a new packet to an empty tx queue.
 * Writes the current tx chain head as start address
 * of the tx descriptor chain and enables the transmission
 * DMA engine.
 */
static inline void
spider_net_kick_tx_dma(struct spider_net_card *card)
{
	struct spider_net_descr *descr;

	/* Nothing to do if the TX DMA engine is already enabled. */
	if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
			SPIDER_NET_TX_DMA_EN)
		goto out;

	/* Find the first card-owned descriptor, starting at the tail,
	 * and restart the DMA engine there. */
	descr = card->tx_chain.tail;
	for (;;) {
		if (spider_net_get_descr_status(descr->hwdescr) ==
				SPIDER_NET_DESCR_CARDOWNED) {
			spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
					descr->bus_addr);
			spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
					SPIDER_NET_DMA_TX_VALUE);
			break;
		}
		if (descr == card->tx_chain.head)
			break;
		descr = descr->next;
	}

out:
	/* Re-arm the TX reclaim timer in either case. */
	mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
}
863*4882a593Smuzhiyun
864*4882a593Smuzhiyun /**
865*4882a593Smuzhiyun * spider_net_xmit - transmits a frame over the device
866*4882a593Smuzhiyun * @skb: packet to send out
867*4882a593Smuzhiyun * @netdev: interface device structure
868*4882a593Smuzhiyun *
869*4882a593Smuzhiyun * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure
870*4882a593Smuzhiyun */
871*4882a593Smuzhiyun static netdev_tx_t
spider_net_xmit(struct sk_buff * skb,struct net_device * netdev)872*4882a593Smuzhiyun spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
873*4882a593Smuzhiyun {
874*4882a593Smuzhiyun int cnt;
875*4882a593Smuzhiyun struct spider_net_card *card = netdev_priv(netdev);
876*4882a593Smuzhiyun
877*4882a593Smuzhiyun spider_net_release_tx_chain(card, 0);
878*4882a593Smuzhiyun
879*4882a593Smuzhiyun if (spider_net_prepare_tx_descr(card, skb) != 0) {
880*4882a593Smuzhiyun netdev->stats.tx_dropped++;
881*4882a593Smuzhiyun netif_stop_queue(netdev);
882*4882a593Smuzhiyun return NETDEV_TX_BUSY;
883*4882a593Smuzhiyun }
884*4882a593Smuzhiyun
885*4882a593Smuzhiyun cnt = spider_net_set_low_watermark(card);
886*4882a593Smuzhiyun if (cnt < 5)
887*4882a593Smuzhiyun spider_net_kick_tx_dma(card);
888*4882a593Smuzhiyun return NETDEV_TX_OK;
889*4882a593Smuzhiyun }
890*4882a593Smuzhiyun
/**
 * spider_net_cleanup_tx_ring - cleans up the TX ring
 * @t: timer context used to obtain the pointer to net card data structure
 *
 * spider_net_cleanup_tx_ring is called by either the tx_timer
 * or from the NAPI polling routine.
 * This routine releases resources associated with transmitted
 * packets, including updating the queue tail pointer.
 */
static void
spider_net_cleanup_tx_ring(struct timer_list *t)
{
	struct spider_net_card *card = from_timer(card, t, tx_timer);
	/* If descriptors are still in flight and the interface is up,
	 * keep the DMA engine running and let the stack submit more. */
	if ((spider_net_release_tx_chain(card, 0) != 0) &&
	    (card->netdev->flags & IFF_UP)) {
		spider_net_kick_tx_dma(card);
		netif_wake_queue(card->netdev);
	}
}
910*4882a593Smuzhiyun
911*4882a593Smuzhiyun /**
912*4882a593Smuzhiyun * spider_net_do_ioctl - called for device ioctls
913*4882a593Smuzhiyun * @netdev: interface device structure
914*4882a593Smuzhiyun * @ifr: request parameter structure for ioctl
915*4882a593Smuzhiyun * @cmd: command code for ioctl
916*4882a593Smuzhiyun *
917*4882a593Smuzhiyun * returns 0 on success, <0 on failure. Currently, we have no special ioctls.
918*4882a593Smuzhiyun * -EOPNOTSUPP is returned, if an unknown ioctl was requested
919*4882a593Smuzhiyun */
920*4882a593Smuzhiyun static int
spider_net_do_ioctl(struct net_device * netdev,struct ifreq * ifr,int cmd)921*4882a593Smuzhiyun spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
922*4882a593Smuzhiyun {
923*4882a593Smuzhiyun switch (cmd) {
924*4882a593Smuzhiyun default:
925*4882a593Smuzhiyun return -EOPNOTSUPP;
926*4882a593Smuzhiyun }
927*4882a593Smuzhiyun }
928*4882a593Smuzhiyun
929*4882a593Smuzhiyun /**
930*4882a593Smuzhiyun * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
931*4882a593Smuzhiyun * @descr: descriptor to process
932*4882a593Smuzhiyun * @card: card structure
933*4882a593Smuzhiyun *
934*4882a593Smuzhiyun * Fills out skb structure and passes the data to the stack.
935*4882a593Smuzhiyun * The descriptor state is not changed.
936*4882a593Smuzhiyun */
937*4882a593Smuzhiyun static void
spider_net_pass_skb_up(struct spider_net_descr * descr,struct spider_net_card * card)938*4882a593Smuzhiyun spider_net_pass_skb_up(struct spider_net_descr *descr,
939*4882a593Smuzhiyun struct spider_net_card *card)
940*4882a593Smuzhiyun {
941*4882a593Smuzhiyun struct spider_net_hw_descr *hwdescr = descr->hwdescr;
942*4882a593Smuzhiyun struct sk_buff *skb = descr->skb;
943*4882a593Smuzhiyun struct net_device *netdev = card->netdev;
944*4882a593Smuzhiyun u32 data_status = hwdescr->data_status;
945*4882a593Smuzhiyun u32 data_error = hwdescr->data_error;
946*4882a593Smuzhiyun
947*4882a593Smuzhiyun skb_put(skb, hwdescr->valid_size);
948*4882a593Smuzhiyun
949*4882a593Smuzhiyun /* the card seems to add 2 bytes of junk in front
950*4882a593Smuzhiyun * of the ethernet frame */
951*4882a593Smuzhiyun #define SPIDER_MISALIGN 2
952*4882a593Smuzhiyun skb_pull(skb, SPIDER_MISALIGN);
953*4882a593Smuzhiyun skb->protocol = eth_type_trans(skb, netdev);
954*4882a593Smuzhiyun
955*4882a593Smuzhiyun /* checksum offload */
956*4882a593Smuzhiyun skb_checksum_none_assert(skb);
957*4882a593Smuzhiyun if (netdev->features & NETIF_F_RXCSUM) {
958*4882a593Smuzhiyun if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
959*4882a593Smuzhiyun SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
960*4882a593Smuzhiyun !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
961*4882a593Smuzhiyun skb->ip_summed = CHECKSUM_UNNECESSARY;
962*4882a593Smuzhiyun }
963*4882a593Smuzhiyun
964*4882a593Smuzhiyun if (data_status & SPIDER_NET_VLAN_PACKET) {
965*4882a593Smuzhiyun /* further enhancements: HW-accel VLAN */
966*4882a593Smuzhiyun }
967*4882a593Smuzhiyun
968*4882a593Smuzhiyun /* update netdevice statistics */
969*4882a593Smuzhiyun netdev->stats.rx_packets++;
970*4882a593Smuzhiyun netdev->stats.rx_bytes += skb->len;
971*4882a593Smuzhiyun
972*4882a593Smuzhiyun /* pass skb up to stack */
973*4882a593Smuzhiyun netif_receive_skb(skb);
974*4882a593Smuzhiyun }
975*4882a593Smuzhiyun
/**
 * show_rx_chain - dump the state of the RX descriptor chain
 * @card: card structure
 *
 * Debug helper: prints the ring as runs of consecutive descriptors
 * sharing the same dmac_cmd_status, and flags the positions of the
 * driver's head/tail as well as the hardware's current (GDACTDPA)
 * and next (GDACNEXTDA) descriptor pointers. With DEBUG defined,
 * every descriptor is additionally dumped in full.
 */
static void show_rx_chain(struct spider_net_card *card)
{
	struct spider_net_descr_chain *chain = &card->rx_chain;
	struct spider_net_descr *start= chain->tail;
	struct spider_net_descr *descr= start;
	struct spider_net_hw_descr *hwd = start->hwdescr;
	struct device *dev = &card->netdev->dev;
	u32 curr_desc, next_desc;
	int status;

	int tot = 0;
	int cnt = 0;
	int off = start - chain->ring;
	int cstat = hwd->dmac_cmd_status;

	dev_info(dev, "Total number of descrs=%d\n",
		chain->num_desc);
	dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n",
		off, cstat);

	curr_desc = spider_net_read_reg(card, SPIDER_NET_GDACTDPA);
	next_desc = spider_net_read_reg(card, SPIDER_NET_GDACNEXTDA);

	status = cstat;
	do
	{
		hwd = descr->hwdescr;
		off = descr - chain->ring;
		status = hwd->dmac_cmd_status;

		if (descr == chain->head)
			dev_info(dev, "Chain head is at %d, head status=0x%x\n",
				off, status);

		if (curr_desc == descr->bus_addr)
			dev_info(dev, "HW curr desc (GDACTDPA) is at %d, status=0x%x\n",
				off, status);

		if (next_desc == descr->bus_addr)
			dev_info(dev, "HW next desc (GDACNEXTDA) is at %d, status=0x%x\n",
				off, status);

		if (hwd->next_descr_addr == 0)
			dev_info(dev, "chain is cut at %d\n", off);

		/* Status changed: report the run of descriptors that
		 * shared the previous status, then start a new run. */
		if (cstat != status) {
			int from = (chain->num_desc + off - cnt) % chain->num_desc;
			int to = (chain->num_desc + off - 1) % chain->num_desc;
			dev_info(dev, "Have %d (from %d to %d) descrs "
				"with stat=0x%08x\n", cnt, from, to, cstat);
			cstat = status;
			cnt = 0;
		}

		cnt ++;
		tot ++;
		descr = descr->next;
	} while (descr != start);

	dev_info(dev, "Last %d descrs with stat=0x%08x "
		"for a total of %d descrs\n", cnt, cstat, tot);

#ifdef DEBUG
	/* Now dump the whole ring */
	descr = start;
	do
	{
		struct spider_net_hw_descr *hwd = descr->hwdescr;
		status = spider_net_get_descr_status(hwd);
		cnt = descr - chain->ring;
		dev_info(dev, "Descr %d stat=0x%08x skb=%p\n",
			cnt, status, descr->skb);
		dev_info(dev, "bus addr=%08x buf addr=%08x sz=%d\n",
			descr->bus_addr, hwd->buf_addr, hwd->buf_size);
		dev_info(dev, "next=%08x result sz=%d valid sz=%d\n",
			hwd->next_descr_addr, hwd->result_size,
			hwd->valid_size);
		dev_info(dev, "dmac=%08x data stat=%08x data err=%08x\n",
			hwd->dmac_cmd_status, hwd->data_status,
			hwd->data_error);
		dev_info(dev, "\n");

		descr = descr->next;
	} while (descr != start);
#endif

}
1063*4882a593Smuzhiyun
/**
 * spider_net_resync_head_ptr - Advance head ptr past empty descrs
 * @card: card structure
 *
 * If the driver fails to keep up and empty the queue, then the
 * hardware will run out of room to put incoming packets. This
 * will cause the hardware to skip descrs that are full (instead
 * of halting/retrying). Thus, once the driver runs, it will need
 * to "catch up" to where the hardware chain pointer is at.
 */
static void spider_net_resync_head_ptr(struct spider_net_card *card)
{
	unsigned long flags;
	struct spider_net_descr_chain *chain = &card->rx_chain;
	struct spider_net_descr *descr;
	int i, status;

	/* Advance head pointer past any empty descrs */
	descr = chain->head;
	status = spider_net_get_descr_status(descr->hwdescr);

	/* Cheap unlocked fast path: nothing to do if the head already
	 * sits on a reaped descriptor. */
	if (status == SPIDER_NET_DESCR_NOT_IN_USE)
		return;

	spin_lock_irqsave(&chain->lock, flags);

	/* Re-read under the lock and step over every descriptor the
	 * hardware still owns, at most once around the ring. */
	descr = chain->head;
	status = spider_net_get_descr_status(descr->hwdescr);
	for (i=0; i<chain->num_desc; i++) {
		if (status != SPIDER_NET_DESCR_CARDOWNED) break;
		descr = descr->next;
		status = spider_net_get_descr_status(descr->hwdescr);
	}
	chain->head = descr;

	spin_unlock_irqrestore(&chain->lock, flags);
}
1100*4882a593Smuzhiyun
spider_net_resync_tail_ptr(struct spider_net_card * card)1101*4882a593Smuzhiyun static int spider_net_resync_tail_ptr(struct spider_net_card *card)
1102*4882a593Smuzhiyun {
1103*4882a593Smuzhiyun struct spider_net_descr_chain *chain = &card->rx_chain;
1104*4882a593Smuzhiyun struct spider_net_descr *descr;
1105*4882a593Smuzhiyun int i, status;
1106*4882a593Smuzhiyun
1107*4882a593Smuzhiyun /* Advance tail pointer past any empty and reaped descrs */
1108*4882a593Smuzhiyun descr = chain->tail;
1109*4882a593Smuzhiyun status = spider_net_get_descr_status(descr->hwdescr);
1110*4882a593Smuzhiyun
1111*4882a593Smuzhiyun for (i=0; i<chain->num_desc; i++) {
1112*4882a593Smuzhiyun if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
1113*4882a593Smuzhiyun (status != SPIDER_NET_DESCR_NOT_IN_USE)) break;
1114*4882a593Smuzhiyun descr = descr->next;
1115*4882a593Smuzhiyun status = spider_net_get_descr_status(descr->hwdescr);
1116*4882a593Smuzhiyun }
1117*4882a593Smuzhiyun chain->tail = descr;
1118*4882a593Smuzhiyun
1119*4882a593Smuzhiyun if ((i == chain->num_desc) || (i == 0))
1120*4882a593Smuzhiyun return 1;
1121*4882a593Smuzhiyun return 0;
1122*4882a593Smuzhiyun }
1123*4882a593Smuzhiyun
/**
 * spider_net_decode_one_descr - processes an RX descriptor
 * @card: card structure
 *
 * Returns 1 if a packet has been sent to the stack, otherwise 0.
 *
 * Processes an RX descriptor by iommu-unmapping the data buffer
 * and passing the packet up to the stack. This function is called
 * in softirq context, e.g. either bottom half from interrupt or
 * NAPI polling context.
 */
static int
spider_net_decode_one_descr(struct spider_net_card *card)
{
	struct net_device *dev = card->netdev;
	struct spider_net_descr_chain *chain = &card->rx_chain;
	struct spider_net_descr *descr = chain->tail;
	struct spider_net_hw_descr *hwdescr = descr->hwdescr;
	u32 hw_buf_addr;
	int status;

	status = spider_net_get_descr_status(hwdescr);

	/* Nothing in the descriptor, or ring must be empty */
	if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
	    (status == SPIDER_NET_DESCR_NOT_IN_USE))
		return 0;

	/* descriptor definitively used -- move on tail */
	chain->tail = descr->next;

	/* unmap descriptor; poison the stale bus address */
	hw_buf_addr = hwdescr->buf_addr;
	hwdescr->buf_addr = 0xffffffff;
	pci_unmap_single(card->pdev, hw_buf_addr,
			SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);

	/* Hardware-reported receive errors: drop the frame. */
	if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
	     (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
	     (status == SPIDER_NET_DESCR_FORCE_END) ) {
		if (netif_msg_rx_err(card))
			dev_err(&dev->dev,
				"dropping RX descriptor with state %d\n", status);
		dev->stats.rx_dropped++;
		goto bad_desc;
	}

	/* Any state other than complete/frame-end is unexpected here. */
	if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
	     (status != SPIDER_NET_DESCR_FRAME_END) ) {
		if (netif_msg_rx_err(card))
			dev_err(&card->netdev->dev,
				"RX descriptor with unknown state %d\n", status);
		card->spider_stats.rx_desc_unk_state++;
		goto bad_desc;
	}

	/* The cases we'll throw away the packet immediately */
	if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
		if (netif_msg_rx_err(card))
			dev_err(&card->netdev->dev,
				"error in received descriptor found, "
				"data_status=x%08x, data_error=x%08x\n",
				hwdescr->data_status, hwdescr->data_error);
		goto bad_desc;
	}

	/* Inconsistent command status: dump the whole descriptor for
	 * debugging, then drop the frame. */
	if (hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS) {
		dev_err(&card->netdev->dev, "bad status, cmd_status=x%08x\n",
			hwdescr->dmac_cmd_status);
		pr_err("buf_addr=x%08x\n", hw_buf_addr);
		pr_err("buf_size=x%08x\n", hwdescr->buf_size);
		pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
		pr_err("result_size=x%08x\n", hwdescr->result_size);
		pr_err("valid_size=x%08x\n", hwdescr->valid_size);
		pr_err("data_status=x%08x\n", hwdescr->data_status);
		pr_err("data_error=x%08x\n", hwdescr->data_error);
		pr_err("which=%ld\n", descr - card->rx_chain.ring);

		card->spider_stats.rx_desc_error++;
		goto bad_desc;
	}

	/* Ok, we've got a packet in descr */
	spider_net_pass_skb_up(descr, card);
	descr->skb = NULL;
	hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
	return 1;

bad_desc:
	if (netif_msg_rx_err(card))
		show_rx_chain(card);
	dev_kfree_skb_irq(descr->skb);
	descr->skb = NULL;
	hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
	return 0;
}
1220*4882a593Smuzhiyun
/**
 * spider_net_poll - NAPI poll function called by the stack to return packets
 * @napi: napi structure embedded in the card structure
 * @budget: number of packets we can pass to the stack at most
 *
 * returns 0 if no more packets available to the driver/stack. Returns 1,
 * if the quota is exceeded, but the driver has still packets.
 *
 * spider_net_poll returns all packets from the rx descriptors to the stack
 * (using netif_receive_skb). If all/enough packets are up, the driver
 * reenables interrupts and returns 0. If not, 1 is returned.
 */
static int spider_net_poll(struct napi_struct *napi, int budget)
{
	struct spider_net_card *card = container_of(napi, struct spider_net_card, napi);
	int packets_done = 0;

	/* Pass up to @budget received packets to the stack. */
	while (packets_done < budget) {
		if (!spider_net_decode_one_descr(card))
			break;

		packets_done++;
	}

	/* Nothing decoded although RX interrupts occurred: the ring
	 * pointers may be out of sync with the hardware; resync them. */
	if ((packets_done == 0) && (card->num_rx_ints != 0)) {
		if (!spider_net_resync_tail_ptr(card))
			packets_done = budget;
		spider_net_resync_head_ptr(card);
	}
	card->num_rx_ints = 0;

	/* Refill emptied RX descriptors and keep RX DMA running. */
	spider_net_refill_rx_chain(card);
	spider_net_enable_rxdmac(card);

	/* Also reap finished TX work while we are here. */
	spider_net_cleanup_tx_ring(&card->tx_timer);

	/* if all packets are in the stack, enable interrupts and return 0 */
	/* if not, return 1 */
	if (packets_done < budget) {
		napi_complete_done(napi, packets_done);
		spider_net_rx_irq_on(card);
		card->ignore_rx_ramfull = 0;
	}

	return packets_done;
}
1267*4882a593Smuzhiyun
1268*4882a593Smuzhiyun /**
1269*4882a593Smuzhiyun * spider_net_set_mac - sets the MAC of an interface
1270*4882a593Smuzhiyun * @netdev: interface device structure
1271*4882a593Smuzhiyun * @ptr: pointer to new MAC address
1272*4882a593Smuzhiyun *
 * Returns 0 on success, -EADDRNOTAVAIL if the given address is not a
 * valid Ethernet address.
1275*4882a593Smuzhiyun */
1276*4882a593Smuzhiyun static int
spider_net_set_mac(struct net_device * netdev,void * p)1277*4882a593Smuzhiyun spider_net_set_mac(struct net_device *netdev, void *p)
1278*4882a593Smuzhiyun {
1279*4882a593Smuzhiyun struct spider_net_card *card = netdev_priv(netdev);
1280*4882a593Smuzhiyun u32 macl, macu, regvalue;
1281*4882a593Smuzhiyun struct sockaddr *addr = p;
1282*4882a593Smuzhiyun
1283*4882a593Smuzhiyun if (!is_valid_ether_addr(addr->sa_data))
1284*4882a593Smuzhiyun return -EADDRNOTAVAIL;
1285*4882a593Smuzhiyun
1286*4882a593Smuzhiyun memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN);
1287*4882a593Smuzhiyun
1288*4882a593Smuzhiyun /* switch off GMACTPE and GMACRPE */
1289*4882a593Smuzhiyun regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
1290*4882a593Smuzhiyun regvalue &= ~((1 << 5) | (1 << 6));
1291*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
1292*4882a593Smuzhiyun
1293*4882a593Smuzhiyun /* write mac */
1294*4882a593Smuzhiyun macu = (netdev->dev_addr[0]<<24) + (netdev->dev_addr[1]<<16) +
1295*4882a593Smuzhiyun (netdev->dev_addr[2]<<8) + (netdev->dev_addr[3]);
1296*4882a593Smuzhiyun macl = (netdev->dev_addr[4]<<8) + (netdev->dev_addr[5]);
1297*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
1298*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
1299*4882a593Smuzhiyun
1300*4882a593Smuzhiyun /* switch GMACTPE and GMACRPE back on */
1301*4882a593Smuzhiyun regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
1302*4882a593Smuzhiyun regvalue |= ((1 << 5) | (1 << 6));
1303*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
1304*4882a593Smuzhiyun
1305*4882a593Smuzhiyun spider_net_set_promisc(card);
1306*4882a593Smuzhiyun
1307*4882a593Smuzhiyun return 0;
1308*4882a593Smuzhiyun }
1309*4882a593Smuzhiyun
1310*4882a593Smuzhiyun /**
1311*4882a593Smuzhiyun * spider_net_link_reset
1312*4882a593Smuzhiyun * @netdev: net device structure
1313*4882a593Smuzhiyun *
1314*4882a593Smuzhiyun * This is called when the PHY_LINK signal is asserted. For the blade this is
1315*4882a593Smuzhiyun * not connected so we should never get here.
1316*4882a593Smuzhiyun *
1317*4882a593Smuzhiyun */
1318*4882a593Smuzhiyun static void
spider_net_link_reset(struct net_device * netdev)1319*4882a593Smuzhiyun spider_net_link_reset(struct net_device *netdev)
1320*4882a593Smuzhiyun {
1321*4882a593Smuzhiyun
1322*4882a593Smuzhiyun struct spider_net_card *card = netdev_priv(netdev);
1323*4882a593Smuzhiyun
1324*4882a593Smuzhiyun del_timer_sync(&card->aneg_timer);
1325*4882a593Smuzhiyun
1326*4882a593Smuzhiyun /* clear interrupt, block further interrupts */
1327*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GMACST,
1328*4882a593Smuzhiyun spider_net_read_reg(card, SPIDER_NET_GMACST));
1329*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
1330*4882a593Smuzhiyun
1331*4882a593Smuzhiyun /* reset phy and setup aneg */
1332*4882a593Smuzhiyun card->aneg_count = 0;
1333*4882a593Smuzhiyun card->medium = BCM54XX_COPPER;
1334*4882a593Smuzhiyun spider_net_setup_aneg(card);
1335*4882a593Smuzhiyun mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1336*4882a593Smuzhiyun
1337*4882a593Smuzhiyun }
1338*4882a593Smuzhiyun
/**
 * spider_net_handle_error_irq - handles errors raised by an interrupt
 * @card: card structure
 * @status_reg: interrupt status register 0 (GHIINT0STS)
 * @error_reg1: interrupt status register 1 (GHIINT1STS)
 * @error_reg2: interrupt status register 2 (GHIINT2STS)
 *
 * spider_net_handle_error_irq treats or ignores all error conditions
 * found when an interrupt is presented
 */
static void
spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
			    u32 error_reg1, u32 error_reg2)
{
	u32 i;
	/* Cleared by every condition known to be harmless; note that the
	 * default branch of the GHIINT1STS switch below sets it back to 1,
	 * so any unlisted error_reg1 bit forces the log message. */
	int show_error = 1;

	/* check GHIINT0STS ************************************/
	if (status_reg)
		for (i = 0; i < 32; i++)
			if (status_reg & (1<<i))
				switch (i)
	{
	/* let error_reg1 and error_reg2 evaluation decide, what to do
	case SPIDER_NET_PHYINT:
	case SPIDER_NET_GMAC2INT:
	case SPIDER_NET_GMAC1INT:
	case SPIDER_NET_GFIFOINT:
	case SPIDER_NET_DMACINT:
	case SPIDER_NET_GSYSINT:
		break; */

	case SPIDER_NET_GIPSINT:
		show_error = 0;
		break;

	case SPIDER_NET_GPWOPCMPINT:
		/* PHY write operation completed */
		show_error = 0;
		break;
	case SPIDER_NET_GPROPCMPINT:
		/* PHY read operation completed */
		/* we don't use semaphores, as we poll for the completion
		 * of the read operation in spider_net_read_phy. Should take
		 * about 50 us */
		show_error = 0;
		break;
	case SPIDER_NET_GPWFFINT:
		/* PHY command queue full */
		if (netif_msg_intr(card))
			dev_err(&card->netdev->dev, "PHY write queue full\n");
		show_error = 0;
		break;

	/* case SPIDER_NET_GRMDADRINT: not used. print a message */
	/* case SPIDER_NET_GRMARPINT: not used. print a message */
	/* case SPIDER_NET_GRMMPINT: not used. print a message */

	case SPIDER_NET_GDTDEN0INT:
		/* someone has set TX_DMA_EN to 0 */
		show_error = 0;
		break;

	case SPIDER_NET_GDDDEN0INT:
	case SPIDER_NET_GDCDEN0INT:
	case SPIDER_NET_GDBDEN0INT:
	case SPIDER_NET_GDADEN0INT:
		/* someone has set RX_DMA_EN to 0 */
		show_error = 0;
		break;

	/* RX interrupts */
	case SPIDER_NET_GDDFDCINT:
	case SPIDER_NET_GDCFDCINT:
	case SPIDER_NET_GDBFDCINT:
	case SPIDER_NET_GDAFDCINT:
	/* case SPIDER_NET_GDNMINT: not used. print a message */
	/* case SPIDER_NET_GCNMINT: not used. print a message */
	/* case SPIDER_NET_GBNMINT: not used. print a message */
	/* case SPIDER_NET_GANMINT: not used. print a message */
	/* case SPIDER_NET_GRFNMINT: not used. print a message */
		show_error = 0;
		break;

	/* TX interrupts */
	case SPIDER_NET_GDTFDCINT:
		show_error = 0;
		break;
	case SPIDER_NET_GTTEDINT:
		show_error = 0;
		break;
	case SPIDER_NET_GDTDCEINT:
		/* chain end. If a descriptor should be sent, kick off
		 * tx dma
		if (card->tx_chain.tail != card->tx_chain.head)
			spider_net_kick_tx_dma(card);
		*/
		show_error = 0;
		break;

	/* case SPIDER_NET_G1TMCNTINT: not used. print a message */
	/* case SPIDER_NET_GFREECNTINT: not used. print a message */
	}

	/* check GHIINT1STS ************************************/
	if (error_reg1)
		for (i = 0; i < 32; i++)
			if (error_reg1 & (1<<i))
				switch (i)
	{
	case SPIDER_NET_GTMFLLINT:
		/* TX RAM full may happen on a usual case.
		 * Logging is not needed. */
		show_error = 0;
		break;
	case SPIDER_NET_GRFDFLLINT:
	case SPIDER_NET_GRFCFLLINT:
	case SPIDER_NET_GRFBFLLINT:
	case SPIDER_NET_GRFAFLLINT:
	case SPIDER_NET_GRMFLLINT:
		/* Could happen when rx chain is full. Resync the chain
		 * pointers once, then leave recovery to the NAPI poll;
		 * ignore_rx_ramfull suppresses repeats until poll runs. */
		if (card->ignore_rx_ramfull == 0) {
			card->ignore_rx_ramfull = 1;
			spider_net_resync_head_ptr(card);
			spider_net_refill_rx_chain(card);
			spider_net_enable_rxdmac(card);
			card->num_rx_ints ++;
			napi_schedule(&card->napi);
		}
		show_error = 0;
		break;

	/* case SPIDER_NET_GTMSHTINT: problem, print a message */
	case SPIDER_NET_GDTINVDINT:
		/* allrighty. tx from previous descr ok */
		show_error = 0;
		break;

	/* chain end */
	case SPIDER_NET_GDDDCEINT:
	case SPIDER_NET_GDCDCEINT:
	case SPIDER_NET_GDBDCEINT:
	case SPIDER_NET_GDADCEINT:
		/* RX DMA hit the end of a chain: resync pointers, refill
		 * and restart DMA, then let the NAPI poll drain. */
		spider_net_resync_head_ptr(card);
		spider_net_refill_rx_chain(card);
		spider_net_enable_rxdmac(card);
		card->num_rx_ints ++;
		napi_schedule(&card->napi);
		show_error = 0;
		break;

	/* invalid descriptor */
	case SPIDER_NET_GDDINVDINT:
	case SPIDER_NET_GDCINVDINT:
	case SPIDER_NET_GDBINVDINT:
	case SPIDER_NET_GDAINVDINT:
		/* Could happen when rx chain is full */
		spider_net_resync_head_ptr(card);
		spider_net_refill_rx_chain(card);
		spider_net_enable_rxdmac(card);
		card->num_rx_ints ++;
		napi_schedule(&card->napi);
		show_error = 0;
		break;

	/* case SPIDER_NET_GDTRSERINT: problem, print a message */
	/* case SPIDER_NET_GDDRSERINT: problem, print a message */
	/* case SPIDER_NET_GDCRSERINT: problem, print a message */
	/* case SPIDER_NET_GDBRSERINT: problem, print a message */
	/* case SPIDER_NET_GDARSERINT: problem, print a message */
	/* case SPIDER_NET_GDSERINT: problem, print a message */
	/* case SPIDER_NET_GDTPTERINT: problem, print a message */
	/* case SPIDER_NET_GDDPTERINT: problem, print a message */
	/* case SPIDER_NET_GDCPTERINT: problem, print a message */
	/* case SPIDER_NET_GDBPTERINT: problem, print a message */
	/* case SPIDER_NET_GDAPTERINT: problem, print a message */
	default:
		show_error = 1;
		break;
	}

	/* check GHIINT2STS ************************************/
	if (error_reg2)
		for (i = 0; i < 32; i++)
			if (error_reg2 & (1<<i))
				switch (i)
	{
	/* there is nothing we can (want to) do at this time. Log a
	 * message, we can switch on and off the specific values later on
	case SPIDER_NET_GPROPERINT:
	case SPIDER_NET_GMCTCRSNGINT:
	case SPIDER_NET_GMCTLCOLINT:
	case SPIDER_NET_GMCTTMOTINT:
	case SPIDER_NET_GMCRCAERINT:
	case SPIDER_NET_GMCRCALERINT:
	case SPIDER_NET_GMCRALNERINT:
	case SPIDER_NET_GMCROVRINT:
	case SPIDER_NET_GMCRRNTINT:
	case SPIDER_NET_GMCRRXERINT:
	case SPIDER_NET_GTITCSERINT:
	case SPIDER_NET_GTIFMTERINT:
	case SPIDER_NET_GTIPKTRVKINT:
	case SPIDER_NET_GTISPINGINT:
	case SPIDER_NET_GTISADNGINT:
	case SPIDER_NET_GTISPDNGINT:
	case SPIDER_NET_GRIFMTERINT:
	case SPIDER_NET_GRIPKTRVKINT:
	case SPIDER_NET_GRISPINGINT:
	case SPIDER_NET_GRISADNGINT:
	case SPIDER_NET_GRISPDNGINT:
		break;
	*/
	default:
		break;
	}

	if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
		dev_err(&card->netdev->dev, "Error interrupt, GHIINT0STS = 0x%08x, "
		       "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
		       status_reg, error_reg1, error_reg2);

	/* clear interrupt sources; GHIINT0STS is acknowledged by the
	 * caller (spider_net_interrupt), only 1 and 2 are cleared here */
	spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
	spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
}
1562*4882a593Smuzhiyun
/**
 * spider_net_interrupt - interrupt handler for spider_net
 * @irq: interrupt number
 * @ptr: pointer to the net_device the interrupt was registered for
 *
 * returns IRQ_HANDLED, if interrupt was for driver, or IRQ_NONE, if no
 * interrupt found raised by card.
 *
 * This is the interrupt handler, that turns off
 * interrupts for this device and makes the stack poll the driver
 */
static irqreturn_t
spider_net_interrupt(int irq, void *ptr)
{
	struct net_device *netdev = ptr;
	struct spider_net_card *card = netdev_priv(netdev);
	u32 status_reg, error_reg1, error_reg2;

	/* Snapshot all three interrupt status registers up front. */
	status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
	error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
	error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);

	/* None of the bits we care about is set: the interrupt was not
	 * ours (the line is requested IRQF_SHARED in spider_net_open). */
	if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) &&
	    !(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) &&
	    !(error_reg2 & SPIDER_NET_INT2_MASK_VALUE))
		return IRQ_NONE;

	if (status_reg & SPIDER_NET_RXINT ) {
		/* Mask RX interrupts and let the NAPI poll loop drain the
		 * descriptors. */
		spider_net_rx_irq_off(card);
		napi_schedule(&card->napi);
		card->num_rx_ints ++;
	}
	if (status_reg & SPIDER_NET_TXINT)
		napi_schedule(&card->napi);

	if (status_reg & SPIDER_NET_LINKINT)
		spider_net_link_reset(netdev);

	if (status_reg & SPIDER_NET_ERRINT )
		spider_net_handle_error_irq(card, status_reg,
					    error_reg1, error_reg2);

	/* clear interrupt sources (GHIINT1STS/2STS are cleared inside
	 * spider_net_handle_error_irq) */
	spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);

	return IRQ_HANDLED;
}
1610*4882a593Smuzhiyun
1611*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
1612*4882a593Smuzhiyun /**
1613*4882a593Smuzhiyun * spider_net_poll_controller - artificial interrupt for netconsole etc.
1614*4882a593Smuzhiyun * @netdev: interface device structure
1615*4882a593Smuzhiyun *
1616*4882a593Smuzhiyun * see Documentation/networking/netconsole.rst
1617*4882a593Smuzhiyun */
1618*4882a593Smuzhiyun static void
spider_net_poll_controller(struct net_device * netdev)1619*4882a593Smuzhiyun spider_net_poll_controller(struct net_device *netdev)
1620*4882a593Smuzhiyun {
1621*4882a593Smuzhiyun disable_irq(netdev->irq);
1622*4882a593Smuzhiyun spider_net_interrupt(netdev->irq, netdev);
1623*4882a593Smuzhiyun enable_irq(netdev->irq);
1624*4882a593Smuzhiyun }
1625*4882a593Smuzhiyun #endif /* CONFIG_NET_POLL_CONTROLLER */
1626*4882a593Smuzhiyun
1627*4882a593Smuzhiyun /**
1628*4882a593Smuzhiyun * spider_net_enable_interrupts - enable interrupts
1629*4882a593Smuzhiyun * @card: card structure
1630*4882a593Smuzhiyun *
1631*4882a593Smuzhiyun * spider_net_enable_interrupt enables several interrupts
1632*4882a593Smuzhiyun */
1633*4882a593Smuzhiyun static void
spider_net_enable_interrupts(struct spider_net_card * card)1634*4882a593Smuzhiyun spider_net_enable_interrupts(struct spider_net_card *card)
1635*4882a593Smuzhiyun {
1636*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
1637*4882a593Smuzhiyun SPIDER_NET_INT0_MASK_VALUE);
1638*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK,
1639*4882a593Smuzhiyun SPIDER_NET_INT1_MASK_VALUE);
1640*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
1641*4882a593Smuzhiyun SPIDER_NET_INT2_MASK_VALUE);
1642*4882a593Smuzhiyun }
1643*4882a593Smuzhiyun
1644*4882a593Smuzhiyun /**
1645*4882a593Smuzhiyun * spider_net_disable_interrupts - disable interrupts
1646*4882a593Smuzhiyun * @card: card structure
1647*4882a593Smuzhiyun *
1648*4882a593Smuzhiyun * spider_net_disable_interrupts disables all the interrupts
1649*4882a593Smuzhiyun */
1650*4882a593Smuzhiyun static void
spider_net_disable_interrupts(struct spider_net_card * card)1651*4882a593Smuzhiyun spider_net_disable_interrupts(struct spider_net_card *card)
1652*4882a593Smuzhiyun {
1653*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
1654*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
1655*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
1656*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
1657*4882a593Smuzhiyun }
1658*4882a593Smuzhiyun
1659*4882a593Smuzhiyun /**
1660*4882a593Smuzhiyun * spider_net_init_card - initializes the card
1661*4882a593Smuzhiyun * @card: card structure
1662*4882a593Smuzhiyun *
1663*4882a593Smuzhiyun * spider_net_init_card initializes the card so that other registers can
1664*4882a593Smuzhiyun * be used
1665*4882a593Smuzhiyun */
1666*4882a593Smuzhiyun static void
spider_net_init_card(struct spider_net_card * card)1667*4882a593Smuzhiyun spider_net_init_card(struct spider_net_card *card)
1668*4882a593Smuzhiyun {
1669*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1670*4882a593Smuzhiyun SPIDER_NET_CKRCTRL_STOP_VALUE);
1671*4882a593Smuzhiyun
1672*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1673*4882a593Smuzhiyun SPIDER_NET_CKRCTRL_RUN_VALUE);
1674*4882a593Smuzhiyun
1675*4882a593Smuzhiyun /* trigger ETOMOD signal */
1676*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
1677*4882a593Smuzhiyun spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
1678*4882a593Smuzhiyun
1679*4882a593Smuzhiyun spider_net_disable_interrupts(card);
1680*4882a593Smuzhiyun }
1681*4882a593Smuzhiyun
1682*4882a593Smuzhiyun /**
1683*4882a593Smuzhiyun * spider_net_enable_card - enables the card by setting all kinds of regs
1684*4882a593Smuzhiyun * @card: card structure
1685*4882a593Smuzhiyun *
1686*4882a593Smuzhiyun * spider_net_enable_card sets a lot of SMMIO registers to enable the device
1687*4882a593Smuzhiyun */
1688*4882a593Smuzhiyun static void
spider_net_enable_card(struct spider_net_card * card)1689*4882a593Smuzhiyun spider_net_enable_card(struct spider_net_card *card)
1690*4882a593Smuzhiyun {
1691*4882a593Smuzhiyun int i;
1692*4882a593Smuzhiyun /* the following array consists of (register),(value) pairs
1693*4882a593Smuzhiyun * that are set in this function. A register of 0 ends the list */
1694*4882a593Smuzhiyun u32 regs[][2] = {
1695*4882a593Smuzhiyun { SPIDER_NET_GRESUMINTNUM, 0 },
1696*4882a593Smuzhiyun { SPIDER_NET_GREINTNUM, 0 },
1697*4882a593Smuzhiyun
1698*4882a593Smuzhiyun /* set interrupt frame number registers */
1699*4882a593Smuzhiyun /* clear the single DMA engine registers first */
1700*4882a593Smuzhiyun { SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1701*4882a593Smuzhiyun { SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1702*4882a593Smuzhiyun { SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1703*4882a593Smuzhiyun { SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1704*4882a593Smuzhiyun /* then set, what we really need */
1705*4882a593Smuzhiyun { SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },
1706*4882a593Smuzhiyun
1707*4882a593Smuzhiyun /* timer counter registers and stuff */
1708*4882a593Smuzhiyun { SPIDER_NET_GFREECNNUM, 0 },
1709*4882a593Smuzhiyun { SPIDER_NET_GONETIMENUM, 0 },
1710*4882a593Smuzhiyun { SPIDER_NET_GTOUTFRMNUM, 0 },
1711*4882a593Smuzhiyun
1712*4882a593Smuzhiyun /* RX mode setting */
1713*4882a593Smuzhiyun { SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
1714*4882a593Smuzhiyun /* TX mode setting */
1715*4882a593Smuzhiyun { SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
1716*4882a593Smuzhiyun /* IPSEC mode setting */
1717*4882a593Smuzhiyun { SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },
1718*4882a593Smuzhiyun
1719*4882a593Smuzhiyun { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
1720*4882a593Smuzhiyun
1721*4882a593Smuzhiyun { SPIDER_NET_GMRWOLCTRL, 0 },
1722*4882a593Smuzhiyun { SPIDER_NET_GTESTMD, 0x10000000 },
1723*4882a593Smuzhiyun { SPIDER_NET_GTTQMSK, 0x00400040 },
1724*4882a593Smuzhiyun
1725*4882a593Smuzhiyun { SPIDER_NET_GMACINTEN, 0 },
1726*4882a593Smuzhiyun
1727*4882a593Smuzhiyun /* flow control stuff */
1728*4882a593Smuzhiyun { SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
1729*4882a593Smuzhiyun { SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },
1730*4882a593Smuzhiyun
1731*4882a593Smuzhiyun { SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
1732*4882a593Smuzhiyun { 0, 0}
1733*4882a593Smuzhiyun };
1734*4882a593Smuzhiyun
1735*4882a593Smuzhiyun i = 0;
1736*4882a593Smuzhiyun while (regs[i][0]) {
1737*4882a593Smuzhiyun spider_net_write_reg(card, regs[i][0], regs[i][1]);
1738*4882a593Smuzhiyun i++;
1739*4882a593Smuzhiyun }
1740*4882a593Smuzhiyun
1741*4882a593Smuzhiyun /* clear unicast filter table entries 1 to 14 */
1742*4882a593Smuzhiyun for (i = 1; i <= 14; i++) {
1743*4882a593Smuzhiyun spider_net_write_reg(card,
1744*4882a593Smuzhiyun SPIDER_NET_GMRUAFILnR + i * 8,
1745*4882a593Smuzhiyun 0x00080000);
1746*4882a593Smuzhiyun spider_net_write_reg(card,
1747*4882a593Smuzhiyun SPIDER_NET_GMRUAFILnR + i * 8 + 4,
1748*4882a593Smuzhiyun 0x00000000);
1749*4882a593Smuzhiyun }
1750*4882a593Smuzhiyun
1751*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);
1752*4882a593Smuzhiyun
1753*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
1754*4882a593Smuzhiyun
1755*4882a593Smuzhiyun /* set chain tail address for RX chains and
1756*4882a593Smuzhiyun * enable DMA */
1757*4882a593Smuzhiyun spider_net_enable_rxchtails(card);
1758*4882a593Smuzhiyun spider_net_enable_rxdmac(card);
1759*4882a593Smuzhiyun
1760*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
1761*4882a593Smuzhiyun
1762*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
1763*4882a593Smuzhiyun SPIDER_NET_LENLMT_VALUE);
1764*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
1765*4882a593Smuzhiyun SPIDER_NET_OPMODE_VALUE);
1766*4882a593Smuzhiyun
1767*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
1768*4882a593Smuzhiyun SPIDER_NET_GDTBSTA);
1769*4882a593Smuzhiyun }
1770*4882a593Smuzhiyun
1771*4882a593Smuzhiyun /**
1772*4882a593Smuzhiyun * spider_net_download_firmware - loads firmware into the adapter
1773*4882a593Smuzhiyun * @card: card structure
1774*4882a593Smuzhiyun * @firmware_ptr: pointer to firmware data
1775*4882a593Smuzhiyun *
1776*4882a593Smuzhiyun * spider_net_download_firmware loads the firmware data into the
1777*4882a593Smuzhiyun * adapter. It assumes the length etc. to be allright.
1778*4882a593Smuzhiyun */
1779*4882a593Smuzhiyun static int
spider_net_download_firmware(struct spider_net_card * card,const void * firmware_ptr)1780*4882a593Smuzhiyun spider_net_download_firmware(struct spider_net_card *card,
1781*4882a593Smuzhiyun const void *firmware_ptr)
1782*4882a593Smuzhiyun {
1783*4882a593Smuzhiyun int sequencer, i;
1784*4882a593Smuzhiyun const u32 *fw_ptr = firmware_ptr;
1785*4882a593Smuzhiyun
1786*4882a593Smuzhiyun /* stop sequencers */
1787*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GSINIT,
1788*4882a593Smuzhiyun SPIDER_NET_STOP_SEQ_VALUE);
1789*4882a593Smuzhiyun
1790*4882a593Smuzhiyun for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
1791*4882a593Smuzhiyun sequencer++) {
1792*4882a593Smuzhiyun spider_net_write_reg(card,
1793*4882a593Smuzhiyun SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
1794*4882a593Smuzhiyun for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
1795*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1796*4882a593Smuzhiyun sequencer * 8, *fw_ptr);
1797*4882a593Smuzhiyun fw_ptr++;
1798*4882a593Smuzhiyun }
1799*4882a593Smuzhiyun }
1800*4882a593Smuzhiyun
1801*4882a593Smuzhiyun if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
1802*4882a593Smuzhiyun return -EIO;
1803*4882a593Smuzhiyun
1804*4882a593Smuzhiyun spider_net_write_reg(card, SPIDER_NET_GSINIT,
1805*4882a593Smuzhiyun SPIDER_NET_RUN_SEQ_VALUE);
1806*4882a593Smuzhiyun
1807*4882a593Smuzhiyun return 0;
1808*4882a593Smuzhiyun }
1809*4882a593Smuzhiyun
1810*4882a593Smuzhiyun /**
1811*4882a593Smuzhiyun * spider_net_init_firmware - reads in firmware parts
1812*4882a593Smuzhiyun * @card: card structure
1813*4882a593Smuzhiyun *
1814*4882a593Smuzhiyun * Returns 0 on success, <0 on failure
1815*4882a593Smuzhiyun *
1816*4882a593Smuzhiyun * spider_net_init_firmware opens the sequencer firmware and does some basic
1817*4882a593Smuzhiyun * checks. This function opens and releases the firmware structure. A call
1818*4882a593Smuzhiyun * to download the firmware is performed before the release.
1819*4882a593Smuzhiyun *
1820*4882a593Smuzhiyun * Firmware format
1821*4882a593Smuzhiyun * ===============
1822*4882a593Smuzhiyun * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
1823*4882a593Smuzhiyun * the program for each sequencer. Use the command
1824*4882a593Smuzhiyun * tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \
1825*4882a593Smuzhiyun * Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \
1826*4882a593Smuzhiyun * Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin
1827*4882a593Smuzhiyun *
1828*4882a593Smuzhiyun * to generate spider_fw.bin, if you have sequencer programs with something
1829*4882a593Smuzhiyun * like the following contents for each sequencer:
1830*4882a593Smuzhiyun * <ONE LINE COMMENT>
1831*4882a593Smuzhiyun * <FIRST 4-BYTES-WORD FOR SEQUENCER>
1832*4882a593Smuzhiyun * <SECOND 4-BYTES-WORD FOR SEQUENCER>
1833*4882a593Smuzhiyun * ...
1834*4882a593Smuzhiyun * <1024th 4-BYTES-WORD FOR SEQUENCER>
1835*4882a593Smuzhiyun */
1836*4882a593Smuzhiyun static int
spider_net_init_firmware(struct spider_net_card * card)1837*4882a593Smuzhiyun spider_net_init_firmware(struct spider_net_card *card)
1838*4882a593Smuzhiyun {
1839*4882a593Smuzhiyun struct firmware *firmware = NULL;
1840*4882a593Smuzhiyun struct device_node *dn;
1841*4882a593Smuzhiyun const u8 *fw_prop = NULL;
1842*4882a593Smuzhiyun int err = -ENOENT;
1843*4882a593Smuzhiyun int fw_size;
1844*4882a593Smuzhiyun
1845*4882a593Smuzhiyun if (request_firmware((const struct firmware **)&firmware,
1846*4882a593Smuzhiyun SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
1847*4882a593Smuzhiyun if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) &&
1848*4882a593Smuzhiyun netif_msg_probe(card) ) {
1849*4882a593Smuzhiyun dev_err(&card->netdev->dev,
1850*4882a593Smuzhiyun "Incorrect size of spidernet firmware in " \
1851*4882a593Smuzhiyun "filesystem. Looking in host firmware...\n");
1852*4882a593Smuzhiyun goto try_host_fw;
1853*4882a593Smuzhiyun }
1854*4882a593Smuzhiyun err = spider_net_download_firmware(card, firmware->data);
1855*4882a593Smuzhiyun
1856*4882a593Smuzhiyun release_firmware(firmware);
1857*4882a593Smuzhiyun if (err)
1858*4882a593Smuzhiyun goto try_host_fw;
1859*4882a593Smuzhiyun
1860*4882a593Smuzhiyun goto done;
1861*4882a593Smuzhiyun }
1862*4882a593Smuzhiyun
1863*4882a593Smuzhiyun try_host_fw:
1864*4882a593Smuzhiyun dn = pci_device_to_OF_node(card->pdev);
1865*4882a593Smuzhiyun if (!dn)
1866*4882a593Smuzhiyun goto out_err;
1867*4882a593Smuzhiyun
1868*4882a593Smuzhiyun fw_prop = of_get_property(dn, "firmware", &fw_size);
1869*4882a593Smuzhiyun if (!fw_prop)
1870*4882a593Smuzhiyun goto out_err;
1871*4882a593Smuzhiyun
1872*4882a593Smuzhiyun if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) &&
1873*4882a593Smuzhiyun netif_msg_probe(card) ) {
1874*4882a593Smuzhiyun dev_err(&card->netdev->dev,
1875*4882a593Smuzhiyun "Incorrect size of spidernet firmware in host firmware\n");
1876*4882a593Smuzhiyun goto done;
1877*4882a593Smuzhiyun }
1878*4882a593Smuzhiyun
1879*4882a593Smuzhiyun err = spider_net_download_firmware(card, fw_prop);
1880*4882a593Smuzhiyun
1881*4882a593Smuzhiyun done:
1882*4882a593Smuzhiyun return err;
1883*4882a593Smuzhiyun out_err:
1884*4882a593Smuzhiyun if (netif_msg_probe(card))
1885*4882a593Smuzhiyun dev_err(&card->netdev->dev,
1886*4882a593Smuzhiyun "Couldn't find spidernet firmware in filesystem " \
1887*4882a593Smuzhiyun "or host firmware\n");
1888*4882a593Smuzhiyun return err;
1889*4882a593Smuzhiyun }
1890*4882a593Smuzhiyun
1891*4882a593Smuzhiyun /**
 * spider_net_open - called upon ifconfig up
1893*4882a593Smuzhiyun * @netdev: interface device structure
1894*4882a593Smuzhiyun *
1895*4882a593Smuzhiyun * returns 0 on success, <0 on failure
1896*4882a593Smuzhiyun *
1897*4882a593Smuzhiyun * spider_net_open allocates all the descriptors and memory needed for
1898*4882a593Smuzhiyun * operation, sets up multicast list and enables interrupts
1899*4882a593Smuzhiyun */
1900*4882a593Smuzhiyun int
spider_net_open(struct net_device * netdev)1901*4882a593Smuzhiyun spider_net_open(struct net_device *netdev)
1902*4882a593Smuzhiyun {
1903*4882a593Smuzhiyun struct spider_net_card *card = netdev_priv(netdev);
1904*4882a593Smuzhiyun int result;
1905*4882a593Smuzhiyun
1906*4882a593Smuzhiyun result = spider_net_init_firmware(card);
1907*4882a593Smuzhiyun if (result)
1908*4882a593Smuzhiyun goto init_firmware_failed;
1909*4882a593Smuzhiyun
1910*4882a593Smuzhiyun /* start probing with copper */
1911*4882a593Smuzhiyun card->aneg_count = 0;
1912*4882a593Smuzhiyun card->medium = BCM54XX_COPPER;
1913*4882a593Smuzhiyun spider_net_setup_aneg(card);
1914*4882a593Smuzhiyun if (card->phy.def->phy_id)
1915*4882a593Smuzhiyun mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1916*4882a593Smuzhiyun
1917*4882a593Smuzhiyun result = spider_net_init_chain(card, &card->tx_chain);
1918*4882a593Smuzhiyun if (result)
1919*4882a593Smuzhiyun goto alloc_tx_failed;
1920*4882a593Smuzhiyun card->low_watermark = NULL;
1921*4882a593Smuzhiyun
1922*4882a593Smuzhiyun result = spider_net_init_chain(card, &card->rx_chain);
1923*4882a593Smuzhiyun if (result)
1924*4882a593Smuzhiyun goto alloc_rx_failed;
1925*4882a593Smuzhiyun
1926*4882a593Smuzhiyun /* Allocate rx skbs */
1927*4882a593Smuzhiyun result = spider_net_alloc_rx_skbs(card);
1928*4882a593Smuzhiyun if (result)
1929*4882a593Smuzhiyun goto alloc_skbs_failed;
1930*4882a593Smuzhiyun
1931*4882a593Smuzhiyun spider_net_set_multi(netdev);
1932*4882a593Smuzhiyun
1933*4882a593Smuzhiyun /* further enhancement: setup hw vlan, if needed */
1934*4882a593Smuzhiyun
1935*4882a593Smuzhiyun result = -EBUSY;
1936*4882a593Smuzhiyun if (request_irq(netdev->irq, spider_net_interrupt,
1937*4882a593Smuzhiyun IRQF_SHARED, netdev->name, netdev))
1938*4882a593Smuzhiyun goto register_int_failed;
1939*4882a593Smuzhiyun
1940*4882a593Smuzhiyun spider_net_enable_card(card);
1941*4882a593Smuzhiyun
1942*4882a593Smuzhiyun netif_start_queue(netdev);
1943*4882a593Smuzhiyun netif_carrier_on(netdev);
1944*4882a593Smuzhiyun napi_enable(&card->napi);
1945*4882a593Smuzhiyun
1946*4882a593Smuzhiyun spider_net_enable_interrupts(card);
1947*4882a593Smuzhiyun
1948*4882a593Smuzhiyun return 0;
1949*4882a593Smuzhiyun
1950*4882a593Smuzhiyun register_int_failed:
1951*4882a593Smuzhiyun spider_net_free_rx_chain_contents(card);
1952*4882a593Smuzhiyun alloc_skbs_failed:
1953*4882a593Smuzhiyun spider_net_free_chain(card, &card->rx_chain);
1954*4882a593Smuzhiyun alloc_rx_failed:
1955*4882a593Smuzhiyun spider_net_free_chain(card, &card->tx_chain);
1956*4882a593Smuzhiyun alloc_tx_failed:
1957*4882a593Smuzhiyun del_timer_sync(&card->aneg_timer);
1958*4882a593Smuzhiyun init_firmware_failed:
1959*4882a593Smuzhiyun return result;
1960*4882a593Smuzhiyun }
1961*4882a593Smuzhiyun
/**
 * spider_net_link_phy - periodic link poll / autonegotiation retry timer
 * @t: timer context; used to look up the owning card structure
 *
 * Re-arms itself while the link is down. After SPIDER_NET_ANEG_TIMEOUT
 * unsuccessful polls, the medium is cycled (copper -> fiber with autoneg
 * -> fiber without autoneg -> back to copper) and the poll restarts.
 * Once the link comes up, the MAC is programmed for the negotiated speed
 * and the timer is not re-armed.
 */
static void spider_net_link_phy(struct timer_list *t)
{
	struct spider_net_card *card = from_timer(card, t, aneg_timer);
	struct mii_phy *phy = &card->phy;

	/* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
	if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {

		pr_debug("%s: link is down trying to bring it up\n",
			 card->netdev->name);

		switch (card->medium) {
		case BCM54XX_COPPER:
			/* enable fiber with autonegotiation first */
			if (phy->def->ops->enable_fiber)
				phy->def->ops->enable_fiber(phy, 1);
			card->medium = BCM54XX_FIBER;
			break;

		case BCM54XX_FIBER:
			/* fiber didn't come up, try to disable fiber autoneg */
			if (phy->def->ops->enable_fiber)
				phy->def->ops->enable_fiber(phy, 0);
			card->medium = BCM54XX_UNKNOWN;
			break;

		case BCM54XX_UNKNOWN:
			/* copper, fiber with and without failed,
			 * retry from beginning */
			spider_net_setup_aneg(card);
			card->medium = BCM54XX_COPPER;
			break;
		}

		card->aneg_count = 0;
		mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
		return;
	}

	/* link still not up, try again later */
	if (!(phy->def->ops->poll_link(phy))) {
		card->aneg_count++;
		mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
		return;
	}

	/* link came up, get abilities */
	phy->def->ops->read_link(phy);

	/* write the current status back to GMACST (presumably
	 * write-1-to-clear to ack pending bits — confirm against spec),
	 * then set bit 0x4 in GMACINTEN (looks like the link-status
	 * interrupt enable; verify) */
	spider_net_write_reg(card, SPIDER_NET_GMACST,
			     spider_net_read_reg(card, SPIDER_NET_GMACST));
	spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);

	/* select MAC mode matching the negotiated speed */
	if (phy->speed == 1000)
		spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
	else
		spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);

	card->aneg_count = 0;

	pr_info("%s: link up, %i Mbps, %s-duplex %sautoneg.\n",
		card->netdev->name, phy->speed,
		phy->duplex == 1 ? "Full" : "Half",
		phy->autoneg == 1 ? "" : "no ");
}
2032*4882a593Smuzhiyun
/**
 * spider_net_setup_phy - setup PHY
 * @card: card structure
 *
 * returns 0 on success, <0 on failure
 *
 * spider_net_setup_phy is used as part of spider_net_probe.
 * Scans MII addresses 1..31 for a responding PHY and probes it via
 * sungem_phy. NOTE(review): if no PHY answers, the loop simply falls
 * through and 0 is still returned with card->phy unprobed — confirm
 * that callers (e.g. spider_net_open's phy_id check) tolerate this.
 **/
static int
spider_net_setup_phy(struct spider_net_card *card)
{
	struct mii_phy *phy = &card->phy;

	/* route DMA and PHY control to the values the MDIO bus expects */
	spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
			     SPIDER_NET_DMASEL_VALUE);
	spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
			     SPIDER_NET_PHY_CTRL_VALUE);

	phy->dev = card->netdev;
	phy->mdio_read = spider_net_read_phy;
	phy->mdio_write = spider_net_write_phy;

	/* scan the bus; BMSR of 0x0000 or 0xffff means no device there */
	for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
		unsigned short id;
		id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
		if (id != 0x0000 && id != 0xffff) {
			if (!sungem_phy_probe(phy, phy->mii_id)) {
				pr_info("Found %s.\n", phy->def->name);
				break;
			}
		}
	}

	return 0;
}
2068*4882a593Smuzhiyun
/**
 * spider_net_workaround_rxramfull - work around firmware bug
 * @card: card structure
 *
 * no return value
 *
 * Takes the chip out of reset, zeroes the program memory of every
 * firmware sequencer, kicks the sequencers, and puts the chip back
 * into reset.
 **/
static void
spider_net_workaround_rxramfull(struct spider_net_card *card)
{
	int i, sequencer = 0;

	/* cancel reset */
	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
			     SPIDER_NET_CKRCTRL_RUN_VALUE);

	/* empty sequencer data */
	for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
	     sequencer++) {
		/* reset the sequencer's program address pointer ... */
		spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
				     sequencer * 8, 0x0);
		/* ... then write SEQWORDS zeros through the data port
		 * (same register each time — presumably the port
		 * auto-increments the address; confirm against spec) */
		for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
			spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
					     sequencer * 8, 0x0);
		}
	}

	/* set sequencer operation */
	spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe);

	/* reset */
	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
			     SPIDER_NET_CKRCTRL_STOP_VALUE);
}
2102*4882a593Smuzhiyun
/**
 * spider_net_stop - called upon ifconfig down
 * @netdev: interface device structure
 *
 * always returns 0
 *
 * Tears down the interface in the reverse order of spider_net_open:
 * stop NAPI/queue and timers, mask interrupts, free the IRQ, stop DMA,
 * then release the descriptor chains.
 */
int
spider_net_stop(struct net_device *netdev)
{
	struct spider_net_card *card = netdev_priv(netdev);

	napi_disable(&card->napi);
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
	del_timer_sync(&card->tx_timer);
	del_timer_sync(&card->aneg_timer);

	/* mask interrupts before giving the IRQ back */
	spider_net_disable_interrupts(card);

	free_irq(netdev->irq, netdev);

	/* force an end to any in-flight tx DMA */
	spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
			     SPIDER_NET_DMA_TX_FEND_VALUE);

	/* turn off DMA, force end */
	spider_net_disable_rxdmac(card);

	/* release chains */
	spider_net_release_tx_chain(card, 1);
	spider_net_free_rx_chain_contents(card);

	spider_net_free_chain(card, &card->tx_chain);
	spider_net_free_chain(card, &card->rx_chain);

	return 0;
}
2139*4882a593Smuzhiyun
/**
 * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
 * function (to be called not under interrupt status)
 * @work: work item; used to look up the owning card structure
 *
 * called as task when tx hangs, resets interface (if interface is up)
 */
static void
spider_net_tx_timeout_task(struct work_struct *work)
{
	struct spider_net_card *card =
		container_of(work, struct spider_net_card, tx_timeout_task);
	struct net_device *netdev = card->netdev;

	if (!(netdev->flags & IFF_UP))
		goto out;

	/* full reset: stop the interface, re-run the hardware init
	 * sequence, then bring it up again */
	netif_device_detach(netdev);
	spider_net_stop(netdev);

	spider_net_workaround_rxramfull(card);
	spider_net_init_card(card);

	/* NOTE(review): on failure the device is left detached and the
	 * interface stays down — confirm this is the intended policy */
	if (spider_net_setup_phy(card))
		goto out;

	spider_net_open(netdev);
	spider_net_kick_tx_dma(card);
	netif_device_attach(netdev);

out:
	/* balances the atomic_inc in spider_net_tx_timeout; wakes
	 * spider_net_remove's wait_event when it reaches zero */
	atomic_dec(&card->tx_timeout_task_counter);
}
2173*4882a593Smuzhiyun
2174*4882a593Smuzhiyun /**
2175*4882a593Smuzhiyun * spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
2176*4882a593Smuzhiyun * @netdev: interface device structure
2177*4882a593Smuzhiyun *
2178*4882a593Smuzhiyun * called, if tx hangs. Schedules a task that resets the interface
2179*4882a593Smuzhiyun */
2180*4882a593Smuzhiyun static void
spider_net_tx_timeout(struct net_device * netdev,unsigned int txqueue)2181*4882a593Smuzhiyun spider_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
2182*4882a593Smuzhiyun {
2183*4882a593Smuzhiyun struct spider_net_card *card;
2184*4882a593Smuzhiyun
2185*4882a593Smuzhiyun card = netdev_priv(netdev);
2186*4882a593Smuzhiyun atomic_inc(&card->tx_timeout_task_counter);
2187*4882a593Smuzhiyun if (netdev->flags & IFF_UP)
2188*4882a593Smuzhiyun schedule_work(&card->tx_timeout_task);
2189*4882a593Smuzhiyun else
2190*4882a593Smuzhiyun atomic_dec(&card->tx_timeout_task_counter);
2191*4882a593Smuzhiyun card->spider_stats.tx_timeouts++;
2192*4882a593Smuzhiyun }
2193*4882a593Smuzhiyun
/* net_device callbacks implemented by this driver */
static const struct net_device_ops spider_net_ops = {
	.ndo_open		= spider_net_open,
	.ndo_stop		= spider_net_stop,
	.ndo_start_xmit		= spider_net_xmit,
	.ndo_set_rx_mode	= spider_net_set_multi,
	.ndo_set_mac_address	= spider_net_set_mac,
	.ndo_do_ioctl		= spider_net_do_ioctl,
	.ndo_tx_timeout		= spider_net_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	/* HW VLAN */
#ifdef CONFIG_NET_POLL_CONTROLLER
	/* poll controller */
	.ndo_poll_controller	= spider_net_poll_controller,
#endif /* CONFIG_NET_POLL_CONTROLLER */
};
2209*4882a593Smuzhiyun
2210*4882a593Smuzhiyun /**
2211*4882a593Smuzhiyun * spider_net_setup_netdev_ops - initialization of net_device operations
2212*4882a593Smuzhiyun * @netdev: net_device structure
2213*4882a593Smuzhiyun *
2214*4882a593Smuzhiyun * fills out function pointers in the net_device structure
2215*4882a593Smuzhiyun */
2216*4882a593Smuzhiyun static void
spider_net_setup_netdev_ops(struct net_device * netdev)2217*4882a593Smuzhiyun spider_net_setup_netdev_ops(struct net_device *netdev)
2218*4882a593Smuzhiyun {
2219*4882a593Smuzhiyun netdev->netdev_ops = &spider_net_ops;
2220*4882a593Smuzhiyun netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
2221*4882a593Smuzhiyun /* ethtool ops */
2222*4882a593Smuzhiyun netdev->ethtool_ops = &spider_net_ethtool_ops;
2223*4882a593Smuzhiyun }
2224*4882a593Smuzhiyun
2225*4882a593Smuzhiyun /**
2226*4882a593Smuzhiyun * spider_net_setup_netdev - initialization of net_device
2227*4882a593Smuzhiyun * @card: card structure
2228*4882a593Smuzhiyun *
2229*4882a593Smuzhiyun * Returns 0 on success or <0 on failure
2230*4882a593Smuzhiyun *
2231*4882a593Smuzhiyun * spider_net_setup_netdev initializes the net_device structure
2232*4882a593Smuzhiyun **/
2233*4882a593Smuzhiyun static int
spider_net_setup_netdev(struct spider_net_card * card)2234*4882a593Smuzhiyun spider_net_setup_netdev(struct spider_net_card *card)
2235*4882a593Smuzhiyun {
2236*4882a593Smuzhiyun int result;
2237*4882a593Smuzhiyun struct net_device *netdev = card->netdev;
2238*4882a593Smuzhiyun struct device_node *dn;
2239*4882a593Smuzhiyun struct sockaddr addr;
2240*4882a593Smuzhiyun const u8 *mac;
2241*4882a593Smuzhiyun
2242*4882a593Smuzhiyun SET_NETDEV_DEV(netdev, &card->pdev->dev);
2243*4882a593Smuzhiyun
2244*4882a593Smuzhiyun pci_set_drvdata(card->pdev, netdev);
2245*4882a593Smuzhiyun
2246*4882a593Smuzhiyun timer_setup(&card->tx_timer, spider_net_cleanup_tx_ring, 0);
2247*4882a593Smuzhiyun netdev->irq = card->pdev->irq;
2248*4882a593Smuzhiyun
2249*4882a593Smuzhiyun card->aneg_count = 0;
2250*4882a593Smuzhiyun timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
2251*4882a593Smuzhiyun
2252*4882a593Smuzhiyun netif_napi_add(netdev, &card->napi,
2253*4882a593Smuzhiyun spider_net_poll, SPIDER_NET_NAPI_WEIGHT);
2254*4882a593Smuzhiyun
2255*4882a593Smuzhiyun spider_net_setup_netdev_ops(netdev);
2256*4882a593Smuzhiyun
2257*4882a593Smuzhiyun netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
2258*4882a593Smuzhiyun if (SPIDER_NET_RX_CSUM_DEFAULT)
2259*4882a593Smuzhiyun netdev->features |= NETIF_F_RXCSUM;
2260*4882a593Smuzhiyun netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX;
2261*4882a593Smuzhiyun /* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2262*4882a593Smuzhiyun * NETIF_F_HW_VLAN_CTAG_FILTER */
2263*4882a593Smuzhiyun
2264*4882a593Smuzhiyun /* MTU range: 64 - 2294 */
2265*4882a593Smuzhiyun netdev->min_mtu = SPIDER_NET_MIN_MTU;
2266*4882a593Smuzhiyun netdev->max_mtu = SPIDER_NET_MAX_MTU;
2267*4882a593Smuzhiyun
2268*4882a593Smuzhiyun netdev->irq = card->pdev->irq;
2269*4882a593Smuzhiyun card->num_rx_ints = 0;
2270*4882a593Smuzhiyun card->ignore_rx_ramfull = 0;
2271*4882a593Smuzhiyun
2272*4882a593Smuzhiyun dn = pci_device_to_OF_node(card->pdev);
2273*4882a593Smuzhiyun if (!dn)
2274*4882a593Smuzhiyun return -EIO;
2275*4882a593Smuzhiyun
2276*4882a593Smuzhiyun mac = of_get_property(dn, "local-mac-address", NULL);
2277*4882a593Smuzhiyun if (!mac)
2278*4882a593Smuzhiyun return -EIO;
2279*4882a593Smuzhiyun memcpy(addr.sa_data, mac, ETH_ALEN);
2280*4882a593Smuzhiyun
2281*4882a593Smuzhiyun result = spider_net_set_mac(netdev, &addr);
2282*4882a593Smuzhiyun if ((result) && (netif_msg_probe(card)))
2283*4882a593Smuzhiyun dev_err(&card->netdev->dev,
2284*4882a593Smuzhiyun "Failed to set MAC address: %i\n", result);
2285*4882a593Smuzhiyun
2286*4882a593Smuzhiyun result = register_netdev(netdev);
2287*4882a593Smuzhiyun if (result) {
2288*4882a593Smuzhiyun if (netif_msg_probe(card))
2289*4882a593Smuzhiyun dev_err(&card->netdev->dev,
2290*4882a593Smuzhiyun "Couldn't register net_device: %i\n", result);
2291*4882a593Smuzhiyun return result;
2292*4882a593Smuzhiyun }
2293*4882a593Smuzhiyun
2294*4882a593Smuzhiyun if (netif_msg_probe(card))
2295*4882a593Smuzhiyun pr_info("Initialized device %s.\n", netdev->name);
2296*4882a593Smuzhiyun
2297*4882a593Smuzhiyun return 0;
2298*4882a593Smuzhiyun }
2299*4882a593Smuzhiyun
2300*4882a593Smuzhiyun /**
2301*4882a593Smuzhiyun * spider_net_alloc_card - allocates net_device and card structure
2302*4882a593Smuzhiyun *
2303*4882a593Smuzhiyun * returns the card structure or NULL in case of errors
2304*4882a593Smuzhiyun *
2305*4882a593Smuzhiyun * the card and net_device structures are linked to each other
2306*4882a593Smuzhiyun */
2307*4882a593Smuzhiyun static struct spider_net_card *
spider_net_alloc_card(void)2308*4882a593Smuzhiyun spider_net_alloc_card(void)
2309*4882a593Smuzhiyun {
2310*4882a593Smuzhiyun struct net_device *netdev;
2311*4882a593Smuzhiyun struct spider_net_card *card;
2312*4882a593Smuzhiyun
2313*4882a593Smuzhiyun netdev = alloc_etherdev(struct_size(card, darray,
2314*4882a593Smuzhiyun tx_descriptors + rx_descriptors));
2315*4882a593Smuzhiyun if (!netdev)
2316*4882a593Smuzhiyun return NULL;
2317*4882a593Smuzhiyun
2318*4882a593Smuzhiyun card = netdev_priv(netdev);
2319*4882a593Smuzhiyun card->netdev = netdev;
2320*4882a593Smuzhiyun card->msg_enable = SPIDER_NET_DEFAULT_MSG;
2321*4882a593Smuzhiyun INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
2322*4882a593Smuzhiyun init_waitqueue_head(&card->waitq);
2323*4882a593Smuzhiyun atomic_set(&card->tx_timeout_task_counter, 0);
2324*4882a593Smuzhiyun
2325*4882a593Smuzhiyun card->rx_chain.num_desc = rx_descriptors;
2326*4882a593Smuzhiyun card->rx_chain.ring = card->darray;
2327*4882a593Smuzhiyun card->tx_chain.num_desc = tx_descriptors;
2328*4882a593Smuzhiyun card->tx_chain.ring = card->darray + rx_descriptors;
2329*4882a593Smuzhiyun
2330*4882a593Smuzhiyun return card;
2331*4882a593Smuzhiyun }
2332*4882a593Smuzhiyun
/**
 * spider_net_undo_pci_setup - releases PCI ressources
 * @card: card structure
 *
 * spider_net_undo_pci_setup releases the mapped regions
 */
static void
spider_net_undo_pci_setup(struct spider_net_card *card)
{
	/* unmap first, then give the BARs back to the PCI core */
	iounmap(card->regs);
	pci_release_regions(card->pdev);
}
2345*4882a593Smuzhiyun
2346*4882a593Smuzhiyun /**
2347*4882a593Smuzhiyun * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
2348*4882a593Smuzhiyun * @pdev: PCI device
2349*4882a593Smuzhiyun *
2350*4882a593Smuzhiyun * Returns the card structure or NULL if any errors occur
2351*4882a593Smuzhiyun *
2352*4882a593Smuzhiyun * spider_net_setup_pci_dev initializes pdev and together with the
2353*4882a593Smuzhiyun * functions called in spider_net_open configures the device so that
2354*4882a593Smuzhiyun * data can be transferred over it
2355*4882a593Smuzhiyun * The net_device structure is attached to the card structure, if the
2356*4882a593Smuzhiyun * function returns without error.
2357*4882a593Smuzhiyun **/
2358*4882a593Smuzhiyun static struct spider_net_card *
spider_net_setup_pci_dev(struct pci_dev * pdev)2359*4882a593Smuzhiyun spider_net_setup_pci_dev(struct pci_dev *pdev)
2360*4882a593Smuzhiyun {
2361*4882a593Smuzhiyun struct spider_net_card *card;
2362*4882a593Smuzhiyun unsigned long mmio_start, mmio_len;
2363*4882a593Smuzhiyun
2364*4882a593Smuzhiyun if (pci_enable_device(pdev)) {
2365*4882a593Smuzhiyun dev_err(&pdev->dev, "Couldn't enable PCI device\n");
2366*4882a593Smuzhiyun return NULL;
2367*4882a593Smuzhiyun }
2368*4882a593Smuzhiyun
2369*4882a593Smuzhiyun if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2370*4882a593Smuzhiyun dev_err(&pdev->dev,
2371*4882a593Smuzhiyun "Couldn't find proper PCI device base address.\n");
2372*4882a593Smuzhiyun goto out_disable_dev;
2373*4882a593Smuzhiyun }
2374*4882a593Smuzhiyun
2375*4882a593Smuzhiyun if (pci_request_regions(pdev, spider_net_driver_name)) {
2376*4882a593Smuzhiyun dev_err(&pdev->dev,
2377*4882a593Smuzhiyun "Couldn't obtain PCI resources, aborting.\n");
2378*4882a593Smuzhiyun goto out_disable_dev;
2379*4882a593Smuzhiyun }
2380*4882a593Smuzhiyun
2381*4882a593Smuzhiyun pci_set_master(pdev);
2382*4882a593Smuzhiyun
2383*4882a593Smuzhiyun card = spider_net_alloc_card();
2384*4882a593Smuzhiyun if (!card) {
2385*4882a593Smuzhiyun dev_err(&pdev->dev,
2386*4882a593Smuzhiyun "Couldn't allocate net_device structure, aborting.\n");
2387*4882a593Smuzhiyun goto out_release_regions;
2388*4882a593Smuzhiyun }
2389*4882a593Smuzhiyun card->pdev = pdev;
2390*4882a593Smuzhiyun
2391*4882a593Smuzhiyun /* fetch base address and length of first resource */
2392*4882a593Smuzhiyun mmio_start = pci_resource_start(pdev, 0);
2393*4882a593Smuzhiyun mmio_len = pci_resource_len(pdev, 0);
2394*4882a593Smuzhiyun
2395*4882a593Smuzhiyun card->netdev->mem_start = mmio_start;
2396*4882a593Smuzhiyun card->netdev->mem_end = mmio_start + mmio_len;
2397*4882a593Smuzhiyun card->regs = ioremap(mmio_start, mmio_len);
2398*4882a593Smuzhiyun
2399*4882a593Smuzhiyun if (!card->regs) {
2400*4882a593Smuzhiyun dev_err(&pdev->dev,
2401*4882a593Smuzhiyun "Couldn't obtain PCI resources, aborting.\n");
2402*4882a593Smuzhiyun goto out_release_regions;
2403*4882a593Smuzhiyun }
2404*4882a593Smuzhiyun
2405*4882a593Smuzhiyun return card;
2406*4882a593Smuzhiyun
2407*4882a593Smuzhiyun out_release_regions:
2408*4882a593Smuzhiyun pci_release_regions(pdev);
2409*4882a593Smuzhiyun out_disable_dev:
2410*4882a593Smuzhiyun pci_disable_device(pdev);
2411*4882a593Smuzhiyun return NULL;
2412*4882a593Smuzhiyun }
2413*4882a593Smuzhiyun
/**
 * spider_net_probe - initialization of a device
 * @pdev: PCI device
 * @ent: entry in the device id list
 *
 * Returns 0 on success, <0 on failure
 *
 * spider_net_probe initializes pdev and registers a net_device
 * structure for it. After that, the device can be ifconfig'ed up
 **/
static int
spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = -EIO;
	struct spider_net_card *card;

	card = spider_net_setup_pci_dev(pdev);
	if (!card)
		goto out;

	/* bring the chip into a known state before touching the PHY */
	spider_net_workaround_rxramfull(card);
	spider_net_init_card(card);

	err = spider_net_setup_phy(card);
	if (err)
		goto out_undo_pci;

	err = spider_net_setup_netdev(card);
	if (err)
		goto out_undo_pci;

	return 0;

out_undo_pci:
	spider_net_undo_pci_setup(card);
	free_netdev(card->netdev);
out:
	return err;
}
2453*4882a593Smuzhiyun
/**
 * spider_net_remove - removal of a device
 * @pdev: PCI device
 *
 * no return value
 *
 * spider_net_remove is called to remove the device and unregisters the
 * net_device
 **/
static void
spider_net_remove(struct pci_dev *pdev)
{
	struct net_device *netdev;
	struct spider_net_card *card;

	netdev = pci_get_drvdata(pdev);
	card = netdev_priv(netdev);

	/* wait for any tx-timeout reset work still in flight */
	wait_event(card->waitq,
		   atomic_read(&card->tx_timeout_task_counter) == 0);

	unregister_netdev(netdev);

	/* switch off card */
	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
			     SPIDER_NET_CKRCTRL_STOP_VALUE);
	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
			     SPIDER_NET_CKRCTRL_RUN_VALUE);

	spider_net_undo_pci_setup(card);
	free_netdev(netdev);
}
2486*4882a593Smuzhiyun
/* PCI glue: binds probe/remove to the supported spidernet device IDs */
static struct pci_driver spider_net_driver = {
	.name = spider_net_driver_name,
	.id_table = spider_net_pci_tbl,
	.probe = spider_net_probe,
	.remove = spider_net_remove
};
2493*4882a593Smuzhiyun
2494*4882a593Smuzhiyun /**
2495*4882a593Smuzhiyun * spider_net_init - init function when the driver is loaded
2496*4882a593Smuzhiyun *
2497*4882a593Smuzhiyun * spider_net_init registers the device driver
2498*4882a593Smuzhiyun */
spider_net_init(void)2499*4882a593Smuzhiyun static int __init spider_net_init(void)
2500*4882a593Smuzhiyun {
2501*4882a593Smuzhiyun printk(KERN_INFO "Spidernet version %s.\n", VERSION);
2502*4882a593Smuzhiyun
2503*4882a593Smuzhiyun if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
2504*4882a593Smuzhiyun rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
2505*4882a593Smuzhiyun pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
2506*4882a593Smuzhiyun }
2507*4882a593Smuzhiyun if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
2508*4882a593Smuzhiyun rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
2509*4882a593Smuzhiyun pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
2510*4882a593Smuzhiyun }
2511*4882a593Smuzhiyun if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
2512*4882a593Smuzhiyun tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
2513*4882a593Smuzhiyun pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
2514*4882a593Smuzhiyun }
2515*4882a593Smuzhiyun if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
2516*4882a593Smuzhiyun tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
2517*4882a593Smuzhiyun pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
2518*4882a593Smuzhiyun }
2519*4882a593Smuzhiyun
2520*4882a593Smuzhiyun return pci_register_driver(&spider_net_driver);
2521*4882a593Smuzhiyun }
2522*4882a593Smuzhiyun
/**
 * spider_net_cleanup - exit function when driver is unloaded
 *
 * spider_net_cleanup unregisters the device driver
 */
static void __exit spider_net_cleanup(void)
{
	pci_unregister_driver(&spider_net_driver);
}
2532*4882a593Smuzhiyun
/* register / unregister the PCI driver at module load and unload */
module_init(spider_net_init);
module_exit(spider_net_cleanup);
2535