// SPDX-License-Identifier: GPL-2.0-only
/*
 * sonic.c
 *
 * (C) 2005 Finn Thain
 *
 * Converted to DMA API, added zero-copy buffer handling, and
 * (from the mac68k project) introduced dhd's support for 16-bit cards.
 *
 * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * This driver is based on work from Andreas Busse, but most of
 * the code is rewritten.
 *
 * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
 *
 * Core code included by system sonic drivers
 *
 * And... partially rewritten again by David Huggins-Daines in order
 * to cope with screwed up Macintosh NICs that may or may not use
 * 16-bit DMA.
 *
 * (C) 1999 David Huggins-Daines <dhd@debian.org>
 *
 */

/*
 * Sources: Olivetti M700-10 Risc Personal Computer hardware handbook,
 * National Semiconductors data sheet for the DP83932B Sonic Ethernet
 * controller, and the files "8390.c" and "skeleton.c" in this directory.
 *
 * Additional sources: Nat Semi data sheet for the DP83932C and Nat Semi
 * Application Note AN-746, the files "lance.c" and "ibmlana.c". See also
 * the NetBSD file "sys/arch/mac68k/dev/if_sn.c".
 */
36*4882a593Smuzhiyun
/* Counts how many times the version banner has been printed (print once). */
static unsigned int version_printed;

/* Debug message level; -1 lets netif_msg_init() pick its default. */
static int sonic_debug = -1;
module_param(sonic_debug, int, 0);
MODULE_PARM_DESC(sonic_debug, "debug message level");
42*4882a593Smuzhiyun
sonic_msg_init(struct net_device * dev)43*4882a593Smuzhiyun static void sonic_msg_init(struct net_device *dev)
44*4882a593Smuzhiyun {
45*4882a593Smuzhiyun struct sonic_local *lp = netdev_priv(dev);
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun lp->msg_enable = netif_msg_init(sonic_debug, 0);
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun if (version_printed++ == 0)
50*4882a593Smuzhiyun netif_dbg(lp, drv, dev, "%s", version);
51*4882a593Smuzhiyun }
52*4882a593Smuzhiyun
sonic_alloc_descriptors(struct net_device * dev)53*4882a593Smuzhiyun static int sonic_alloc_descriptors(struct net_device *dev)
54*4882a593Smuzhiyun {
55*4882a593Smuzhiyun struct sonic_local *lp = netdev_priv(dev);
56*4882a593Smuzhiyun
57*4882a593Smuzhiyun /* Allocate a chunk of memory for the descriptors. Note that this
58*4882a593Smuzhiyun * must not cross a 64K boundary. It is smaller than one page which
59*4882a593Smuzhiyun * means that page alignment is a sufficient condition.
60*4882a593Smuzhiyun */
61*4882a593Smuzhiyun lp->descriptors =
62*4882a593Smuzhiyun dma_alloc_coherent(lp->device,
63*4882a593Smuzhiyun SIZEOF_SONIC_DESC *
64*4882a593Smuzhiyun SONIC_BUS_SCALE(lp->dma_bitmode),
65*4882a593Smuzhiyun &lp->descriptors_laddr, GFP_KERNEL);
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun if (!lp->descriptors)
68*4882a593Smuzhiyun return -ENOMEM;
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun lp->cda = lp->descriptors;
71*4882a593Smuzhiyun lp->tda = lp->cda + SIZEOF_SONIC_CDA *
72*4882a593Smuzhiyun SONIC_BUS_SCALE(lp->dma_bitmode);
73*4882a593Smuzhiyun lp->rda = lp->tda + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
74*4882a593Smuzhiyun SONIC_BUS_SCALE(lp->dma_bitmode);
75*4882a593Smuzhiyun lp->rra = lp->rda + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
76*4882a593Smuzhiyun SONIC_BUS_SCALE(lp->dma_bitmode);
77*4882a593Smuzhiyun
78*4882a593Smuzhiyun lp->cda_laddr = lp->descriptors_laddr;
79*4882a593Smuzhiyun lp->tda_laddr = lp->cda_laddr + SIZEOF_SONIC_CDA *
80*4882a593Smuzhiyun SONIC_BUS_SCALE(lp->dma_bitmode);
81*4882a593Smuzhiyun lp->rda_laddr = lp->tda_laddr + SIZEOF_SONIC_TD * SONIC_NUM_TDS *
82*4882a593Smuzhiyun SONIC_BUS_SCALE(lp->dma_bitmode);
83*4882a593Smuzhiyun lp->rra_laddr = lp->rda_laddr + SIZEOF_SONIC_RD * SONIC_NUM_RDS *
84*4882a593Smuzhiyun SONIC_BUS_SCALE(lp->dma_bitmode);
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun return 0;
87*4882a593Smuzhiyun }
88*4882a593Smuzhiyun
89*4882a593Smuzhiyun /*
90*4882a593Smuzhiyun * Open/initialize the SONIC controller.
91*4882a593Smuzhiyun *
92*4882a593Smuzhiyun * This routine should set everything up anew at each open, even
93*4882a593Smuzhiyun * registers that "should" only need to be set once at boot, so that
94*4882a593Smuzhiyun * there is non-reboot way to recover if something goes wrong.
95*4882a593Smuzhiyun */
static int sonic_open(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);

	spin_lock_init(&lp->lock);

	/* First pass: allocate one skb per receive resource.  On failure,
	 * unwind by freeing the skbs allocated so far.
	 */
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
		if (skb == NULL) {
			while(i > 0) { /* free any that were allocated successfully */
				i--;
				dev_kfree_skb(lp->rx_skb[i]);
				lp->rx_skb[i] = NULL;
			}
			printk(KERN_ERR "%s: couldn't allocate receive buffers\n",
			       dev->name);
			return -ENOMEM;
		}
		/* align IP header unless DMA requires otherwise */
		if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
			skb_reserve(skb, 2);
		lp->rx_skb[i] = skb;
	}

	/* Second pass: DMA-map every buffer.  On failure, unmap the
	 * mappings made so far, then free all skbs from the first pass
	 * (the first loop filled every rx_skb slot, so all are freed).
	 */
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
		                                  SONIC_RBSIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->device, laddr)) {
			while(i > 0) { /* free any that were mapped successfully */
				i--;
				dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
				lp->rx_laddr[i] = (dma_addr_t)0;
			}
			for (i = 0; i < SONIC_NUM_RRS; i++) {
				dev_kfree_skb(lp->rx_skb[i]);
				lp->rx_skb[i] = NULL;
			}
			printk(KERN_ERR "%s: couldn't map rx DMA buffers\n",
			       dev->name);
			return -ENOMEM;
		}
		lp->rx_laddr[i] = laddr;
	}

	/*
	 * Initialize the SONIC
	 */
	sonic_init(dev, true);

	netif_start_queue(dev);

	netif_dbg(lp, ifup, dev, "%s: Initialization done\n", __func__);

	return 0;
}
154*4882a593Smuzhiyun
155*4882a593Smuzhiyun /* Wait for the SONIC to become idle. */
static void sonic_quiesce(struct net_device *dev, u16 mask, bool may_sleep)
{
	struct sonic_local * __maybe_unused lp = netdev_priv(dev);
	u16 pending = 0;
	int attempt;

	/* Poll the command register until the masked bits clear, backing
	 * off with a sleep when the caller permits it, else a busy delay.
	 */
	for (attempt = 0; attempt < 1000; attempt++) {
		pending = SONIC_READ(SONIC_CMD) & mask;
		if (pending == 0)
			return;
		if (may_sleep)
			usleep_range(100, 200);
		else
			udelay(20);
	}
	WARN_ONCE(1, "command deadline expired! 0x%04x\n", pending);
}
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun /*
175*4882a593Smuzhiyun * Close the SONIC device
176*4882a593Smuzhiyun */
static int sonic_close(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	netif_dbg(lp, ifdown, dev, "%s\n", __func__);

	netif_stop_queue(dev);

	/*
	 * stop the SONIC, disable interrupts
	 */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
	sonic_quiesce(dev, SONIC_CR_ALL, true);	/* may_sleep=true: process context */

	SONIC_WRITE(SONIC_IMR, 0);		/* mask all interrupt sources */
	SONIC_WRITE(SONIC_ISR, 0x7fff);		/* acknowledge anything pending */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);	/* hold the chip in software reset */

	/* unmap and free skbs that haven't been transmitted */
	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if(lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		}
		if(lp->tx_skb[i]) {
			dev_kfree_skb(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}

	/* unmap and free the receive buffers */
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		if(lp->rx_laddr[i]) {
			dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
			lp->rx_laddr[i] = (dma_addr_t)0;
		}
		if(lp->rx_skb[i]) {
			dev_kfree_skb(lp->rx_skb[i]);
			lp->rx_skb[i] = NULL;
		}
	}

	return 0;
}
222*4882a593Smuzhiyun
/* Recover from a transmit hang: reset the chip, drop all pending tx
 * buffers and re-initialise.
 */
static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;
	/*
	 * put the Sonic into software-reset mode and
	 * disable all interrupts before releasing DMA buffers
	 */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
	sonic_quiesce(dev, SONIC_CR_ALL, false);	/* may_sleep=false here */

	SONIC_WRITE(SONIC_IMR, 0);		/* mask all interrupt sources */
	SONIC_WRITE(SONIC_ISR, 0x7fff);		/* acknowledge anything pending */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);	/* hold the chip in software reset */
	/* We could resend the original skbs. Easier to re-initialise. */
	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if(lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		}
		if(lp->tx_skb[i]) {
			dev_kfree_skb(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}
	/* Try to restart the adaptor. */
	sonic_init(dev, false);
	lp->stats.tx_errors++;
	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
}
254*4882a593Smuzhiyun
255*4882a593Smuzhiyun /*
256*4882a593Smuzhiyun * transmit packet
257*4882a593Smuzhiyun *
258*4882a593Smuzhiyun * Appends new TD during transmission thus avoiding any TX interrupts
259*4882a593Smuzhiyun * until we run out of TDs.
260*4882a593Smuzhiyun * This routine interacts closely with the ISR in that it may,
261*4882a593Smuzhiyun * set tx_skb[i]
262*4882a593Smuzhiyun * reset the status flags of the new TD
263*4882a593Smuzhiyun * set and reset EOL flags
264*4882a593Smuzhiyun * stop the tx queue
265*4882a593Smuzhiyun * The ISR interacts with this routine in various ways. It may,
266*4882a593Smuzhiyun * reset tx_skb[i]
267*4882a593Smuzhiyun * test the EOL and status flags of the TDs
268*4882a593Smuzhiyun * wake the tx queue
269*4882a593Smuzhiyun * Concurrently with all of this, the SONIC is potentially writing to
270*4882a593Smuzhiyun * the status flags of the TDs.
271*4882a593Smuzhiyun */
272*4882a593Smuzhiyun
sonic_send_packet(struct sk_buff * skb,struct net_device * dev)273*4882a593Smuzhiyun static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
274*4882a593Smuzhiyun {
275*4882a593Smuzhiyun struct sonic_local *lp = netdev_priv(dev);
276*4882a593Smuzhiyun dma_addr_t laddr;
277*4882a593Smuzhiyun int length;
278*4882a593Smuzhiyun int entry;
279*4882a593Smuzhiyun unsigned long flags;
280*4882a593Smuzhiyun
281*4882a593Smuzhiyun netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
282*4882a593Smuzhiyun
283*4882a593Smuzhiyun length = skb->len;
284*4882a593Smuzhiyun if (length < ETH_ZLEN) {
285*4882a593Smuzhiyun if (skb_padto(skb, ETH_ZLEN))
286*4882a593Smuzhiyun return NETDEV_TX_OK;
287*4882a593Smuzhiyun length = ETH_ZLEN;
288*4882a593Smuzhiyun }
289*4882a593Smuzhiyun
290*4882a593Smuzhiyun /*
291*4882a593Smuzhiyun * Map the packet data into the logical DMA address space
292*4882a593Smuzhiyun */
293*4882a593Smuzhiyun
294*4882a593Smuzhiyun laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
295*4882a593Smuzhiyun if (!laddr) {
296*4882a593Smuzhiyun pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
297*4882a593Smuzhiyun dev_kfree_skb_any(skb);
298*4882a593Smuzhiyun return NETDEV_TX_OK;
299*4882a593Smuzhiyun }
300*4882a593Smuzhiyun
301*4882a593Smuzhiyun spin_lock_irqsave(&lp->lock, flags);
302*4882a593Smuzhiyun
303*4882a593Smuzhiyun entry = (lp->eol_tx + 1) & SONIC_TDS_MASK;
304*4882a593Smuzhiyun
305*4882a593Smuzhiyun sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
306*4882a593Smuzhiyun sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
307*4882a593Smuzhiyun sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
308*4882a593Smuzhiyun sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
309*4882a593Smuzhiyun sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
310*4882a593Smuzhiyun sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
311*4882a593Smuzhiyun sonic_tda_put(dev, entry, SONIC_TD_LINK,
312*4882a593Smuzhiyun sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
313*4882a593Smuzhiyun
314*4882a593Smuzhiyun sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK, ~SONIC_EOL &
315*4882a593Smuzhiyun sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK));
316*4882a593Smuzhiyun
317*4882a593Smuzhiyun netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__);
318*4882a593Smuzhiyun
319*4882a593Smuzhiyun SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
320*4882a593Smuzhiyun
321*4882a593Smuzhiyun lp->tx_len[entry] = length;
322*4882a593Smuzhiyun lp->tx_laddr[entry] = laddr;
323*4882a593Smuzhiyun lp->tx_skb[entry] = skb;
324*4882a593Smuzhiyun
325*4882a593Smuzhiyun lp->eol_tx = entry;
326*4882a593Smuzhiyun
327*4882a593Smuzhiyun entry = (entry + 1) & SONIC_TDS_MASK;
328*4882a593Smuzhiyun if (lp->tx_skb[entry]) {
329*4882a593Smuzhiyun /* The ring is full, the ISR has yet to process the next TD. */
330*4882a593Smuzhiyun netif_dbg(lp, tx_queued, dev, "%s: stopping queue\n", __func__);
331*4882a593Smuzhiyun netif_stop_queue(dev);
332*4882a593Smuzhiyun /* after this packet, wait for ISR to free up some TDAs */
333*4882a593Smuzhiyun }
334*4882a593Smuzhiyun
335*4882a593Smuzhiyun spin_unlock_irqrestore(&lp->lock, flags);
336*4882a593Smuzhiyun
337*4882a593Smuzhiyun return NETDEV_TX_OK;
338*4882a593Smuzhiyun }
339*4882a593Smuzhiyun
340*4882a593Smuzhiyun /*
341*4882a593Smuzhiyun * The typical workload of the driver:
342*4882a593Smuzhiyun * Handle the network interface interrupts.
343*4882a593Smuzhiyun */
static irqreturn_t sonic_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sonic_local *lp = netdev_priv(dev);
	int status;
	unsigned long flags;

	/* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
	 * with sonic_send_packet() so that the two functions can share state.
	 * Secondly, it makes sonic_interrupt() re-entrant, as that is required
	 * by macsonic which must use two IRQs with different priority levels.
	 */
	spin_lock_irqsave(&lp->lock, flags);

	status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
	if (!status) {
		spin_unlock_irqrestore(&lp->lock, flags);

		return IRQ_NONE;
	}

	/* Acknowledge and service every pending source, then re-read the
	 * ISR and repeat until nothing is pending.
	 */
	do {
		SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */

		if (status & SONIC_INT_PKTRX) {
			netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
			sonic_rx(dev);	/* got packet(s) */
		}

		if (status & SONIC_INT_TXDN) {
			int entry = lp->cur_tx;
			int td_status;
			int freed_some = 0;

			/* The state of a Transmit Descriptor may be inferred
			 * from { tx_skb[entry], td_status } as follows.
			 * { clear, clear } => the TD has never been used
			 * { set, clear } => the TD was handed to SONIC
			 * { set, set } => the TD was handed back
			 * { clear, set } => the TD is available for re-use
			 */

			netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);

			/* Reap completed TDs, stopping at the first one the
			 * chip has not yet written status into.
			 */
			while (lp->tx_skb[entry] != NULL) {
				if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
					break;

				if (td_status & SONIC_TCR_PTX) {
					lp->stats.tx_packets++;
					lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
				} else {
					/* Classify the failure for the stats counters. */
					if (td_status & (SONIC_TCR_EXD |
					    SONIC_TCR_EXC | SONIC_TCR_BCM))
						lp->stats.tx_aborted_errors++;
					if (td_status &
					    (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
						lp->stats.tx_carrier_errors++;
					if (td_status & SONIC_TCR_OWC)
						lp->stats.tx_window_errors++;
					if (td_status & SONIC_TCR_FU)
						lp->stats.tx_fifo_errors++;
				}

				/* We must free the original skb */
				dev_consume_skb_irq(lp->tx_skb[entry]);
				lp->tx_skb[entry] = NULL;
				/* and unmap DMA buffer */
				dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE);
				lp->tx_laddr[entry] = (dma_addr_t)0;
				freed_some = 1;

				/* Stop after the list tail (EOL marker). */
				if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) {
					entry = (entry + 1) & SONIC_TDS_MASK;
					break;
				}
				entry = (entry + 1) & SONIC_TDS_MASK;
			}

			if (freed_some || lp->tx_skb[entry] == NULL)
				netif_wake_queue(dev);	/* The ring is no longer full */
			lp->cur_tx = entry;
		}

		/*
		 * check error conditions
		 */
		if (status & SONIC_INT_RFO) {
			netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
				  __func__);
		}
		if (status & SONIC_INT_RDE) {
			netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
				  __func__);
		}
		if (status & SONIC_INT_RBAE) {
			netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
				  __func__);
		}

		/* counter overruns; all counters are 16bit wide */
		if (status & SONIC_INT_FAE)
			lp->stats.rx_frame_errors += 65536;
		if (status & SONIC_INT_CRC)
			lp->stats.rx_crc_errors += 65536;
		if (status & SONIC_INT_MP)
			lp->stats.rx_missed_errors += 65536;

		/* transmit error */
		if (status & SONIC_INT_TXER) {
			u16 tcr = SONIC_READ(SONIC_TCR);

			netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
				  __func__, tcr);

			if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
				   SONIC_TCR_FU | SONIC_TCR_BCM)) {
				/* Aborted transmission. Try again. */
				netif_stop_queue(dev);
				SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
			}
		}

		/* bus retry */
		if (status & SONIC_INT_BR) {
			printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n",
				dev->name);
			/* ... to help debug DMA problems causing endless interrupts. */
			/* Bounce the eth interface to turn on the interrupt again. */
			SONIC_WRITE(SONIC_IMR, 0);
		}

		status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
	} while (status);

	spin_unlock_irqrestore(&lp->lock, flags);

	return IRQ_HANDLED;
}
483*4882a593Smuzhiyun
484*4882a593Smuzhiyun /* Return the array index corresponding to a given Receive Buffer pointer. */
static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
			   unsigned int last)
{
	unsigned int idx = last;

	/* Walk the whole ring starting just after 'last' (the most likely
	 * position), wrapping around until every slot has been checked.
	 */
	do {
		idx = (idx + 1) & SONIC_RRS_MASK;
		if (lp->rx_laddr[idx] == addr)
			return idx;
	} while (idx != last);

	/* No receive buffer in the ring carries this DMA address. */
	return -ENOENT;
}
498*4882a593Smuzhiyun
499*4882a593Smuzhiyun /* Allocate and map a new skb to be used as a receive buffer. */
sonic_alloc_rb(struct net_device * dev,struct sonic_local * lp,struct sk_buff ** new_skb,dma_addr_t * new_addr)500*4882a593Smuzhiyun static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
501*4882a593Smuzhiyun struct sk_buff **new_skb, dma_addr_t *new_addr)
502*4882a593Smuzhiyun {
503*4882a593Smuzhiyun *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
504*4882a593Smuzhiyun if (!*new_skb)
505*4882a593Smuzhiyun return false;
506*4882a593Smuzhiyun
507*4882a593Smuzhiyun if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
508*4882a593Smuzhiyun skb_reserve(*new_skb, 2);
509*4882a593Smuzhiyun
510*4882a593Smuzhiyun *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
511*4882a593Smuzhiyun SONIC_RBSIZE, DMA_FROM_DEVICE);
512*4882a593Smuzhiyun if (!*new_addr) {
513*4882a593Smuzhiyun dev_kfree_skb(*new_skb);
514*4882a593Smuzhiyun *new_skb = NULL;
515*4882a593Smuzhiyun return false;
516*4882a593Smuzhiyun }
517*4882a593Smuzhiyun
518*4882a593Smuzhiyun return true;
519*4882a593Smuzhiyun }
520*4882a593Smuzhiyun
521*4882a593Smuzhiyun /* Place a new receive resource in the Receive Resource Area and update RWP. */
static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
			     dma_addr_t old_addr, dma_addr_t new_addr)
{
	unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
	unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
	u32 buf;

	/* The resources in the range [RRP, RWP) belong to the SONIC. This loop
	 * scans the other resources in the RRA, those in the range [RWP, RRP).
	 */
	do {
		/* Reassemble the 32-bit buffer address from its two halves. */
		buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
		      sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);

		if (buf == old_addr)
			break;

		entry = (entry + 1) & SONIC_RRS_MASK;
	} while (entry != end);

	WARN_ONCE(buf != old_addr, "failed to find resource!\n");

	/* Replace the consumed buffer address with the new one. */
	sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
	sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);

	entry = (entry + 1) & SONIC_RRS_MASK;

	/* Advance RWP so the updated resource is handed back to the SONIC. */
	SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
}
551*4882a593Smuzhiyun
552*4882a593Smuzhiyun /*
553*4882a593Smuzhiyun * We have a good packet(s), pass it/them up the network stack.
554*4882a593Smuzhiyun */
static void sonic_rx(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int entry = lp->cur_rx;
	int prev_entry = lp->eol_rx;
	bool rbe = false;

	/* Process every descriptor the chip has handed back (IN_USE == 0). */
	while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
		u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);

		/* If the RD has LPKT set, the chip has finished with the RB */
		if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
			struct sk_buff *new_skb;
			dma_addr_t new_laddr;
			/* DMA address of the buffer, reassembled from its
			 * high and low halves in the descriptor.
			 */
			u32 addr = (sonic_rda_get(dev, entry,
						  SONIC_RD_PKTPTR_H) << 16) |
				   sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
			int i = index_from_addr(lp, addr, entry);

			if (i < 0) {
				WARN_ONCE(1, "failed to find buffer!\n");
				break;
			}

			if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
				struct sk_buff *used_skb = lp->rx_skb[i];
				int pkt_len;

				/* Pass the used buffer up the stack */
				dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
						 DMA_FROM_DEVICE);

				pkt_len = sonic_rda_get(dev, entry,
							SONIC_RD_PKTLEN);
				skb_trim(used_skb, pkt_len);
				used_skb->protocol = eth_type_trans(used_skb,
								    dev);
				netif_rx(used_skb);
				lp->stats.rx_packets++;
				lp->stats.rx_bytes += pkt_len;

				/* Record the replacement buffer in slot i. */
				lp->rx_skb[i] = new_skb;
				lp->rx_laddr[i] = new_laddr;
			} else {
				/* Failed to obtain a new buffer so re-use it */
				new_laddr = addr;
				lp->stats.rx_dropped++;
			}
			/* If RBE is already asserted when RWP advances then
			 * it's safe to clear RBE after processing this packet.
			 */
			rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
			sonic_update_rra(dev, lp, addr, new_laddr);
		}
		/*
		 * give back the descriptor
		 */
		sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
		sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);

		prev_entry = entry;
		entry = (entry + 1) & SONIC_RDS_MASK;
	}

	lp->cur_rx = entry;

	if (prev_entry != lp->eol_rx) {
		/* Advance the EOL flag to put descriptors back into service */
		sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
			      sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
		sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
			      sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
		lp->eol_rx = prev_entry;
	}

	/* Acknowledge Receive Buffer Area Exceeded now that resources exist. */
	if (rbe)
		SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
}
633*4882a593Smuzhiyun
634*4882a593Smuzhiyun
635*4882a593Smuzhiyun /*
636*4882a593Smuzhiyun * Get the current statistics.
637*4882a593Smuzhiyun * This may be called with the device open or closed.
638*4882a593Smuzhiyun */
sonic_get_stats(struct net_device * dev)639*4882a593Smuzhiyun static struct net_device_stats *sonic_get_stats(struct net_device *dev)
640*4882a593Smuzhiyun {
641*4882a593Smuzhiyun struct sonic_local *lp = netdev_priv(dev);
642*4882a593Smuzhiyun
643*4882a593Smuzhiyun /* read the tally counter from the SONIC and reset them */
644*4882a593Smuzhiyun lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
645*4882a593Smuzhiyun SONIC_WRITE(SONIC_CRCT, 0xffff);
646*4882a593Smuzhiyun lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET);
647*4882a593Smuzhiyun SONIC_WRITE(SONIC_FAET, 0xffff);
648*4882a593Smuzhiyun lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT);
649*4882a593Smuzhiyun SONIC_WRITE(SONIC_MPT, 0xffff);
650*4882a593Smuzhiyun
651*4882a593Smuzhiyun return &lp->stats;
652*4882a593Smuzhiyun }
653*4882a593Smuzhiyun
654*4882a593Smuzhiyun
655*4882a593Smuzhiyun /*
656*4882a593Smuzhiyun * Set or clear the multicast filter for this adaptor.
657*4882a593Smuzhiyun */
sonic_multicast_list(struct net_device * dev)658*4882a593Smuzhiyun static void sonic_multicast_list(struct net_device *dev)
659*4882a593Smuzhiyun {
660*4882a593Smuzhiyun struct sonic_local *lp = netdev_priv(dev);
661*4882a593Smuzhiyun unsigned int rcr;
662*4882a593Smuzhiyun struct netdev_hw_addr *ha;
663*4882a593Smuzhiyun unsigned char *addr;
664*4882a593Smuzhiyun int i;
665*4882a593Smuzhiyun
666*4882a593Smuzhiyun rcr = SONIC_READ(SONIC_RCR) & ~(SONIC_RCR_PRO | SONIC_RCR_AMC);
667*4882a593Smuzhiyun rcr |= SONIC_RCR_BRD; /* accept broadcast packets */
668*4882a593Smuzhiyun
669*4882a593Smuzhiyun if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
670*4882a593Smuzhiyun rcr |= SONIC_RCR_PRO;
671*4882a593Smuzhiyun } else {
672*4882a593Smuzhiyun if ((dev->flags & IFF_ALLMULTI) ||
673*4882a593Smuzhiyun (netdev_mc_count(dev) > 15)) {
674*4882a593Smuzhiyun rcr |= SONIC_RCR_AMC;
675*4882a593Smuzhiyun } else {
676*4882a593Smuzhiyun unsigned long flags;
677*4882a593Smuzhiyun
678*4882a593Smuzhiyun netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
679*4882a593Smuzhiyun netdev_mc_count(dev));
680*4882a593Smuzhiyun sonic_set_cam_enable(dev, 1); /* always enable our own address */
681*4882a593Smuzhiyun i = 1;
682*4882a593Smuzhiyun netdev_for_each_mc_addr(ha, dev) {
683*4882a593Smuzhiyun addr = ha->addr;
684*4882a593Smuzhiyun sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
685*4882a593Smuzhiyun sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
686*4882a593Smuzhiyun sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
687*4882a593Smuzhiyun sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i));
688*4882a593Smuzhiyun i++;
689*4882a593Smuzhiyun }
690*4882a593Smuzhiyun SONIC_WRITE(SONIC_CDC, 16);
691*4882a593Smuzhiyun SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
692*4882a593Smuzhiyun
693*4882a593Smuzhiyun /* LCAM and TXP commands can't be used simultaneously */
694*4882a593Smuzhiyun spin_lock_irqsave(&lp->lock, flags);
695*4882a593Smuzhiyun sonic_quiesce(dev, SONIC_CR_TXP, false);
696*4882a593Smuzhiyun SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
697*4882a593Smuzhiyun sonic_quiesce(dev, SONIC_CR_LCAM, false);
698*4882a593Smuzhiyun spin_unlock_irqrestore(&lp->lock, flags);
699*4882a593Smuzhiyun }
700*4882a593Smuzhiyun }
701*4882a593Smuzhiyun
702*4882a593Smuzhiyun netif_dbg(lp, ifup, dev, "%s: setting RCR=%x\n", __func__, rcr);
703*4882a593Smuzhiyun
704*4882a593Smuzhiyun SONIC_WRITE(SONIC_RCR, rcr);
705*4882a593Smuzhiyun }
706*4882a593Smuzhiyun
707*4882a593Smuzhiyun
/*
 * Initialize the SONIC ethernet controller.
 *
 * Full (re)initialization sequence: software reset, receive resource
 * area setup, receive and transmit descriptor ring setup, CAM load,
 * then receiver and interrupt enable.  The register-access order here
 * is significant; do not reorder.
 *
 * @dev:       device being initialized
 * @may_sleep: forwarded to sonic_quiesce() — presumably allows the
 *             command-completion polls to sleep; TODO confirm against
 *             sonic_quiesce()'s definition.
 *
 * Always returns 0.
 */
static int sonic_init(struct net_device *dev, bool may_sleep)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	/*
	 * put the Sonic into software-reset mode and
	 * disable all interrupts
	 */
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	/* While in reset mode, clear CAM Enable register */
	SONIC_WRITE(SONIC_CE, 0);

	/*
	 * clear software reset flag, disable receiver, clear and
	 * enable interrupts, then completely initialize the SONIC
	 */
	SONIC_WRITE(SONIC_CMD, 0);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
	sonic_quiesce(dev, SONIC_CR_ALL, may_sleep);

	/*
	 * initialize the receive resource area:
	 * one entry per receive buffer, giving the chip each buffer's
	 * DMA address (split into 16-bit halves) and size in words.
	 */
	netif_dbg(lp, ifup, dev, "%s: initialize receive resource area\n",
		  __func__);

	for (i = 0; i < SONIC_NUM_RRS; i++) {
		u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff;
		u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16;
		sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
		sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
		/* buffer size is programmed in 16-bit words, hence >> 1 */
		sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_L, SONIC_RBSIZE >> 1);
		sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_H, 0);
	}

	/* initialize all RRA registers */
	SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
	SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
	SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
	SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
	SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
	/* end-of-buffer word count: buffer size in words minus a margin
	 * that depends on the DMA width (2 words for 32-bit, 1 for 16-bit) */
	SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));

	/* load the resource pointers */
	netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);

	SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
	sonic_quiesce(dev, SONIC_CR_RRRA, may_sleep);

	/*
	 * Initialize the receive descriptors so that they
	 * become a circular linked list, ie. let the last
	 * descriptor point to the first again.
	 */
	netif_dbg(lp, ifup, dev, "%s: initialize receive descriptors\n",
		  __func__);

	for (i=0; i<SONIC_NUM_RDS; i++) {
		sonic_rda_put(dev, i, SONIC_RD_STATUS, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTPTR_L, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTPTR_H, 0);
		sonic_rda_put(dev, i, SONIC_RD_SEQNO, 0);
		sonic_rda_put(dev, i, SONIC_RD_IN_USE, 1);
		/* link each descriptor to the next one in the ring */
		sonic_rda_put(dev, i, SONIC_RD_LINK,
			lp->rda_laddr +
			((i+1) * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode)));
	}
	/* fix last descriptor: point back to the first and mark end-of-list */
	sonic_rda_put(dev, SONIC_NUM_RDS - 1, SONIC_RD_LINK,
		      (lp->rda_laddr & 0xffff) | SONIC_EOL);
	lp->eol_rx = SONIC_NUM_RDS - 1;
	lp->cur_rx = 0;
	SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16);
	SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff);

	/*
	 * initialize transmit descriptors (same circular-list scheme
	 * as the receive ring; no skbs are attached yet)
	 */
	netif_dbg(lp, ifup, dev, "%s: initialize transmit descriptors\n",
		  __func__);

	for (i = 0; i < SONIC_NUM_TDS; i++) {
		sonic_tda_put(dev, i, SONIC_TD_STATUS, 0);
		sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0);
		sonic_tda_put(dev, i, SONIC_TD_PKTSIZE, 0);
		sonic_tda_put(dev, i, SONIC_TD_FRAG_COUNT, 0);
		sonic_tda_put(dev, i, SONIC_TD_LINK,
			(lp->tda_laddr & 0xffff) +
			(i + 1) * SIZEOF_SONIC_TD * SONIC_BUS_SCALE(lp->dma_bitmode));
		lp->tx_skb[i] = NULL;
	}
	/* fix last descriptor: close the ring (no EOL here; eol_tx tracks it) */
	sonic_tda_put(dev, SONIC_NUM_TDS - 1, SONIC_TD_LINK,
		      (lp->tda_laddr & 0xffff));

	SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
	SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
	lp->cur_tx = 0;
	lp->eol_tx = SONIC_NUM_TDS - 1;

	/*
	 * put our own address to CAM desc[0]
	 * (16 bits per CAM word, low byte first)
	 */
	sonic_cda_put(dev, 0, SONIC_CD_CAP0, dev->dev_addr[1] << 8 | dev->dev_addr[0]);
	sonic_cda_put(dev, 0, SONIC_CD_CAP1, dev->dev_addr[3] << 8 | dev->dev_addr[2]);
	sonic_cda_put(dev, 0, SONIC_CD_CAP2, dev->dev_addr[5] << 8 | dev->dev_addr[4]);
	/* enable only CAM entry 0 for now; multicast entries come later */
	sonic_set_cam_enable(dev, 1);

	for (i = 0; i < 16; i++)
		sonic_cda_put(dev, i, SONIC_CD_ENTRY_POINTER, i);

	/*
	 * initialize CAM registers
	 */
	SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
	SONIC_WRITE(SONIC_CDC, 16);

	/*
	 * load the CAM
	 */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
	sonic_quiesce(dev, SONIC_CR_LCAM, may_sleep);

	/*
	 * enable receiver, disable loopback
	 * and enable all interrupts
	 */
	SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
	SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);

	netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
		  SONIC_READ(SONIC_CMD));

	return 0;
}
854*4882a593Smuzhiyun
855*4882a593Smuzhiyun MODULE_LICENSE("GPL");
856