// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * Gianfar:  AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors.  The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed).  In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets).  The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in.  The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted.  Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT      (5*HZ)

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

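/* Initialize a single Rx buffer descriptor: set its buffer address,
 * mark it empty and interrupt-enabled, and set the WRAP flag if it is
 * the last descriptor of the ring.
 */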
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = cpu_to_be32(buf);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	gfar_wmb();

	bdp->lstatus = cpu_to_be32(lstatus);
}

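/* Tell the controller where the buffer descriptor rings live: write each
 * queue's DMA base address to the tbase0.../rbase0... registers (the
 * register pointer advances by two u32 slots per queue).
 */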
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

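/* Set the per-queue Rx frame parameters: each rqprm register gets the
 * queue's ring size plus the lossless flow control threshold in the
 * upper bits.
 */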
static void gfar_init_rqprm(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->rqprm0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
		baddr++;
	}
}

static void gfar_rx_offload_en(struct gfar_private *priv)
{
	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en || priv->rx_filer_enable)
		priv->uses_rxfcb = 1;
}

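/* Build the RCTRL value from the current settings (filer, promiscuous
 * mode, checksumming, extended hash, padding, timestamping, VLAN
 * extraction) and program the Rx MAC, including the flow control
 * thresholds.
 */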
static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
		/* Program the RIR0 reg with the required distribution */
		if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else /* GFAR_MQ_POLLING */
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Clear the LFC bit */
	gfar_write(&regs->rctrl, rctrl);
	/* Init flow control threshold values */
	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

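/* Build the TCTRL value (Tx checksumming, priority vs. weighted
 * round-robin queue scheduling, VLAN insertion) and program the Tx MAC.
 */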
static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}

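/* Program the interrupt coalescing registers for the queues selected by
 * tx_mask/rx_mask.  Multi-group (MQ_MG_MODE) devices have per-queue
 * txic/rxic registers; older single-group devices have only one of each.
 */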
static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

static void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

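/* Aggregate the per-queue Rx/Tx packet, byte and drop counters into
 * dev->stats and return it.
 */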
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (i.e. the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index indicate which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry.
 */
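/* Worked example (illustrative): with an 8-bit hash width, a CRC whose
 * top byte is 0xD5 (0b11010101) yields hash index 0xD5; the top 3 bits
 * (0b110 = 6) select gaddr6 and the low 5 bits (0b10101 = 21) select
 * bit 21 of that register, counted from the MSB per IBM bit numbering.
 */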
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* For a station address of 0x12345678ABCD in transmission
	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
	 * MACnADDR2 is set to 0x34120000.
	 */
	tempval = (addr[5] << 24) | (addr[4] << 16) |
		  (addr[3] << 8) | addr[2];

	gfar_write(macptr, tempval);

	tempval = (addr[1] << 24) | (addr[0] << 16);

	gfar_write(macptr+1, tempval);
}

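/* Change the interface's station address: validate and record it via
 * eth_mac_addr(), then program it into the first hardware MAC slot.
 */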
static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->ndev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

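/* Set up one interrupt group from its device-tree node: allocate IRQ
 * info, map the register block, parse the TX/RX/ER interrupts, and
 * assign Rx/Tx queues to the group according to the bit maps.
 */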
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (!gfar_irq(grp, TX)->irq ||
		    !gfar_irq(grp, RX)->irq ||
		    !gfar_irq(grp, ER)->irq)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		u32 rxq_mask, txq_mask;
		int ret;

		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);

		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
		if (!ret) {
			grp->rx_bit_map = rxq_mask ?
			rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
		if (!ret) {
			grp->tx_bit_map = txq_mask ?
			txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		if (priv->poll_mode == GFAR_SQ_POLLING) {
			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		}
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
	 * from right to left, so we need to reverse the 8 bits to get the
	 * q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}

static int gfar_of_group_count(struct device_node *np)
{
	struct device_node *child;
	int num = 0;

	for_each_available_child_of_node(np, child)
		if (of_node_name_eq(child, "queue-group"))
			num++;

	return num;
}

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
			return PHY_INTERFACE_MODE_RMII;
		} else {
			phy_interface_t interface = priv->interface;

			/* This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}

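/* Parse the MAC's device-tree node: queue/group topology, stashing
 * options, MAC address, device capabilities (by model), PHY connection
 * type and PHY/TBI handles.  An illustrative fragment of the properties
 * consumed here (values are examples only, not taken from a real board
 * file):
 *
 *	ethernet@24000 {
 *		compatible = "fsl,etsec2";
 *		model = "eTSEC";
 *		fsl,num_tx_queues = <8>;
 *		fsl,num_rx_queues = <8>;
 *		fsl,magic-packet;
 *		phy-handle = <&phy0>;
 *		queue-group { ... };
 *	};
 */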
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const void *mac_addr;
	int err = 0, i;
	phy_interface_t interface;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	u32 stash_len = 0;
	u32 stash_idx = 0;
	unsigned int num_tx_qs, num_rx_qs;
	unsigned short mode, poll_mode;

	if (!np)
		return -ENODEV;

	if (of_device_is_compatible(np, "fsl,etsec2")) {
		mode = MQ_MG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	} else {
		mode = SQ_SG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	}

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps = gfar_of_group_count(np);

		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		if (poll_mode == GFAR_SQ_POLLING) {
			num_tx_qs = num_grps; /* one txq per int group */
			num_rx_qs = num_grps; /* one rxq per int group */
		} else { /* GFAR_MQ_POLLING */
			u32 tx_queues, rx_queues;
			int ret;

			/* parse the num of HW tx and rx queues */
			ret = of_property_read_u32(np, "fsl,num_tx_queues",
						   &tx_queues);
			num_tx_qs = ret ? 1 : tx_queues;

			ret = of_property_read_u32(np, "fsl,num_rx_queues",
						   &rx_queues);
			num_rx_qs = ret ? 1 : rx_queues;
		}
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;
	priv->poll_mode = poll_mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	err = of_property_read_string(np, "model", &model);
	if (err) {
		pr_err("Device model property missing, aborting\n");
		goto rx_alloc_failed;
	}

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_available_child_of_node(np, child) {
			if (!of_node_name_eq(child, "queue-group"))
				continue;

			err = gfar_parse_group(child, priv, model);
			if (err) {
				of_node_put(child);
				goto err_grp_init;
			}
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	if (of_property_read_bool(np, "bd-stash")) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	err = of_property_read_u32(np, "rx-stash-len", &stash_len);

	if (err == 0)
		priv->rx_stash_size = stash_len;

	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);

	if (err == 0)
		priv->rx_stash_index = stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (!IS_ERR(mac_addr)) {
		ether_addr_copy(dev->dev_addr, mac_addr);
	} else {
		eth_hw_addr_random(dev);
		dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
	}

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				      FSL_GIANFAR_DEV_HAS_CSUM |
				      FSL_GIANFAR_DEV_HAS_VLAN |
				      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				      FSL_GIANFAR_DEV_HAS_TIMER |
				      FSL_GIANFAR_DEV_HAS_RX_FILER;

	/* Use PHY connection type from the DT node if one is specified there.
	 * rgmii-id really needs to be specified. Other types can be
	 * detected by hardware
	 */
	err = of_get_phy_mode(np, &interface);
	if (!err)
		priv->interface = interface;
	else
		priv->interface = gfar_get_interface(dev);

	if (of_find_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	if (of_get_property(np, "fsl,wake-on-filer", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
		err = of_phy_register_fixed_link(np);
		if (err)
			goto err_grp_init;

		priv->phy_node = of_node_get(np);
	}

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}

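/* Install the group of four filer entries that recognize one packet
 * class (cluster markers plus the parser-field comparisons), filling
 * the table backwards from rqfar.  Returns the next free (lower) index.
 */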
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

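/* Populate the hardware filer with a default rule set: a catch-all
 * default entry, one cluster per recognized IPv4/IPv6 TCP/UDP class,
 * and no-match entries for the remaining (unused) slots.
 */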
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

#ifdef CONFIG_PPC
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	/* P2020/P1010 Rev 1; MPC8548 Rev 2 */
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

#ifdef CONFIG_PPC
	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);
#endif

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

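/* Select the group-address hash registers used for multicast filtering:
 * 16 registers (igaddr0-7 plus gaddr0-7, a 512-bit table) when the
 * controller supports extended hashing, otherwise 8 (gaddr0-7, 256 bits).
 */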
static void gfar_init_addr_hash_table(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}
}

static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/* Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
		return 0;

	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	unsigned int timeout;
	int stopped;

	gfar_ints_disable(priv);

	if (gfar_is_dma_stopped(priv))
		return;

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

retry:
	timeout = 1000;
	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
		cpu_relax();
		timeout--;
	}

	if (!timeout)
		stopped = gfar_is_dma_stopped(priv);

	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
	    !__gfar_is_rx_idle(priv))
		goto retry;
}

/* Halt the receive and transmit queues */
static void gfar_halt(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Disable the Rx/Tx hw queues */
	gfar_write(&regs->rqueue, 0);
	gfar_write(&regs->tqueue, 0);

	mdelay(10);

	gfar_halt_nodisable(priv);

	/* Disable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

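/* Unmap and free every skb (head buffer plus fragments) still attached
 * to the Tx ring, then release the tx_skbuff tracking array itself.
 */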
static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
				       be16_to_cpu(txbdp->length),
				       DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
	tx_queue->tx_skbuff = NULL;
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	int i;

	struct rxbd8 *rxbdp = rx_queue->rx_bd_base;

	dev_kfree_skb(rx_queue->skb);

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];

		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;

		if (!rxb->page)
			continue;

		dma_unmap_page(rx_queue->dev, rxb->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(rxb->page);

		rxb->page = NULL;
	}

	kfree(rx_queue->rx_buff);
	rx_queue->rx_buff = NULL;
}

1154*4882a593Smuzhiyun /* If there are any tx skbs or rx skbs still around, free them.
1155*4882a593Smuzhiyun * Then free tx_skbuff and rx_skbuff
1156*4882a593Smuzhiyun */
free_skb_resources(struct gfar_private * priv)1157*4882a593Smuzhiyun static void free_skb_resources(struct gfar_private *priv)
1158*4882a593Smuzhiyun {
1159*4882a593Smuzhiyun struct gfar_priv_tx_q *tx_queue = NULL;
1160*4882a593Smuzhiyun struct gfar_priv_rx_q *rx_queue = NULL;
1161*4882a593Smuzhiyun int i;
1162*4882a593Smuzhiyun
1163*4882a593Smuzhiyun /* Go through all the buffer descriptors and free their data buffers */
1164*4882a593Smuzhiyun for (i = 0; i < priv->num_tx_queues; i++) {
1165*4882a593Smuzhiyun struct netdev_queue *txq;
1166*4882a593Smuzhiyun
1167*4882a593Smuzhiyun tx_queue = priv->tx_queue[i];
1168*4882a593Smuzhiyun txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1169*4882a593Smuzhiyun if (tx_queue->tx_skbuff)
1170*4882a593Smuzhiyun free_skb_tx_queue(tx_queue);
1171*4882a593Smuzhiyun netdev_tx_reset_queue(txq);
1172*4882a593Smuzhiyun }
1173*4882a593Smuzhiyun
1174*4882a593Smuzhiyun for (i = 0; i < priv->num_rx_queues; i++) {
1175*4882a593Smuzhiyun rx_queue = priv->rx_queue[i];
1176*4882a593Smuzhiyun if (rx_queue->rx_buff)
1177*4882a593Smuzhiyun free_skb_rx_queue(rx_queue);
1178*4882a593Smuzhiyun }
1179*4882a593Smuzhiyun
1180*4882a593Smuzhiyun dma_free_coherent(priv->dev,
1181*4882a593Smuzhiyun sizeof(struct txbd8) * priv->total_tx_ring_size +
1182*4882a593Smuzhiyun sizeof(struct rxbd8) * priv->total_rx_ring_size,
1183*4882a593Smuzhiyun priv->tx_queue[0]->tx_bd_base,
1184*4882a593Smuzhiyun priv->tx_queue[0]->tx_bd_dma_base);
1185*4882a593Smuzhiyun }
1186*4882a593Smuzhiyun
stop_gfar(struct net_device * dev)1187*4882a593Smuzhiyun void stop_gfar(struct net_device *dev)
1188*4882a593Smuzhiyun {
1189*4882a593Smuzhiyun struct gfar_private *priv = netdev_priv(dev);
1190*4882a593Smuzhiyun
1191*4882a593Smuzhiyun netif_tx_stop_all_queues(dev);
1192*4882a593Smuzhiyun
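	/* Order the queue stop above against the GFAR_DOWN update; the
	 * paired barriers make the flag change visible to the NAPI
	 * pollers and cleanup paths checking it on other CPUs.
	 */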
	smp_mb__before_atomic();
	set_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_atomic();

	disable_napi(priv);

	/* disable ints and gracefully shut down Rx/Tx DMA */
	gfar_halt(priv);

	phy_stop(dev->phydev);

	free_skb_resources(priv);
}

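/* Undo gfar_halt(): re-enable the hw queues, restart the DMA engine
 * and the MAC, and re-enable interrupts for every interrupt group.
 */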
static void gfar_start(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx/Tx hw queues */
	gfar_write(&regs->rqueue, priv->rqueue);
	gfar_write(&regs->tqueue, priv->tqueue);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
	}

	/* Enable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	gfar_ints_enable(priv);

	netif_trans_update(priv->ndev); /* prevent tx timeout */
}

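/* Allocate a fresh page for an Rx buffer and map it for the device.
 * Returns false (leaving the old rxb contents untouched) if either
 * the allocation or the DMA mapping fails.
 */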
static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
{
	struct page *page;
	dma_addr_t addr;

	page = dev_alloc_page();
	if (unlikely(!page))
		return false;

	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
		__free_page(page);

		return false;
	}

	rxb->dma = addr;
	rxb->page = page;
	rxb->page_offset = 0;

	return true;
}

static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
{
	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
	struct gfar_extra_stats *estats = &priv->extra_stats;

	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
	atomic64_inc(&estats->rx_alloc_err);
}

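/* Refill up to alloc_cnt Rx descriptors starting at next_to_use,
 * reusing pages that are still attached and allocating new ones as
 * needed; stops early (after bumping the error counter) if a page
 * cannot be obtained.
 */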
static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
				int alloc_cnt)
{
	struct rxbd8 *bdp;
	struct gfar_rx_buff *rxb;
	int i;

	i = rx_queue->next_to_use;
	bdp = &rx_queue->rx_bd_base[i];
	rxb = &rx_queue->rx_buff[i];

	while (alloc_cnt--) {
		/* try to reuse the page, allocating a fresh one if none
		 * is attached
		 */
		if (unlikely(!rxb->page)) {
			if (unlikely(!gfar_new_page(rx_queue, rxb))) {
				gfar_rx_alloc_err(rx_queue);
				break;
			}
		}

		/* Setup the new RxBD */
		gfar_init_rxbdp(rx_queue, bdp,
				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);

		/* Update to the next pointer */
		bdp++;
		rxb++;

		if (unlikely(++i == rx_queue->rx_ring_size)) {
			i = 0;
			bdp = rx_queue->rx_bd_base;
			rxb = rx_queue->rx_buff;
		}
	}

	rx_queue->next_to_use = i;
	rx_queue->next_to_alloc = i;
}

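/* Reset every Tx and Rx ring to its initial state: clear all Tx
 * descriptors, mark the last one with the wrap bit, refill the Rx
 * rings, and point each Rx queue at its free-buffer register.
 */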
static void gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	u32 __iomem *rfbptr;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
					    TXBD_WRAP);
	}

	rfbptr = &regs->rfbptr0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];

		rx_queue->next_to_clean = 0;
		rx_queue->next_to_use = 0;
		rx_queue->next_to_alloc = 0;

		/* make sure next_to_clean != next_to_use after this
		 * by leaving at least 1 unused descriptor
		 */
		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));

		rx_queue->rfbptr = rfbptr;
		rfbptr += 2;
	}
}

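/* Allocate one DMA-coherent block holding all Tx descriptor rings
 * followed by all Rx descriptor rings, carve it up between the
 * queues, then allocate the per-queue bookkeeping arrays.
 */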
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->ndev = ndev;
		rx_queue->dev = dev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (j = 0; j < tx_queue->tx_ring_size; j++)
			tx_queue->tx_skbuff[j] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
					    sizeof(*rx_queue->rx_buff),
					    GFP_KERNEL);
		if (!rx_queue->rx_buff)
			goto cleanup;
	}

	gfar_init_bds(ndev);

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	int err;

	gfar_mac_reset(priv);

	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_tx_rx_base(priv);

	smp_mb__before_atomic();
	clear_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_atomic();

	/* Start Rx/Tx DMA and enable the interrupts */
	gfar_start(priv);

	/* force link state update after mac reset */
	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	phy_start(ndev->phydev);

	enable_napi(priv);

	netif_tx_wake_all_queues(ndev);

	return 0;
}

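/* Compute the MACCFG1 flow-control bits for the current link: on a
 * half-duplex link pause is never used; otherwise honour the manual
 * settings, or resolve the local and link-partner pause advertisement
 * when pause autonegotiation is enabled.
 */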
static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
	struct net_device *ndev = priv->ndev;
	struct phy_device *phydev = ndev->phydev;
	u32 val = 0;

	if (!phydev->duplex)
		return val;

	if (!priv->pause_aneg_en) {
		if (priv->tx_pause_en)
			val |= MACCFG1_TX_FLOW;
		if (priv->rx_pause_en)
			val |= MACCFG1_RX_FLOW;
	} else {
		u16 lcl_adv, rmt_adv;
		u8 flowctrl;

		/* get link partner capabilities */
		rmt_adv = 0;
		if (phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (flowctrl & FLOW_CTRL_TX)
			val |= MACCFG1_TX_FLOW;
		if (flowctrl & FLOW_CTRL_RX)
			val |= MACCFG1_RX_FLOW;
	}

	return val;
}

static noinline void gfar_update_link_state(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct net_device *ndev = priv->ndev;
	struct phy_device *phydev = ndev->phydev;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
		return;

	if (phydev->link) {
		u32 tempval1 = gfar_read(&regs->maccfg1);
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);
		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);

		if (phydev->duplex != priv->oldduplex) {
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100
				 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, priv->ndev,
					   "Ack! Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
		tempval1 |= gfar_get_flowctrl_cfg(priv);

		/* Turn last free buffer recording on */
		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
			for (i = 0; i < priv->num_rx_queues; i++) {
				u32 bdp_dma;

				rx_queue = priv->rx_queue[i];
				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
				gfar_write(rx_queue->rfbptr, bdp_dma);
			}

			priv->tx_actual_en = 1;
		}

		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
			priv->tx_actual_en = 0;

		gfar_write(&regs->maccfg1, tempval1);
		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink)
			priv->oldlink = 1;

	} else if (priv->oldlink) {
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (netif_msg_link(priv))
		phy_print_status(phydev);
}

/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	if (unlikely(phydev->link != priv->oldlink ||
		     (phydev->link && (phydev->duplex != priv->oldduplex ||
				       phydev->speed != priv->oldspeed))))
		gfar_update_link_state(priv);
}

/* Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip. We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register. We assume
 * that the TBIPA register is valid. Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
			 "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/* If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
	 * everything for us? Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
		put_device(&tbiphy->mdio.dev);
		return;
	}

	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR,
		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
		  BMCR_SPEED1000);

	put_device(&tbiphy->mdio.dev);
}

/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct gfar_private *priv = netdev_priv(dev);
	phy_interface_t interface = priv->interface;
	struct phy_device *phydev;
	struct ethtool_eee edata;

	linkmode_set_bit_array(phy_10_100_features_array,
			       ARRAY_SIZE(phy_10_100_features_array),
			       mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				interface);
	if (!phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	linkmode_and(phydev->supported, phydev->supported, mask);
	linkmode_copy(phydev->advertising, phydev->supported);

	/* Add support for flow control */
	phy_support_asym_pause(phydev);

	/* disable EEE autoneg, EEE not supported by eTSEC */
	memset(&edata, 0, sizeof(struct ethtool_eee));
	phy_ethtool_set_eee(phydev, &edata);

	return 0;
}

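/* The eTSEC prepends a frame control block (FCB) of GMAC_FCB_LEN bytes
 * to frames that use Tx offloads; the helpers below build the FCB and
 * fill in its checksum and VLAN fields.
 */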
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
				    int fcb_length)
{
	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload. We set it to checksum, using a pseudo-header
	 * we provide
	 */
	u8 flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is
	 * And provide the already calculated phcs
	 */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
	} else {
		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
	}

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr
	 */
	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}

static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
}

static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}

/* eTSEC12: csum generation not supported for some fcb offsets */
static inline bool gfar_csum_errata_12(struct gfar_private *priv,
				       unsigned long fcb_addr)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
		(fcb_addr % 0x20) > 0x18);
}

/* eTSEC76: csum generation for frames larger than 2500 may
 * cause excess delays before start of transmission
 */
static inline bool gfar_csum_errata_76(struct gfar_private *priv,
				       unsigned int len)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
		(len > 2500));
}

/* This is called by the kernel when a frame is ready for transmission.
 * It is pointed to by the dev->hard_start_xmit function pointer
 */
static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	skb_frag_t *frag;
	int i, rq = 0;
	int do_tstamp, do_csum, do_vlan;
	u32 bufaddr;
	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
	do_vlan = skb_vlan_tag_present(skb);
	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		    priv->hwts_tx_en;

	if (do_csum || do_vlan)
		fcb_len = GMAC_FCB_LEN;

	/* check if time stamp should be generated */
	if (unlikely(do_tstamp))
		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;

	/* make space for additional header when fcb is needed */
	if (fcb_len) {
		if (unlikely(skb_cow_head(skb, fcb_len))) {
			dev->stats.tx_errors++;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	bytes_sent = skb->len;
	tx_queue->stats.tx_bytes += bytes_sent;
	/* keep Tx bytes on wire for BQL accounting */
	GFAR_CB(skb)->bytes_sent = bytes_sent;
	tx_queue->stats.tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = be32_to_cpu(txbdp->lstatus);

	/* Add TxPAL between FCB and frame if required */
	if (unlikely(do_tstamp)) {
		skb_push(skb, GMAC_TXPAL_LEN);
		memset(skb->data, 0, GMAC_TXPAL_LEN);
	}

	/* Add TxFCB if required */
	if (fcb_len) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	/* Set up checksumming */
	if (do_csum) {
		gfar_tx_checksum(skb, fcb, fcb_len);

		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
			if (do_vlan || do_tstamp) {
				/* put back a new fcb for vlan/tstamp TOE */
				fcb = gfar_add_fcb(skb);
			} else {
				/* Tx TOE not used */
				lstatus &= ~(BD_LFLAG(TXBD_TOE));
				fcb = NULL;
			}
		}
	}

	if (do_vlan)
		gfar_tx_vlan(skb, fcb);

	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
		goto dma_map_err;

	txbdp_start->bufPtr = cpu_to_be32(bufaddr);

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
						 tx_queue->tx_ring_size);

	if (likely(!nr_frags)) {
		if (likely(!do_tstamp))
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		u32 lstatus_start = lstatus;

		/* Place the fragment addresses and lengths into the TxBDs */
		frag = &skb_shinfo(skb)->frags[0];
		for (i = 0; i < nr_frags; i++, frag++) {
			unsigned int size;

			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			size = skb_frag_size(frag);

			lstatus = be32_to_cpu(txbdp->lstatus) | size |
				  BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
						   size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
				goto dma_map_err;

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = cpu_to_be32(bufaddr);
			txbdp->lstatus = cpu_to_be32(lstatus);
		}

		lstatus = lstatus_start;
	}

	/* If time stamping is requested one additional TxBD must be set up. The
	 * first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);

		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
		bufaddr += fcb_len;

		lstatus_ts |= BD_LFLAG(TXBD_READY) |
			      (skb_headlen(skb) - fcb_len);
		if (!nr_frags)
			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;

		/* Setup tx hardware time stamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		fcb->ptp = 1;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}

	netdev_tx_sent_queue(txq, bytes_sent);

	gfar_wmb();

	txbdp_start->lstatus = cpu_to_be32(lstatus);

	gfar_wmb(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary)
	 */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow.
	 */
	spin_lock_bh(&tx_queue->txlock);
	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);
	spin_unlock_bh(&tx_queue->txlock);

	/* If the next BD still needs to be cleaned up, then the bds
	 * are full. We need to tell the kernel to stop sending us stuff.
	 */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	return NETDEV_TX_OK;

dma_map_err:
	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
	if (do_tstamp)
		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
	for (i = 0; i < nr_frags; i++) {
		lstatus = be32_to_cpu(txbdp->lstatus);
		if (!(lstatus & BD_LFLAG(TXBD_READY)))
			break;

		lstatus &= ~BD_LFLAG(TXBD_READY);
		txbdp->lstatus = cpu_to_be32(lstatus);
		bufaddr = be32_to_cpu(txbdp->bufPtr);
		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
			       DMA_TO_DEVICE);
		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
	}
	gfar_wmb();
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gfar_private *priv = netdev_priv(dev);

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	dev->mtu = new_mtu;

	if (dev->flags & IFF_UP)
		startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return 0;
}

static void reset_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	stop_gfar(ndev);
	startup_gfar(ndev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	reset_gfar(priv->ndev);
}

static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

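/* SIOCSHWTSTAMP handler: validate the requested Tx/Rx time stamping
 * modes against the hardware timer capability, and reset the device
 * when the Rx time stamping state changes so the rings are rebuilt
 * with the new setting.
 */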
static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			priv->hwts_rx_en = 0;
			reset_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			priv->hwts_rx_en = 1;
			reset_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = dev->phydev;

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

/* Interrupt Handler for Transmit complete */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		bool do_tstamp;

		do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			    priv->hwts_tx_en;

		frags = skb_shinfo(skb)->nr_frags;

		/* When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(do_tstamp))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = be32_to_cpu(lbdp->lstatus);

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(do_tstamp)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = be16_to_cpu(next->length) +
				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else {
			buflen = be16_to_cpu(bdp->length);
		}

		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
				 buflen, DMA_TO_DEVICE);

		if (unlikely(do_tstamp)) {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
					  ~0x7UL);

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			gfar_clear_txbd_status(bdp);
			bdp = next;
		}

		gfar_clear_txbd_status(bdp);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
				       be16_to_cpu(bdp->length),
				       DMA_TO_DEVICE);
			gfar_clear_txbd_status(bdp);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += GFAR_CB(skb)->bytes_sent;

		dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock(&tx_queue->txlock);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock(&tx_queue->txlock);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (tx_queue->num_txbdfree &&
	    netif_tx_queue_stopped(txq) &&
	    !(test_bit(GFAR_DOWN, &priv->state)))
		netif_wake_subqueue(priv->ndev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	netdev_tx_completed_queue(txq, howmany, bytes_sent);
}

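/* Translate the Rx descriptor error flags into netdev and driver
 * statistics; a truncated frame short-circuits the other checks.
 */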
static void count_errors(u32 lstatus, struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
		stats->rx_length_errors++;

		atomic64_inc(&estats->rx_trunc);

		return;
	}
	/* Count the errors, if there were any */
	if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (lstatus & BD_LFLAG(RXBD_LARGE))
			atomic64_inc(&estats->rx_large);
		else
			atomic64_inc(&estats->rx_short);
	}
	if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
		stats->rx_frame_errors++;
		atomic64_inc(&estats->rx_nonoctet);
	}
	if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
		atomic64_inc(&estats->rx_crcerr);
		stats->rx_crc_errors++;
	}
	if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
		atomic64_inc(&estats->rx_overrun);
		stats->rx_over_errors++;
	}
}

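/* Rx interrupt handler: mask further Rx interrupts and hand the work
 * to NAPI; if the poller is already scheduled, just ack the events so
 * the line doesn't fire again for frames that will be polled anyway.
 */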
static irqreturn_t gfar_receive(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask, ievent;

	ievent = gfar_read(&grp->regs->ievent);

	if (unlikely(ievent & IEVENT_FGPI)) {
		gfar_write(&grp->regs->ievent, IEVENT_FGPI);
		return IRQ_HANDLED;
	}

	if (likely(napi_schedule_prep(&grp->napi_rx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_RX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_rx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
	}

	return IRQ_HANDLED;
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask;

	if (likely(napi_schedule_prep(&grp->napi_tx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_TX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_tx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
	}

	return IRQ_HANDLED;
}

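/* Attach one RxBD's worth of data to the skb being assembled: the
 * first buffer's bytes already live in the skb head, later buffers
 * are added as page fragments.  Returns true if the half-page buffer
 * can be recycled (page refcount still 1 and not pfmemalloc), false
 * if the page must be unmapped and released.
 */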
static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
			     struct sk_buff *skb, bool first)
{
	int size = lstatus & BD_LENGTH_MASK;
	struct page *page = rxb->page;

	if (likely(first)) {
		skb_put(skb, size);
	} else {
		/* the last fragment's length contains the full frame length */
		if (lstatus & BD_LFLAG(RXBD_LAST))
			size -= skb->len;

		WARN(size < 0, "gianfar: rx fragment size underflow");
		if (size < 0)
			return false;

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				rxb->page_offset + RXBUF_ALIGNMENT,
				size, GFAR_RXB_TRUESIZE);
	}

	/* try reuse page */
	if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
		return false;

	/* change offset to the other half */
	rxb->page_offset ^= GFAR_RXB_TRUESIZE;

	page_ref_inc(page);

	return true;
}

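/* Recycle a received page: hand its reference to the ring slot at
 * next_to_alloc and re-sync the buffer half for device use.
 */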
static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
			       struct gfar_rx_buff *old_rxb)
{
	struct gfar_rx_buff *new_rxb;
	u16 nta = rxq->next_to_alloc;

	new_rxb = &rxq->rx_buff[nta];

	/* find next buf that can reuse a page */
	nta++;
	rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;

	/* copy page reference */
	*new_rxb = *old_rxb;

	/* sync for use by the device */
	dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
					 old_rxb->page_offset,
					 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
}

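/* Consume the next RX buffer from the ring: build a fresh skb around
 * it when it starts a frame, or append it to the skb in progress.
 * Returns NULL (and counts an allocation error) if build_skb() fails.
 */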
static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
					    u32 lstatus, struct sk_buff *skb)
{
	struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
	struct page *page = rxb->page;
	bool first = false;

	if (likely(!skb)) {
		void *buff_addr = page_address(page) + rxb->page_offset;

		skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
		if (unlikely(!skb)) {
			gfar_rx_alloc_err(rx_queue);
			return NULL;
		}
		skb_reserve(skb, RXBUF_ALIGNMENT);
		first = true;
	}

	dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
				      GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);

	if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
		/* reuse the free half of the page */
		gfar_reuse_rx_page(rx_queue, rxb);
	} else {
		/* page cannot be reused, unmap it */
		dma_unmap_page(rx_queue->dev, rxb->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear rxb content */
	rxb->page = NULL;

	return skb;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, mark the skb
	 * CHECKSUM_NONE so the stack verifies the checksum itself.
	 */
	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
	    (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}

/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct rxfcb *fcb = NULL;

	/* The FCB, when present, sits at the start of the frame */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb
	 * Remove the padded bytes, if there are any
	 */
	if (priv->uses_rxfcb)
		skb_pull(skb, GMAC_FCB_LEN);

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	/* Trim off the FCS */
	pskb_trim(skb, skb->len - ETH_FCS_LEN);

	if (ndev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* We need to check NETIF_F_HW_VLAN_CTAG_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       be16_to_cpu(fcb->vlctl));
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached.  Returns the number
 * of frames handled.
 */
static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
			      int rx_work_limit)
{
	struct net_device *ndev = rx_queue->ndev;
	struct gfar_private *priv = netdev_priv(ndev);
	struct rxbd8 *bdp;
	int i, howmany = 0;
	struct sk_buff *skb = rx_queue->skb;
	int cleaned_cnt = gfar_rxbd_unused(rx_queue);
	unsigned int total_bytes = 0, total_pkts = 0;

	/* Get the first full descriptor */
	i = rx_queue->next_to_clean;

	while (rx_work_limit--) {
		u32 lstatus;

		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
			cleaned_cnt = 0;
		}

		bdp = &rx_queue->rx_bd_base[i];
		lstatus = be32_to_cpu(bdp->lstatus);
		if (lstatus & BD_LFLAG(RXBD_EMPTY))
			break;

		/* lost RXBD_LAST descriptor due to overrun */
		if (skb &&
		    (lstatus & BD_LFLAG(RXBD_FIRST))) {
			/* discard faulty buffer */
			dev_kfree_skb(skb);
			skb = NULL;
			rx_queue->stats.rx_dropped++;

			/* can continue normally */
		}

		/* order rx buffer descriptor reads */
		rmb();

		/* fetch next to clean buffer from the ring */
		skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
		if (unlikely(!skb))
			break;

		cleaned_cnt++;
		howmany++;

		if (unlikely(++i == rx_queue->rx_ring_size))
			i = 0;

		rx_queue->next_to_clean = i;

		/* fetch next buffer if not the last in frame */
		if (!(lstatus & BD_LFLAG(RXBD_LAST)))
			continue;

		if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
			count_errors(lstatus, ndev);

			/* discard faulty buffer */
			dev_kfree_skb(skb);
			skb = NULL;
			rx_queue->stats.rx_dropped++;
			continue;
		}

		gfar_process_frame(ndev, skb);

		/* Increment the number of packets */
		total_pkts++;
		total_bytes += skb->len;

		skb_record_rx_queue(skb, rx_queue->qindex);

		skb->protocol = eth_type_trans(skb, ndev);

		/* Send the packet up the stack */
		napi_gro_receive(&rx_queue->grp->napi_rx, skb);

		skb = NULL;
	}

	/* Store incomplete frames for completion */
	rx_queue->skb = skb;

	rx_queue->stats.rx_packets += total_pkts;
	rx_queue->stats.rx_bytes += total_bytes;

	if (cleaned_cnt)
		gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);

	/* Update Last Free RxBD pointer for LFC */
	if (unlikely(priv->tx_actual_en)) {
		u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);

		gfar_write(rx_queue->rfbptr, bdp_dma);
	}

	return howmany;
}

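/* NAPI poll for single-queue RX groups: clean the group's one RX ring
 * and, once the ring yields less than the budget, complete NAPI and
 * re-enable RX interrupts through IMASK.
 */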
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
	int work_done = 0;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	work_done = gfar_clean_rx_ring(rx_queue, budget);

	if (work_done < budget) {
		u32 imask;

		napi_complete_done(napi, work_done);
		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}

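/* NAPI poll for single-queue TX groups: TX cleanup always runs to
 * completion, so this reports zero work done and immediately
 * re-enables TX interrupts.
 */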
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
	u32 imask;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	/* run Tx cleanup to completion */
	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
		gfar_clean_tx_ring(tx_queue);

	napi_complete(napi);

	spin_lock_irq(&gfargrp->grplock);
	imask = gfar_read(&regs->imask);
	imask |= IMASK_TX_DEFAULT;
	gfar_write(&regs->imask, imask);
	spin_unlock_irq(&gfargrp->grplock);

	return 0;
}

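/* Multi-queue RX poll: split the budget evenly across the queues that
 * RSTAT flags as active, and only complete NAPI once every active
 * queue has been drained below its share of the budget.
 */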
static int gfar_poll_rx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int work_done = 0, work_done_per_q = 0;
	int i, budget_per_q = 0;
	unsigned long rstat_rxf;
	int num_act_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;

	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
	if (num_act_queues)
		budget_per_q = budget / num_act_queues;

	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
		/* skip queue if not active */
		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
			continue;

		rx_queue = priv->rx_queue[i];
		work_done_per_q =
			gfar_clean_rx_ring(rx_queue, budget_per_q);
		work_done += work_done_per_q;

		/* finished processing this queue */
		if (work_done_per_q < budget_per_q) {
			/* clear active queue hw indication */
			gfar_write(&regs->rstat,
				   RSTAT_CLEAR_RXF0 >> i);
			num_act_queues--;

			if (!num_act_queues)
				break;
		}
	}

	if (!num_act_queues) {
		u32 imask;

		napi_complete_done(napi, work_done);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}

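/* Multi-queue TX poll: run TX cleanup to completion on every queue in
 * the group, and re-enable TX interrupts once no queue had work left.
 */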
static int gfar_poll_tx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	int has_tx_work = 0;
	int i;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
		tx_queue = priv->tx_queue[i];
		/* run Tx cleanup to completion */
		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
			gfar_clean_tx_ring(tx_queue);
			has_tx_work = 1;
		}
	}

	if (!has_tx_work) {
		u32 imask;

		napi_complete(napi);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_TX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return 0;
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Report the error when verbose error messaging is enabled */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			atomic64_inc(&priv->extra_stats.tx_underrun);

			schedule_work(&priv->reset_task);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_over_errors++;
		atomic64_inc(&priv->extra_stats.rx_bsy);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_babr);

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		atomic64_inc(&priv->extra_stats.eberr);
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		atomic64_inc(&priv->extra_stats.tx_babt);
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			disable_irq(gfar_irq(grp, RX)->irq);
			disable_irq(gfar_irq(grp, ER)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, ER)->irq);
			enable_irq(gfar_irq(grp, RX)->irq);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	}
}
#endif

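/* Release the three per-group IRQ lines (TX, RX, ER) registered by
 * register_grp_irqs() below.
 */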
static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(gfar_irq(grp, TX)->irq, grp);
	free_irq(gfar_irq(grp, RX)->irq, grp);
	free_irq(gfar_irq(grp, ER)->irq, grp);
}

static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one
	 */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive
		 */
		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
				  gfar_irq(grp, ER)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, ER)->irq);

			goto err_irq_fail;
		}
		enable_irq_wake(gfar_irq(grp, ER)->irq);

		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto tx_irq_fail;
		}
		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
				  gfar_irq(grp, RX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, RX)->irq);
			goto rx_irq_fail;
		}
		enable_irq_wake(gfar_irq(grp, RX)->irq);

	} else {
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto err_irq_fail;
		}
		enable_irq_wake(gfar_irq(grp, TX)->irq);
	}

	return 0;

rx_irq_fail:
	free_irq(gfar_irq(grp, TX)->irq, grp);
tx_irq_fail:
	free_irq(gfar_irq(grp, ER)->irq, grp);
err_irq_fail:
	return err;
}

static void gfar_free_irq(struct gfar_private *priv)
{
	int i;

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
				 &priv->gfargrp[i]);
	}
}

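/* Register IRQs for every interrupt group, unwinding the groups
 * already registered if a later one fails.
 */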
static int gfar_request_irq(struct gfar_private *priv)
{
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			return err;
		}
	}

	return 0;
}

/* Called when something needs to use the ethernet device
 * Returns 0 for success.
 */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	err = init_phy(dev);
	if (err)
		return err;

	err = gfar_request_irq(priv);
	if (err)
		return err;

	return startup_gfar(dev);
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(dev->phydev);

	gfar_free_irq(priv);

	return 0;
}

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception
 */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed).
 */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them
		 */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}

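/* Soft-reset the MAC and reprogram it from scratch: frame length
 * limits, RX offloads, address filtering and interrupt coalescing.
 */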
void gfar_mac_reset(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(3);

	/* the soft reset bit is not self-resetting, so we need to
	 * clear it before resuming normal operation
	 */
	gfar_write(&regs->maccfg1, 0);

	udelay(3);

	gfar_rx_offload_en(priv);

	/* Initialize the max receive frame/buffer lengths */
	gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
	gfar_write(&regs->mrblr, GFAR_RXB_SIZE);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;

	/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
	 * are marked as truncated.  Avoid this by setting
	 * MACCFG2[Huge Frame]=1, and by checking RxBD[LG] and
	 * discarding frames larger than MAXFRM.
	 */
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;

	gfar_write(&regs->maccfg2, tempval);

	/* Clear mac addr hash registers */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	if (priv->extended_hash)
		gfar_clear_exact_match(priv->ndev);

	gfar_mac_rx_config(priv);

	gfar_mac_tx_config(priv);

	gfar_set_mac_address(priv->ndev);

	gfar_set_multi(priv->ndev);

	/* clear ievent and imask before configuring coalescing */
	gfar_ints_disable(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing_all(priv);
}

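/* One-time hardware init at probe time: stop any DMA the firmware may
 * have left running, reset the MAC, and program the static RMON,
 * ECNTRL, stashing attribute and FIFO threshold registers.
 */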
static void gfar_hw_init(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 attrs;

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(priv);

	gfar_mac_reset(priv);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing
	 * depending on driver parameters
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	/* FIFO configs */
	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);

	/* Program the interrupt steering regs, only for MG devices */
	if (priv->num_grps > 1)
		gfar_write_isrg(priv);
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_change_carrier = fixed_phy_change_carrier,
	.ndo_set_mac_address = gfar_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start
 */
static int gfar_probe(struct platform_device *ofdev)
{
	struct device_node *np = ofdev->dev.of_node;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	int err = 0, i;

	err = gfar_of_init(ofdev, &dev);
	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->dev = &ofdev->dev;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	INIT_WORK(&priv->reset_task, gfar_reset_task);

	platform_set_drvdata(ofdev, priv);

	gfar_detect_errata(priv);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	/* MTU range: 50 - 9586 */
	dev->mtu = 1500;
	dev->min_mtu = 50;
	dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register a NAPI context for each interrupt group */
	for (i = 0; i < priv->num_grps; i++) {
		if (priv->poll_mode == GFAR_SQ_POLLING) {
			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
			netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
					  gfar_poll_tx_sq, 2);
		} else {
			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
				       gfar_poll_rx, GFAR_DEV_WEIGHT);
			netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
					  gfar_poll_tx, 2);
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
				   NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_CTAG_RX;
		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	gfar_init_addr_hash_table(priv);

	/* Insert receive timestamps into the padding alignment bytes,
	 * plus 2 extra bytes of padding to keep the CPU alignment.
	 */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		priv->padding = 8 + DEFAULT_PADDING;

	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;

	/* Initialize some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* Always enable rx filer if available */
	priv->rx_filer_enable =
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
	if (priv->num_tx_queues == 1)
		priv->prio_sched_en = 1;

	set_bit(GFAR_DOWN, &priv->state);

	gfar_hw_init(priv);

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
		priv->wol_supported |= GFAR_WOL_MAGIC;

	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
	    priv->rx_filer_enable)
		priv->wol_supported |= GFAR_WOL_FILER_UCAST;

	device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);

	/* fill out IRQ number and name fields */
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar_priv_grp *grp = &priv->gfargrp[i];

		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_tx");
			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_rx");
			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_er");
		} else
			strcpy(gfar_irq(grp, TX)->name, dev->name);
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	of_node_put(priv->phy_node);
	of_node_put(priv->tbi_node);
	free_gfar_dev(priv);
	return err;
}

gfar_remove(struct platform_device * ofdev)3487*4882a593Smuzhiyun static int gfar_remove(struct platform_device *ofdev)
3488*4882a593Smuzhiyun {
3489*4882a593Smuzhiyun struct gfar_private *priv = platform_get_drvdata(ofdev);
3490*4882a593Smuzhiyun struct device_node *np = ofdev->dev.of_node;
3491*4882a593Smuzhiyun
3492*4882a593Smuzhiyun of_node_put(priv->phy_node);
3493*4882a593Smuzhiyun of_node_put(priv->tbi_node);
3494*4882a593Smuzhiyun
3495*4882a593Smuzhiyun unregister_netdev(priv->ndev);
3496*4882a593Smuzhiyun
3497*4882a593Smuzhiyun if (of_phy_is_fixed_link(np))
3498*4882a593Smuzhiyun of_phy_deregister_fixed_link(np);
3499*4882a593Smuzhiyun
3500*4882a593Smuzhiyun unmap_group_regs(priv);
3501*4882a593Smuzhiyun gfar_free_rx_queues(priv);
3502*4882a593Smuzhiyun gfar_free_tx_queues(priv);
3503*4882a593Smuzhiyun free_gfar_dev(priv);
3504*4882a593Smuzhiyun
3505*4882a593Smuzhiyun return 0;
3506*4882a593Smuzhiyun }

#ifdef CONFIG_PM

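/* Filer table updates below are bracketed by these helpers: they clear
 * and set RCTRL_FILREN (filer enable) and RCTRL_PRSDEP_INIT (parser
 * dependency) so the filer is quiesced while entries are rewritten.
 */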
static void __gfar_filer_disable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 temp;

	temp = gfar_read(&regs->rctrl);
	temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
	gfar_write(&regs->rctrl, temp);
}

static void __gfar_filer_enable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 temp;

	temp = gfar_read(&regs->rctrl);
	temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
	gfar_write(&regs->rctrl, temp);
}

/* Filer rules implementing the Wake-on-LAN capabilities */
static void gfar_filer_config_wol(struct gfar_private *priv)
{
	unsigned int i;
	u32 rqfcr;

	__gfar_filer_disable(priv);

	/* clear the filer table, reject any packet by default */
	rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
	for (i = 0; i <= MAX_FILER_IDX; i++)
		gfar_write_filer(priv, i, rqfcr, 0);

	i = 0;
	if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
		/* unicast packet, accept it */
		struct net_device *ndev = priv->ndev;
		/* get the default rx queue index */
		u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
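		/* The 48-bit destination MAC is matched by two chained
		 * filer entries: bytes 0-2 against the DAH (destination
		 * address high) property with the AND flag, then bytes
		 * 3-5 against DAL with GPI set, so that a full match
		 * raises the Filer General Purpose Interrupt used as
		 * the wake event.  Bits 10+ of RQFCR hold the Rx queue
		 * index.
		 */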
		u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
				    (ndev->dev_addr[1] << 8) |
				     ndev->dev_addr[2];

		rqfcr = (qindex << 10) | RQFCR_AND |
			RQFCR_CMP_EXACT | RQFCR_PID_DAH;

		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);

		dest_mac_addr = (ndev->dev_addr[3] << 16) |
				(ndev->dev_addr[4] << 8) |
				 ndev->dev_addr[5];
		rqfcr = (qindex << 10) | RQFCR_GPI |
			RQFCR_CMP_EXACT | RQFCR_PID_DAL;
		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
	}

	__gfar_filer_enable(priv);
}

static void gfar_filer_restore_table(struct gfar_private *priv)
{
	u32 rqfcr, rqfpr;
	unsigned int i;

	__gfar_filer_disable(priv);

	for (i = 0; i <= MAX_FILER_IDX; i++) {
		rqfcr = priv->ftp_rqfcr[i];
		rqfpr = priv->ftp_rqfpr[i];
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}

	__gfar_filer_enable(priv);
}

/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
static void gfar_start_wol_filer(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx hw queues */
	gfar_write(&regs->rqueue, priv->rqueue);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~DMACTRL_GRS;
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear RHLT, so that the DMA starts polling now */
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* enable the Filer General Purpose Interrupt */
		gfar_write(&regs->imask, IMASK_FGPI);
	}

	/* Enable Rx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= MACCFG1_RX_EN;
	gfar_write(&regs->maccfg1, tempval);
}

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u16 wol = priv->wol_opts;

	if (!netif_running(ndev))
		return 0;

	disable_napi(priv);
	netif_tx_lock(ndev);
	netif_device_detach(ndev);
	netif_tx_unlock(ndev);

	gfar_halt(priv);

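	/* Pick a wake strategy: Magic Packet mode keeps the Rx block
	 * listening for the magic frame, filer-based wake reprograms
	 * the filer and restarts Rx DMA only, and with no wake source
	 * armed the PHY is simply stopped.
	 */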
	if (wol & GFAR_WOL_MAGIC) {
		/* Enable interrupt on Magic Packet */
		gfar_write(&regs->imask, IMASK_MAG);

		/* Enable Magic Packet mode */
		tempval = gfar_read(&regs->maccfg2);
		tempval |= MACCFG2_MPEN;
		gfar_write(&regs->maccfg2, tempval);

		/* re-enable the Rx block */
		tempval = gfar_read(&regs->maccfg1);
		tempval |= MACCFG1_RX_EN;
		gfar_write(&regs->maccfg1, tempval);

	} else if (wol & GFAR_WOL_FILER_UCAST) {
		gfar_filer_config_wol(priv);
		gfar_start_wol_filer(priv);

	} else {
		phy_stop(ndev->phydev);
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u16 wol = priv->wol_opts;

	if (!netif_running(ndev))
		return 0;

	if (wol & GFAR_WOL_MAGIC) {
		/* Disable Magic Packet mode */
		tempval = gfar_read(&regs->maccfg2);
		tempval &= ~MACCFG2_MPEN;
		gfar_write(&regs->maccfg2, tempval);

	} else if (wol & GFAR_WOL_FILER_UCAST) {
		/* need to stop Rx only, Tx is already down */
		gfar_halt(priv);
		gfar_filer_restore_table(priv);

	} else {
		phy_start(ndev->phydev);
	}

	gfar_start(priv);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);

		return 0;
	}

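	/* Coming back from hibernation the controller state is lost,
	 * so rebuild the buffer descriptor rings and reprogram the MAC
	 * before restarting.
	 */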
	gfar_init_bds(ndev);

	gfar_mac_reset(priv);

	gfar_init_tx_rx_base(priv);

	gfar_start(priv);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (ndev->phydev)
		phy_start(ndev->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

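/* Hibernation reuses the suspend/resume callbacks for freeze/thaw;
 * restore performs a full reinit since the register state was lost.
 */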
static const struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif

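/* Illustrative only: a device tree node that binds this driver would
 * look roughly like (the address and extra properties are board-specific):
 *
 *	ethernet@24000 {
 *		device_type = "network";
 *		compatible = "gianfar";
 *		...
 *	};
 */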
static const struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);