xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/cirrus/ep93xx_eth.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * EP93xx ethernet network device driver
4*4882a593Smuzhiyun  * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
5*4882a593Smuzhiyun  * Dedicated to Marija Kulikova.
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/dma-mapping.h>
11*4882a593Smuzhiyun #include <linux/module.h>
12*4882a593Smuzhiyun #include <linux/kernel.h>
13*4882a593Smuzhiyun #include <linux/netdevice.h>
14*4882a593Smuzhiyun #include <linux/mii.h>
15*4882a593Smuzhiyun #include <linux/etherdevice.h>
16*4882a593Smuzhiyun #include <linux/ethtool.h>
17*4882a593Smuzhiyun #include <linux/interrupt.h>
18*4882a593Smuzhiyun #include <linux/moduleparam.h>
19*4882a593Smuzhiyun #include <linux/platform_device.h>
20*4882a593Smuzhiyun #include <linux/delay.h>
21*4882a593Smuzhiyun #include <linux/io.h>
22*4882a593Smuzhiyun #include <linux/slab.h>
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun #include <linux/platform_data/eth-ep93xx.h>
25*4882a593Smuzhiyun 
#define DRV_MODULE_NAME		"ep93xx-eth"

/*
 * Ring sizes must be powers of two: ring pointers are advanced with
 * "& (N_ENTRIES - 1)" throughout the driver.
 */
#define RX_QUEUE_ENTRIES	64
#define TX_QUEUE_ENTRIES	8

/* Largest frame accepted/transmitted; each buffer is 2 KiB. */
#define MAX_PKT_SIZE		2044
#define PKT_BUF_SIZE		2048

/* MAC register offsets, relative to the mapped base address. */
#define REG_RXCTL		0x0000
#define  REG_RXCTL_DEFAULT	0x00073800
#define REG_TXCTL		0x0004
#define  REG_TXCTL_ENABLE	0x00000001
/* MII management interface: command, data and busy status. */
#define REG_MIICMD		0x0010
#define  REG_MIICMD_READ	0x00008000
#define  REG_MIICMD_WRITE	0x00004000
#define REG_MIIDATA		0x0014
#define REG_MIISTS		0x0018
#define  REG_MIISTS_BUSY	0x00000001
#define REG_SELFCTL		0x0020
#define  REG_SELFCTL_RESET	0x00000001
/* Interrupt enable and status; the IRQ handler reads REG_INTSTSC. */
#define REG_INTEN		0x0024
#define  REG_INTEN_TX		0x00000008
#define  REG_INTEN_RX		0x00000007
#define REG_INTSTSP		0x0028
#define  REG_INTSTS_TX		0x00000008
#define  REG_INTSTS_RX		0x00000004
#define REG_INTSTSC		0x002c
/* Address filter and the six individual (station) address bytes. */
#define REG_AFP			0x004c
#define REG_INDAD0		0x0050
#define REG_INDAD1		0x0051
#define REG_INDAD2		0x0052
#define REG_INDAD3		0x0053
#define REG_INDAD4		0x0054
#define REG_INDAD5		0x0055
#define REG_GIINTMSK		0x0064
#define  REG_GIINTMSK_ENABLE	0x00008000
/* Bus-master (DMA) control/status. */
#define REG_BMCTL		0x0080
#define  REG_BMCTL_ENABLE_TX	0x00000100
#define  REG_BMCTL_ENABLE_RX	0x00000001
#define REG_BMSTS		0x0084
#define  REG_BMSTS_RX_ACTIVE	0x00000008
/* RX descriptor ring: base address, byte length, current, enqueue. */
#define REG_RXDQBADD		0x0090
#define REG_RXDQBLEN		0x0094
#define REG_RXDCURADD		0x0098
#define REG_RXDENQ		0x009c
/* RX status ring. */
#define REG_RXSTSQBADD		0x00a0
#define REG_RXSTSQBLEN		0x00a4
#define REG_RXSTSQCURADD	0x00a8
#define REG_RXSTSENQ		0x00ac
/* TX descriptor ring. */
#define REG_TXDQBADD		0x00b0
#define REG_TXDQBLEN		0x00b4
#define REG_TXDQCURADD		0x00b8
#define REG_TXDENQ		0x00bc
/* TX status ring. */
#define REG_TXSTSQBADD		0x00c0
#define REG_TXSTSQBLEN		0x00c4
#define REG_TXSTSQCURADD	0x00c8
#define REG_MAXFRMLEN		0x00e8
/* Receive descriptor: tells the MAC where to DMA an incoming frame. */
struct ep93xx_rdesc
{
	u32	buf_addr;	/* bus address of the receive buffer */
	u32	rdesc1;		/* buffer index and buffer length, see masks below */
};

#define RDESC1_NSOF		0x80000000
#define RDESC1_BUFFER_INDEX	0x7fff0000
#define RDESC1_BUFFER_LENGTH	0x0000ffff

/* Receive status entry, written by the MAC when a frame completes. */
struct ep93xx_rstat
{
	u32	rstat0;		/* frame status / error flags */
	u32	rstat1;		/* buffer index and received frame length */
};

/* rstat0 bits.  RFP = frame processed; RWE = received without error. */
#define RSTAT0_RFP		0x80000000
#define RSTAT0_RWE		0x40000000
#define RSTAT0_EOF		0x20000000
#define RSTAT0_EOB		0x10000000
#define RSTAT0_AM		0x00c00000
#define RSTAT0_RX_ERR		0x00200000
#define RSTAT0_OE		0x00100000
#define RSTAT0_FE		0x00080000
#define RSTAT0_RUNT		0x00040000
#define RSTAT0_EDATA		0x00020000
#define RSTAT0_CRCE		0x00010000
#define RSTAT0_CRCI		0x00008000
#define RSTAT0_HTI		0x00003f00
#define RSTAT1_RFP		0x80000000
#define RSTAT1_BUFFER_INDEX	0x7fff0000
#define RSTAT1_FRAME_LENGTH	0x0000ffff

/* Transmit descriptor: frame buffer address plus control word. */
struct ep93xx_tdesc
{
	u32	buf_addr;	/* bus address of the transmit buffer */
	u32	tdesc1;		/* EOF flag, buffer index and frame length */
};

#define TDESC1_EOF		0x80000000
#define TDESC1_BUFFER_INDEX	0x7fff0000
#define TDESC1_BUFFER_ABORT	0x00008000
#define TDESC1_BUFFER_LENGTH	0x00000fff

/* Transmit status entry, written by the MAC when a frame has been sent. */
struct ep93xx_tstat
{
	u32	tstat0;
};

/* tstat0 bits.  TXFP = frame processed; TXWE = transmitted w/o error. */
#define TSTAT0_TXFP		0x80000000
#define TSTAT0_TXWE		0x40000000
#define TSTAT0_FA		0x20000000
#define TSTAT0_LCRS		0x10000000
#define TSTAT0_OW		0x04000000
#define TSTAT0_TXU		0x02000000
#define TSTAT0_ECOLL		0x01000000
#define TSTAT0_NCOLL		0x001f0000
#define TSTAT0_BUFFER_INDEX	0x00007fff

/*
 * All four rings live in one coherent DMA allocation; the hardware is
 * pointed at each sub-array via offsetof() in ep93xx_start_hw().
 */
struct ep93xx_descs
{
	struct ep93xx_rdesc	rdesc[RX_QUEUE_ENTRIES];
	struct ep93xx_tdesc	tdesc[TX_QUEUE_ENTRIES];
	struct ep93xx_rstat	rstat[RX_QUEUE_ENTRIES];
	struct ep93xx_tstat	tstat[TX_QUEUE_ENTRIES];
};
150*4882a593Smuzhiyun 
/* Per-device driver state, stored in netdev_priv(). */
struct ep93xx_priv
{
	struct resource		*res;		/* claimed MMIO region */
	void __iomem		*base_addr;	/* mapped MAC registers */
	int			irq;

	struct ep93xx_descs	*descs;		/* coherent ring block */
	dma_addr_t		descs_dma_addr;	/* bus address of *descs */

	/* kmalloc'ed packet buffers, streaming-mapped one per ring slot */
	void			*rx_buf[RX_QUEUE_ENTRIES];
	void			*tx_buf[TX_QUEUE_ENTRIES];

	spinlock_t		rx_lock;	/* serialises REG_INTEN updates */
	unsigned int		rx_pointer;	/* next RX slot to process */
	unsigned int		tx_clean_pointer; /* next TX slot to reap */
	unsigned int		tx_pointer;	/* next TX slot to fill */
	spinlock_t		tx_pending_lock; /* protects tx_pending */
	unsigned int		tx_pending;	/* in-flight TX frames */

	struct net_device	*dev;
	struct napi_struct	napi;

	struct mii_if_info	mii;
	u8			mdc_divisor;	/* MDC clock divisor, programmed -1 */
};
176*4882a593Smuzhiyun 
/* Raw MMIO accessors for the MAC register window (byte/word/long). */
#define rdb(ep, off)		__raw_readb((ep)->base_addr + (off))
#define rdw(ep, off)		__raw_readw((ep)->base_addr + (off))
#define rdl(ep, off)		__raw_readl((ep)->base_addr + (off))
#define wrb(ep, off, val)	__raw_writeb((val), (ep)->base_addr + (off))
#define wrw(ep, off, val)	__raw_writew((val), (ep)->base_addr + (off))
#define wrl(ep, off, val)	__raw_writel((val), (ep)->base_addr + (off))
183*4882a593Smuzhiyun 
ep93xx_mdio_read(struct net_device * dev,int phy_id,int reg)184*4882a593Smuzhiyun static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
185*4882a593Smuzhiyun {
186*4882a593Smuzhiyun 	struct ep93xx_priv *ep = netdev_priv(dev);
187*4882a593Smuzhiyun 	int data;
188*4882a593Smuzhiyun 	int i;
189*4882a593Smuzhiyun 
190*4882a593Smuzhiyun 	wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun 	for (i = 0; i < 10; i++) {
193*4882a593Smuzhiyun 		if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
194*4882a593Smuzhiyun 			break;
195*4882a593Smuzhiyun 		msleep(1);
196*4882a593Smuzhiyun 	}
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun 	if (i == 10) {
199*4882a593Smuzhiyun 		pr_info("mdio read timed out\n");
200*4882a593Smuzhiyun 		data = 0xffff;
201*4882a593Smuzhiyun 	} else {
202*4882a593Smuzhiyun 		data = rdl(ep, REG_MIIDATA);
203*4882a593Smuzhiyun 	}
204*4882a593Smuzhiyun 
205*4882a593Smuzhiyun 	return data;
206*4882a593Smuzhiyun }
207*4882a593Smuzhiyun 
ep93xx_mdio_write(struct net_device * dev,int phy_id,int reg,int data)208*4882a593Smuzhiyun static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
209*4882a593Smuzhiyun {
210*4882a593Smuzhiyun 	struct ep93xx_priv *ep = netdev_priv(dev);
211*4882a593Smuzhiyun 	int i;
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun 	wrl(ep, REG_MIIDATA, data);
214*4882a593Smuzhiyun 	wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);
215*4882a593Smuzhiyun 
216*4882a593Smuzhiyun 	for (i = 0; i < 10; i++) {
217*4882a593Smuzhiyun 		if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
218*4882a593Smuzhiyun 			break;
219*4882a593Smuzhiyun 		msleep(1);
220*4882a593Smuzhiyun 	}
221*4882a593Smuzhiyun 
222*4882a593Smuzhiyun 	if (i == 10)
223*4882a593Smuzhiyun 		pr_info("mdio write timed out\n");
224*4882a593Smuzhiyun }
225*4882a593Smuzhiyun 
/*
 * Receive up to @budget frames from the RX ring.  Runs in NAPI poll
 * context.  Each received frame is copied out of its preallocated DMA
 * buffer into a freshly allocated skb, so ring buffers are never given
 * up to the stack.  Returns the number of ring entries consumed
 * (errored entries count too).
 */
static int ep93xx_rx(struct net_device *dev, int budget)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int processed = 0;

	while (processed < budget) {
		int entry;
		struct ep93xx_rstat *rstat;
		u32 rstat0;
		u32 rstat1;
		int length;
		struct sk_buff *skb;

		entry = ep->rx_pointer;
		rstat = ep->descs->rstat + entry;

		/* Both RFP bits must be set before the status is valid. */
		rstat0 = rstat->rstat0;
		rstat1 = rstat->rstat1;
		if (!(rstat0 & RSTAT0_RFP) || !(rstat1 & RSTAT1_RFP))
			break;

		/* Hand the status slot back to the hardware. */
		rstat->rstat0 = 0;
		rstat->rstat1 = 0;

		/* Sanity checks: we expect one frame per buffer, in order. */
		if (!(rstat0 & RSTAT0_EOF))
			pr_crit("not end-of-frame %.8x %.8x\n", rstat0, rstat1);
		if (!(rstat0 & RSTAT0_EOB))
			pr_crit("not end-of-buffer %.8x %.8x\n", rstat0, rstat1);
		if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry)
			pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1);

		/* RWE clear means the frame had an error; classify it. */
		if (!(rstat0 & RSTAT0_RWE)) {
			dev->stats.rx_errors++;
			if (rstat0 & RSTAT0_OE)
				dev->stats.rx_fifo_errors++;
			if (rstat0 & RSTAT0_FE)
				dev->stats.rx_frame_errors++;
			if (rstat0 & (RSTAT0_RUNT | RSTAT0_EDATA))
				dev->stats.rx_length_errors++;
			if (rstat0 & RSTAT0_CRCE)
				dev->stats.rx_crc_errors++;
			goto err;
		}

		length = rstat1 & RSTAT1_FRAME_LENGTH;
		if (length > MAX_PKT_SIZE) {
			pr_notice("invalid length %.8x %.8x\n", rstat0, rstat1);
			goto err;
		}

		/* Strip FCS.  */
		if (rstat0 & RSTAT0_CRCI)
			length -= 4;

		/* +2 and skb_reserve(2) keep the IP header word-aligned. */
		skb = netdev_alloc_skb(dev, length + 2);
		if (likely(skb != NULL)) {
			struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
			skb_reserve(skb, 2);
			/* Sync the streaming mapping around the CPU copy. */
			dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr,
						length, DMA_FROM_DEVICE);
			skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
			dma_sync_single_for_device(dev->dev.parent,
						   rxd->buf_addr, length,
						   DMA_FROM_DEVICE);
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, dev);

			napi_gro_receive(&ep->napi, skb);

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += length;
		} else {
			dev->stats.rx_dropped++;
		}

err:
		/* Advance even on error so the slot is reused. */
		ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1);
		processed++;
	}

	return processed;
}
308*4882a593Smuzhiyun 
/*
 * NAPI poll callback: drain received frames, re-enable the RX
 * interrupt once the ring is empty, and return the consumed
 * descriptor/status slots to the hardware.
 */
static int ep93xx_poll(struct napi_struct *napi, int budget)
{
	struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi);
	struct net_device *dev = ep->dev;
	int rx;

	rx = ep93xx_rx(dev, budget);
	if (rx < budget && napi_complete_done(napi, rx)) {
		/* rx_lock serialises REG_INTEN writes with the IRQ handler. */
		spin_lock_irq(&ep->rx_lock);
		wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
		spin_unlock_irq(&ep->rx_lock);
	}

	/* Tell the MAC how many RX slots it may refill. */
	if (rx) {
		wrw(ep, REG_RXDENQ, rx);
		wrw(ep, REG_RXSTSENQ, rx);
	}

	return rx;
}
329*4882a593Smuzhiyun 
ep93xx_xmit(struct sk_buff * skb,struct net_device * dev)330*4882a593Smuzhiyun static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
331*4882a593Smuzhiyun {
332*4882a593Smuzhiyun 	struct ep93xx_priv *ep = netdev_priv(dev);
333*4882a593Smuzhiyun 	struct ep93xx_tdesc *txd;
334*4882a593Smuzhiyun 	int entry;
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 	if (unlikely(skb->len > MAX_PKT_SIZE)) {
337*4882a593Smuzhiyun 		dev->stats.tx_dropped++;
338*4882a593Smuzhiyun 		dev_kfree_skb(skb);
339*4882a593Smuzhiyun 		return NETDEV_TX_OK;
340*4882a593Smuzhiyun 	}
341*4882a593Smuzhiyun 
342*4882a593Smuzhiyun 	entry = ep->tx_pointer;
343*4882a593Smuzhiyun 	ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);
344*4882a593Smuzhiyun 
345*4882a593Smuzhiyun 	txd = &ep->descs->tdesc[entry];
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun 	txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
348*4882a593Smuzhiyun 	dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len,
349*4882a593Smuzhiyun 				DMA_TO_DEVICE);
350*4882a593Smuzhiyun 	skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
351*4882a593Smuzhiyun 	dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len,
352*4882a593Smuzhiyun 				   DMA_TO_DEVICE);
353*4882a593Smuzhiyun 	dev_kfree_skb(skb);
354*4882a593Smuzhiyun 
355*4882a593Smuzhiyun 	spin_lock_irq(&ep->tx_pending_lock);
356*4882a593Smuzhiyun 	ep->tx_pending++;
357*4882a593Smuzhiyun 	if (ep->tx_pending == TX_QUEUE_ENTRIES)
358*4882a593Smuzhiyun 		netif_stop_queue(dev);
359*4882a593Smuzhiyun 	spin_unlock_irq(&ep->tx_pending_lock);
360*4882a593Smuzhiyun 
361*4882a593Smuzhiyun 	wrl(ep, REG_TXDENQ, 1);
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun 	return NETDEV_TX_OK;
364*4882a593Smuzhiyun }
365*4882a593Smuzhiyun 
/*
 * Reap completed TX status entries, update statistics and restart the
 * transmit queue if it was stopped because the ring was full.  Called
 * from the interrupt handler (see ep93xx_irq).
 */
static void ep93xx_tx_complete(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int wake;

	wake = 0;

	spin_lock(&ep->tx_pending_lock);
	while (1) {
		int entry;
		struct ep93xx_tstat *tstat;
		u32 tstat0;

		entry = ep->tx_clean_pointer;
		tstat = ep->descs->tstat + entry;

		/* TXFP set means the hardware has finished this entry. */
		tstat0 = tstat->tstat0;
		if (!(tstat0 & TSTAT0_TXFP))
			break;

		/* Give the status slot back to the hardware. */
		tstat->tstat0 = 0;

		if (tstat0 & TSTAT0_FA)
			pr_crit("frame aborted %.8x\n", tstat0);
		if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry)
			pr_crit("entry mismatch %.8x\n", tstat0);

		if (tstat0 & TSTAT0_TXWE) {
			/* Sent without error; byte count from the descriptor. */
			int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;

			dev->stats.tx_packets++;
			dev->stats.tx_bytes += length;
		} else {
			dev->stats.tx_errors++;
		}

		if (tstat0 & TSTAT0_OW)
			dev->stats.tx_window_errors++;
		if (tstat0 & TSTAT0_TXU)
			dev->stats.tx_fifo_errors++;
		/* Collision count lives in the TSTAT0_NCOLL field. */
		dev->stats.collisions += (tstat0 >> 16) & 0x1f;

		/* Only wake if xmit stopped the queue for a full ring. */
		ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1);
		if (ep->tx_pending == TX_QUEUE_ENTRIES)
			wake = 1;
		ep->tx_pending--;
	}
	spin_unlock(&ep->tx_pending_lock);

	if (wake)
		netif_wake_queue(dev);
}
418*4882a593Smuzhiyun 
ep93xx_irq(int irq,void * dev_id)419*4882a593Smuzhiyun static irqreturn_t ep93xx_irq(int irq, void *dev_id)
420*4882a593Smuzhiyun {
421*4882a593Smuzhiyun 	struct net_device *dev = dev_id;
422*4882a593Smuzhiyun 	struct ep93xx_priv *ep = netdev_priv(dev);
423*4882a593Smuzhiyun 	u32 status;
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun 	status = rdl(ep, REG_INTSTSC);
426*4882a593Smuzhiyun 	if (status == 0)
427*4882a593Smuzhiyun 		return IRQ_NONE;
428*4882a593Smuzhiyun 
429*4882a593Smuzhiyun 	if (status & REG_INTSTS_RX) {
430*4882a593Smuzhiyun 		spin_lock(&ep->rx_lock);
431*4882a593Smuzhiyun 		if (likely(napi_schedule_prep(&ep->napi))) {
432*4882a593Smuzhiyun 			wrl(ep, REG_INTEN, REG_INTEN_TX);
433*4882a593Smuzhiyun 			__napi_schedule(&ep->napi);
434*4882a593Smuzhiyun 		}
435*4882a593Smuzhiyun 		spin_unlock(&ep->rx_lock);
436*4882a593Smuzhiyun 	}
437*4882a593Smuzhiyun 
438*4882a593Smuzhiyun 	if (status & REG_INTSTS_TX)
439*4882a593Smuzhiyun 		ep93xx_tx_complete(dev);
440*4882a593Smuzhiyun 
441*4882a593Smuzhiyun 	return IRQ_HANDLED;
442*4882a593Smuzhiyun }
443*4882a593Smuzhiyun 
ep93xx_free_buffers(struct ep93xx_priv * ep)444*4882a593Smuzhiyun static void ep93xx_free_buffers(struct ep93xx_priv *ep)
445*4882a593Smuzhiyun {
446*4882a593Smuzhiyun 	struct device *dev = ep->dev->dev.parent;
447*4882a593Smuzhiyun 	int i;
448*4882a593Smuzhiyun 
449*4882a593Smuzhiyun 	if (!ep->descs)
450*4882a593Smuzhiyun 		return;
451*4882a593Smuzhiyun 
452*4882a593Smuzhiyun 	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
453*4882a593Smuzhiyun 		dma_addr_t d;
454*4882a593Smuzhiyun 
455*4882a593Smuzhiyun 		d = ep->descs->rdesc[i].buf_addr;
456*4882a593Smuzhiyun 		if (d)
457*4882a593Smuzhiyun 			dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
458*4882a593Smuzhiyun 
459*4882a593Smuzhiyun 		kfree(ep->rx_buf[i]);
460*4882a593Smuzhiyun 	}
461*4882a593Smuzhiyun 
462*4882a593Smuzhiyun 	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
463*4882a593Smuzhiyun 		dma_addr_t d;
464*4882a593Smuzhiyun 
465*4882a593Smuzhiyun 		d = ep->descs->tdesc[i].buf_addr;
466*4882a593Smuzhiyun 		if (d)
467*4882a593Smuzhiyun 			dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
468*4882a593Smuzhiyun 
469*4882a593Smuzhiyun 		kfree(ep->tx_buf[i]);
470*4882a593Smuzhiyun 	}
471*4882a593Smuzhiyun 
472*4882a593Smuzhiyun 	dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
473*4882a593Smuzhiyun 							ep->descs_dma_addr);
474*4882a593Smuzhiyun 	ep->descs = NULL;
475*4882a593Smuzhiyun }
476*4882a593Smuzhiyun 
ep93xx_alloc_buffers(struct ep93xx_priv * ep)477*4882a593Smuzhiyun static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
478*4882a593Smuzhiyun {
479*4882a593Smuzhiyun 	struct device *dev = ep->dev->dev.parent;
480*4882a593Smuzhiyun 	int i;
481*4882a593Smuzhiyun 
482*4882a593Smuzhiyun 	ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
483*4882a593Smuzhiyun 				&ep->descs_dma_addr, GFP_KERNEL);
484*4882a593Smuzhiyun 	if (ep->descs == NULL)
485*4882a593Smuzhiyun 		return 1;
486*4882a593Smuzhiyun 
487*4882a593Smuzhiyun 	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
488*4882a593Smuzhiyun 		void *buf;
489*4882a593Smuzhiyun 		dma_addr_t d;
490*4882a593Smuzhiyun 
491*4882a593Smuzhiyun 		buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
492*4882a593Smuzhiyun 		if (buf == NULL)
493*4882a593Smuzhiyun 			goto err;
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun 		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
496*4882a593Smuzhiyun 		if (dma_mapping_error(dev, d)) {
497*4882a593Smuzhiyun 			kfree(buf);
498*4882a593Smuzhiyun 			goto err;
499*4882a593Smuzhiyun 		}
500*4882a593Smuzhiyun 
501*4882a593Smuzhiyun 		ep->rx_buf[i] = buf;
502*4882a593Smuzhiyun 		ep->descs->rdesc[i].buf_addr = d;
503*4882a593Smuzhiyun 		ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
504*4882a593Smuzhiyun 	}
505*4882a593Smuzhiyun 
506*4882a593Smuzhiyun 	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
507*4882a593Smuzhiyun 		void *buf;
508*4882a593Smuzhiyun 		dma_addr_t d;
509*4882a593Smuzhiyun 
510*4882a593Smuzhiyun 		buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
511*4882a593Smuzhiyun 		if (buf == NULL)
512*4882a593Smuzhiyun 			goto err;
513*4882a593Smuzhiyun 
514*4882a593Smuzhiyun 		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE);
515*4882a593Smuzhiyun 		if (dma_mapping_error(dev, d)) {
516*4882a593Smuzhiyun 			kfree(buf);
517*4882a593Smuzhiyun 			goto err;
518*4882a593Smuzhiyun 		}
519*4882a593Smuzhiyun 
520*4882a593Smuzhiyun 		ep->tx_buf[i] = buf;
521*4882a593Smuzhiyun 		ep->descs->tdesc[i].buf_addr = d;
522*4882a593Smuzhiyun 	}
523*4882a593Smuzhiyun 
524*4882a593Smuzhiyun 	return 0;
525*4882a593Smuzhiyun 
526*4882a593Smuzhiyun err:
527*4882a593Smuzhiyun 	ep93xx_free_buffers(ep);
528*4882a593Smuzhiyun 	return 1;
529*4882a593Smuzhiyun }
530*4882a593Smuzhiyun 
/*
 * Reset and bring up the MAC: program the four ring base addresses,
 * enable DMA and interrupts, prime the RX ring and set the station
 * address.  Returns 0 on success, 1 if the hardware fails to reset
 * or to start.
 */
static int ep93xx_start_hw(struct net_device *dev)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	unsigned long addr;
	int i;

	/* Soft reset; the bit self-clears when the reset completes. */
	wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET);
	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0)
			break;
		msleep(1);
	}

	if (i == 10) {
		pr_crit("hw failed to reset\n");
		return 1;
	}

	/* Program the MDC clock divisor (stored value is divisor, reg wants -1). */
	wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9));

	/* Does the PHY support preamble suppress?  */
	if ((ep93xx_mdio_read(dev, ep->mii.phy_id, MII_BMSR) & 0x0040) != 0)
		wrl(ep, REG_SELFCTL, ((ep->mdc_divisor - 1) << 9) | (1 << 8));

	/* Receive descriptor ring.  */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rdesc);
	wrl(ep, REG_RXDQBADD, addr);
	wrl(ep, REG_RXDCURADD, addr);
	wrw(ep, REG_RXDQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rdesc));

	/* Receive status ring.  */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, rstat);
	wrl(ep, REG_RXSTSQBADD, addr);
	wrl(ep, REG_RXSTSQCURADD, addr);
	wrw(ep, REG_RXSTSQBLEN, RX_QUEUE_ENTRIES * sizeof(struct ep93xx_rstat));

	/* Transmit descriptor ring.  */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tdesc);
	wrl(ep, REG_TXDQBADD, addr);
	wrl(ep, REG_TXDQCURADD, addr);
	wrw(ep, REG_TXDQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tdesc));

	/* Transmit status ring.  */
	addr = ep->descs_dma_addr + offsetof(struct ep93xx_descs, tstat);
	wrl(ep, REG_TXSTSQBADD, addr);
	wrl(ep, REG_TXSTSQCURADD, addr);
	wrw(ep, REG_TXSTSQBLEN, TX_QUEUE_ENTRIES * sizeof(struct ep93xx_tstat));

	/* Enable bus-master DMA and the TX/RX interrupt sources. */
	wrl(ep, REG_BMCTL, REG_BMCTL_ENABLE_TX | REG_BMCTL_ENABLE_RX);
	wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
	wrl(ep, REG_GIINTMSK, 0);

	/* Wait for the RX DMA engine to report it is active. */
	for (i = 0; i < 10; i++) {
		if ((rdl(ep, REG_BMSTS) & REG_BMSTS_RX_ACTIVE) != 0)
			break;
		msleep(1);
	}

	if (i == 10) {
		pr_crit("hw failed to start\n");
		return 1;
	}

	/* Hand the entire RX ring to the hardware. */
	wrl(ep, REG_RXDENQ, RX_QUEUE_ENTRIES);
	wrl(ep, REG_RXSTSENQ, RX_QUEUE_ENTRIES);

	/* Station (MAC) address, one byte per register. */
	wrb(ep, REG_INDAD0, dev->dev_addr[0]);
	wrb(ep, REG_INDAD1, dev->dev_addr[1]);
	wrb(ep, REG_INDAD2, dev->dev_addr[2]);
	wrb(ep, REG_INDAD3, dev->dev_addr[3]);
	wrb(ep, REG_INDAD4, dev->dev_addr[4]);
	wrb(ep, REG_INDAD5, dev->dev_addr[5]);
	wrl(ep, REG_AFP, 0);

	wrl(ep, REG_MAXFRMLEN, (MAX_PKT_SIZE << 16) | MAX_PKT_SIZE);

	/* Finally enable the receiver and transmitter. */
	wrl(ep, REG_RXCTL, REG_RXCTL_DEFAULT);
	wrl(ep, REG_TXCTL, REG_TXCTL_ENABLE);

	return 0;
}
612*4882a593Smuzhiyun 
ep93xx_stop_hw(struct net_device * dev)613*4882a593Smuzhiyun static void ep93xx_stop_hw(struct net_device *dev)
614*4882a593Smuzhiyun {
615*4882a593Smuzhiyun 	struct ep93xx_priv *ep = netdev_priv(dev);
616*4882a593Smuzhiyun 	int i;
617*4882a593Smuzhiyun 
618*4882a593Smuzhiyun 	wrl(ep, REG_SELFCTL, REG_SELFCTL_RESET);
619*4882a593Smuzhiyun 	for (i = 0; i < 10; i++) {
620*4882a593Smuzhiyun 		if ((rdl(ep, REG_SELFCTL) & REG_SELFCTL_RESET) == 0)
621*4882a593Smuzhiyun 			break;
622*4882a593Smuzhiyun 		msleep(1);
623*4882a593Smuzhiyun 	}
624*4882a593Smuzhiyun 
625*4882a593Smuzhiyun 	if (i == 10)
626*4882a593Smuzhiyun 		pr_crit("hw failed to reset\n");
627*4882a593Smuzhiyun }
628*4882a593Smuzhiyun 
ep93xx_open(struct net_device * dev)629*4882a593Smuzhiyun static int ep93xx_open(struct net_device *dev)
630*4882a593Smuzhiyun {
631*4882a593Smuzhiyun 	struct ep93xx_priv *ep = netdev_priv(dev);
632*4882a593Smuzhiyun 	int err;
633*4882a593Smuzhiyun 
634*4882a593Smuzhiyun 	if (ep93xx_alloc_buffers(ep))
635*4882a593Smuzhiyun 		return -ENOMEM;
636*4882a593Smuzhiyun 
637*4882a593Smuzhiyun 	napi_enable(&ep->napi);
638*4882a593Smuzhiyun 
639*4882a593Smuzhiyun 	if (ep93xx_start_hw(dev)) {
640*4882a593Smuzhiyun 		napi_disable(&ep->napi);
641*4882a593Smuzhiyun 		ep93xx_free_buffers(ep);
642*4882a593Smuzhiyun 		return -EIO;
643*4882a593Smuzhiyun 	}
644*4882a593Smuzhiyun 
645*4882a593Smuzhiyun 	spin_lock_init(&ep->rx_lock);
646*4882a593Smuzhiyun 	ep->rx_pointer = 0;
647*4882a593Smuzhiyun 	ep->tx_clean_pointer = 0;
648*4882a593Smuzhiyun 	ep->tx_pointer = 0;
649*4882a593Smuzhiyun 	spin_lock_init(&ep->tx_pending_lock);
650*4882a593Smuzhiyun 	ep->tx_pending = 0;
651*4882a593Smuzhiyun 
652*4882a593Smuzhiyun 	err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev);
653*4882a593Smuzhiyun 	if (err) {
654*4882a593Smuzhiyun 		napi_disable(&ep->napi);
655*4882a593Smuzhiyun 		ep93xx_stop_hw(dev);
656*4882a593Smuzhiyun 		ep93xx_free_buffers(ep);
657*4882a593Smuzhiyun 		return err;
658*4882a593Smuzhiyun 	}
659*4882a593Smuzhiyun 
660*4882a593Smuzhiyun 	wrl(ep, REG_GIINTMSK, REG_GIINTMSK_ENABLE);
661*4882a593Smuzhiyun 
662*4882a593Smuzhiyun 	netif_start_queue(dev);
663*4882a593Smuzhiyun 
664*4882a593Smuzhiyun 	return 0;
665*4882a593Smuzhiyun }
666*4882a593Smuzhiyun 
ep93xx_close(struct net_device * dev)667*4882a593Smuzhiyun static int ep93xx_close(struct net_device *dev)
668*4882a593Smuzhiyun {
669*4882a593Smuzhiyun 	struct ep93xx_priv *ep = netdev_priv(dev);
670*4882a593Smuzhiyun 
671*4882a593Smuzhiyun 	napi_disable(&ep->napi);
672*4882a593Smuzhiyun 	netif_stop_queue(dev);
673*4882a593Smuzhiyun 
674*4882a593Smuzhiyun 	wrl(ep, REG_GIINTMSK, 0);
675*4882a593Smuzhiyun 	free_irq(ep->irq, dev);
676*4882a593Smuzhiyun 	ep93xx_stop_hw(dev);
677*4882a593Smuzhiyun 	ep93xx_free_buffers(ep);
678*4882a593Smuzhiyun 
679*4882a593Smuzhiyun 	return 0;
680*4882a593Smuzhiyun }
681*4882a593Smuzhiyun 
ep93xx_ioctl(struct net_device * dev,struct ifreq * ifr,int cmd)682*4882a593Smuzhiyun static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
683*4882a593Smuzhiyun {
684*4882a593Smuzhiyun 	struct ep93xx_priv *ep = netdev_priv(dev);
685*4882a593Smuzhiyun 	struct mii_ioctl_data *data = if_mii(ifr);
686*4882a593Smuzhiyun 
687*4882a593Smuzhiyun 	return generic_mii_ioctl(&ep->mii, data, cmd, NULL);
688*4882a593Smuzhiyun }
689*4882a593Smuzhiyun 
ep93xx_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)690*4882a593Smuzhiyun static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
691*4882a593Smuzhiyun {
692*4882a593Smuzhiyun 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
693*4882a593Smuzhiyun }
694*4882a593Smuzhiyun 
ep93xx_get_link_ksettings(struct net_device * dev,struct ethtool_link_ksettings * cmd)695*4882a593Smuzhiyun static int ep93xx_get_link_ksettings(struct net_device *dev,
696*4882a593Smuzhiyun 				     struct ethtool_link_ksettings *cmd)
697*4882a593Smuzhiyun {
698*4882a593Smuzhiyun 	struct ep93xx_priv *ep = netdev_priv(dev);
699*4882a593Smuzhiyun 
700*4882a593Smuzhiyun 	mii_ethtool_get_link_ksettings(&ep->mii, cmd);
701*4882a593Smuzhiyun 
702*4882a593Smuzhiyun 	return 0;
703*4882a593Smuzhiyun }
704*4882a593Smuzhiyun 
ep93xx_set_link_ksettings(struct net_device * dev,const struct ethtool_link_ksettings * cmd)705*4882a593Smuzhiyun static int ep93xx_set_link_ksettings(struct net_device *dev,
706*4882a593Smuzhiyun 				     const struct ethtool_link_ksettings *cmd)
707*4882a593Smuzhiyun {
708*4882a593Smuzhiyun 	struct ep93xx_priv *ep = netdev_priv(dev);
709*4882a593Smuzhiyun 	return mii_ethtool_set_link_ksettings(&ep->mii, cmd);
710*4882a593Smuzhiyun }
711*4882a593Smuzhiyun 
ep93xx_nway_reset(struct net_device * dev)712*4882a593Smuzhiyun static int ep93xx_nway_reset(struct net_device *dev)
713*4882a593Smuzhiyun {
714*4882a593Smuzhiyun 	struct ep93xx_priv *ep = netdev_priv(dev);
715*4882a593Smuzhiyun 	return mii_nway_restart(&ep->mii);
716*4882a593Smuzhiyun }
717*4882a593Smuzhiyun 
ep93xx_get_link(struct net_device * dev)718*4882a593Smuzhiyun static u32 ep93xx_get_link(struct net_device *dev)
719*4882a593Smuzhiyun {
720*4882a593Smuzhiyun 	struct ep93xx_priv *ep = netdev_priv(dev);
721*4882a593Smuzhiyun 	return mii_link_ok(&ep->mii);
722*4882a593Smuzhiyun }
723*4882a593Smuzhiyun 
/* ethtool operations; link management is delegated to the mii library. */
static const struct ethtool_ops ep93xx_ethtool_ops = {
	.get_drvinfo		= ep93xx_get_drvinfo,
	.nway_reset		= ep93xx_nway_reset,
	.get_link		= ep93xx_get_link,
	.get_link_ksettings	= ep93xx_get_link_ksettings,
	.set_link_ksettings	= ep93xx_set_link_ksettings,
};
731*4882a593Smuzhiyun 
/* net_device operations; address validation/setting use the generic
 * ethernet helpers. */
static const struct net_device_ops ep93xx_netdev_ops = {
	.ndo_open		= ep93xx_open,
	.ndo_stop		= ep93xx_close,
	.ndo_start_xmit		= ep93xx_xmit,
	.ndo_do_ioctl		= ep93xx_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};
740*4882a593Smuzhiyun 
ep93xx_dev_alloc(struct ep93xx_eth_data * data)741*4882a593Smuzhiyun static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
742*4882a593Smuzhiyun {
743*4882a593Smuzhiyun 	struct net_device *dev;
744*4882a593Smuzhiyun 
745*4882a593Smuzhiyun 	dev = alloc_etherdev(sizeof(struct ep93xx_priv));
746*4882a593Smuzhiyun 	if (dev == NULL)
747*4882a593Smuzhiyun 		return NULL;
748*4882a593Smuzhiyun 
749*4882a593Smuzhiyun 	memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN);
750*4882a593Smuzhiyun 
751*4882a593Smuzhiyun 	dev->ethtool_ops = &ep93xx_ethtool_ops;
752*4882a593Smuzhiyun 	dev->netdev_ops = &ep93xx_netdev_ops;
753*4882a593Smuzhiyun 
754*4882a593Smuzhiyun 	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
755*4882a593Smuzhiyun 
756*4882a593Smuzhiyun 	return dev;
757*4882a593Smuzhiyun }
758*4882a593Smuzhiyun 
759*4882a593Smuzhiyun 
/*
 * Platform driver ->remove: undo everything ep93xx_eth_probe() did.
 *
 * Also invoked from the probe error path, so every step is guarded
 * and tolerates partially-initialised state (NULL drvdata, unmapped
 * registers, unclaimed memory region).
 */
static int ep93xx_eth_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct ep93xx_priv *ep;
	struct resource *mem;

	dev = platform_get_drvdata(pdev);
	if (dev == NULL)
		return 0;

	ep = netdev_priv(dev);

	/* @@@ Force down.  */
	/*
	 * NOTE(review): on the probe error path this runs even when
	 * register_netdev() never succeeded; unregister_netdev() on a
	 * never-registered device looks unsafe — verify.
	 */
	unregister_netdev(dev);
	ep93xx_free_buffers(ep);

	if (ep->base_addr != NULL)
		iounmap(ep->base_addr);

	if (ep->res != NULL) {
		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		release_mem_region(mem->start, resource_size(mem));
	}

	free_netdev(dev);

	return 0;
}
788*4882a593Smuzhiyun 
ep93xx_eth_probe(struct platform_device * pdev)789*4882a593Smuzhiyun static int ep93xx_eth_probe(struct platform_device *pdev)
790*4882a593Smuzhiyun {
791*4882a593Smuzhiyun 	struct ep93xx_eth_data *data;
792*4882a593Smuzhiyun 	struct net_device *dev;
793*4882a593Smuzhiyun 	struct ep93xx_priv *ep;
794*4882a593Smuzhiyun 	struct resource *mem;
795*4882a593Smuzhiyun 	int irq;
796*4882a593Smuzhiyun 	int err;
797*4882a593Smuzhiyun 
798*4882a593Smuzhiyun 	if (pdev == NULL)
799*4882a593Smuzhiyun 		return -ENODEV;
800*4882a593Smuzhiyun 	data = dev_get_platdata(&pdev->dev);
801*4882a593Smuzhiyun 
802*4882a593Smuzhiyun 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
803*4882a593Smuzhiyun 	irq = platform_get_irq(pdev, 0);
804*4882a593Smuzhiyun 	if (!mem || irq < 0)
805*4882a593Smuzhiyun 		return -ENXIO;
806*4882a593Smuzhiyun 
807*4882a593Smuzhiyun 	dev = ep93xx_dev_alloc(data);
808*4882a593Smuzhiyun 	if (dev == NULL) {
809*4882a593Smuzhiyun 		err = -ENOMEM;
810*4882a593Smuzhiyun 		goto err_out;
811*4882a593Smuzhiyun 	}
812*4882a593Smuzhiyun 	ep = netdev_priv(dev);
813*4882a593Smuzhiyun 	ep->dev = dev;
814*4882a593Smuzhiyun 	SET_NETDEV_DEV(dev, &pdev->dev);
815*4882a593Smuzhiyun 	netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
816*4882a593Smuzhiyun 
817*4882a593Smuzhiyun 	platform_set_drvdata(pdev, dev);
818*4882a593Smuzhiyun 
819*4882a593Smuzhiyun 	ep->res = request_mem_region(mem->start, resource_size(mem),
820*4882a593Smuzhiyun 				     dev_name(&pdev->dev));
821*4882a593Smuzhiyun 	if (ep->res == NULL) {
822*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Could not reserve memory region\n");
823*4882a593Smuzhiyun 		err = -ENOMEM;
824*4882a593Smuzhiyun 		goto err_out;
825*4882a593Smuzhiyun 	}
826*4882a593Smuzhiyun 
827*4882a593Smuzhiyun 	ep->base_addr = ioremap(mem->start, resource_size(mem));
828*4882a593Smuzhiyun 	if (ep->base_addr == NULL) {
829*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
830*4882a593Smuzhiyun 		err = -EIO;
831*4882a593Smuzhiyun 		goto err_out;
832*4882a593Smuzhiyun 	}
833*4882a593Smuzhiyun 	ep->irq = irq;
834*4882a593Smuzhiyun 
835*4882a593Smuzhiyun 	ep->mii.phy_id = data->phy_id;
836*4882a593Smuzhiyun 	ep->mii.phy_id_mask = 0x1f;
837*4882a593Smuzhiyun 	ep->mii.reg_num_mask = 0x1f;
838*4882a593Smuzhiyun 	ep->mii.dev = dev;
839*4882a593Smuzhiyun 	ep->mii.mdio_read = ep93xx_mdio_read;
840*4882a593Smuzhiyun 	ep->mii.mdio_write = ep93xx_mdio_write;
841*4882a593Smuzhiyun 	ep->mdc_divisor = 40;	/* Max HCLK 100 MHz, min MDIO clk 2.5 MHz.  */
842*4882a593Smuzhiyun 
843*4882a593Smuzhiyun 	if (is_zero_ether_addr(dev->dev_addr))
844*4882a593Smuzhiyun 		eth_hw_addr_random(dev);
845*4882a593Smuzhiyun 
846*4882a593Smuzhiyun 	err = register_netdev(dev);
847*4882a593Smuzhiyun 	if (err) {
848*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Failed to register netdev\n");
849*4882a593Smuzhiyun 		goto err_out;
850*4882a593Smuzhiyun 	}
851*4882a593Smuzhiyun 
852*4882a593Smuzhiyun 	printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, %pM\n",
853*4882a593Smuzhiyun 			dev->name, ep->irq, dev->dev_addr);
854*4882a593Smuzhiyun 
855*4882a593Smuzhiyun 	return 0;
856*4882a593Smuzhiyun 
857*4882a593Smuzhiyun err_out:
858*4882a593Smuzhiyun 	ep93xx_eth_remove(pdev);
859*4882a593Smuzhiyun 	return err;
860*4882a593Smuzhiyun }
861*4882a593Smuzhiyun 
862*4882a593Smuzhiyun 
/* Platform driver glue: binds against the "ep93xx-eth" platform device. */
static struct platform_driver ep93xx_eth_driver = {
	.probe		= ep93xx_eth_probe,
	.remove		= ep93xx_eth_remove,
	.driver		= {
		.name	= "ep93xx-eth",
	},
};

/* Generates the module init/exit boilerplate for the driver above. */
module_platform_driver(ep93xx_eth_driver);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-eth");
875