xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/ti/cpmac.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0+
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2006, 2007 Eugene Konev
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include <linux/module.h>
8*4882a593Smuzhiyun #include <linux/interrupt.h>
9*4882a593Smuzhiyun #include <linux/moduleparam.h>
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #include <linux/sched.h>
12*4882a593Smuzhiyun #include <linux/kernel.h>
13*4882a593Smuzhiyun #include <linux/slab.h>
14*4882a593Smuzhiyun #include <linux/errno.h>
15*4882a593Smuzhiyun #include <linux/types.h>
16*4882a593Smuzhiyun #include <linux/delay.h>
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun #include <linux/netdevice.h>
19*4882a593Smuzhiyun #include <linux/if_vlan.h>
20*4882a593Smuzhiyun #include <linux/etherdevice.h>
21*4882a593Smuzhiyun #include <linux/ethtool.h>
22*4882a593Smuzhiyun #include <linux/skbuff.h>
23*4882a593Smuzhiyun #include <linux/mii.h>
24*4882a593Smuzhiyun #include <linux/phy.h>
25*4882a593Smuzhiyun #include <linux/phy_fixed.h>
26*4882a593Smuzhiyun #include <linux/platform_device.h>
27*4882a593Smuzhiyun #include <linux/dma-mapping.h>
28*4882a593Smuzhiyun #include <linux/clk.h>
29*4882a593Smuzhiyun #include <linux/gpio.h>
30*4882a593Smuzhiyun #include <linux/atomic.h>
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun #include <asm/mach-ar7/ar7.h>
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
35*4882a593Smuzhiyun MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
36*4882a593Smuzhiyun MODULE_LICENSE("GPL");
37*4882a593Smuzhiyun MODULE_ALIAS("platform:cpmac");
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun static int debug_level = 8;
40*4882a593Smuzhiyun static int dumb_switch;
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun /* Next 2 are only used in cpmac_probe, so it's pointless to change them */
43*4882a593Smuzhiyun module_param(debug_level, int, 0444);
44*4882a593Smuzhiyun module_param(dumb_switch, int, 0444);
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
47*4882a593Smuzhiyun MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
48*4882a593Smuzhiyun 
49*4882a593Smuzhiyun #define CPMAC_VERSION "0.5.2"
50*4882a593Smuzhiyun /* frame size + 802.1q tag + FCS size */
51*4882a593Smuzhiyun #define CPMAC_SKB_SIZE		(ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
52*4882a593Smuzhiyun #define CPMAC_QUEUES	8
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun /* Ethernet registers */
55*4882a593Smuzhiyun #define CPMAC_TX_CONTROL		0x0004
56*4882a593Smuzhiyun #define CPMAC_TX_TEARDOWN		0x0008
57*4882a593Smuzhiyun #define CPMAC_RX_CONTROL		0x0014
58*4882a593Smuzhiyun #define CPMAC_RX_TEARDOWN		0x0018
59*4882a593Smuzhiyun #define CPMAC_MBP			0x0100
60*4882a593Smuzhiyun #define MBP_RXPASSCRC			0x40000000
61*4882a593Smuzhiyun #define MBP_RXQOS			0x20000000
62*4882a593Smuzhiyun #define MBP_RXNOCHAIN			0x10000000
63*4882a593Smuzhiyun #define MBP_RXCMF			0x01000000
64*4882a593Smuzhiyun #define MBP_RXSHORT			0x00800000
65*4882a593Smuzhiyun #define MBP_RXCEF			0x00400000
66*4882a593Smuzhiyun #define MBP_RXPROMISC			0x00200000
67*4882a593Smuzhiyun #define MBP_PROMISCCHAN(channel)	(((channel) & 0x7) << 16)
68*4882a593Smuzhiyun #define MBP_RXBCAST			0x00002000
69*4882a593Smuzhiyun #define MBP_BCASTCHAN(channel)		(((channel) & 0x7) << 8)
70*4882a593Smuzhiyun #define MBP_RXMCAST			0x00000020
71*4882a593Smuzhiyun #define MBP_MCASTCHAN(channel)		((channel) & 0x7)
72*4882a593Smuzhiyun #define CPMAC_UNICAST_ENABLE		0x0104
73*4882a593Smuzhiyun #define CPMAC_UNICAST_CLEAR		0x0108
74*4882a593Smuzhiyun #define CPMAC_MAX_LENGTH		0x010c
75*4882a593Smuzhiyun #define CPMAC_BUFFER_OFFSET		0x0110
76*4882a593Smuzhiyun #define CPMAC_MAC_CONTROL		0x0160
77*4882a593Smuzhiyun #define MAC_TXPTYPE			0x00000200
78*4882a593Smuzhiyun #define MAC_TXPACE			0x00000040
79*4882a593Smuzhiyun #define MAC_MII				0x00000020
80*4882a593Smuzhiyun #define MAC_TXFLOW			0x00000010
81*4882a593Smuzhiyun #define MAC_RXFLOW			0x00000008
82*4882a593Smuzhiyun #define MAC_MTEST			0x00000004
83*4882a593Smuzhiyun #define MAC_LOOPBACK			0x00000002
84*4882a593Smuzhiyun #define MAC_FDX				0x00000001
85*4882a593Smuzhiyun #define CPMAC_MAC_STATUS		0x0164
86*4882a593Smuzhiyun #define MAC_STATUS_QOS			0x00000004
87*4882a593Smuzhiyun #define MAC_STATUS_RXFLOW		0x00000002
88*4882a593Smuzhiyun #define MAC_STATUS_TXFLOW		0x00000001
89*4882a593Smuzhiyun #define CPMAC_TX_INT_ENABLE		0x0178
90*4882a593Smuzhiyun #define CPMAC_TX_INT_CLEAR		0x017c
91*4882a593Smuzhiyun #define CPMAC_MAC_INT_VECTOR		0x0180
92*4882a593Smuzhiyun #define MAC_INT_STATUS			0x00080000
93*4882a593Smuzhiyun #define MAC_INT_HOST			0x00040000
94*4882a593Smuzhiyun #define MAC_INT_RX			0x00020000
95*4882a593Smuzhiyun #define MAC_INT_TX			0x00010000
96*4882a593Smuzhiyun #define CPMAC_MAC_EOI_VECTOR		0x0184
97*4882a593Smuzhiyun #define CPMAC_RX_INT_ENABLE		0x0198
98*4882a593Smuzhiyun #define CPMAC_RX_INT_CLEAR		0x019c
99*4882a593Smuzhiyun #define CPMAC_MAC_INT_ENABLE		0x01a8
100*4882a593Smuzhiyun #define CPMAC_MAC_INT_CLEAR		0x01ac
101*4882a593Smuzhiyun #define CPMAC_MAC_ADDR_LO(channel)	(0x01b0 + (channel) * 4)
102*4882a593Smuzhiyun #define CPMAC_MAC_ADDR_MID		0x01d0
103*4882a593Smuzhiyun #define CPMAC_MAC_ADDR_HI		0x01d4
104*4882a593Smuzhiyun #define CPMAC_MAC_HASH_LO		0x01d8
105*4882a593Smuzhiyun #define CPMAC_MAC_HASH_HI		0x01dc
106*4882a593Smuzhiyun #define CPMAC_TX_PTR(channel)		(0x0600 + (channel) * 4)
107*4882a593Smuzhiyun #define CPMAC_RX_PTR(channel)		(0x0620 + (channel) * 4)
108*4882a593Smuzhiyun #define CPMAC_TX_ACK(channel)		(0x0640 + (channel) * 4)
109*4882a593Smuzhiyun #define CPMAC_RX_ACK(channel)		(0x0660 + (channel) * 4)
110*4882a593Smuzhiyun #define CPMAC_REG_END			0x0680
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun /* Rx/Tx statistics
113*4882a593Smuzhiyun  * TODO: use some of them to fill stats in cpmac_stats()
114*4882a593Smuzhiyun  */
115*4882a593Smuzhiyun #define CPMAC_STATS_RX_GOOD		0x0200
116*4882a593Smuzhiyun #define CPMAC_STATS_RX_BCAST		0x0204
117*4882a593Smuzhiyun #define CPMAC_STATS_RX_MCAST		0x0208
118*4882a593Smuzhiyun #define CPMAC_STATS_RX_PAUSE		0x020c
119*4882a593Smuzhiyun #define CPMAC_STATS_RX_CRC		0x0210
120*4882a593Smuzhiyun #define CPMAC_STATS_RX_ALIGN		0x0214
121*4882a593Smuzhiyun #define CPMAC_STATS_RX_OVER		0x0218
122*4882a593Smuzhiyun #define CPMAC_STATS_RX_JABBER		0x021c
123*4882a593Smuzhiyun #define CPMAC_STATS_RX_UNDER		0x0220
124*4882a593Smuzhiyun #define CPMAC_STATS_RX_FRAG		0x0224
125*4882a593Smuzhiyun #define CPMAC_STATS_RX_FILTER		0x0228
126*4882a593Smuzhiyun #define CPMAC_STATS_RX_QOSFILTER	0x022c
127*4882a593Smuzhiyun #define CPMAC_STATS_RX_OCTETS		0x0230
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun #define CPMAC_STATS_TX_GOOD		0x0234
130*4882a593Smuzhiyun #define CPMAC_STATS_TX_BCAST		0x0238
131*4882a593Smuzhiyun #define CPMAC_STATS_TX_MCAST		0x023c
132*4882a593Smuzhiyun #define CPMAC_STATS_TX_PAUSE		0x0240
133*4882a593Smuzhiyun #define CPMAC_STATS_TX_DEFER		0x0244
134*4882a593Smuzhiyun #define CPMAC_STATS_TX_COLLISION	0x0248
135*4882a593Smuzhiyun #define CPMAC_STATS_TX_SINGLECOLL	0x024c
136*4882a593Smuzhiyun #define CPMAC_STATS_TX_MULTICOLL	0x0250
137*4882a593Smuzhiyun #define CPMAC_STATS_TX_EXCESSCOLL	0x0254
138*4882a593Smuzhiyun #define CPMAC_STATS_TX_LATECOLL		0x0258
139*4882a593Smuzhiyun #define CPMAC_STATS_TX_UNDERRUN		0x025c
140*4882a593Smuzhiyun #define CPMAC_STATS_TX_CARRIERSENSE	0x0260
141*4882a593Smuzhiyun #define CPMAC_STATS_TX_OCTETS		0x0264
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun #define cpmac_read(base, reg)		(readl((void __iomem *)(base) + (reg)))
144*4882a593Smuzhiyun #define cpmac_write(base, reg, val)	(writel(val, (void __iomem *)(base) + \
145*4882a593Smuzhiyun 						(reg)))
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun /* MDIO bus */
148*4882a593Smuzhiyun #define CPMAC_MDIO_VERSION		0x0000
149*4882a593Smuzhiyun #define CPMAC_MDIO_CONTROL		0x0004
150*4882a593Smuzhiyun #define MDIOC_IDLE			0x80000000
151*4882a593Smuzhiyun #define MDIOC_ENABLE			0x40000000
152*4882a593Smuzhiyun #define MDIOC_PREAMBLE			0x00100000
153*4882a593Smuzhiyun #define MDIOC_FAULT			0x00080000
154*4882a593Smuzhiyun #define MDIOC_FAULTDETECT		0x00040000
155*4882a593Smuzhiyun #define MDIOC_INTTEST			0x00020000
156*4882a593Smuzhiyun #define MDIOC_CLKDIV(div)		((div) & 0xff)
157*4882a593Smuzhiyun #define CPMAC_MDIO_ALIVE		0x0008
158*4882a593Smuzhiyun #define CPMAC_MDIO_LINK			0x000c
159*4882a593Smuzhiyun #define CPMAC_MDIO_ACCESS(channel)	(0x0080 + (channel) * 8)
160*4882a593Smuzhiyun #define MDIO_BUSY			0x80000000
161*4882a593Smuzhiyun #define MDIO_WRITE			0x40000000
162*4882a593Smuzhiyun #define MDIO_REG(reg)			(((reg) & 0x1f) << 21)
163*4882a593Smuzhiyun #define MDIO_PHY(phy)			(((phy) & 0x1f) << 16)
164*4882a593Smuzhiyun #define MDIO_DATA(data)			((data) & 0xffff)
165*4882a593Smuzhiyun #define CPMAC_MDIO_PHYSEL(channel)	(0x0084 + (channel) * 8)
166*4882a593Smuzhiyun #define PHYSEL_LINKSEL			0x00000040
167*4882a593Smuzhiyun #define PHYSEL_LINKINT			0x00000020
168*4882a593Smuzhiyun 
/* DMA descriptor shared with the CPMAC engine.
 *
 * The hw_* fields and the buflen/datalen/flags words are read and written
 * by the hardware (their physical addresses are programmed into the
 * CPMAC_TX_PTR/CPMAC_RX_PTR registers); the remaining fields are
 * driver-private bookkeeping.
 */
struct cpmac_desc {
	u32 hw_next;		/* physical address of the next descriptor, 0 = end of chain */
	u32 hw_data;		/* physical address of the data buffer */
	u16 buflen;		/* allocated buffer length */
	u16 bufflags;
	u16 datalen;		/* actual packet length reported by hardware */
	u16 dataflags;		/* CPMAC_* ownership/status bits below */
/* dataflags bits */
#define CPMAC_SOP			0x8000	/* start of packet */
#define CPMAC_EOP			0x4000	/* end of packet */
#define CPMAC_OWN			0x2000	/* descriptor owned by hardware */
#define CPMAC_EOQ			0x1000	/* end of queue: receiver stopped here */
	struct sk_buff *skb;	/* skb whose data this descriptor points at */
	struct cpmac_desc *next;	/* software ring linkage (circular) */
	struct cpmac_desc *prev;
	dma_addr_t mapping;	/* DMA address of this descriptor itself */
	dma_addr_t data_mapping;	/* DMA address of skb->data */
};
186*4882a593Smuzhiyun 
/* Per-device driver state, stored in netdev_priv(dev). */
struct cpmac_priv {
	spinlock_t lock;	/* protects tx stats updates (see cpmac_end_xmit) */
	spinlock_t rx_lock;	/* protects rx_head and the rx descriptor ring */
	struct cpmac_desc *rx_head;	/* next rx descriptor to be processed */
	int ring_size;
	/* descriptor array; tx queues are indexed directly by queue number
	 * (rx ring presumably allocated alongside — set up outside this view)
	 */
	struct cpmac_desc *desc_ring;
	dma_addr_t dma_ring;
	void __iomem *regs;	/* mapped CPMAC register block */
	struct mii_bus *mii_bus;
	char phy_name[MII_BUS_ID_SIZE + 3];
	int oldlink, oldspeed, oldduplex;	/* cached PHY state for change detection */
	u32 msg_enable;		/* NETIF_MSG_* bitmask (from debug_level) */
	struct net_device *dev;
	struct work_struct reset_work;	/* deferred hw reset after fatal ring errors */
	struct platform_device *pdev;
	struct napi_struct napi;
	atomic_t reset_pending;	/* nonzero while a reset is queued/in progress */
};
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun static irqreturn_t cpmac_irq(int, void *);
207*4882a593Smuzhiyun static void cpmac_hw_start(struct net_device *dev);
208*4882a593Smuzhiyun static void cpmac_hw_stop(struct net_device *dev);
209*4882a593Smuzhiyun static int cpmac_stop(struct net_device *dev);
210*4882a593Smuzhiyun static int cpmac_open(struct net_device *dev);
211*4882a593Smuzhiyun 
cpmac_dump_regs(struct net_device * dev)212*4882a593Smuzhiyun static void cpmac_dump_regs(struct net_device *dev)
213*4882a593Smuzhiyun {
214*4882a593Smuzhiyun 	int i;
215*4882a593Smuzhiyun 	struct cpmac_priv *priv = netdev_priv(dev);
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun 	for (i = 0; i < CPMAC_REG_END; i += 4) {
218*4882a593Smuzhiyun 		if (i % 16 == 0) {
219*4882a593Smuzhiyun 			if (i)
220*4882a593Smuzhiyun 				printk("\n");
221*4882a593Smuzhiyun 			printk("%s: reg[%p]:", dev->name, priv->regs + i);
222*4882a593Smuzhiyun 		}
223*4882a593Smuzhiyun 		printk(" %08x", cpmac_read(priv->regs, i));
224*4882a593Smuzhiyun 	}
225*4882a593Smuzhiyun 	printk("\n");
226*4882a593Smuzhiyun }
227*4882a593Smuzhiyun 
cpmac_dump_desc(struct net_device * dev,struct cpmac_desc * desc)228*4882a593Smuzhiyun static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
229*4882a593Smuzhiyun {
230*4882a593Smuzhiyun 	int i;
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun 	printk("%s: desc[%p]:", dev->name, desc);
233*4882a593Smuzhiyun 	for (i = 0; i < sizeof(*desc) / 4; i++)
234*4882a593Smuzhiyun 		printk(" %08x", ((u32 *)desc)[i]);
235*4882a593Smuzhiyun 	printk("\n");
236*4882a593Smuzhiyun }
237*4882a593Smuzhiyun 
cpmac_dump_all_desc(struct net_device * dev)238*4882a593Smuzhiyun static void cpmac_dump_all_desc(struct net_device *dev)
239*4882a593Smuzhiyun {
240*4882a593Smuzhiyun 	struct cpmac_priv *priv = netdev_priv(dev);
241*4882a593Smuzhiyun 	struct cpmac_desc *dump = priv->rx_head;
242*4882a593Smuzhiyun 
243*4882a593Smuzhiyun 	do {
244*4882a593Smuzhiyun 		cpmac_dump_desc(dev, dump);
245*4882a593Smuzhiyun 		dump = dump->next;
246*4882a593Smuzhiyun 	} while (dump != priv->rx_head);
247*4882a593Smuzhiyun }
248*4882a593Smuzhiyun 
cpmac_dump_skb(struct net_device * dev,struct sk_buff * skb)249*4882a593Smuzhiyun static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
250*4882a593Smuzhiyun {
251*4882a593Smuzhiyun 	int i;
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun 	printk("%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
254*4882a593Smuzhiyun 	for (i = 0; i < skb->len; i++) {
255*4882a593Smuzhiyun 		if (i % 16 == 0) {
256*4882a593Smuzhiyun 			if (i)
257*4882a593Smuzhiyun 				printk("\n");
258*4882a593Smuzhiyun 			printk("%s: data[%p]:", dev->name, skb->data + i);
259*4882a593Smuzhiyun 		}
260*4882a593Smuzhiyun 		printk(" %02x", ((u8 *)skb->data)[i]);
261*4882a593Smuzhiyun 	}
262*4882a593Smuzhiyun 	printk("\n");
263*4882a593Smuzhiyun }
264*4882a593Smuzhiyun 
cpmac_mdio_read(struct mii_bus * bus,int phy_id,int reg)265*4882a593Smuzhiyun static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
266*4882a593Smuzhiyun {
267*4882a593Smuzhiyun 	u32 val;
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
270*4882a593Smuzhiyun 		cpu_relax();
271*4882a593Smuzhiyun 	cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
272*4882a593Smuzhiyun 		    MDIO_PHY(phy_id));
273*4882a593Smuzhiyun 	while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
274*4882a593Smuzhiyun 		cpu_relax();
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	return MDIO_DATA(val);
277*4882a593Smuzhiyun }
278*4882a593Smuzhiyun 
/* Write one PHY register over MDIO channel 0.  Fire-and-forget: the next
 * access waits on MDIO_BUSY before starting, so no completion wait here.
 */
static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
			    int reg, u16 val)
{
	/* wait for any previous transaction to finish */
	while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
		cpu_relax();
	cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0),
		    MDIO_BUSY | MDIO_WRITE | MDIO_REG(reg) |
		    MDIO_PHY(phy_id) | MDIO_DATA(val));

	return 0;
}
289*4882a593Smuzhiyun 
cpmac_mdio_reset(struct mii_bus * bus)290*4882a593Smuzhiyun static int cpmac_mdio_reset(struct mii_bus *bus)
291*4882a593Smuzhiyun {
292*4882a593Smuzhiyun 	struct clk *cpmac_clk;
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun 	cpmac_clk = clk_get(&bus->dev, "cpmac");
295*4882a593Smuzhiyun 	if (IS_ERR(cpmac_clk)) {
296*4882a593Smuzhiyun 		pr_err("unable to get cpmac clock\n");
297*4882a593Smuzhiyun 		return -1;
298*4882a593Smuzhiyun 	}
299*4882a593Smuzhiyun 	ar7_device_reset(AR7_RESET_BIT_MDIO);
300*4882a593Smuzhiyun 	cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
301*4882a593Smuzhiyun 		    MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun 	return 0;
304*4882a593Smuzhiyun }
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun static struct mii_bus *cpmac_mii;
307*4882a593Smuzhiyun 
cpmac_set_multicast_list(struct net_device * dev)308*4882a593Smuzhiyun static void cpmac_set_multicast_list(struct net_device *dev)
309*4882a593Smuzhiyun {
310*4882a593Smuzhiyun 	struct netdev_hw_addr *ha;
311*4882a593Smuzhiyun 	u8 tmp;
312*4882a593Smuzhiyun 	u32 mbp, bit, hash[2] = { 0, };
313*4882a593Smuzhiyun 	struct cpmac_priv *priv = netdev_priv(dev);
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun 	mbp = cpmac_read(priv->regs, CPMAC_MBP);
316*4882a593Smuzhiyun 	if (dev->flags & IFF_PROMISC) {
317*4882a593Smuzhiyun 		cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
318*4882a593Smuzhiyun 			    MBP_RXPROMISC);
319*4882a593Smuzhiyun 	} else {
320*4882a593Smuzhiyun 		cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
321*4882a593Smuzhiyun 		if (dev->flags & IFF_ALLMULTI) {
322*4882a593Smuzhiyun 			/* enable all multicast mode */
323*4882a593Smuzhiyun 			cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
324*4882a593Smuzhiyun 			cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
325*4882a593Smuzhiyun 		} else {
326*4882a593Smuzhiyun 			/* cpmac uses some strange mac address hashing
327*4882a593Smuzhiyun 			 * (not crc32)
328*4882a593Smuzhiyun 			 */
329*4882a593Smuzhiyun 			netdev_for_each_mc_addr(ha, dev) {
330*4882a593Smuzhiyun 				bit = 0;
331*4882a593Smuzhiyun 				tmp = ha->addr[0];
332*4882a593Smuzhiyun 				bit  ^= (tmp >> 2) ^ (tmp << 4);
333*4882a593Smuzhiyun 				tmp = ha->addr[1];
334*4882a593Smuzhiyun 				bit  ^= (tmp >> 4) ^ (tmp << 2);
335*4882a593Smuzhiyun 				tmp = ha->addr[2];
336*4882a593Smuzhiyun 				bit  ^= (tmp >> 6) ^ tmp;
337*4882a593Smuzhiyun 				tmp = ha->addr[3];
338*4882a593Smuzhiyun 				bit  ^= (tmp >> 2) ^ (tmp << 4);
339*4882a593Smuzhiyun 				tmp = ha->addr[4];
340*4882a593Smuzhiyun 				bit  ^= (tmp >> 4) ^ (tmp << 2);
341*4882a593Smuzhiyun 				tmp = ha->addr[5];
342*4882a593Smuzhiyun 				bit  ^= (tmp >> 6) ^ tmp;
343*4882a593Smuzhiyun 				bit &= 0x3f;
344*4882a593Smuzhiyun 				hash[bit / 32] |= 1 << (bit % 32);
345*4882a593Smuzhiyun 			}
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun 			cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
348*4882a593Smuzhiyun 			cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
349*4882a593Smuzhiyun 		}
350*4882a593Smuzhiyun 	}
351*4882a593Smuzhiyun }
352*4882a593Smuzhiyun 
/* Hand one received frame to the stack and re-arm its descriptor.
 *
 * A fresh replacement skb is allocated for the descriptor and the filled
 * skb is returned for delivery; if allocation fails the frame is dropped
 * and the old buffer is reused.  Returns NULL on a spurious interrupt
 * (zero datalen) or allocation failure.
 * Called under priv->rx_lock from cpmac_poll().
 */
static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
				    struct cpmac_desc *desc)
{
	struct sk_buff *skb, *result = NULL;

	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(priv->dev, desc);
	/* ack the descriptor to the hardware before touching its buffer */
	cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
	if (unlikely(!desc->datalen)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			netdev_warn(priv->dev, "rx: spurious interrupt\n");

		return NULL;
	}

	skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
	if (likely(skb)) {
		skb_put(desc->skb, desc->datalen);
		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
		skb_checksum_none_assert(desc->skb);
		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += desc->datalen;
		result = desc->skb;
		dma_unmap_single(&priv->dev->dev, desc->data_mapping,
				 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
		/* attach the replacement buffer to the descriptor */
		desc->skb = skb;
		/* NOTE(review): dma_map_single() result is not checked for
		 * mapping errors — confirm whether this platform can fail here
		 */
		desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		if (unlikely(netif_msg_pktdata(priv))) {
			netdev_dbg(priv->dev, "received packet:\n");
			cpmac_dump_skb(priv->dev, result);
		}
	} else {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			netdev_warn(priv->dev,
				    "low on skbs, dropping packet\n");

		priv->dev->stats.rx_dropped++;
	}

	/* hand the descriptor back to the hardware */
	desc->buflen = CPMAC_SKB_SIZE;
	desc->dataflags = CPMAC_OWN;

	return result;
}
400*4882a593Smuzhiyun 
/* NAPI poll: drain completed rx descriptors and restart the receiver if
 * it stalled (hardware sets CPMAC_EOQ where it stopped).
 *
 * Returns 0 when the ring was empty (NAPI completed, rx interrupts
 * re-enabled) and 1 when budget work was done.  On inconsistent ring
 * state the device is stopped and reset_work is scheduled.
 */
static int cpmac_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	struct cpmac_desc *desc, *restart;
	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
	int received = 0, processed = 0;

	spin_lock(&priv->rx_lock);
	if (unlikely(!priv->rx_head)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			netdev_warn(priv->dev, "rx: polling, but no queue\n");

		spin_unlock(&priv->rx_lock);
		napi_complete(napi);
		return 0;
	}

	/* walk descriptors we own (CPMAC_OWN clear) up to the NAPI budget */
	desc = priv->rx_head;
	restart = NULL;
	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
		processed++;

		if ((desc->dataflags & CPMAC_EOQ) != 0) {
			/* The last update to eoq->hw_next didn't happen
			 * soon enough, and the receiver stopped here.
			 * Remember this descriptor so we can restart
			 * the receiver after freeing some space.
			 */
			if (unlikely(restart)) {
				/* two EOQs in one pass: ring is corrupt */
				if (netif_msg_rx_err(priv))
					netdev_err(priv->dev, "poll found a"
						   " duplicate EOQ: %p and %p\n",
						   restart, desc);
				goto fatal_error;
			}

			restart = desc->next;
		}

		skb = cpmac_rx_one(priv, desc);
		if (likely(skb)) {
			netif_receive_skb(skb);
			received++;
		}
		desc = desc->next;
	}

	if (desc != priv->rx_head) {
		/* We freed some buffers, but not the whole ring,
		 * add what we did free to the rx list
		 */
		desc->prev->hw_next = (u32)0;
		priv->rx_head->prev->hw_next = priv->rx_head->mapping;
	}

	/* Optimization: If we did not actually process an EOQ (perhaps because
	 * of quota limits), check to see if the tail of the queue has EOQ set.
	 * We should immediately restart in that case so that the receiver can
	 * restart and run in parallel with more packet processing.
	 * This lets us handle slightly larger bursts before running
	 * out of ring space (assuming dev->weight < ring_size)
	 */

	if (!restart &&
	     (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
		    == CPMAC_EOQ &&
	     (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
		/* reset EOQ so the poll loop (above) doesn't try to
		 * restart this when it eventually gets to this descriptor.
		 */
		priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
		restart = priv->rx_head;
	}

	if (restart) {
		priv->dev->stats.rx_errors++;
		priv->dev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(priv) && net_ratelimit())
			netdev_warn(priv->dev, "rx dma ring overrun\n");

		/* restarting from a descriptor we still own would loop */
		if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
			if (netif_msg_drv(priv))
				netdev_err(priv->dev, "cpmac_poll is trying "
					"to restart rx from a descriptor "
					"that's not free: %p\n", restart);
			goto fatal_error;
		}

		/* kick the receiver again from the first free descriptor */
		cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
	}

	priv->rx_head = desc;
	spin_unlock(&priv->rx_lock);
	if (unlikely(netif_msg_rx_status(priv)))
		netdev_dbg(priv->dev, "poll processed %d packets\n", received);

	if (processed == 0) {
		/* we ran out of packets to read,
		 * revert to interrupt-driven mode
		 */
		napi_complete(napi);
		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
		return 0;
	}

	return 1;

fatal_error:
	/* Something went horribly wrong.
	 * Reset hardware to try to recover rather than wedging.
	 */
	if (netif_msg_drv(priv)) {
		netdev_err(priv->dev, "cpmac_poll is confused. "
			   "Resetting hardware\n");
		cpmac_dump_all_desc(priv->dev);
		netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
			   cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
			   cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
	}

	spin_unlock(&priv->rx_lock);
	napi_complete(napi);
	netif_tx_stop_all_queues(priv->dev);
	napi_disable(&priv->napi);

	/* undo the pending count only if the work was not actually queued */
	atomic_inc(&priv->reset_pending);
	cpmac_hw_stop(priv->dev);
	if (!schedule_work(&priv->reset_work))
		atomic_dec(&priv->reset_pending);

	return 0;

}
534*4882a593Smuzhiyun 
/* Queue one skb for transmission on its hardware tx channel (one
 * descriptor per subqueue).
 *
 * Returns NETDEV_TX_OK on success (or after skb_padto() has freed an
 * unpaddable skb), NETDEV_TX_BUSY while a reset is pending or the
 * queue's descriptor is still owned by the hardware.
 */
static netdev_tx_t cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int queue;
	unsigned int len;
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	if (unlikely(atomic_read(&priv->reset_pending)))
		return NETDEV_TX_BUSY;

	/* zero-pad runt frames; skb_padto frees the skb on failure */
	if (unlikely(skb_padto(skb, ETH_ZLEN)))
		return NETDEV_TX_OK;

	/* skb_padto() pads the data but does not grow skb->len */
	len = max_t(unsigned int, skb->len, ETH_ZLEN);
	queue = skb_get_queue_mapping(skb);
	netif_stop_subqueue(dev, queue);

	desc = &priv->desc_ring[queue];
	if (unlikely(desc->dataflags & CPMAC_OWN)) {
		if (netif_msg_tx_err(priv) && net_ratelimit())
			netdev_warn(dev, "tx dma ring full\n");

		return NETDEV_TX_BUSY;
	}

	/* NOTE(review): empty lock/unlock pair — appears to serve only as a
	 * synchronization point against cpmac_end_xmit()'s locked section;
	 * confirm intent before removing.
	 */
	spin_lock(&priv->lock);
	spin_unlock(&priv->lock);
	desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
	desc->skb = skb;
	desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
					    DMA_TO_DEVICE);
	desc->hw_data = (u32)desc->data_mapping;
	desc->datalen = len;
	desc->buflen = len;
	if (unlikely(netif_msg_tx_queued(priv)))
		netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len);
	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(dev, desc);
	if (unlikely(netif_msg_pktdata(priv)))
		cpmac_dump_skb(dev, skb);
	/* hand the descriptor to the DMA engine */
	cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);

	return NETDEV_TX_OK;
}
579*4882a593Smuzhiyun 
cpmac_end_xmit(struct net_device * dev,int queue)580*4882a593Smuzhiyun static void cpmac_end_xmit(struct net_device *dev, int queue)
581*4882a593Smuzhiyun {
582*4882a593Smuzhiyun 	struct cpmac_desc *desc;
583*4882a593Smuzhiyun 	struct cpmac_priv *priv = netdev_priv(dev);
584*4882a593Smuzhiyun 
585*4882a593Smuzhiyun 	desc = &priv->desc_ring[queue];
586*4882a593Smuzhiyun 	cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
587*4882a593Smuzhiyun 	if (likely(desc->skb)) {
588*4882a593Smuzhiyun 		spin_lock(&priv->lock);
589*4882a593Smuzhiyun 		dev->stats.tx_packets++;
590*4882a593Smuzhiyun 		dev->stats.tx_bytes += desc->skb->len;
591*4882a593Smuzhiyun 		spin_unlock(&priv->lock);
592*4882a593Smuzhiyun 		dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
593*4882a593Smuzhiyun 				 DMA_TO_DEVICE);
594*4882a593Smuzhiyun 
595*4882a593Smuzhiyun 		if (unlikely(netif_msg_tx_done(priv)))
596*4882a593Smuzhiyun 			netdev_dbg(dev, "sent 0x%p, len=%d\n",
597*4882a593Smuzhiyun 				   desc->skb, desc->skb->len);
598*4882a593Smuzhiyun 
599*4882a593Smuzhiyun 		dev_consume_skb_irq(desc->skb);
600*4882a593Smuzhiyun 		desc->skb = NULL;
601*4882a593Smuzhiyun 		if (__netif_subqueue_stopped(dev, queue))
602*4882a593Smuzhiyun 			netif_wake_subqueue(dev, queue);
603*4882a593Smuzhiyun 	} else {
604*4882a593Smuzhiyun 		if (netif_msg_tx_err(priv) && net_ratelimit())
605*4882a593Smuzhiyun 			netdev_warn(dev, "end_xmit: spurious interrupt\n");
606*4882a593Smuzhiyun 		if (__netif_subqueue_stopped(dev, queue))
607*4882a593Smuzhiyun 			netif_wake_subqueue(dev, queue);
608*4882a593Smuzhiyun 	}
609*4882a593Smuzhiyun }
610*4882a593Smuzhiyun 
/* Quiesce the MAC: soft-reset the device, disable rx/tx DMA, clear all
 * channel head pointers and pending interrupts, and drop MAC_MII from
 * the MAC control register.  The write order follows the hardware's
 * expected shutdown sequence — do not reorder.
 */
static void cpmac_hw_stop(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);

	ar7_device_reset(pdata->reset_bit);
	/* clear the rx/tx DMA enable bits (bit 0 of each control register) */
	cpmac_write(priv->regs, CPMAC_RX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
	cpmac_write(priv->regs, CPMAC_TX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
	/* zero the head pointer of all 8 tx and rx channels */
	for (i = 0; i < 8; i++) {
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
	}
	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
		    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
}
633*4882a593Smuzhiyun 
/* Bring the MAC up from reset: program the rx ring head, the address
 * filters and the MAC address, ack stale interrupts, enable the ones
 * we use and finally start the rx/tx DMA engines and MII interface.
 * Assumes priv->rx_head points at a fully linked rx descriptor ring.
 */
static void cpmac_hw_start(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);

	ar7_device_reset(pdata->reset_bit);
	/* Start all channel pointers from a clean slate */
	for (i = 0; i < 8; i++) {
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
	}
	/* Point rx channel 0 at the first descriptor of the rx ring */
	cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);

	/* Accept short, broadcast and multicast frames */
	cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
		    MBP_RXMCAST);
	cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
	/* Program the station address: the lowest byte is replicated into
	 * every channel's ADDR_LO register, the rest is shared.
	 */
	for (i = 0; i < 8; i++)
		cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
	cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
	cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
		    (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
		    (dev->dev_addr[3] << 24));
	cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
	/* Ack anything stale, then enable the interrupts we actually use */
	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
	cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
	cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);

	/* Go: enable rx/tx DMA and the MII interface in full duplex */
	cpmac_write(priv->regs, CPMAC_RX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
	cpmac_write(priv->regs, CPMAC_TX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
	cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
		    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII |
		    MAC_FDX);
}
674*4882a593Smuzhiyun 
cpmac_clear_rx(struct net_device * dev)675*4882a593Smuzhiyun static void cpmac_clear_rx(struct net_device *dev)
676*4882a593Smuzhiyun {
677*4882a593Smuzhiyun 	struct cpmac_priv *priv = netdev_priv(dev);
678*4882a593Smuzhiyun 	struct cpmac_desc *desc;
679*4882a593Smuzhiyun 	int i;
680*4882a593Smuzhiyun 
681*4882a593Smuzhiyun 	if (unlikely(!priv->rx_head))
682*4882a593Smuzhiyun 		return;
683*4882a593Smuzhiyun 	desc = priv->rx_head;
684*4882a593Smuzhiyun 	for (i = 0; i < priv->ring_size; i++) {
685*4882a593Smuzhiyun 		if ((desc->dataflags & CPMAC_OWN) == 0) {
686*4882a593Smuzhiyun 			if (netif_msg_rx_err(priv) && net_ratelimit())
687*4882a593Smuzhiyun 				netdev_warn(dev, "packet dropped\n");
688*4882a593Smuzhiyun 			if (unlikely(netif_msg_hw(priv)))
689*4882a593Smuzhiyun 				cpmac_dump_desc(dev, desc);
690*4882a593Smuzhiyun 			desc->dataflags = CPMAC_OWN;
691*4882a593Smuzhiyun 			dev->stats.rx_dropped++;
692*4882a593Smuzhiyun 		}
693*4882a593Smuzhiyun 		desc->hw_next = desc->next->mapping;
694*4882a593Smuzhiyun 		desc = desc->next;
695*4882a593Smuzhiyun 	}
696*4882a593Smuzhiyun 	priv->rx_head->prev->hw_next = 0;
697*4882a593Smuzhiyun }
698*4882a593Smuzhiyun 
cpmac_clear_tx(struct net_device * dev)699*4882a593Smuzhiyun static void cpmac_clear_tx(struct net_device *dev)
700*4882a593Smuzhiyun {
701*4882a593Smuzhiyun 	struct cpmac_priv *priv = netdev_priv(dev);
702*4882a593Smuzhiyun 	int i;
703*4882a593Smuzhiyun 
704*4882a593Smuzhiyun 	if (unlikely(!priv->desc_ring))
705*4882a593Smuzhiyun 		return;
706*4882a593Smuzhiyun 	for (i = 0; i < CPMAC_QUEUES; i++) {
707*4882a593Smuzhiyun 		priv->desc_ring[i].dataflags = 0;
708*4882a593Smuzhiyun 		if (priv->desc_ring[i].skb) {
709*4882a593Smuzhiyun 			dev_kfree_skb_any(priv->desc_ring[i].skb);
710*4882a593Smuzhiyun 			priv->desc_ring[i].skb = NULL;
711*4882a593Smuzhiyun 		}
712*4882a593Smuzhiyun 	}
713*4882a593Smuzhiyun }
714*4882a593Smuzhiyun 
/* Deferred recovery from a host error, scheduled by cpmac_check_status():
 * flush both descriptor rings, restart the hardware and wake the queues.
 */
static void cpmac_hw_error(struct work_struct *work)
{
	struct cpmac_priv *priv =
		container_of(work, struct cpmac_priv, reset_work);

	/* rx_lock keeps the NAPI poller off the ring while it is relinked */
	spin_lock(&priv->rx_lock);
	cpmac_clear_rx(priv->dev);
	spin_unlock(&priv->rx_lock);
	cpmac_clear_tx(priv->dev);
	cpmac_hw_start(priv->dev);
	barrier();
	/* Pairs with the atomic_inc() done when this work was scheduled
	 * in cpmac_check_status().
	 */
	atomic_dec(&priv->reset_pending);

	netif_tx_wake_all_queues(priv->dev);
	/* Make sure the host/status error interrupts are enabled again */
	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
}
731*4882a593Smuzhiyun 
cpmac_check_status(struct net_device * dev)732*4882a593Smuzhiyun static void cpmac_check_status(struct net_device *dev)
733*4882a593Smuzhiyun {
734*4882a593Smuzhiyun 	struct cpmac_priv *priv = netdev_priv(dev);
735*4882a593Smuzhiyun 
736*4882a593Smuzhiyun 	u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
737*4882a593Smuzhiyun 	int rx_channel = (macstatus >> 8) & 7;
738*4882a593Smuzhiyun 	int rx_code = (macstatus >> 12) & 15;
739*4882a593Smuzhiyun 	int tx_channel = (macstatus >> 16) & 7;
740*4882a593Smuzhiyun 	int tx_code = (macstatus >> 20) & 15;
741*4882a593Smuzhiyun 
742*4882a593Smuzhiyun 	if (rx_code || tx_code) {
743*4882a593Smuzhiyun 		if (netif_msg_drv(priv) && net_ratelimit()) {
744*4882a593Smuzhiyun 			/* Can't find any documentation on what these
745*4882a593Smuzhiyun 			 * error codes actually are. So just log them and hope..
746*4882a593Smuzhiyun 			 */
747*4882a593Smuzhiyun 			if (rx_code)
748*4882a593Smuzhiyun 				netdev_warn(dev, "host error %d on rx "
749*4882a593Smuzhiyun 					"channel %d (macstatus %08x), resetting\n",
750*4882a593Smuzhiyun 					rx_code, rx_channel, macstatus);
751*4882a593Smuzhiyun 			if (tx_code)
752*4882a593Smuzhiyun 				netdev_warn(dev, "host error %d on tx "
753*4882a593Smuzhiyun 					"channel %d (macstatus %08x), resetting\n",
754*4882a593Smuzhiyun 					tx_code, tx_channel, macstatus);
755*4882a593Smuzhiyun 		}
756*4882a593Smuzhiyun 
757*4882a593Smuzhiyun 		netif_tx_stop_all_queues(dev);
758*4882a593Smuzhiyun 		cpmac_hw_stop(dev);
759*4882a593Smuzhiyun 		if (schedule_work(&priv->reset_work))
760*4882a593Smuzhiyun 			atomic_inc(&priv->reset_pending);
761*4882a593Smuzhiyun 		if (unlikely(netif_msg_hw(priv)))
762*4882a593Smuzhiyun 			cpmac_dump_regs(dev);
763*4882a593Smuzhiyun 	}
764*4882a593Smuzhiyun 	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
765*4882a593Smuzhiyun }
766*4882a593Smuzhiyun 
/* Interrupt handler.  The MAC_INT_VECTOR register encodes the cause:
 * bits 0-2 the completed tx channel, bits 8-10 the rx queue, plus
 * MAC_INT_TX / MAC_INT_RX / MAC_INT_HOST / MAC_INT_STATUS flags.
 */
static irqreturn_t cpmac_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cpmac_priv *priv;
	int queue;
	u32 status;

	priv = netdev_priv(dev);

	status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);

	if (unlikely(netif_msg_intr(priv)))
		netdev_dbg(dev, "interrupt status: 0x%08x\n", status);

	/* Tx completion on the channel given by the low 3 bits */
	if (status & MAC_INT_TX)
		cpmac_end_xmit(dev, (status & 7));

	/* Rx: write RX_INT_CLEAR for the signalling queue and let the
	 * NAPI poller take over (it re-arms rx -- not visible here).
	 */
	if (status & MAC_INT_RX) {
		queue = (status >> 8) & 7;
		if (napi_schedule_prep(&priv->napi)) {
			cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
			__napi_schedule(&priv->napi);
		}
	}

	/* Signal end-of-interrupt to the MAC */
	cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);

	/* Host/status errors need a closer look and possibly a reset */
	if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
		cpmac_check_status(dev);

	return IRQ_HANDLED;
}
799*4882a593Smuzhiyun 
/* ndo_tx_timeout: a tx queue stalled.  Count the error, flush the tx
 * descriptors and wake all queues so transmission can resume.
 */
static void cpmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	spin_lock(&priv->lock);
	dev->stats.tx_errors++;
	spin_unlock(&priv->lock);
	if (netif_msg_tx_err(priv) && net_ratelimit())
		netdev_warn(dev, "transmit timeout\n");

	/* Flag a reset in progress while the tx ring is torn down;
	 * presumably reset_pending is checked by the xmit path, which
	 * is outside this chunk -- TODO confirm.
	 */
	atomic_inc(&priv->reset_pending);
	barrier();
	cpmac_clear_tx(dev);
	barrier();
	atomic_dec(&priv->reset_pending);

	netif_tx_wake_all_queues(priv->dev);
}
818*4882a593Smuzhiyun 
cpmac_get_ringparam(struct net_device * dev,struct ethtool_ringparam * ring)819*4882a593Smuzhiyun static void cpmac_get_ringparam(struct net_device *dev,
820*4882a593Smuzhiyun 						struct ethtool_ringparam *ring)
821*4882a593Smuzhiyun {
822*4882a593Smuzhiyun 	struct cpmac_priv *priv = netdev_priv(dev);
823*4882a593Smuzhiyun 
824*4882a593Smuzhiyun 	ring->rx_max_pending = 1024;
825*4882a593Smuzhiyun 	ring->rx_mini_max_pending = 1;
826*4882a593Smuzhiyun 	ring->rx_jumbo_max_pending = 1;
827*4882a593Smuzhiyun 	ring->tx_max_pending = 1;
828*4882a593Smuzhiyun 
829*4882a593Smuzhiyun 	ring->rx_pending = priv->ring_size;
830*4882a593Smuzhiyun 	ring->rx_mini_pending = 1;
831*4882a593Smuzhiyun 	ring->rx_jumbo_pending = 1;
832*4882a593Smuzhiyun 	ring->tx_pending = 1;
833*4882a593Smuzhiyun }
834*4882a593Smuzhiyun 
cpmac_set_ringparam(struct net_device * dev,struct ethtool_ringparam * ring)835*4882a593Smuzhiyun static int cpmac_set_ringparam(struct net_device *dev,
836*4882a593Smuzhiyun 						struct ethtool_ringparam *ring)
837*4882a593Smuzhiyun {
838*4882a593Smuzhiyun 	struct cpmac_priv *priv = netdev_priv(dev);
839*4882a593Smuzhiyun 
840*4882a593Smuzhiyun 	if (netif_running(dev))
841*4882a593Smuzhiyun 		return -EBUSY;
842*4882a593Smuzhiyun 	priv->ring_size = ring->rx_pending;
843*4882a593Smuzhiyun 
844*4882a593Smuzhiyun 	return 0;
845*4882a593Smuzhiyun }
846*4882a593Smuzhiyun 
cpmac_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)847*4882a593Smuzhiyun static void cpmac_get_drvinfo(struct net_device *dev,
848*4882a593Smuzhiyun 			      struct ethtool_drvinfo *info)
849*4882a593Smuzhiyun {
850*4882a593Smuzhiyun 	strlcpy(info->driver, "cpmac", sizeof(info->driver));
851*4882a593Smuzhiyun 	strlcpy(info->version, CPMAC_VERSION, sizeof(info->version));
852*4882a593Smuzhiyun 	snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac");
853*4882a593Smuzhiyun }
854*4882a593Smuzhiyun 
/* ethtool support; link state and ksettings are delegated to phylib */
static const struct ethtool_ops cpmac_ethtool_ops = {
	.get_drvinfo = cpmac_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = cpmac_get_ringparam,
	.set_ringparam = cpmac_set_ringparam,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
863*4882a593Smuzhiyun 
cpmac_adjust_link(struct net_device * dev)864*4882a593Smuzhiyun static void cpmac_adjust_link(struct net_device *dev)
865*4882a593Smuzhiyun {
866*4882a593Smuzhiyun 	struct cpmac_priv *priv = netdev_priv(dev);
867*4882a593Smuzhiyun 	int new_state = 0;
868*4882a593Smuzhiyun 
869*4882a593Smuzhiyun 	spin_lock(&priv->lock);
870*4882a593Smuzhiyun 	if (dev->phydev->link) {
871*4882a593Smuzhiyun 		netif_tx_start_all_queues(dev);
872*4882a593Smuzhiyun 		if (dev->phydev->duplex != priv->oldduplex) {
873*4882a593Smuzhiyun 			new_state = 1;
874*4882a593Smuzhiyun 			priv->oldduplex = dev->phydev->duplex;
875*4882a593Smuzhiyun 		}
876*4882a593Smuzhiyun 
877*4882a593Smuzhiyun 		if (dev->phydev->speed != priv->oldspeed) {
878*4882a593Smuzhiyun 			new_state = 1;
879*4882a593Smuzhiyun 			priv->oldspeed = dev->phydev->speed;
880*4882a593Smuzhiyun 		}
881*4882a593Smuzhiyun 
882*4882a593Smuzhiyun 		if (!priv->oldlink) {
883*4882a593Smuzhiyun 			new_state = 1;
884*4882a593Smuzhiyun 			priv->oldlink = 1;
885*4882a593Smuzhiyun 		}
886*4882a593Smuzhiyun 	} else if (priv->oldlink) {
887*4882a593Smuzhiyun 		new_state = 1;
888*4882a593Smuzhiyun 		priv->oldlink = 0;
889*4882a593Smuzhiyun 		priv->oldspeed = 0;
890*4882a593Smuzhiyun 		priv->oldduplex = -1;
891*4882a593Smuzhiyun 	}
892*4882a593Smuzhiyun 
893*4882a593Smuzhiyun 	if (new_state && netif_msg_link(priv) && net_ratelimit())
894*4882a593Smuzhiyun 		phy_print_status(dev->phydev);
895*4882a593Smuzhiyun 
896*4882a593Smuzhiyun 	spin_unlock(&priv->lock);
897*4882a593Smuzhiyun }
898*4882a593Smuzhiyun 
/* ndo_open: map the register window, allocate one DMA-coherent block
 * holding CPMAC_QUEUES tx descriptors followed by ring_size rx
 * descriptors, preallocate an rx skb per descriptor, grab the irq and
 * start the hardware.  Unwinds in reverse order via gotos on failure.
 */
static int cpmac_open(struct net_device *dev)
{
	int i, size, res;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct resource *mem;
	struct cpmac_desc *desc;
	struct sk_buff *skb;

	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
	if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
		if (netif_msg_drv(priv))
			netdev_err(dev, "failed to request registers\n");

		res = -ENXIO;
		goto fail_reserve;
	}

	priv->regs = ioremap(mem->start, resource_size(mem));
	if (!priv->regs) {
		if (netif_msg_drv(priv))
			netdev_err(dev, "failed to remap registers\n");

		res = -ENXIO;
		goto fail_remap;
	}

	/* tx descriptors first, then the rx ring, in one coherent block */
	size = priv->ring_size + CPMAC_QUEUES;
	priv->desc_ring = dma_alloc_coherent(&dev->dev,
					     sizeof(struct cpmac_desc) * size,
					     &priv->dma_ring,
					     GFP_KERNEL);
	if (!priv->desc_ring) {
		res = -ENOMEM;
		goto fail_alloc;
	}

	/* Record each descriptor's bus address for the hardware links */
	for (i = 0; i < size; i++)
		priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;

	/* Build the circular rx list, one DMA-mapped skb per descriptor */
	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
	for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
		skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
		if (unlikely(!skb)) {
			res = -ENOMEM;
			goto fail_desc;
		}
		desc->skb = skb;
		desc->data_mapping = dma_map_single(&dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		desc->buflen = CPMAC_SKB_SIZE;
		desc->dataflags = CPMAC_OWN;	/* hand it to the hardware */
		desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
		desc->next->prev = desc;
		desc->hw_next = (u32)desc->next->mapping;
	}

	/* Terminate the hardware chain at the last rx descriptor */
	priv->rx_head->prev->hw_next = (u32)0;

	res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
	if (res) {
		if (netif_msg_drv(priv))
			netdev_err(dev, "failed to obtain irq\n");

		goto fail_irq;
	}

	atomic_set(&priv->reset_pending, 0);
	INIT_WORK(&priv->reset_work, cpmac_hw_error);
	cpmac_hw_start(dev);

	napi_enable(&priv->napi);
	phy_start(dev->phydev);

	return 0;

fail_irq:
fail_desc:
	/* Unmap and free every rx skb allocated before the failure */
	for (i = 0; i < priv->ring_size; i++) {
		if (priv->rx_head[i].skb) {
			dma_unmap_single(&dev->dev,
					 priv->rx_head[i].data_mapping,
					 CPMAC_SKB_SIZE,
					 DMA_FROM_DEVICE);
			kfree_skb(priv->rx_head[i].skb);
		}
	}
	dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size,
			  priv->desc_ring, priv->dma_ring);

fail_alloc:
	iounmap(priv->regs);

fail_remap:
	release_mem_region(mem->start, resource_size(mem));

fail_reserve:
	return res;
}
999*4882a593Smuzhiyun 
/* ndo_stop: reverse of cpmac_open().  Stop traffic and the hardware,
 * release the irq and register window, then free the rx skbs and the
 * descriptor ring.
 */
static int cpmac_stop(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct resource *mem;

	netif_tx_stop_all_queues(dev);

	/* Make sure no reset work runs concurrently with the teardown */
	cancel_work_sync(&priv->reset_work);
	napi_disable(&priv->napi);
	phy_stop(dev->phydev);

	cpmac_hw_stop(dev);

	/* Detach the DMA channels from the soon-to-be-freed rings */
	for (i = 0; i < 8; i++)
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
	cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0);
	cpmac_write(priv->regs, CPMAC_MBP, 0);

	free_irq(dev->irq, dev);
	iounmap(priv->regs);
	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
	release_mem_region(mem->start, resource_size(mem));
	/* Unmap and free the rx skbs allocated in cpmac_open() */
	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
	for (i = 0; i < priv->ring_size; i++) {
		if (priv->rx_head[i].skb) {
			dma_unmap_single(&dev->dev,
					 priv->rx_head[i].data_mapping,
					 CPMAC_SKB_SIZE,
					 DMA_FROM_DEVICE);
			kfree_skb(priv->rx_head[i].skb);
		}
	}

	dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
			  (CPMAC_QUEUES + priv->ring_size),
			  priv->desc_ring, priv->dma_ring);

	return 0;
}
1040*4882a593Smuzhiyun 
/* net_device callbacks; ioctls are forwarded straight to phylib */
static const struct net_device_ops cpmac_netdev_ops = {
	.ndo_open		= cpmac_open,
	.ndo_stop		= cpmac_stop,
	.ndo_start_xmit		= cpmac_start_xmit,
	.ndo_tx_timeout		= cpmac_tx_timeout,
	.ndo_set_rx_mode	= cpmac_set_multicast_list,
	.ndo_do_ioctl		= phy_do_ioctl_running,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};
1051*4882a593Smuzhiyun 
/* Set by cpmac_init() when the MDIO alive scan finds more than one PHY,
 * which is taken to mean an external switch sits behind the MAC.
 */
static int external_switch;
1053*4882a593Smuzhiyun 
cpmac_probe(struct platform_device * pdev)1054*4882a593Smuzhiyun static int cpmac_probe(struct platform_device *pdev)
1055*4882a593Smuzhiyun {
1056*4882a593Smuzhiyun 	int rc, phy_id;
1057*4882a593Smuzhiyun 	char mdio_bus_id[MII_BUS_ID_SIZE];
1058*4882a593Smuzhiyun 	struct resource *mem;
1059*4882a593Smuzhiyun 	struct cpmac_priv *priv;
1060*4882a593Smuzhiyun 	struct net_device *dev;
1061*4882a593Smuzhiyun 	struct plat_cpmac_data *pdata;
1062*4882a593Smuzhiyun 	struct phy_device *phydev = NULL;
1063*4882a593Smuzhiyun 
1064*4882a593Smuzhiyun 	pdata = dev_get_platdata(&pdev->dev);
1065*4882a593Smuzhiyun 
1066*4882a593Smuzhiyun 	if (external_switch || dumb_switch) {
1067*4882a593Smuzhiyun 		strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
1068*4882a593Smuzhiyun 		phy_id = pdev->id;
1069*4882a593Smuzhiyun 	} else {
1070*4882a593Smuzhiyun 		for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1071*4882a593Smuzhiyun 			if (!(pdata->phy_mask & (1 << phy_id)))
1072*4882a593Smuzhiyun 				continue;
1073*4882a593Smuzhiyun 			if (!mdiobus_get_phy(cpmac_mii, phy_id))
1074*4882a593Smuzhiyun 				continue;
1075*4882a593Smuzhiyun 			strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
1076*4882a593Smuzhiyun 			break;
1077*4882a593Smuzhiyun 		}
1078*4882a593Smuzhiyun 	}
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun 	if (phy_id == PHY_MAX_ADDR) {
1081*4882a593Smuzhiyun 		dev_err(&pdev->dev, "no PHY present, falling back "
1082*4882a593Smuzhiyun 			"to switch on MDIO bus 0\n");
1083*4882a593Smuzhiyun 		strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
1084*4882a593Smuzhiyun 		phy_id = pdev->id;
1085*4882a593Smuzhiyun 	}
1086*4882a593Smuzhiyun 	mdio_bus_id[sizeof(mdio_bus_id) - 1] = '\0';
1087*4882a593Smuzhiyun 
1088*4882a593Smuzhiyun 	dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);
1089*4882a593Smuzhiyun 	if (!dev)
1090*4882a593Smuzhiyun 		return -ENOMEM;
1091*4882a593Smuzhiyun 
1092*4882a593Smuzhiyun 	SET_NETDEV_DEV(dev, &pdev->dev);
1093*4882a593Smuzhiyun 	platform_set_drvdata(pdev, dev);
1094*4882a593Smuzhiyun 	priv = netdev_priv(dev);
1095*4882a593Smuzhiyun 
1096*4882a593Smuzhiyun 	priv->pdev = pdev;
1097*4882a593Smuzhiyun 	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
1098*4882a593Smuzhiyun 	if (!mem) {
1099*4882a593Smuzhiyun 		rc = -ENODEV;
1100*4882a593Smuzhiyun 		goto fail;
1101*4882a593Smuzhiyun 	}
1102*4882a593Smuzhiyun 
1103*4882a593Smuzhiyun 	dev->irq = platform_get_irq_byname(pdev, "irq");
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun 	dev->netdev_ops = &cpmac_netdev_ops;
1106*4882a593Smuzhiyun 	dev->ethtool_ops = &cpmac_ethtool_ops;
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun 	netif_napi_add(dev, &priv->napi, cpmac_poll, 64);
1109*4882a593Smuzhiyun 
1110*4882a593Smuzhiyun 	spin_lock_init(&priv->lock);
1111*4882a593Smuzhiyun 	spin_lock_init(&priv->rx_lock);
1112*4882a593Smuzhiyun 	priv->dev = dev;
1113*4882a593Smuzhiyun 	priv->ring_size = 64;
1114*4882a593Smuzhiyun 	priv->msg_enable = netif_msg_init(debug_level, 0xff);
1115*4882a593Smuzhiyun 	memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));
1116*4882a593Smuzhiyun 
1117*4882a593Smuzhiyun 	snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
1118*4882a593Smuzhiyun 						mdio_bus_id, phy_id);
1119*4882a593Smuzhiyun 
1120*4882a593Smuzhiyun 	phydev = phy_connect(dev, priv->phy_name, cpmac_adjust_link,
1121*4882a593Smuzhiyun 			     PHY_INTERFACE_MODE_MII);
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun 	if (IS_ERR(phydev)) {
1124*4882a593Smuzhiyun 		if (netif_msg_drv(priv))
1125*4882a593Smuzhiyun 			dev_err(&pdev->dev, "Could not attach to PHY\n");
1126*4882a593Smuzhiyun 
1127*4882a593Smuzhiyun 		rc = PTR_ERR(phydev);
1128*4882a593Smuzhiyun 		goto fail;
1129*4882a593Smuzhiyun 	}
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 	rc = register_netdev(dev);
1132*4882a593Smuzhiyun 	if (rc) {
1133*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Could not register net device\n");
1134*4882a593Smuzhiyun 		goto fail;
1135*4882a593Smuzhiyun 	}
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun 	if (netif_msg_probe(priv)) {
1138*4882a593Smuzhiyun 		dev_info(&pdev->dev, "regs: %p, irq: %d, phy: %s, "
1139*4882a593Smuzhiyun 			 "mac: %pM\n", (void *)mem->start, dev->irq,
1140*4882a593Smuzhiyun 			 priv->phy_name, dev->dev_addr);
1141*4882a593Smuzhiyun 	}
1142*4882a593Smuzhiyun 
1143*4882a593Smuzhiyun 	return 0;
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun fail:
1146*4882a593Smuzhiyun 	free_netdev(dev);
1147*4882a593Smuzhiyun 	return rc;
1148*4882a593Smuzhiyun }
1149*4882a593Smuzhiyun 
/* Platform remove: unregister and free the net_device.  The register
 * mapping and rings are owned by open/stop, so nothing else to release
 * here.  NOTE(review): the PHY connected in cpmac_probe() is not
 * disconnected on this path -- verify whether that is intentional.
 */
static int cpmac_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_netdev(dev);
	free_netdev(dev);

	return 0;
}
1159*4882a593Smuzhiyun 
/* Platform driver glue; binds to devices named "cpmac" */
static struct platform_driver cpmac_driver = {
	.driver = {
		.name 	= "cpmac",
	},
	.probe 	= cpmac_probe,
	.remove = cpmac_remove,
};
1167*4882a593Smuzhiyun 
/* Module init: set up the shared MDIO bus, reset the CPMAC and EPHY
 * hardware blocks, scan for alive PHYs and register the MDIO bus and
 * the platform driver.  Unwinds via gotos on failure.
 */
int cpmac_init(void)
{
	u32 mask;
	int i, res;

	cpmac_mii = mdiobus_alloc();
	if (cpmac_mii == NULL)
		return -ENOMEM;

	cpmac_mii->name = "cpmac-mii";
	cpmac_mii->read = cpmac_mdio_read;
	cpmac_mii->write = cpmac_mdio_write;
	cpmac_mii->reset = cpmac_mdio_reset;

	/* bus->priv carries the mapped MDIO register window */
	cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);

	if (!cpmac_mii->priv) {
		pr_err("Can't ioremap mdio registers\n");
		res = -ENXIO;
		goto fail_alloc;
	}

	/* FIXME: unhardcode gpio&reset bits */
	ar7_gpio_disable(26);
	ar7_gpio_disable(27);
	ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
	ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
	ar7_device_reset(AR7_RESET_BIT_EPHY);

	cpmac_mii->reset(cpmac_mii);

	/* Wait up to ~3 seconds for at least one PHY to report alive */
	for (i = 0; i < 300; i++) {
		mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);
		if (mask)
			break;
		else
			msleep(10);
	}

	mask &= 0x7fffffff;
	/* More than one alive bit means several PHYs answered, i.e. an
	 * external switch sits behind the MAC; drop the mask in that case.
	 */
	if (mask & (mask - 1)) {
		external_switch = 1;
		mask = 0;
	}

	/* Probe only the addresses seen alive; bit 31 is masked out too */
	cpmac_mii->phy_mask = ~(mask | 0x80000000);
	snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "cpmac-1");

	res = mdiobus_register(cpmac_mii);
	if (res)
		goto fail_mii;

	res = platform_driver_register(&cpmac_driver);
	if (res)
		goto fail_cpmac;

	return 0;

fail_cpmac:
	mdiobus_unregister(cpmac_mii);

fail_mii:
	iounmap(cpmac_mii->priv);

fail_alloc:
	mdiobus_free(cpmac_mii);

	return res;
}
1237*4882a593Smuzhiyun 
/* Module exit: strict reverse of cpmac_init() */
void cpmac_exit(void)
{
	platform_driver_unregister(&cpmac_driver);
	mdiobus_unregister(cpmac_mii);
	iounmap(cpmac_mii->priv);
	mdiobus_free(cpmac_mii);
}
1245*4882a593Smuzhiyun 
/* Module entry points */
module_init(cpmac_init);
module_exit(cpmac_exit);
1248