/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
 * Copyright (C) 2006 Broadcom Corporation.
 * Copyright (C) 2007 Michael Buesch <m@bues.ch>
 * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Distribute under GPL.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
#include <linux/phy.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>


#include "b44.h"

#define DRV_MODULE_NAME		"b44"
#define DRV_DESCRIPTION		"Broadcom 44xx/47xx 10/100 PCI ethernet driver"

#define B44_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU			ETH_ZLEN
#define B44_MAX_MTU			ETH_DATA_LEN

#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_TX_RING_SIZE)

#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))
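
/*
 * Worked example of the ring accounting above (a sketch, assuming the
 * default tx_pending of B44_TX_RING_SIZE - 1 = 511): with tx_cons = 10
 * and tx_prod = 14, TX_BUFFS_AVAIL gives 10 + 511 - 14 = 507 free
 * descriptors.  Keeping tx_pending below the ring size reserves a gap
 * so that tx_prod == tx_cons always means "empty", never "full", and
 * NEXT_TX can wrap with a mask because the ring size is a power of two.
 */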

#define RX_PKT_OFFSET		(RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE	0x400
#define B44_PATTERN_SIZE	0x80
#define B44_PMASK_BASE		0x600
#define B44_PMASK_SIZE		0x10
#define B44_MAX_PATTERNS	16
#define B44_ETHIPV6UDP_HLEN	62
#define B44_ETHIPV4UDP_HLEN	42

MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");

static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");


#ifdef CONFIG_B44_PCI
static const struct pci_device_id b44_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
	{ 0 } /* terminate list with empty entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static struct pci_driver b44_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
};
#endif /* CONFIG_B44_PCI */

static const struct ssb_device_id b44_ssb_tbl[] = {
	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
	{},
};
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET		1
#define B44_FULL_RESET_SKIP_PHY	2
#define B44_PARTIAL_RESET	3
#define B44_CHIP_RESET_FULL	4
#define B44_CHIP_RESET_PARTIAL	5

static void b44_init_hw(struct b44 *, int);

static int dma_desc_sync_size;
static int instance;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)	# x,
B44_STAT_REG_DECLARE
#undef _B44
};
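
/*
 * The temporary _B44() definition uses the preprocessor's stringizing
 * operator: B44_STAT_REG_DECLARE (from b44.h) expands to one _B44(name)
 * per hardware statistics register, so each register name becomes a
 * string in b44_gstrings for ethtool -S reporting.
 */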

static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
				   dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
				dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return ssb_read32(bp->sdev, reg);
}

static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	ssb_write32(bp->sdev, reg, val);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		if (net_ratelimit())
			netdev_err(bp->dev, "BUG!  Timeout waiting for bit %08x of register %lx to %s\n",
				   bit, reg, clear ? "clear" : "set");

		return -ENODEV;
	}
	return 0;
}
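
/*
 * Note that the timeout argument of b44_wait_bit() is a poll count, not
 * a time: each iteration delays 10 usec, so the callers below that pass
 * 100 allow roughly 1 msec for the bit to change before giving up.
 */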

static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
			    (index << CAM_CTRL_INDEX_SHIFT)));

	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);

	val = br32(bp, B44_CAM_DATA_LO);

	data[2] = (val >> 24) & 0xFF;
	data[3] = (val >> 16) & 0xFF;
	data[4] = (val >> 8) & 0xFF;
	data[5] = (val >> 0) & 0xFF;

	val = br32(bp, B44_CAM_DATA_HI);

	data[0] = (val >> 8) & 0xFF;
	data[1] = (val >> 0) & 0xFF;
}

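/*
 * CAM word layout implied by the byte shuffling above and below, shown
 * here for a MAC address aa:bb:cc:dd:ee:ff (illustrative values only):
 *
 *	B44_CAM_DATA_HI = CAM_DATA_HI_VALID | 0x0000aabb
 *	B44_CAM_DATA_LO = 0xccddeeff
 *
 * i.e. the two most significant address bytes share the HI register
 * with the valid bit, and the remaining four fill the LO register.
 */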
static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) <<  8;
	val |= ((u32) data[5]) <<  0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}

static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}

static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
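
/*
 * Both accessors above compose a standard IEEE 802.3 clause 22 MDIO
 * frame in B44_MDIO_DATA: start bits, read/write opcode, 5-bit PHY
 * (PMD) and register addresses, turnaround bits and 16 data bits.  The
 * preceding write of EMAC_INT_MII to B44_EMAC_ISTAT clears any stale
 * MII-done status (the register appears to be write-one-to-clear) so
 * that b44_wait_bit() polls for the completion of the new transaction.
 */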

static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;

	return __b44_readphy(bp, bp->phy_addr, reg, val);
}

static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;

	return __b44_writephy(bp, bp->phy_addr, reg, val);
}

/* miilib interface */
static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = netdev_priv(dev);
	int rc = __b44_readphy(bp, phy_id, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location,
			       int val)
{
	struct b44 *bp = netdev_priv(dev);
	__b44_writephy(bp, phy_id, location, val);
}

static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = bus->priv;
	int rc = __b44_readphy(bp, phy_id, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location,
				 u16 val)
{
	struct b44 *bp = bus->priv;
	return __b44_writephy(bp, phy_id, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;
	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			netdev_err(bp->dev, "PHY Reset would not complete\n");
			err = -ENODEV;
		}
	}

	return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = 0;

	/* The driver supports only rx pause by default because
	   the b44 mac tx pause mechanism generates excessive
	   pause frames.
	   Use ethtool to turn on b44 tx pause if necessary.
	 */
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM)) {
		if ((remote & LPA_PAUSE_ASYM) &&
		    !(remote & LPA_PAUSE_CAP))
			pause_enab |= B44_FLAG_RX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}
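
/*
 * Concrete case of the resolution above: if we advertise Pause and
 * AsymPause while the link partner advertises AsymPause but not Pause,
 * only B44_FLAG_RX_PAUSE is set; every other combination leaves both
 * directions off, matching the tx-pause caveat in the comment above
 * (tx pause can still be forced on through ethtool).
 */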

#ifdef CONFIG_BCM47XX
#include <linux/bcm47xx_nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
	char buf[20];
	u32 val;
	int err;

	/*
	 * workaround for bad hardware design in Linksys WAP54G v1.0
	 * see https://dev.openwrt.org/ticket/146
	 * check and reset bit "isolate"
	 */
	if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
		return;
	if (simple_strtoul(buf, NULL, 0) == 2) {
		err = __b44_readphy(bp, 0, MII_BMCR, &val);
		if (err)
			goto error;
		if (!(val & BMCR_ISOLATE))
			return;
		val &= ~BMCR_ISOLATE;
		err = __b44_writephy(bp, 0, MII_BMCR, val);
		if (err)
			goto error;
	}
	return;
error:
	pr_warn("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif

static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	b44_wap54g10_workaround(bp);

	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;
	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}

static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u64 *val;

	val = &bp->hw_stats.tx_good_octets;
	u64_stats_update_begin(&bp->hw_stats.syncp);

	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	u64_stats_update_end(&bp->hw_stats.syncp);
}
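
/*
 * The pointer walk above silently assumes that the u64 counters in
 * bp->hw_stats, starting at tx_good_octets, are declared in exactly
 * the same order as the hardware registers in the ranges
 * B44_TX_GOOD_O..B44_TX_PAUSE and B44_RX_GOOD_O..B44_RX_NPAUSE.
 */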

static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		netdev_info(bp->dev, "Link is down\n");
	} else {
		netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
			    (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

		netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
			    (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
			    (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
	}
}

static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		bp->flags |= B44_FLAG_100_BASE_T;
		if (!netif_carrier_ok(bp->dev)) {
			u32 val = br32(bp, B44_TX_CTRL);
			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		}
		return;
	}

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			netdev_warn(bp->dev, "Remote fault detected in PHY\n");
		if (bmsr & BMSR_JCD)
			netdev_warn(bp->dev, "Jabber detected in PHY\n");
	}
}

static void b44_timer(struct timer_list *t)
{
	struct b44 *bp = from_timer(bp, t, timer);

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;
	unsigned bytes_compl = 0, pkts_compl = 0;

	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		dma_unmap_single(bp->sdev->dma_dev,
				 rp->mapping,
				 skb->len,
				 DMA_TO_DEVICE);
		rp->skb = NULL;

		bytes_compl += skb->len;
		pkts_compl++;

		dev_consume_skb_irq(skb);
	}

	netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a 'struct rx_header' 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
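/*
 * Resulting buffer layout (a sketch, offsets taken from the code below,
 * with RX_PKT_OFFSET = RX_HEADER_LEN + 2 = 30):
 *
 *	offset  0: struct rx_header filled in by the chip (+ 2 pad bytes)
 *	offset 30: the received frame, up to 1536 bytes
 *
 * b44_rx() later does skb_put(len + RX_PKT_OFFSET) followed by
 * skb_pull(RX_PKT_OFFSET) to strip the header before handing the
 * frame to the stack.
 */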
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
				 RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
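	/* DMA_BIT_MASK(30) is 0x3fffffff, i.e. the 1GB boundary; the
	 * checks below reject any mapping whose buffer would cross it
	 * and retry once with a GFP_DMA allocation from the low zone.
	 */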
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
		mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
		/* Sigh... */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping,
					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
					 RX_PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping,
						 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
		bp->force_copybreak = 1;
	}

	rh = (struct rx_header *) skb->data;

	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	map->mapping = mapping;

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dp),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}

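/*
 * Refill the ring without a fresh allocation: when a packet is dropped
 * or handled through the copy path, the untouched skb and its DMA
 * mapping simply move from the consumed descriptor slot (src_idx) to
 * the next producer slot, and the descriptor is rebuilt there.
 */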
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	dest_map->mapping = src_map->mapping;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
					  src_idx * sizeof(*src_desc),
					  DMA_BIDIRECTIONAL);

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dest_desc),
					     DMA_BIDIRECTIONAL);

	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
				   RX_PKT_BUF_SZ,
				   DMA_FROM_DEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = rp->mapping;
		struct rx_header *rh;
		u16 len;

		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
					RX_PKT_BUF_SZ,
					DMA_FROM_DEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->dev->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			dma_unmap_single(bp->sdev->dma_dev, map,
					 skb_size, DMA_FROM_DEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + RX_PKT_OFFSET);
			skb_pull(skb, RX_PKT_OFFSET);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = napi_alloc_skb(&bp->napi, len);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}

static int b44_poll(struct napi_struct *napi, int budget)
{
	struct b44 *bp = container_of(napi, struct b44, napi);
	int work_done;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	if (bp->istat & ISTAT_RFO) {	/* fast recovery, in ~20msec */
		bp->istat &= ~ISTAT_RFO;
		b44_disable_ints(bp);
		ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	work_done = 0;
	if (bp->istat & ISTAT_RX)
		work_done += b44_rx(bp, budget);

	if (bp->istat & ISTAT_ERRORS) {
		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		work_done = 0;
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		b44_enable_ints(bp);
	}

	return work_done;
}
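
/*
 * Standard NAPI contract in b44_poll() above: only when less than the
 * full budget was consumed may the driver call napi_complete_done() and
 * re-enable interrupts; if the budget was exhausted, the core will poll
 * again without waiting for another hardware interrupt.
 */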

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register only gates which status bits may
	 * raise an interrupt to the CPU; hw/firmware still sets the bits
	 * in ISTAT regardless, so mask them off by hand here.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			netdev_info(dev, "late interrupt\n");
			goto irq_ack;
		}

		if (napi_schedule_prep(&bp->napi)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__napi_schedule(&bp->napi);
		}

irq_ack:
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct b44 *bp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}
967*4882a593Smuzhiyun 
b44_start_xmit(struct sk_buff * skb,struct net_device * dev)968*4882a593Smuzhiyun static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
969*4882a593Smuzhiyun {
970*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
971*4882a593Smuzhiyun 	int rc = NETDEV_TX_OK;
972*4882a593Smuzhiyun 	dma_addr_t mapping;
973*4882a593Smuzhiyun 	u32 len, entry, ctrl;
974*4882a593Smuzhiyun 	unsigned long flags;
975*4882a593Smuzhiyun 
976*4882a593Smuzhiyun 	len = skb->len;
977*4882a593Smuzhiyun 	spin_lock_irqsave(&bp->lock, flags);
978*4882a593Smuzhiyun 
979*4882a593Smuzhiyun 	/* This is a hard error, log it. */
980*4882a593Smuzhiyun 	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
981*4882a593Smuzhiyun 		netif_stop_queue(dev);
982*4882a593Smuzhiyun 		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
983*4882a593Smuzhiyun 		goto err_out;
984*4882a593Smuzhiyun 	}
985*4882a593Smuzhiyun 
986*4882a593Smuzhiyun 	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
987*4882a593Smuzhiyun 	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
988*4882a593Smuzhiyun 		struct sk_buff *bounce_skb;
989*4882a593Smuzhiyun 
990*4882a593Smuzhiyun 		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
991*4882a593Smuzhiyun 		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
992*4882a593Smuzhiyun 			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
993*4882a593Smuzhiyun 					     DMA_TO_DEVICE);
994*4882a593Smuzhiyun 
995*4882a593Smuzhiyun 		bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
996*4882a593Smuzhiyun 		if (!bounce_skb)
997*4882a593Smuzhiyun 			goto err_out;
998*4882a593Smuzhiyun 
999*4882a593Smuzhiyun 		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
1000*4882a593Smuzhiyun 					 len, DMA_TO_DEVICE);
1001*4882a593Smuzhiyun 		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
1002*4882a593Smuzhiyun 			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
1003*4882a593Smuzhiyun 				dma_unmap_single(bp->sdev->dma_dev, mapping,
1004*4882a593Smuzhiyun 						     len, DMA_TO_DEVICE);
1005*4882a593Smuzhiyun 			dev_kfree_skb_any(bounce_skb);
1006*4882a593Smuzhiyun 			goto err_out;
1007*4882a593Smuzhiyun 		}
1008*4882a593Smuzhiyun 
1009*4882a593Smuzhiyun 		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
1010*4882a593Smuzhiyun 		dev_consume_skb_any(skb);
1011*4882a593Smuzhiyun 		skb = bounce_skb;
1012*4882a593Smuzhiyun 	}
1013*4882a593Smuzhiyun 
1014*4882a593Smuzhiyun 	entry = bp->tx_prod;
1015*4882a593Smuzhiyun 	bp->tx_buffers[entry].skb = skb;
1016*4882a593Smuzhiyun 	bp->tx_buffers[entry].mapping = mapping;
1017*4882a593Smuzhiyun 
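	/* One descriptor per frame until NETIF_F_SG is supported: SOF and
	 * EOF mark the same slot, IOC requests a completion interrupt,
	 * and EOT on the ring's last slot tells the chip to wrap.
	 */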
	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	netdev_sent_queue(dev, skb->len);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

out_unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is brought up.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking stack, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking stack, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
					   DMA_TABLE_BYTES, DMA_TO_DEVICE);

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
					  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
					  bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun /*
1182*4882a593Smuzhiyun  * Must not be invoked with interrupt sources disabled and
1183*4882a593Smuzhiyun  * the hardware shut down.  Can sleep.
1184*4882a593Smuzhiyun  */
1185*4882a593Smuzhiyun static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1186*4882a593Smuzhiyun {
1187*4882a593Smuzhiyun 	int size;
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun 	size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
1190*4882a593Smuzhiyun 	bp->rx_buffers = kzalloc(size, gfp);
1191*4882a593Smuzhiyun 	if (!bp->rx_buffers)
1192*4882a593Smuzhiyun 		goto out_err;
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1195*4882a593Smuzhiyun 	bp->tx_buffers = kzalloc(size, gfp);
1196*4882a593Smuzhiyun 	if (!bp->tx_buffers)
1197*4882a593Smuzhiyun 		goto out_err;
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun 	size = DMA_TABLE_BYTES;
1200*4882a593Smuzhiyun 	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1201*4882a593Smuzhiyun 					 &bp->rx_ring_dma, gfp);
1202*4882a593Smuzhiyun 	if (!bp->rx_ring) {
1203*4882a593Smuzhiyun 		/* Allocation may have failed due to dma_alloc_coherent
1204*4882a593Smuzhiyun 		   insisting on use of GFP_DMA, which is more restrictive
1205*4882a593Smuzhiyun 		   than necessary...  */
1206*4882a593Smuzhiyun 		struct dma_desc *rx_ring;
1207*4882a593Smuzhiyun 		dma_addr_t rx_ring_dma;
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 		rx_ring = kzalloc(size, gfp);
1210*4882a593Smuzhiyun 		if (!rx_ring)
1211*4882a593Smuzhiyun 			goto out_err;
1212*4882a593Smuzhiyun 
1213*4882a593Smuzhiyun 		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
1214*4882a593Smuzhiyun 					     DMA_TABLE_BYTES,
1215*4882a593Smuzhiyun 					     DMA_BIDIRECTIONAL);
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
1218*4882a593Smuzhiyun 			rx_ring_dma + size > DMA_BIT_MASK(30)) {
1219*4882a593Smuzhiyun 			kfree(rx_ring);
1220*4882a593Smuzhiyun 			goto out_err;
1221*4882a593Smuzhiyun 		}
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 		bp->rx_ring = rx_ring;
1224*4882a593Smuzhiyun 		bp->rx_ring_dma = rx_ring_dma;
1225*4882a593Smuzhiyun 		bp->flags |= B44_FLAG_RX_RING_HACK;
1226*4882a593Smuzhiyun 	}
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun 	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1229*4882a593Smuzhiyun 					 &bp->tx_ring_dma, gfp);
1230*4882a593Smuzhiyun 	if (!bp->tx_ring) {
1231*4882a593Smuzhiyun 		/* Allocation may have failed due to dma_alloc_coherent
1232*4882a593Smuzhiyun 		   insisting on use of GFP_DMA, which is more restrictive
1233*4882a593Smuzhiyun 		   than necessary...  */
1234*4882a593Smuzhiyun 		struct dma_desc *tx_ring;
1235*4882a593Smuzhiyun 		dma_addr_t tx_ring_dma;
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun 		tx_ring = kzalloc(size, gfp);
1238*4882a593Smuzhiyun 		if (!tx_ring)
1239*4882a593Smuzhiyun 			goto out_err;
1240*4882a593Smuzhiyun 
1241*4882a593Smuzhiyun 		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
1242*4882a593Smuzhiyun 					     DMA_TABLE_BYTES,
1243*4882a593Smuzhiyun 					     DMA_TO_DEVICE);
1244*4882a593Smuzhiyun 
1245*4882a593Smuzhiyun 		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
1246*4882a593Smuzhiyun 			tx_ring_dma + size > DMA_BIT_MASK(30)) {
1247*4882a593Smuzhiyun 			kfree(tx_ring);
1248*4882a593Smuzhiyun 			goto out_err;
1249*4882a593Smuzhiyun 		}
1250*4882a593Smuzhiyun 
1251*4882a593Smuzhiyun 		bp->tx_ring = tx_ring;
1252*4882a593Smuzhiyun 		bp->tx_ring_dma = tx_ring_dma;
1253*4882a593Smuzhiyun 		bp->flags |= B44_FLAG_TX_RING_HACK;
1254*4882a593Smuzhiyun 	}
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 	return 0;
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun out_err:
1259*4882a593Smuzhiyun 	b44_free_consistent(bp);
1260*4882a593Smuzhiyun 	return -ENOMEM;
1261*4882a593Smuzhiyun }
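
/*
 * Note on the "ring hack" above: the 4400 core can only DMA within a
 * 30-bit (1 GiB) address window, so a descriptor table that
 * dma_alloc_coherent() happens to place above DMA_BIT_MASK(30) is
 * unusable.  The fallback allocates ordinary kernel memory, maps it
 * streaming, and then relies on explicit dma_sync_single_for_device()
 * calls (see b44_init_rings()) before the hardware looks at the table
 * again.  A minimal sketch of the same pattern, with `dev` and `size`
 * as placeholders rather than driver state:
 */
#if 0
	void *table = kzalloc(size, GFP_KERNEL);
	dma_addr_t dma;

	dma = dma_map_single(dev, table, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma) || dma + size > DMA_BIT_MASK(30)) {
		kfree(table);		/* still out of reach: give up */
	} else {
		/* CPU fills the table, then hands it to the device: */
		dma_sync_single_for_device(dev, dma, size, DMA_BIDIRECTIONAL);
	}
#endif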
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun /* bp->lock is held. */
1264*4882a593Smuzhiyun static void b44_clear_stats(struct b44 *bp)
1265*4882a593Smuzhiyun {
1266*4882a593Smuzhiyun 	unsigned long reg;
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun 	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1269*4882a593Smuzhiyun 	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1270*4882a593Smuzhiyun 		br32(bp, reg);
1271*4882a593Smuzhiyun 	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1272*4882a593Smuzhiyun 		br32(bp, reg);
1273*4882a593Smuzhiyun }
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun /* bp->lock is held. */
1276*4882a593Smuzhiyun static void b44_chip_reset(struct b44 *bp, int reset_kind)
1277*4882a593Smuzhiyun {
1278*4882a593Smuzhiyun 	struct ssb_device *sdev = bp->sdev;
1279*4882a593Smuzhiyun 	bool was_enabled;
1280*4882a593Smuzhiyun 
1281*4882a593Smuzhiyun 	was_enabled = ssb_device_is_enabled(bp->sdev);
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun 	ssb_device_enable(bp->sdev, 0);
1284*4882a593Smuzhiyun 	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 	if (was_enabled) {
1287*4882a593Smuzhiyun 		bw32(bp, B44_RCV_LAZY, 0);
1288*4882a593Smuzhiyun 		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1289*4882a593Smuzhiyun 		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1290*4882a593Smuzhiyun 		bw32(bp, B44_DMATX_CTRL, 0);
1291*4882a593Smuzhiyun 		bp->tx_prod = bp->tx_cons = 0;
1292*4882a593Smuzhiyun 		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1293*4882a593Smuzhiyun 			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1294*4882a593Smuzhiyun 				     100, 0);
1295*4882a593Smuzhiyun 		}
1296*4882a593Smuzhiyun 		bw32(bp, B44_DMARX_CTRL, 0);
1297*4882a593Smuzhiyun 		bp->rx_prod = bp->rx_cons = 0;
1298*4882a593Smuzhiyun 	}
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun 	b44_clear_stats(bp);
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 	/*
1303*4882a593Smuzhiyun 	 * Don't enable the PHY if we are only doing a partial reset;
1304*4882a593Smuzhiyun 	 * we are probably about to power down.
1305*4882a593Smuzhiyun 	 */
1306*4882a593Smuzhiyun 	if (reset_kind == B44_CHIP_RESET_PARTIAL)
1307*4882a593Smuzhiyun 		return;
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun 	switch (sdev->bus->bustype) {
1310*4882a593Smuzhiyun 	case SSB_BUSTYPE_SSB:
1311*4882a593Smuzhiyun 		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1312*4882a593Smuzhiyun 		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
1313*4882a593Smuzhiyun 					B44_MDC_RATIO)
1314*4882a593Smuzhiyun 		     & MDIO_CTRL_MAXF_MASK)));
1315*4882a593Smuzhiyun 		break;
1316*4882a593Smuzhiyun 	case SSB_BUSTYPE_PCI:
1317*4882a593Smuzhiyun 		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1318*4882a593Smuzhiyun 		     (0x0d & MDIO_CTRL_MAXF_MASK)));
1319*4882a593Smuzhiyun 		break;
1320*4882a593Smuzhiyun 	case SSB_BUSTYPE_PCMCIA:
1321*4882a593Smuzhiyun 	case SSB_BUSTYPE_SDIO:
1322*4882a593Smuzhiyun 		WARN_ON(1); /* A device with this bus does not exist. */
1323*4882a593Smuzhiyun 		break;
1324*4882a593Smuzhiyun 	}
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun 	br32(bp, B44_MDIO_CTRL);
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun 	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1329*4882a593Smuzhiyun 		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1330*4882a593Smuzhiyun 		br32(bp, B44_ENET_CTRL);
1331*4882a593Smuzhiyun 		bp->flags |= B44_FLAG_EXTERNAL_PHY;
1332*4882a593Smuzhiyun 	} else {
1333*4882a593Smuzhiyun 		u32 val = br32(bp, B44_DEVCTRL);
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun 		if (val & DEVCTRL_EPR) {
1336*4882a593Smuzhiyun 			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1337*4882a593Smuzhiyun 			br32(bp, B44_DEVCTRL);
1338*4882a593Smuzhiyun 			udelay(100);
1339*4882a593Smuzhiyun 		}
1340*4882a593Smuzhiyun 		bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
1341*4882a593Smuzhiyun 	}
1342*4882a593Smuzhiyun }
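
/*
 * MDIO clock note: on a native SSB backplane the MDC divider is derived
 * from the reported bus clock via DIV_ROUND_CLOSEST(clock, B44_MDC_RATIO);
 * PCI hosts use a fixed divider of 0x0d instead.  Either way the value
 * is clamped to MDIO_CTRL_MAXF_MASK before being written.
 */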
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun /* bp->lock is held. */
1345*4882a593Smuzhiyun static void b44_halt(struct b44 *bp)
1346*4882a593Smuzhiyun {
1347*4882a593Smuzhiyun 	b44_disable_ints(bp);
1348*4882a593Smuzhiyun 	/* reset PHY */
1349*4882a593Smuzhiyun 	b44_phy_reset(bp);
1350*4882a593Smuzhiyun 	/* power down PHY */
1351*4882a593Smuzhiyun 	netdev_info(bp->dev, "powering down PHY\n");
1352*4882a593Smuzhiyun 	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
1353*4882a593Smuzhiyun 	/* now reset the chip, but without enabling the MAC&PHY
1354*4882a593Smuzhiyun 	 * part of it. This has to be done _after_ we shut down the PHY */
1355*4882a593Smuzhiyun 	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1356*4882a593Smuzhiyun 		b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1357*4882a593Smuzhiyun 	else
1358*4882a593Smuzhiyun 		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1359*4882a593Smuzhiyun }
1360*4882a593Smuzhiyun 
1361*4882a593Smuzhiyun /* bp->lock is held. */
1362*4882a593Smuzhiyun static void __b44_set_mac_addr(struct b44 *bp)
1363*4882a593Smuzhiyun {
1364*4882a593Smuzhiyun 	bw32(bp, B44_CAM_CTRL, 0);
1365*4882a593Smuzhiyun 	if (!(bp->dev->flags & IFF_PROMISC)) {
1366*4882a593Smuzhiyun 		u32 val;
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 		__b44_cam_write(bp, bp->dev->dev_addr, 0);
1369*4882a593Smuzhiyun 		val = br32(bp, B44_CAM_CTRL);
1370*4882a593Smuzhiyun 		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1371*4882a593Smuzhiyun 	}
1372*4882a593Smuzhiyun }
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun static int b44_set_mac_addr(struct net_device *dev, void *p)
1375*4882a593Smuzhiyun {
1376*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
1377*4882a593Smuzhiyun 	struct sockaddr *addr = p;
1378*4882a593Smuzhiyun 	u32 val;
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun 	if (netif_running(dev))
1381*4882a593Smuzhiyun 		return -EBUSY;
1382*4882a593Smuzhiyun 
1383*4882a593Smuzhiyun 	if (!is_valid_ether_addr(addr->sa_data))
1384*4882a593Smuzhiyun 		return -EINVAL;
1385*4882a593Smuzhiyun 
1386*4882a593Smuzhiyun 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 	spin_lock_irq(&bp->lock);
1389*4882a593Smuzhiyun 
1390*4882a593Smuzhiyun 	val = br32(bp, B44_RXCONFIG);
1391*4882a593Smuzhiyun 	if (!(val & RXCONFIG_CAM_ABSENT))
1392*4882a593Smuzhiyun 		__b44_set_mac_addr(bp);
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun 	spin_unlock_irq(&bp->lock);
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun 	return 0;
1397*4882a593Smuzhiyun }
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun /* Called at device open time to get the chip ready for
1400*4882a593Smuzhiyun  * packet processing.  Invoked with bp->lock held.
1401*4882a593Smuzhiyun  */
1402*4882a593Smuzhiyun static void __b44_set_rx_mode(struct net_device *);
1403*4882a593Smuzhiyun static void b44_init_hw(struct b44 *bp, int reset_kind)
1404*4882a593Smuzhiyun {
1405*4882a593Smuzhiyun 	u32 val;
1406*4882a593Smuzhiyun 
1407*4882a593Smuzhiyun 	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1408*4882a593Smuzhiyun 	if (reset_kind == B44_FULL_RESET) {
1409*4882a593Smuzhiyun 		b44_phy_reset(bp);
1410*4882a593Smuzhiyun 		b44_setup_phy(bp);
1411*4882a593Smuzhiyun 	}
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun 	/* Enable CRC32, set proper LED modes and power on PHY */
1414*4882a593Smuzhiyun 	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1415*4882a593Smuzhiyun 	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun 	/* This sets the MAC address too.  */
1418*4882a593Smuzhiyun 	__b44_set_rx_mode(bp->dev);
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 	/* MTU + eth header + possible VLAN tag + struct rx_header */
1421*4882a593Smuzhiyun 	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1422*4882a593Smuzhiyun 	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1423*4882a593Smuzhiyun 
1424*4882a593Smuzhiyun 	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1425*4882a593Smuzhiyun 	if (reset_kind == B44_PARTIAL_RESET) {
1426*4882a593Smuzhiyun 		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1427*4882a593Smuzhiyun 				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1428*4882a593Smuzhiyun 	} else {
1429*4882a593Smuzhiyun 		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1430*4882a593Smuzhiyun 		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1431*4882a593Smuzhiyun 		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1432*4882a593Smuzhiyun 				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1433*4882a593Smuzhiyun 		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun 		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1436*4882a593Smuzhiyun 		bp->rx_prod = bp->rx_pending;
1437*4882a593Smuzhiyun 
1438*4882a593Smuzhiyun 		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1439*4882a593Smuzhiyun 	}
1440*4882a593Smuzhiyun 
1441*4882a593Smuzhiyun 	val = br32(bp, B44_ENET_CTRL);
1442*4882a593Smuzhiyun 	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun 	netdev_reset_queue(bp->dev);
1445*4882a593Smuzhiyun }
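
/*
 * b44_init_hw() has two flavours: B44_PARTIAL_RESET only re-arms the RX
 * DMA engine, which is all the wake-on-LAN pattern matcher needs, while
 * B44_FULL_RESET also brings up the PHY, programs both DMA ring base
 * addresses, posts bp->rx_pending RX descriptors and re-arms the
 * clear-on-read MIB counters.
 */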
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun static int b44_open(struct net_device *dev)
1448*4882a593Smuzhiyun {
1449*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
1450*4882a593Smuzhiyun 	int err;
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun 	err = b44_alloc_consistent(bp, GFP_KERNEL);
1453*4882a593Smuzhiyun 	if (err)
1454*4882a593Smuzhiyun 		goto out;
1455*4882a593Smuzhiyun 
1456*4882a593Smuzhiyun 	napi_enable(&bp->napi);
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 	b44_init_rings(bp);
1459*4882a593Smuzhiyun 	b44_init_hw(bp, B44_FULL_RESET);
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun 	b44_check_phy(bp);
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun 	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1464*4882a593Smuzhiyun 	if (unlikely(err < 0)) {
1465*4882a593Smuzhiyun 		napi_disable(&bp->napi);
1466*4882a593Smuzhiyun 		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1467*4882a593Smuzhiyun 		b44_free_rings(bp);
1468*4882a593Smuzhiyun 		b44_free_consistent(bp);
1469*4882a593Smuzhiyun 		goto out;
1470*4882a593Smuzhiyun 	}
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun 	timer_setup(&bp->timer, b44_timer, 0);
1473*4882a593Smuzhiyun 	bp->timer.expires = jiffies + HZ;
1474*4882a593Smuzhiyun 	add_timer(&bp->timer);
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 	b44_enable_ints(bp);
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun 	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1479*4882a593Smuzhiyun 		phy_start(dev->phydev);
1480*4882a593Smuzhiyun 
1481*4882a593Smuzhiyun 	netif_start_queue(dev);
1482*4882a593Smuzhiyun out:
1483*4882a593Smuzhiyun 	return err;
1484*4882a593Smuzhiyun }
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
1487*4882a593Smuzhiyun /*
1488*4882a593Smuzhiyun  * Polling receive - used by netconsole and other diagnostic tools
1489*4882a593Smuzhiyun  * to allow network i/o with interrupts disabled.
1490*4882a593Smuzhiyun  */
1491*4882a593Smuzhiyun static void b44_poll_controller(struct net_device *dev)
1492*4882a593Smuzhiyun {
1493*4882a593Smuzhiyun 	disable_irq(dev->irq);
1494*4882a593Smuzhiyun 	b44_interrupt(dev->irq, dev);
1495*4882a593Smuzhiyun 	enable_irq(dev->irq);
1496*4882a593Smuzhiyun }
1497*4882a593Smuzhiyun #endif
1498*4882a593Smuzhiyun 
1499*4882a593Smuzhiyun static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1500*4882a593Smuzhiyun {
1501*4882a593Smuzhiyun 	u32 i;
1502*4882a593Smuzhiyun 	u32 *pattern = (u32 *) pp;
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun 	for (i = 0; i < bytes; i += sizeof(u32)) {
1505*4882a593Smuzhiyun 		bw32(bp, B44_FILT_ADDR, table_offset + i);
1506*4882a593Smuzhiyun 		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1507*4882a593Smuzhiyun 	}
1508*4882a593Smuzhiyun }
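
/*
 * The wakeup-filter RAM has no directly mapped MMIO window; as above,
 * it is loaded one 32-bit word at a time through the B44_FILT_ADDR /
 * B44_FILT_DATA register pair, so callers pass buffers sized in
 * multiples of sizeof(u32).
 */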
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1511*4882a593Smuzhiyun {
1512*4882a593Smuzhiyun 	int magicsync = 6;
1513*4882a593Smuzhiyun 	int k, j, len = offset;
1514*4882a593Smuzhiyun 	int ethaddr_bytes = ETH_ALEN;
1515*4882a593Smuzhiyun 
1516*4882a593Smuzhiyun 	memset(ppattern + offset, 0xff, magicsync);
1517*4882a593Smuzhiyun 	for (j = 0; j < magicsync; j++) {
1518*4882a593Smuzhiyun 		pmask[len >> 3] |= BIT(len & 7);
1519*4882a593Smuzhiyun 		len++;
1520*4882a593Smuzhiyun 	}
1521*4882a593Smuzhiyun 
1522*4882a593Smuzhiyun 	for (j = 0; j < B44_MAX_PATTERNS; j++) {
1523*4882a593Smuzhiyun 		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1524*4882a593Smuzhiyun 			ethaddr_bytes = ETH_ALEN;
1525*4882a593Smuzhiyun 		else
1526*4882a593Smuzhiyun 			ethaddr_bytes = B44_PATTERN_SIZE - len;
1527*4882a593Smuzhiyun 		if (ethaddr_bytes <= 0)
1528*4882a593Smuzhiyun 			break;
1529*4882a593Smuzhiyun 		for (k = 0; k < ethaddr_bytes; k++) {
1530*4882a593Smuzhiyun 			ppattern[offset + magicsync +
1531*4882a593Smuzhiyun 				(j * ETH_ALEN) + k] = macaddr[k];
1532*4882a593Smuzhiyun 			pmask[len >> 3] |= BIT(len & 7);
1533*4882a593Smuzhiyun 			len++;
1534*4882a593Smuzhiyun 		}
1535*4882a593Smuzhiyun 	}
1536*4882a593Smuzhiyun 	return len - 1;
1537*4882a593Smuzhiyun }
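
/*
 * b44_magic_pattern() builds the classic magic-packet body: six 0xff
 * sync bytes followed by copies of the station MAC, placed at `offset`
 * so the same matcher works behind raw Ethernet, IPv4/UDP and IPv6/UDP
 * headers.  pmask is a bitmap with one bit per pattern byte that must
 * match exactly.  A worked example, assuming B44_PATTERN_SIZE leaves
 * room for all B44_MAX_PATTERNS MAC copies:
 *
 *	offset = ETH_HLEN                                (14)
 *	len    = offset + 6 + B44_MAX_PATTERNS * ETH_ALEN
 *	return = len - 1     (B44_WKUP_LEN stores "length minus one")
 */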
1538*4882a593Smuzhiyun 
1539*4882a593Smuzhiyun /* Setup magic packet patterns in the b44 WOL
1540*4882a593Smuzhiyun  * pattern matching filter.
1541*4882a593Smuzhiyun  */
1542*4882a593Smuzhiyun static void b44_setup_pseudo_magicp(struct b44 *bp)
1543*4882a593Smuzhiyun {
1544*4882a593Smuzhiyun 
1545*4882a593Smuzhiyun 	u32 val;
1546*4882a593Smuzhiyun 	int plen0, plen1, plen2;
1547*4882a593Smuzhiyun 	u8 *pwol_pattern;
1548*4882a593Smuzhiyun 	u8 pwol_mask[B44_PMASK_SIZE];
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun 	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1551*4882a593Smuzhiyun 	if (!pwol_pattern)
1552*4882a593Smuzhiyun 		return;
1553*4882a593Smuzhiyun 
1554*4882a593Smuzhiyun 	/* IPv4 magic packet pattern - pattern 0. */
1555*4882a593Smuzhiyun 	memset(pwol_mask, 0, B44_PMASK_SIZE);
1556*4882a593Smuzhiyun 	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1557*4882a593Smuzhiyun 				  B44_ETHIPV4UDP_HLEN);
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun 	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1560*4882a593Smuzhiyun 	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1561*4882a593Smuzhiyun 
1562*4882a593Smuzhiyun 	/* Raw Ethernet II magic packet pattern - pattern 1 */
1563*4882a593Smuzhiyun 	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1564*4882a593Smuzhiyun 	memset(pwol_mask, 0, B44_PMASK_SIZE);
1565*4882a593Smuzhiyun 	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1566*4882a593Smuzhiyun 				  ETH_HLEN);
1567*4882a593Smuzhiyun 
1568*4882a593Smuzhiyun 	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1569*4882a593Smuzhiyun 		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
1570*4882a593Smuzhiyun 	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1571*4882a593Smuzhiyun 		       B44_PMASK_BASE + B44_PMASK_SIZE);
1572*4882a593Smuzhiyun 
1573*4882a593Smuzhiyun 	/* IPv6 magic packet pattern - pattern 2 */
1574*4882a593Smuzhiyun 	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1575*4882a593Smuzhiyun 	memset(pwol_mask, 0, B44_PMASK_SIZE);
1576*4882a593Smuzhiyun 	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1577*4882a593Smuzhiyun 				  B44_ETHIPV6UDP_HLEN);
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun 	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1580*4882a593Smuzhiyun 		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1581*4882a593Smuzhiyun 	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1582*4882a593Smuzhiyun 		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 	kfree(pwol_pattern);
1585*4882a593Smuzhiyun 
1586*4882a593Smuzhiyun 	/* set these patterns' lengths: one less than each real length */
1587*4882a593Smuzhiyun 	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1588*4882a593Smuzhiyun 	bw32(bp, B44_WKUP_LEN, val);
1589*4882a593Smuzhiyun 
1590*4882a593Smuzhiyun 	/* enable wakeup pattern matching */
1591*4882a593Smuzhiyun 	val = br32(bp, B44_DEVCTRL);
1592*4882a593Smuzhiyun 	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1593*4882a593Smuzhiyun 
1594*4882a593Smuzhiyun }
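
/*
 * The three patterns written above sit back to back in filter RAM, and
 * B44_WKUP_LEN packs their "length minus one" values into one register,
 * one byte per pattern (plen0 in bits 7:0, plen1 in 15:8, plen2 in
 * 23:16, as the shifts above suggest), together with the
 * WKUP_LEN_ENABLE_THREE bits that switch all three matchers on.
 */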
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun #ifdef CONFIG_B44_PCI
1597*4882a593Smuzhiyun static void b44_setup_wol_pci(struct b44 *bp)
1598*4882a593Smuzhiyun {
1599*4882a593Smuzhiyun 	u16 val;
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun 	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
1602*4882a593Smuzhiyun 		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
1603*4882a593Smuzhiyun 		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
1604*4882a593Smuzhiyun 		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
1605*4882a593Smuzhiyun 	}
1606*4882a593Smuzhiyun }
1607*4882a593Smuzhiyun #else
1608*4882a593Smuzhiyun static inline void b44_setup_wol_pci(struct b44 *bp) { }
1609*4882a593Smuzhiyun #endif /* CONFIG_B44_PCI */
1610*4882a593Smuzhiyun 
1611*4882a593Smuzhiyun static void b44_setup_wol(struct b44 *bp)
1612*4882a593Smuzhiyun {
1613*4882a593Smuzhiyun 	u32 val;
1614*4882a593Smuzhiyun 
1615*4882a593Smuzhiyun 	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun 	if (bp->flags & B44_FLAG_B0_ANDLATER) {
1618*4882a593Smuzhiyun 
1619*4882a593Smuzhiyun 		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun 		val = bp->dev->dev_addr[2] << 24 |
1622*4882a593Smuzhiyun 			bp->dev->dev_addr[3] << 16 |
1623*4882a593Smuzhiyun 			bp->dev->dev_addr[4] << 8 |
1624*4882a593Smuzhiyun 			bp->dev->dev_addr[5];
1625*4882a593Smuzhiyun 		bw32(bp, B44_ADDR_LO, val);
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun 		val = bp->dev->dev_addr[0] << 8 |
1628*4882a593Smuzhiyun 			bp->dev->dev_addr[1];
1629*4882a593Smuzhiyun 		bw32(bp, B44_ADDR_HI, val);
1630*4882a593Smuzhiyun 
1631*4882a593Smuzhiyun 		val = br32(bp, B44_DEVCTRL);
1632*4882a593Smuzhiyun 		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1633*4882a593Smuzhiyun 
1634*4882a593Smuzhiyun 	} else {
1635*4882a593Smuzhiyun 		b44_setup_pseudo_magicp(bp);
1636*4882a593Smuzhiyun 	}
1637*4882a593Smuzhiyun 	b44_setup_wol_pci(bp);
1638*4882a593Smuzhiyun }
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun static int b44_close(struct net_device *dev)
1641*4882a593Smuzhiyun {
1642*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
1643*4882a593Smuzhiyun 
1644*4882a593Smuzhiyun 	netif_stop_queue(dev);
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun 	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1647*4882a593Smuzhiyun 		phy_stop(dev->phydev);
1648*4882a593Smuzhiyun 
1649*4882a593Smuzhiyun 	napi_disable(&bp->napi);
1650*4882a593Smuzhiyun 
1651*4882a593Smuzhiyun 	del_timer_sync(&bp->timer);
1652*4882a593Smuzhiyun 
1653*4882a593Smuzhiyun 	spin_lock_irq(&bp->lock);
1654*4882a593Smuzhiyun 
1655*4882a593Smuzhiyun 	b44_halt(bp);
1656*4882a593Smuzhiyun 	b44_free_rings(bp);
1657*4882a593Smuzhiyun 	netif_carrier_off(dev);
1658*4882a593Smuzhiyun 
1659*4882a593Smuzhiyun 	spin_unlock_irq(&bp->lock);
1660*4882a593Smuzhiyun 
1661*4882a593Smuzhiyun 	free_irq(dev->irq, dev);
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun 	if (bp->flags & B44_FLAG_WOL_ENABLE) {
1664*4882a593Smuzhiyun 		b44_init_hw(bp, B44_PARTIAL_RESET);
1665*4882a593Smuzhiyun 		b44_setup_wol(bp);
1666*4882a593Smuzhiyun 	}
1667*4882a593Smuzhiyun 
1668*4882a593Smuzhiyun 	b44_free_consistent(bp);
1669*4882a593Smuzhiyun 
1670*4882a593Smuzhiyun 	return 0;
1671*4882a593Smuzhiyun }
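
/*
 * The teardown order in b44_close() is deliberate: the TX queue and
 * NAPI are stopped before bp->lock is taken, del_timer_sync() makes
 * sure no b44_timer() invocation is still in flight, and free_irq()
 * only runs once b44_halt() has masked and reset the chip.  If
 * wake-on-LAN is armed, the hardware is then re-initialized just far
 * enough (B44_PARTIAL_RESET) for the pattern matcher to operate.
 */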
1672*4882a593Smuzhiyun 
1673*4882a593Smuzhiyun static void b44_get_stats64(struct net_device *dev,
1674*4882a593Smuzhiyun 			    struct rtnl_link_stats64 *nstat)
1675*4882a593Smuzhiyun {
1676*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
1677*4882a593Smuzhiyun 	struct b44_hw_stats *hwstat = &bp->hw_stats;
1678*4882a593Smuzhiyun 	unsigned int start;
1679*4882a593Smuzhiyun 
1680*4882a593Smuzhiyun 	do {
1681*4882a593Smuzhiyun 		start = u64_stats_fetch_begin_irq(&hwstat->syncp);
1682*4882a593Smuzhiyun 
1683*4882a593Smuzhiyun 		/* Convert HW stats into rtnl_link_stats64 stats. */
1684*4882a593Smuzhiyun 		nstat->rx_packets = hwstat->rx_pkts;
1685*4882a593Smuzhiyun 		nstat->tx_packets = hwstat->tx_pkts;
1686*4882a593Smuzhiyun 		nstat->rx_bytes   = hwstat->rx_octets;
1687*4882a593Smuzhiyun 		nstat->tx_bytes   = hwstat->tx_octets;
1688*4882a593Smuzhiyun 		nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1689*4882a593Smuzhiyun 				     hwstat->tx_oversize_pkts +
1690*4882a593Smuzhiyun 				     hwstat->tx_underruns +
1691*4882a593Smuzhiyun 				     hwstat->tx_excessive_cols +
1692*4882a593Smuzhiyun 				     hwstat->tx_late_cols);
1693*4882a593Smuzhiyun 		nstat->multicast  = hwstat->rx_multicast_pkts;
1694*4882a593Smuzhiyun 		nstat->collisions = hwstat->tx_total_cols;
1695*4882a593Smuzhiyun 
1696*4882a593Smuzhiyun 		nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1697*4882a593Smuzhiyun 					   hwstat->rx_undersize);
1698*4882a593Smuzhiyun 		nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1699*4882a593Smuzhiyun 		nstat->rx_frame_errors  = hwstat->rx_align_errs;
1700*4882a593Smuzhiyun 		nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1701*4882a593Smuzhiyun 		nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1702*4882a593Smuzhiyun 					   hwstat->rx_oversize_pkts +
1703*4882a593Smuzhiyun 					   hwstat->rx_missed_pkts +
1704*4882a593Smuzhiyun 					   hwstat->rx_crc_align_errs +
1705*4882a593Smuzhiyun 					   hwstat->rx_undersize +
1706*4882a593Smuzhiyun 					   hwstat->rx_crc_errs +
1707*4882a593Smuzhiyun 					   hwstat->rx_align_errs +
1708*4882a593Smuzhiyun 					   hwstat->rx_symbol_errs);
1709*4882a593Smuzhiyun 
1710*4882a593Smuzhiyun 		nstat->tx_aborted_errors = hwstat->tx_underruns;
1711*4882a593Smuzhiyun #if 0
1712*4882a593Smuzhiyun 		/* Carrier lost counter seems to be broken for some devices */
1713*4882a593Smuzhiyun 		nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1714*4882a593Smuzhiyun #endif
1715*4882a593Smuzhiyun 	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
1716*4882a593Smuzhiyun 
1717*4882a593Smuzhiyun }
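
/*
 * The fetch_begin/fetch_retry loop above is the usual u64_stats
 * seqcount pattern: b44_stats_update() brackets its writes with
 * u64_stats_update_begin()/end(), and a reader retries until it
 * observes an unchanged sequence, giving tear-free 64-bit counters even
 * on 32-bit systems that cannot load a u64 atomically.
 */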
1718*4882a593Smuzhiyun 
1719*4882a593Smuzhiyun static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1720*4882a593Smuzhiyun {
1721*4882a593Smuzhiyun 	struct netdev_hw_addr *ha;
1722*4882a593Smuzhiyun 	int i, num_ents;
1723*4882a593Smuzhiyun 
1724*4882a593Smuzhiyun 	num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
1725*4882a593Smuzhiyun 	i = 0;
1726*4882a593Smuzhiyun 	netdev_for_each_mc_addr(ha, dev) {
1727*4882a593Smuzhiyun 		if (i == num_ents)
1728*4882a593Smuzhiyun 			break;
1729*4882a593Smuzhiyun 		__b44_cam_write(bp, ha->addr, i++ + 1);
1730*4882a593Smuzhiyun 	}
1731*4882a593Smuzhiyun 	return i + 1;
1732*4882a593Smuzhiyun }
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun static void __b44_set_rx_mode(struct net_device *dev)
1735*4882a593Smuzhiyun {
1736*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
1737*4882a593Smuzhiyun 	u32 val;
1738*4882a593Smuzhiyun 
1739*4882a593Smuzhiyun 	val = br32(bp, B44_RXCONFIG);
1740*4882a593Smuzhiyun 	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1741*4882a593Smuzhiyun 	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1742*4882a593Smuzhiyun 		val |= RXCONFIG_PROMISC;
1743*4882a593Smuzhiyun 		bw32(bp, B44_RXCONFIG, val);
1744*4882a593Smuzhiyun 	} else {
1745*4882a593Smuzhiyun 		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1746*4882a593Smuzhiyun 		int i = 1;
1747*4882a593Smuzhiyun 
1748*4882a593Smuzhiyun 		__b44_set_mac_addr(bp);
1749*4882a593Smuzhiyun 
1750*4882a593Smuzhiyun 		if ((dev->flags & IFF_ALLMULTI) ||
1751*4882a593Smuzhiyun 		    (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
1752*4882a593Smuzhiyun 			val |= RXCONFIG_ALLMULTI;
1753*4882a593Smuzhiyun 		else
1754*4882a593Smuzhiyun 			i = __b44_load_mcast(bp, dev);
1755*4882a593Smuzhiyun 
1756*4882a593Smuzhiyun 		for (; i < 64; i++)
1757*4882a593Smuzhiyun 			__b44_cam_write(bp, zero, i);
1758*4882a593Smuzhiyun 
1759*4882a593Smuzhiyun 		bw32(bp, B44_RXCONFIG, val);
1760*4882a593Smuzhiyun 		val = br32(bp, B44_CAM_CTRL);
1761*4882a593Smuzhiyun 		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1762*4882a593Smuzhiyun 	}
1763*4882a593Smuzhiyun }
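
/*
 * CAM usage as established above: entry 0 holds the station MAC
 * (written by __b44_set_mac_addr()), entries 1..B44_MCAST_TABLE_SIZE
 * hold multicast filters, and every remaining slot up to 63 is zeroed
 * so stale entries cannot match.  When the group list overflows the
 * table, or IFF_ALLMULTI/IFF_PROMISC is set, the CAM is bypassed via
 * the RXCONFIG bits instead.
 */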
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun static void b44_set_rx_mode(struct net_device *dev)
1766*4882a593Smuzhiyun {
1767*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
1768*4882a593Smuzhiyun 
1769*4882a593Smuzhiyun 	spin_lock_irq(&bp->lock);
1770*4882a593Smuzhiyun 	__b44_set_rx_mode(dev);
1771*4882a593Smuzhiyun 	spin_unlock_irq(&bp->lock);
1772*4882a593Smuzhiyun }
1773*4882a593Smuzhiyun 
1774*4882a593Smuzhiyun static u32 b44_get_msglevel(struct net_device *dev)
1775*4882a593Smuzhiyun {
1776*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
1777*4882a593Smuzhiyun 	return bp->msg_enable;
1778*4882a593Smuzhiyun }
1779*4882a593Smuzhiyun 
1780*4882a593Smuzhiyun static void b44_set_msglevel(struct net_device *dev, u32 value)
1781*4882a593Smuzhiyun {
1782*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
1783*4882a593Smuzhiyun 	bp->msg_enable = value;
1784*4882a593Smuzhiyun }
1785*4882a593Smuzhiyun 
1786*4882a593Smuzhiyun static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1787*4882a593Smuzhiyun {
1788*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
1789*4882a593Smuzhiyun 	struct ssb_bus *bus = bp->sdev->bus;
1790*4882a593Smuzhiyun 
1791*4882a593Smuzhiyun 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1792*4882a593Smuzhiyun 	switch (bus->bustype) {
1793*4882a593Smuzhiyun 	case SSB_BUSTYPE_PCI:
1794*4882a593Smuzhiyun 		strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1795*4882a593Smuzhiyun 		break;
1796*4882a593Smuzhiyun 	case SSB_BUSTYPE_SSB:
1797*4882a593Smuzhiyun 		strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
1798*4882a593Smuzhiyun 		break;
1799*4882a593Smuzhiyun 	case SSB_BUSTYPE_PCMCIA:
1800*4882a593Smuzhiyun 	case SSB_BUSTYPE_SDIO:
1801*4882a593Smuzhiyun 		WARN_ON(1); /* A device with this bus does not exist. */
1802*4882a593Smuzhiyun 		break;
1803*4882a593Smuzhiyun 	}
1804*4882a593Smuzhiyun }
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun static int b44_nway_reset(struct net_device *dev)
1807*4882a593Smuzhiyun {
1808*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
1809*4882a593Smuzhiyun 	u32 bmcr;
1810*4882a593Smuzhiyun 	int r;
1811*4882a593Smuzhiyun 
1812*4882a593Smuzhiyun 	spin_lock_irq(&bp->lock);
1813*4882a593Smuzhiyun 	b44_readphy(bp, MII_BMCR, &bmcr);
1814*4882a593Smuzhiyun 	b44_readphy(bp, MII_BMCR, &bmcr);
1815*4882a593Smuzhiyun 	r = -EINVAL;
1816*4882a593Smuzhiyun 	if (bmcr & BMCR_ANENABLE) {
1817*4882a593Smuzhiyun 		b44_writephy(bp, MII_BMCR,
1818*4882a593Smuzhiyun 			     bmcr | BMCR_ANRESTART);
1819*4882a593Smuzhiyun 		r = 0;
1820*4882a593Smuzhiyun 	}
1821*4882a593Smuzhiyun 	spin_unlock_irq(&bp->lock);
1822*4882a593Smuzhiyun 
1823*4882a593Smuzhiyun 	return r;
1824*4882a593Smuzhiyun }
1825*4882a593Smuzhiyun 
1826*4882a593Smuzhiyun static int b44_get_link_ksettings(struct net_device *dev,
1827*4882a593Smuzhiyun 				  struct ethtool_link_ksettings *cmd)
1828*4882a593Smuzhiyun {
1829*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
1830*4882a593Smuzhiyun 	u32 supported, advertising;
1831*4882a593Smuzhiyun 
1832*4882a593Smuzhiyun 	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
1833*4882a593Smuzhiyun 		BUG_ON(!dev->phydev);
1834*4882a593Smuzhiyun 		phy_ethtool_ksettings_get(dev->phydev, cmd);
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun 		return 0;
1837*4882a593Smuzhiyun 	}
1838*4882a593Smuzhiyun 
1839*4882a593Smuzhiyun 	supported = (SUPPORTED_Autoneg);
1840*4882a593Smuzhiyun 	supported |= (SUPPORTED_100baseT_Half |
1841*4882a593Smuzhiyun 		      SUPPORTED_100baseT_Full |
1842*4882a593Smuzhiyun 		      SUPPORTED_10baseT_Half |
1843*4882a593Smuzhiyun 		      SUPPORTED_10baseT_Full |
1844*4882a593Smuzhiyun 		      SUPPORTED_MII);
1845*4882a593Smuzhiyun 
1846*4882a593Smuzhiyun 	advertising = 0;
1847*4882a593Smuzhiyun 	if (bp->flags & B44_FLAG_ADV_10HALF)
1848*4882a593Smuzhiyun 		advertising |= ADVERTISED_10baseT_Half;
1849*4882a593Smuzhiyun 	if (bp->flags & B44_FLAG_ADV_10FULL)
1850*4882a593Smuzhiyun 		advertising |= ADVERTISED_10baseT_Full;
1851*4882a593Smuzhiyun 	if (bp->flags & B44_FLAG_ADV_100HALF)
1852*4882a593Smuzhiyun 		advertising |= ADVERTISED_100baseT_Half;
1853*4882a593Smuzhiyun 	if (bp->flags & B44_FLAG_ADV_100FULL)
1854*4882a593Smuzhiyun 		advertising |= ADVERTISED_100baseT_Full;
1855*4882a593Smuzhiyun 	advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1856*4882a593Smuzhiyun 	cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1857*4882a593Smuzhiyun 		SPEED_100 : SPEED_10;
1858*4882a593Smuzhiyun 	cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1859*4882a593Smuzhiyun 		DUPLEX_FULL : DUPLEX_HALF;
1860*4882a593Smuzhiyun 	cmd->base.port = 0;
1861*4882a593Smuzhiyun 	cmd->base.phy_address = bp->phy_addr;
1862*4882a593Smuzhiyun 	cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1863*4882a593Smuzhiyun 		AUTONEG_DISABLE : AUTONEG_ENABLE;
1864*4882a593Smuzhiyun 	if (cmd->base.autoneg == AUTONEG_ENABLE)
1865*4882a593Smuzhiyun 		advertising |= ADVERTISED_Autoneg;
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1868*4882a593Smuzhiyun 						supported);
1869*4882a593Smuzhiyun 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1870*4882a593Smuzhiyun 						advertising);
1871*4882a593Smuzhiyun 
1872*4882a593Smuzhiyun 	if (!netif_running(dev)) {
1873*4882a593Smuzhiyun 		cmd->base.speed = 0;
1874*4882a593Smuzhiyun 		cmd->base.duplex = 0xff;
1875*4882a593Smuzhiyun 	}
1876*4882a593Smuzhiyun 
1877*4882a593Smuzhiyun 	return 0;
1878*4882a593Smuzhiyun }
1879*4882a593Smuzhiyun 
1880*4882a593Smuzhiyun static int b44_set_link_ksettings(struct net_device *dev,
1881*4882a593Smuzhiyun 				  const struct ethtool_link_ksettings *cmd)
1882*4882a593Smuzhiyun {
1883*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
1884*4882a593Smuzhiyun 	u32 speed;
1885*4882a593Smuzhiyun 	int ret;
1886*4882a593Smuzhiyun 	u32 advertising;
1887*4882a593Smuzhiyun 
1888*4882a593Smuzhiyun 	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
1889*4882a593Smuzhiyun 		BUG_ON(!dev->phydev);
1890*4882a593Smuzhiyun 		spin_lock_irq(&bp->lock);
1891*4882a593Smuzhiyun 		if (netif_running(dev))
1892*4882a593Smuzhiyun 			b44_setup_phy(bp);
1893*4882a593Smuzhiyun 
1894*4882a593Smuzhiyun 		ret = phy_ethtool_ksettings_set(dev->phydev, cmd);
1895*4882a593Smuzhiyun 
1896*4882a593Smuzhiyun 		spin_unlock_irq(&bp->lock);
1897*4882a593Smuzhiyun 
1898*4882a593Smuzhiyun 		return ret;
1899*4882a593Smuzhiyun 	}
1900*4882a593Smuzhiyun 
1901*4882a593Smuzhiyun 	speed = cmd->base.speed;
1902*4882a593Smuzhiyun 
1903*4882a593Smuzhiyun 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
1904*4882a593Smuzhiyun 						cmd->link_modes.advertising);
1905*4882a593Smuzhiyun 
1906*4882a593Smuzhiyun 	/* We do not support gigabit. */
1907*4882a593Smuzhiyun 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
1908*4882a593Smuzhiyun 		if (advertising &
1909*4882a593Smuzhiyun 		    (ADVERTISED_1000baseT_Half |
1910*4882a593Smuzhiyun 		     ADVERTISED_1000baseT_Full))
1911*4882a593Smuzhiyun 			return -EINVAL;
1912*4882a593Smuzhiyun 	} else if ((speed != SPEED_100 &&
1913*4882a593Smuzhiyun 		    speed != SPEED_10) ||
1914*4882a593Smuzhiyun 		   (cmd->base.duplex != DUPLEX_HALF &&
1915*4882a593Smuzhiyun 		    cmd->base.duplex != DUPLEX_FULL)) {
1916*4882a593Smuzhiyun 		return -EINVAL;
1917*4882a593Smuzhiyun 	}
1918*4882a593Smuzhiyun 
1919*4882a593Smuzhiyun 	spin_lock_irq(&bp->lock);
1920*4882a593Smuzhiyun 
1921*4882a593Smuzhiyun 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
1922*4882a593Smuzhiyun 		bp->flags &= ~(B44_FLAG_FORCE_LINK |
1923*4882a593Smuzhiyun 			       B44_FLAG_100_BASE_T |
1924*4882a593Smuzhiyun 			       B44_FLAG_FULL_DUPLEX |
1925*4882a593Smuzhiyun 			       B44_FLAG_ADV_10HALF |
1926*4882a593Smuzhiyun 			       B44_FLAG_ADV_10FULL |
1927*4882a593Smuzhiyun 			       B44_FLAG_ADV_100HALF |
1928*4882a593Smuzhiyun 			       B44_FLAG_ADV_100FULL);
1929*4882a593Smuzhiyun 		if (advertising == 0) {
1930*4882a593Smuzhiyun 			bp->flags |= (B44_FLAG_ADV_10HALF |
1931*4882a593Smuzhiyun 				      B44_FLAG_ADV_10FULL |
1932*4882a593Smuzhiyun 				      B44_FLAG_ADV_100HALF |
1933*4882a593Smuzhiyun 				      B44_FLAG_ADV_100FULL);
1934*4882a593Smuzhiyun 		} else {
1935*4882a593Smuzhiyun 			if (advertising & ADVERTISED_10baseT_Half)
1936*4882a593Smuzhiyun 				bp->flags |= B44_FLAG_ADV_10HALF;
1937*4882a593Smuzhiyun 			if (advertising & ADVERTISED_10baseT_Full)
1938*4882a593Smuzhiyun 				bp->flags |= B44_FLAG_ADV_10FULL;
1939*4882a593Smuzhiyun 			if (advertising & ADVERTISED_100baseT_Half)
1940*4882a593Smuzhiyun 				bp->flags |= B44_FLAG_ADV_100HALF;
1941*4882a593Smuzhiyun 			if (advertising & ADVERTISED_100baseT_Full)
1942*4882a593Smuzhiyun 				bp->flags |= B44_FLAG_ADV_100FULL;
1943*4882a593Smuzhiyun 		}
1944*4882a593Smuzhiyun 	} else {
1945*4882a593Smuzhiyun 		bp->flags |= B44_FLAG_FORCE_LINK;
1946*4882a593Smuzhiyun 		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1947*4882a593Smuzhiyun 		if (speed == SPEED_100)
1948*4882a593Smuzhiyun 			bp->flags |= B44_FLAG_100_BASE_T;
1949*4882a593Smuzhiyun 		if (cmd->base.duplex == DUPLEX_FULL)
1950*4882a593Smuzhiyun 			bp->flags |= B44_FLAG_FULL_DUPLEX;
1951*4882a593Smuzhiyun 	}
1952*4882a593Smuzhiyun 
1953*4882a593Smuzhiyun 	if (netif_running(dev))
1954*4882a593Smuzhiyun 		b44_setup_phy(bp);
1955*4882a593Smuzhiyun 
1956*4882a593Smuzhiyun 	spin_unlock_irq(&bp->lock);
1957*4882a593Smuzhiyun 
1958*4882a593Smuzhiyun 	return 0;
1959*4882a593Smuzhiyun }
1960*4882a593Smuzhiyun 
1961*4882a593Smuzhiyun static void b44_get_ringparam(struct net_device *dev,
1962*4882a593Smuzhiyun 			      struct ethtool_ringparam *ering)
1963*4882a593Smuzhiyun {
1964*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
1965*4882a593Smuzhiyun 
1966*4882a593Smuzhiyun 	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1967*4882a593Smuzhiyun 	ering->rx_pending = bp->rx_pending;
1968*4882a593Smuzhiyun 
1969*4882a593Smuzhiyun 	/* XXX ethtool lacks a tx_max_pending, oops... */
1970*4882a593Smuzhiyun }
1971*4882a593Smuzhiyun 
1972*4882a593Smuzhiyun static int b44_set_ringparam(struct net_device *dev,
1973*4882a593Smuzhiyun 			     struct ethtool_ringparam *ering)
1974*4882a593Smuzhiyun {
1975*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
1976*4882a593Smuzhiyun 
1977*4882a593Smuzhiyun 	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1978*4882a593Smuzhiyun 	    (ering->rx_mini_pending != 0) ||
1979*4882a593Smuzhiyun 	    (ering->rx_jumbo_pending != 0) ||
1980*4882a593Smuzhiyun 	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
1981*4882a593Smuzhiyun 		return -EINVAL;
1982*4882a593Smuzhiyun 
1983*4882a593Smuzhiyun 	spin_lock_irq(&bp->lock);
1984*4882a593Smuzhiyun 
1985*4882a593Smuzhiyun 	bp->rx_pending = ering->rx_pending;
1986*4882a593Smuzhiyun 	bp->tx_pending = ering->tx_pending;
1987*4882a593Smuzhiyun 
1988*4882a593Smuzhiyun 	b44_halt(bp);
1989*4882a593Smuzhiyun 	b44_init_rings(bp);
1990*4882a593Smuzhiyun 	b44_init_hw(bp, B44_FULL_RESET);
1991*4882a593Smuzhiyun 	netif_wake_queue(bp->dev);
1992*4882a593Smuzhiyun 	spin_unlock_irq(&bp->lock);
1993*4882a593Smuzhiyun 
1994*4882a593Smuzhiyun 	b44_enable_ints(bp);
1995*4882a593Smuzhiyun 
1996*4882a593Smuzhiyun 	return 0;
1997*4882a593Smuzhiyun }
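
/*
 * Resizing the rings is heavyweight: the MAC is halted, both rings are
 * rebuilt from scratch and the chip goes through a full reset, so any
 * frames in flight at that moment are dropped.  ethtool callers are
 * expected to tolerate this brief outage.
 */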
1998*4882a593Smuzhiyun 
1999*4882a593Smuzhiyun static void b44_get_pauseparam(struct net_device *dev,
2000*4882a593Smuzhiyun 				struct ethtool_pauseparam *epause)
2001*4882a593Smuzhiyun {
2002*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
2003*4882a593Smuzhiyun 
2004*4882a593Smuzhiyun 	epause->autoneg =
2005*4882a593Smuzhiyun 		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
2006*4882a593Smuzhiyun 	epause->rx_pause =
2007*4882a593Smuzhiyun 		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
2008*4882a593Smuzhiyun 	epause->tx_pause =
2009*4882a593Smuzhiyun 		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
2010*4882a593Smuzhiyun }
2011*4882a593Smuzhiyun 
2012*4882a593Smuzhiyun static int b44_set_pauseparam(struct net_device *dev,
2013*4882a593Smuzhiyun 				struct ethtool_pauseparam *epause)
2014*4882a593Smuzhiyun {
2015*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
2016*4882a593Smuzhiyun 
2017*4882a593Smuzhiyun 	spin_lock_irq(&bp->lock);
2018*4882a593Smuzhiyun 	if (epause->autoneg)
2019*4882a593Smuzhiyun 		bp->flags |= B44_FLAG_PAUSE_AUTO;
2020*4882a593Smuzhiyun 	else
2021*4882a593Smuzhiyun 		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
2022*4882a593Smuzhiyun 	if (epause->rx_pause)
2023*4882a593Smuzhiyun 		bp->flags |= B44_FLAG_RX_PAUSE;
2024*4882a593Smuzhiyun 	else
2025*4882a593Smuzhiyun 		bp->flags &= ~B44_FLAG_RX_PAUSE;
2026*4882a593Smuzhiyun 	if (epause->tx_pause)
2027*4882a593Smuzhiyun 		bp->flags |= B44_FLAG_TX_PAUSE;
2028*4882a593Smuzhiyun 	else
2029*4882a593Smuzhiyun 		bp->flags &= ~B44_FLAG_TX_PAUSE;
2030*4882a593Smuzhiyun 	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
2031*4882a593Smuzhiyun 		b44_halt(bp);
2032*4882a593Smuzhiyun 		b44_init_rings(bp);
2033*4882a593Smuzhiyun 		b44_init_hw(bp, B44_FULL_RESET);
2034*4882a593Smuzhiyun 	} else {
2035*4882a593Smuzhiyun 		__b44_set_flow_ctrl(bp, bp->flags);
2036*4882a593Smuzhiyun 	}
2037*4882a593Smuzhiyun 	spin_unlock_irq(&bp->lock);
2038*4882a593Smuzhiyun 
2039*4882a593Smuzhiyun 	b44_enable_ints(bp);
2040*4882a593Smuzhiyun 
2041*4882a593Smuzhiyun 	return 0;
2042*4882a593Smuzhiyun }
2043*4882a593Smuzhiyun 
2044*4882a593Smuzhiyun static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2045*4882a593Smuzhiyun {
2046*4882a593Smuzhiyun 	switch (stringset) {
2047*4882a593Smuzhiyun 	case ETH_SS_STATS:
2048*4882a593Smuzhiyun 		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
2049*4882a593Smuzhiyun 		break;
2050*4882a593Smuzhiyun 	}
2051*4882a593Smuzhiyun }
2052*4882a593Smuzhiyun 
2053*4882a593Smuzhiyun static int b44_get_sset_count(struct net_device *dev, int sset)
2054*4882a593Smuzhiyun {
2055*4882a593Smuzhiyun 	switch (sset) {
2056*4882a593Smuzhiyun 	case ETH_SS_STATS:
2057*4882a593Smuzhiyun 		return ARRAY_SIZE(b44_gstrings);
2058*4882a593Smuzhiyun 	default:
2059*4882a593Smuzhiyun 		return -EOPNOTSUPP;
2060*4882a593Smuzhiyun 	}
2061*4882a593Smuzhiyun }
2062*4882a593Smuzhiyun 
2063*4882a593Smuzhiyun static void b44_get_ethtool_stats(struct net_device *dev,
2064*4882a593Smuzhiyun 				  struct ethtool_stats *stats, u64 *data)
2065*4882a593Smuzhiyun {
2066*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
2067*4882a593Smuzhiyun 	struct b44_hw_stats *hwstat = &bp->hw_stats;
2068*4882a593Smuzhiyun 	u64 *data_src, *data_dst;
2069*4882a593Smuzhiyun 	unsigned int start;
2070*4882a593Smuzhiyun 	u32 i;
2071*4882a593Smuzhiyun 
2072*4882a593Smuzhiyun 	spin_lock_irq(&bp->lock);
2073*4882a593Smuzhiyun 	b44_stats_update(bp);
2074*4882a593Smuzhiyun 	spin_unlock_irq(&bp->lock);
2075*4882a593Smuzhiyun 
2076*4882a593Smuzhiyun 	do {
2077*4882a593Smuzhiyun 		data_src = &hwstat->tx_good_octets;
2078*4882a593Smuzhiyun 		data_dst = data;
2079*4882a593Smuzhiyun 		start = u64_stats_fetch_begin_irq(&hwstat->syncp);
2080*4882a593Smuzhiyun 
2081*4882a593Smuzhiyun 		for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
2082*4882a593Smuzhiyun 			*data_dst++ = *data_src++;
2083*4882a593Smuzhiyun 
2084*4882a593Smuzhiyun 	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
2085*4882a593Smuzhiyun }
2086*4882a593Smuzhiyun 
2087*4882a593Smuzhiyun static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2088*4882a593Smuzhiyun {
2089*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
2090*4882a593Smuzhiyun 
2091*4882a593Smuzhiyun 	wol->supported = WAKE_MAGIC;
2092*4882a593Smuzhiyun 	if (bp->flags & B44_FLAG_WOL_ENABLE)
2093*4882a593Smuzhiyun 		wol->wolopts = WAKE_MAGIC;
2094*4882a593Smuzhiyun 	else
2095*4882a593Smuzhiyun 		wol->wolopts = 0;
2096*4882a593Smuzhiyun 	memset(&wol->sopass, 0, sizeof(wol->sopass));
2097*4882a593Smuzhiyun }
2098*4882a593Smuzhiyun 
2099*4882a593Smuzhiyun static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2100*4882a593Smuzhiyun {
2101*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
2102*4882a593Smuzhiyun 
2103*4882a593Smuzhiyun 	spin_lock_irq(&bp->lock);
2104*4882a593Smuzhiyun 	if (wol->wolopts & WAKE_MAGIC)
2105*4882a593Smuzhiyun 		bp->flags |= B44_FLAG_WOL_ENABLE;
2106*4882a593Smuzhiyun 	else
2107*4882a593Smuzhiyun 		bp->flags &= ~B44_FLAG_WOL_ENABLE;
2108*4882a593Smuzhiyun 	spin_unlock_irq(&bp->lock);
2109*4882a593Smuzhiyun 
2110*4882a593Smuzhiyun 	device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC);
2111*4882a593Smuzhiyun 	return 0;
2112*4882a593Smuzhiyun }
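
/*
 * Userspace arms this path with e.g. "ethtool -s eth0 wol g"; the
 * resulting B44_FLAG_WOL_ENABLE flag is consumed by b44_close(), which
 * leaves the chip in its WOL configuration on ifdown.
 */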
2113*4882a593Smuzhiyun 
2114*4882a593Smuzhiyun static const struct ethtool_ops b44_ethtool_ops = {
2115*4882a593Smuzhiyun 	.get_drvinfo		= b44_get_drvinfo,
2116*4882a593Smuzhiyun 	.nway_reset		= b44_nway_reset,
2117*4882a593Smuzhiyun 	.get_link		= ethtool_op_get_link,
2118*4882a593Smuzhiyun 	.get_wol		= b44_get_wol,
2119*4882a593Smuzhiyun 	.set_wol		= b44_set_wol,
2120*4882a593Smuzhiyun 	.get_ringparam		= b44_get_ringparam,
2121*4882a593Smuzhiyun 	.set_ringparam		= b44_set_ringparam,
2122*4882a593Smuzhiyun 	.get_pauseparam		= b44_get_pauseparam,
2123*4882a593Smuzhiyun 	.set_pauseparam		= b44_set_pauseparam,
2124*4882a593Smuzhiyun 	.get_msglevel		= b44_get_msglevel,
2125*4882a593Smuzhiyun 	.set_msglevel		= b44_set_msglevel,
2126*4882a593Smuzhiyun 	.get_strings		= b44_get_strings,
2127*4882a593Smuzhiyun 	.get_sset_count		= b44_get_sset_count,
2128*4882a593Smuzhiyun 	.get_ethtool_stats	= b44_get_ethtool_stats,
2129*4882a593Smuzhiyun 	.get_link_ksettings	= b44_get_link_ksettings,
2130*4882a593Smuzhiyun 	.set_link_ksettings	= b44_set_link_ksettings,
2131*4882a593Smuzhiyun };
2132*4882a593Smuzhiyun 
2133*4882a593Smuzhiyun static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2134*4882a593Smuzhiyun {
2135*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
2136*4882a593Smuzhiyun 	int err = -EINVAL;
2137*4882a593Smuzhiyun 
2138*4882a593Smuzhiyun 	if (!netif_running(dev))
2139*4882a593Smuzhiyun 		goto out;
2140*4882a593Smuzhiyun 
2141*4882a593Smuzhiyun 	spin_lock_irq(&bp->lock);
2142*4882a593Smuzhiyun 	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
2143*4882a593Smuzhiyun 		BUG_ON(!dev->phydev);
2144*4882a593Smuzhiyun 		err = phy_mii_ioctl(dev->phydev, ifr, cmd);
2145*4882a593Smuzhiyun 	} else {
2146*4882a593Smuzhiyun 		err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
2147*4882a593Smuzhiyun 	}
2148*4882a593Smuzhiyun 	spin_unlock_irq(&bp->lock);
2149*4882a593Smuzhiyun out:
2150*4882a593Smuzhiyun 	return err;
2151*4882a593Smuzhiyun }
2152*4882a593Smuzhiyun 
2153*4882a593Smuzhiyun static int b44_get_invariants(struct b44 *bp)
2154*4882a593Smuzhiyun {
2155*4882a593Smuzhiyun 	struct ssb_device *sdev = bp->sdev;
2156*4882a593Smuzhiyun 	int err = 0;
2157*4882a593Smuzhiyun 	u8 *addr;
2158*4882a593Smuzhiyun 
2159*4882a593Smuzhiyun 	bp->dma_offset = ssb_dma_translation(sdev);
2160*4882a593Smuzhiyun 
2161*4882a593Smuzhiyun 	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
2162*4882a593Smuzhiyun 	    instance > 1) {
2163*4882a593Smuzhiyun 		addr = sdev->bus->sprom.et1mac;
2164*4882a593Smuzhiyun 		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
2165*4882a593Smuzhiyun 	} else {
2166*4882a593Smuzhiyun 		addr = sdev->bus->sprom.et0mac;
2167*4882a593Smuzhiyun 		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
2168*4882a593Smuzhiyun 	}
2169*4882a593Smuzhiyun 	/* Some ROMs have buggy PHY addresses with the high
2170*4882a593Smuzhiyun 	 * bits set (sign extension?). Truncate them to a
2171*4882a593Smuzhiyun 	 * valid PHY address. */
2172*4882a593Smuzhiyun 	bp->phy_addr &= 0x1F;
2173*4882a593Smuzhiyun 
2174*4882a593Smuzhiyun 	memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
2175*4882a593Smuzhiyun 
2176*4882a593Smuzhiyun 	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2177*4882a593Smuzhiyun 		pr_err("Invalid MAC address found in EEPROM\n");
2178*4882a593Smuzhiyun 		return -EINVAL;
2179*4882a593Smuzhiyun 	}
2180*4882a593Smuzhiyun 
2181*4882a593Smuzhiyun 	bp->imask = IMASK_DEF;
2182*4882a593Smuzhiyun 
2183*4882a593Smuzhiyun 	/* XXX - really required?
2184*4882a593Smuzhiyun 	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
2185*4882a593Smuzhiyun 	*/
2186*4882a593Smuzhiyun 
2187*4882a593Smuzhiyun 	if (bp->sdev->id.revision >= 7)
2188*4882a593Smuzhiyun 		bp->flags |= B44_FLAG_B0_ANDLATER;
2189*4882a593Smuzhiyun 
2190*4882a593Smuzhiyun 	return err;
2191*4882a593Smuzhiyun }
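
/*
 * MAC/PHY discovery above: on SSB boards with two Ethernet cores the
 * second core (instance > 1) takes its address from the et1* SPROM
 * fields, while everything else, including PCI cards, uses et0*.  The
 * `instance` counter is bumped once per probed core in b44_init_one().
 */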
2192*4882a593Smuzhiyun 
2193*4882a593Smuzhiyun static const struct net_device_ops b44_netdev_ops = {
2194*4882a593Smuzhiyun 	.ndo_open		= b44_open,
2195*4882a593Smuzhiyun 	.ndo_stop		= b44_close,
2196*4882a593Smuzhiyun 	.ndo_start_xmit		= b44_start_xmit,
2197*4882a593Smuzhiyun 	.ndo_get_stats64	= b44_get_stats64,
2198*4882a593Smuzhiyun 	.ndo_set_rx_mode	= b44_set_rx_mode,
2199*4882a593Smuzhiyun 	.ndo_set_mac_address	= b44_set_mac_addr,
2200*4882a593Smuzhiyun 	.ndo_validate_addr	= eth_validate_addr,
2201*4882a593Smuzhiyun 	.ndo_do_ioctl		= b44_ioctl,
2202*4882a593Smuzhiyun 	.ndo_tx_timeout		= b44_tx_timeout,
2203*4882a593Smuzhiyun 	.ndo_change_mtu		= b44_change_mtu,
2204*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
2205*4882a593Smuzhiyun 	.ndo_poll_controller	= b44_poll_controller,
2206*4882a593Smuzhiyun #endif
2207*4882a593Smuzhiyun };
2208*4882a593Smuzhiyun 
2209*4882a593Smuzhiyun static void b44_adjust_link(struct net_device *dev)
2210*4882a593Smuzhiyun {
2211*4882a593Smuzhiyun 	struct b44 *bp = netdev_priv(dev);
2212*4882a593Smuzhiyun 	struct phy_device *phydev = dev->phydev;
2213*4882a593Smuzhiyun 	bool status_changed = false;
2214*4882a593Smuzhiyun 
2215*4882a593Smuzhiyun 	BUG_ON(!phydev);
2216*4882a593Smuzhiyun 
2217*4882a593Smuzhiyun 	if (bp->old_link != phydev->link) {
2218*4882a593Smuzhiyun 		status_changed = true;
2219*4882a593Smuzhiyun 		bp->old_link = phydev->link;
2220*4882a593Smuzhiyun 	}
2221*4882a593Smuzhiyun 
2222*4882a593Smuzhiyun 	/* reflect duplex change */
2223*4882a593Smuzhiyun 	if (phydev->link) {
2224*4882a593Smuzhiyun 		if ((phydev->duplex == DUPLEX_HALF) &&
2225*4882a593Smuzhiyun 		    (bp->flags & B44_FLAG_FULL_DUPLEX)) {
2226*4882a593Smuzhiyun 			status_changed = true;
2227*4882a593Smuzhiyun 			bp->flags &= ~B44_FLAG_FULL_DUPLEX;
2228*4882a593Smuzhiyun 		} else if ((phydev->duplex == DUPLEX_FULL) &&
2229*4882a593Smuzhiyun 			   !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
2230*4882a593Smuzhiyun 			status_changed = true;
2231*4882a593Smuzhiyun 			bp->flags |= B44_FLAG_FULL_DUPLEX;
2232*4882a593Smuzhiyun 		}
2233*4882a593Smuzhiyun 	}
2234*4882a593Smuzhiyun 
2235*4882a593Smuzhiyun 	if (status_changed) {
2236*4882a593Smuzhiyun 		u32 val = br32(bp, B44_TX_CTRL);
2237*4882a593Smuzhiyun 		if (bp->flags & B44_FLAG_FULL_DUPLEX)
2238*4882a593Smuzhiyun 			val |= TX_CTRL_DUPLEX;
2239*4882a593Smuzhiyun 		else
2240*4882a593Smuzhiyun 			val &= ~TX_CTRL_DUPLEX;
2241*4882a593Smuzhiyun 		bw32(bp, B44_TX_CTRL, val);
2242*4882a593Smuzhiyun 		phy_print_status(phydev);
2243*4882a593Smuzhiyun 	}
2244*4882a593Smuzhiyun }
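
/*
 * b44_adjust_link() is the phylib callback used when an external PHY
 * drives the link.  The only MAC register that has to track the PHY
 * here is the duplex bit in B44_TX_CTRL; phy_print_status() then emits
 * the usual one-line link report whenever something actually changed.
 */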
2245*4882a593Smuzhiyun 
2246*4882a593Smuzhiyun static int b44_register_phy_one(struct b44 *bp)
2247*4882a593Smuzhiyun {
2248*4882a593Smuzhiyun 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
2249*4882a593Smuzhiyun 	struct mii_bus *mii_bus;
2250*4882a593Smuzhiyun 	struct ssb_device *sdev = bp->sdev;
2251*4882a593Smuzhiyun 	struct phy_device *phydev;
2252*4882a593Smuzhiyun 	char bus_id[MII_BUS_ID_SIZE + 3];
2253*4882a593Smuzhiyun 	struct ssb_sprom *sprom = &sdev->bus->sprom;
2254*4882a593Smuzhiyun 	int err;
2255*4882a593Smuzhiyun 
2256*4882a593Smuzhiyun 	mii_bus = mdiobus_alloc();
2257*4882a593Smuzhiyun 	if (!mii_bus) {
2258*4882a593Smuzhiyun 		dev_err(sdev->dev, "mdiobus_alloc() failed\n");
2259*4882a593Smuzhiyun 		err = -ENOMEM;
2260*4882a593Smuzhiyun 		goto err_out;
2261*4882a593Smuzhiyun 	}
2262*4882a593Smuzhiyun 
2263*4882a593Smuzhiyun 	mii_bus->priv = bp;
2264*4882a593Smuzhiyun 	mii_bus->read = b44_mdio_read_phylib;
2265*4882a593Smuzhiyun 	mii_bus->write = b44_mdio_write_phylib;
2266*4882a593Smuzhiyun 	mii_bus->name = "b44_eth_mii";
2267*4882a593Smuzhiyun 	mii_bus->parent = sdev->dev;
2268*4882a593Smuzhiyun 	mii_bus->phy_mask = ~(1 << bp->phy_addr);
2269*4882a593Smuzhiyun 	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance);
2270*4882a593Smuzhiyun 
2271*4882a593Smuzhiyun 	bp->mii_bus = mii_bus;
2272*4882a593Smuzhiyun 
2273*4882a593Smuzhiyun 	err = mdiobus_register(mii_bus);
2274*4882a593Smuzhiyun 	if (err) {
2275*4882a593Smuzhiyun 		dev_err(sdev->dev, "failed to register MII bus\n");
2276*4882a593Smuzhiyun 		goto err_out_mdiobus;
2277*4882a593Smuzhiyun 	}
2278*4882a593Smuzhiyun 
2279*4882a593Smuzhiyun 	if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) &&
2280*4882a593Smuzhiyun 	    (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {
2281*4882a593Smuzhiyun 
2282*4882a593Smuzhiyun 		dev_info(sdev->dev,
2283*4882a593Smuzhiyun 			 "could not find PHY at %i, use fixed one\n",
2284*4882a593Smuzhiyun 			 bp->phy_addr);
2285*4882a593Smuzhiyun 
2286*4882a593Smuzhiyun 		bp->phy_addr = 0;
2287*4882a593Smuzhiyun 		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
2288*4882a593Smuzhiyun 			 bp->phy_addr);
2289*4882a593Smuzhiyun 	} else {
2290*4882a593Smuzhiyun 		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
2291*4882a593Smuzhiyun 			 bp->phy_addr);
2292*4882a593Smuzhiyun 	}
2293*4882a593Smuzhiyun 
2294*4882a593Smuzhiyun 	phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
2295*4882a593Smuzhiyun 			     PHY_INTERFACE_MODE_MII);
2296*4882a593Smuzhiyun 	if (IS_ERR(phydev)) {
2297*4882a593Smuzhiyun 		dev_err(sdev->dev, "could not attach PHY at %i\n",
2298*4882a593Smuzhiyun 			bp->phy_addr);
2299*4882a593Smuzhiyun 		err = PTR_ERR(phydev);
2300*4882a593Smuzhiyun 		goto err_out_mdiobus_unregister;
2301*4882a593Smuzhiyun 	}
2302*4882a593Smuzhiyun 
2303*4882a593Smuzhiyun 	/* mask with MAC supported features */
2304*4882a593Smuzhiyun 	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
2305*4882a593Smuzhiyun 	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
2306*4882a593Smuzhiyun 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
2307*4882a593Smuzhiyun 	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
2308*4882a593Smuzhiyun 	linkmode_and(phydev->supported, phydev->supported, mask);
2309*4882a593Smuzhiyun 	linkmode_copy(phydev->advertising, phydev->supported);
2310*4882a593Smuzhiyun 
2311*4882a593Smuzhiyun 	bp->old_link = 0;
2312*4882a593Smuzhiyun 	bp->phy_addr = phydev->mdio.addr;
2313*4882a593Smuzhiyun 
2314*4882a593Smuzhiyun 	phy_attached_info(phydev);
2315*4882a593Smuzhiyun 
2316*4882a593Smuzhiyun 	return 0;
2317*4882a593Smuzhiyun 
2318*4882a593Smuzhiyun err_out_mdiobus_unregister:
2319*4882a593Smuzhiyun 	mdiobus_unregister(mii_bus);
2320*4882a593Smuzhiyun 
2321*4882a593Smuzhiyun err_out_mdiobus:
2322*4882a593Smuzhiyun 	mdiobus_free(mii_bus);
2323*4882a593Smuzhiyun 
2324*4882a593Smuzhiyun err_out:
2325*4882a593Smuzhiyun 	return err;
2326*4882a593Smuzhiyun }
2327*4882a593Smuzhiyun 
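/* Undo b44_register_phy_one(): detach the PHY, then unregister and
 * free the MDIO bus.
 */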
static void b44_unregister_phy_one(struct b44 *bp)
{
	struct net_device *dev = bp->dev;
	struct mii_bus *mii_bus = bp->mii_bus;

	phy_disconnect(dev->phydev);
	mdiobus_unregister(mii_bus);
	mdiobus_free(mii_bus);
}

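/* SSB probe callback: allocate the netdev, power up the bus, fetch the
 * chip invariants (MAC and PHY addresses), register the net device and,
 * on boards with an external PHY, attach to it via phylib.
 */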
static int b44_init_one(struct ssb_device *sdev,
			const struct ssb_device_id *ent)
{
	struct net_device *dev;
	struct b44 *bp;
	int err;

	instance++;

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		err = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, sdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->sdev = sdev;
	bp->dev = dev;
	bp->force_copybreak = 0;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);
	u64_stats_init(&bp->hw_stats.syncp);

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->netdev_ops = &b44_netdev_ops;
	netif_napi_add(dev, &bp->napi, b44_poll, 64);
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->min_mtu = B44_MIN_MTU;
	dev->max_mtu = B44_MAX_MTU;
	dev->irq = sdev->irq;
	dev->ethtool_ops = &b44_ethtool_ops;

	err = ssb_bus_powerup(sdev->bus, 0);
	if (err) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		goto err_out_free_dev;
	}

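	/* The b44 DMA engine can only address the low 1 GB of physical
	 * memory, hence the 30-bit mask.
	 */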
	err = dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30));
	if (err) {
		dev_err(sdev->dev,
			"Required 30BIT DMA mask unsupported by the system\n");
		goto err_out_powerdown;
	}

	err = b44_get_invariants(bp);
	if (err) {
		dev_err(sdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_powerdown;
	}

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
		dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
		err = -ENODEV;
		goto err_out_powerdown;
	}

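	/* mii_if_info glue for the non-phylib path (internal PHY accessed
	 * through the MAC's own MDIO registers).
	 */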
	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mdio_read_mii;
	bp->mii_if.mdio_write = b44_mdio_write_mii;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		dev_err(sdev->dev, "Cannot register net device, aborting\n");
		goto err_out_powerdown;
	}

	netif_carrier_off(dev);

	ssb_set_drvdata(sdev, dev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp, B44_CHIP_RESET_FULL);

	/* do a phy reset to test if there is an active phy */
	err = b44_phy_reset(bp);
	if (err < 0) {
		dev_err(sdev->dev, "phy reset failed\n");
		goto err_out_unregister_netdev;
	}

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		err = b44_register_phy_one(bp);
		if (err) {
			dev_err(sdev->dev, "Cannot register PHY, aborting\n");
			goto err_out_unregister_netdev;
		}
	}

	device_set_wakeup_capable(sdev->dev, true);
	netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(dev);
err_out_powerdown:
	ssb_bus_may_powerdown(sdev->bus);

err_out_free_dev:
	netif_napi_del(&bp->napi);
	free_netdev(dev);

out:
	return err;
}

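/* SSB remove callback: unwind b44_init_one() in reverse order and put
 * the device into D3hot.
 */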
static void b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	unregister_netdev(dev);
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		b44_unregister_phy_one(bp);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	netif_napi_del(&bp->napi);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}

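/* Suspend: halt the MAC and free the rings; if Wake-on-LAN is enabled,
 * re-initialize just enough of the hardware to arm WOL before dropping
 * to D3hot.
 */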
static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}

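/* Resume: power the bus back up and, if the interface was running,
 * rebuild the rings, re-request the (shared) interrupt, and restart
 * the queue and the periodic timer.
 */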
static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	spin_lock_irq(&bp->lock);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	/*
	 * As a shared interrupt, the handler can be called immediately. To be
	 * able to check the interrupt status the hardware must already be
	 * powered back on (b44_init_hw).
	 */
	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "request_irq failed\n");
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_free_rings(bp);
		spin_unlock_irq(&bp->lock);
		return rc;
	}

	netif_device_attach(bp->dev);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}

static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= b44_remove_one,
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};

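/* The PCI host glue is only built when CONFIG_B44_PCI is set; on
 * SoC-only configurations these helpers compile away to nothing.
 */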
static inline int __init b44_pci_init(void)
{
	int err = 0;
#ifdef CONFIG_B44_PCI
	err = ssb_pcihost_register(&b44_pci_driver);
#endif
	return err;
}

static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}

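/* Module init: descriptors must be synced in whole cache lines, so the
 * per-descriptor sync size is the larger of the DMA cache alignment
 * and sizeof(struct dma_desc).
 */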
static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

	/* Setup parameters for syncing RX/TX DMA descriptors */
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}

static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}

module_init(b44_init);
module_exit(b44_cleanup);