// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PXA168 ethernet driver.
 * Most of the code is derived from mv643xx ethernet driver.
 *
 * Copyright (C) 2010 Marvell International Ltd.
 *		Sachin Sanap <ssanap@marvell.com>
 *		Zhangfei Gao <zgao6@marvell.com>
 *		Philip Rakity <prakity@marvell.com>
 *		Mark Brown <markb@marvell.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/pxa168_eth.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/udp.h>
#include <linux/workqueue.h>
#include <linux/pgtable.h>

#include <asm/cacheflush.h>

#define DRIVER_NAME	"pxa168-eth"
#define DRIVER_VERSION	"0.3"

/*
 * Registers
 */

#define PHY_ADDRESS		0x0000
#define SMI			0x0010
#define PORT_CONFIG		0x0400
#define PORT_CONFIG_EXT		0x0408
#define PORT_COMMAND		0x0410
#define PORT_STATUS		0x0418
#define HTPR			0x0428
#define MAC_ADDR_LOW		0x0430
#define MAC_ADDR_HIGH		0x0438
#define SDMA_CONFIG		0x0440
#define SDMA_CMD		0x0448
#define INT_CAUSE		0x0450
#define INT_W_CLEAR		0x0454
#define INT_MASK		0x0458
#define ETH_F_RX_DESC_0		0x0480
#define ETH_C_RX_DESC_0		0x04A0
#define ETH_C_TX_DESC_1		0x04E4

/* smi register */
#define SMI_BUSY		(1 << 28)	/* 0 - Write, 1 - Read  */
#define SMI_R_VALID		(1 << 27)	/* 0 - Write, 1 - Read  */
#define SMI_OP_W		(0 << 26)	/* Write operation      */
#define SMI_OP_R		(1 << 26)	/* Read operation */

#define PHY_WAIT_ITERATIONS	10

#define PXA168_ETH_PHY_ADDR_DEFAULT	0
/* RX & TX descriptor command */
#define BUF_OWNED_BY_DMA	(1 << 31)

/* RX descriptor status */
#define RX_EN_INT		(1 << 23)
#define RX_FIRST_DESC		(1 << 17)
#define RX_LAST_DESC		(1 << 16)
#define RX_ERROR		(1 << 15)

/* TX descriptor command */
#define TX_EN_INT		(1 << 23)
#define TX_GEN_CRC		(1 << 22)
#define TX_ZERO_PADDING		(1 << 18)
#define TX_FIRST_DESC		(1 << 17)
#define TX_LAST_DESC		(1 << 16)
#define TX_ERROR		(1 << 15)

/* SDMA_CMD */
#define SDMA_CMD_AT		(1 << 31)
#define SDMA_CMD_TXDL		(1 << 24)
#define SDMA_CMD_TXDH		(1 << 23)
#define SDMA_CMD_AR		(1 << 15)
#define SDMA_CMD_ERD		(1 << 7)

/* Bit definitions of the Port Config Reg */
#define PCR_DUPLEX_FULL		(1 << 15)
#define PCR_HS			(1 << 12)
#define PCR_EN			(1 << 7)
#define PCR_PM			(1 << 0)

/* Bit definitions of the Port Config Extend Reg */
#define PCXR_2BSM		(1 << 28)
#define PCXR_DSCP_EN		(1 << 21)
#define PCXR_RMII_EN		(1 << 20)
#define PCXR_AN_SPEED_DIS	(1 << 19)
#define PCXR_SPEED_100		(1 << 18)
#define PCXR_MFL_1518		(0 << 14)
#define PCXR_MFL_1536		(1 << 14)
#define PCXR_MFL_2048		(2 << 14)
#define PCXR_MFL_64K		(3 << 14)
#define PCXR_FLOWCTL_DIS	(1 << 12)
#define PCXR_FLP		(1 << 11)
#define PCXR_AN_FLOWCTL_DIS	(1 << 10)
#define PCXR_AN_DUPLEX_DIS	(1 << 9)
#define PCXR_PRIO_TX_OFF	3
#define PCXR_TX_HIGH_PRI	(7 << PCXR_PRIO_TX_OFF)

/* Bit definitions of the SDMA Config Reg */
#define SDCR_BSZ_OFF		12
#define SDCR_BSZ8		(3 << SDCR_BSZ_OFF)
#define SDCR_BSZ4		(2 << SDCR_BSZ_OFF)
#define SDCR_BSZ2		(1 << SDCR_BSZ_OFF)
#define SDCR_BSZ1		(0 << SDCR_BSZ_OFF)
#define SDCR_BLMR		(1 << 6)
#define SDCR_BLMT		(1 << 7)
#define SDCR_RIFB		(1 << 9)
#define SDCR_RC_OFF		2
#define SDCR_RC_MAX_RETRANS	(0xf << SDCR_RC_OFF)

/*
 * The bit definitions of the Interrupt Cause Reg
 * and the Interrupt MASK Reg are the same.
 */
#define ICR_RXBUF		(1 << 0)
#define ICR_TXBUF_H		(1 << 2)
#define ICR_TXBUF_L		(1 << 3)
#define ICR_TXEND_H		(1 << 6)
#define ICR_TXEND_L		(1 << 7)
#define ICR_RXERR		(1 << 8)
#define ICR_TXERR_H		(1 << 10)
#define ICR_TXERR_L		(1 << 11)
#define ICR_TX_UDR		(1 << 13)
#define ICR_MII_CH		(1 << 28)

#define ALL_INTS (ICR_TXBUF_H  | ICR_TXBUF_L  | ICR_TX_UDR |\
				ICR_TXERR_H  | ICR_TXERR_L |\
				ICR_TXEND_H  | ICR_TXEND_L |\
				ICR_RXBUF | ICR_RXERR  | ICR_MII_CH)

#define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */

#define NUM_RX_DESCS		64
#define NUM_TX_DESCS		64

#define HASH_ADD		0
#define HASH_DELETE		1
#define HASH_ADDR_TABLE_SIZE	0x4000	/* 16K (1/2K address - PCR_HS == 1) */
#define HOP_NUMBER		12

/* Bit definitions for Port status */
#define PORT_SPEED_100		(1 << 0)
#define FULL_DUPLEX		(1 << 1)
#define FLOW_CONTROL_DISABLED	(1 << 2)
#define LINK_UP			(1 << 3)

/* Bit definitions for work to be done */
#define WORK_TX_DONE		(1 << 1)

/*
 * Misc definitions.
 */
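/*
 * Extra headroom reserved so skb->data can be realigned to a cache-line
 * boundary before DMA mapping; this evaluates to zero when NET_SKB_PAD
 * is already a multiple of the cache-line size.
 */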
#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)

struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status            */
	u16 byte_cnt;		/* Descriptor buffer byte count         */
	u16 buf_size;		/* Buffer size                          */
	u32 buf_ptr;		/* Descriptor buffer pointer            */
	u32 next_desc_ptr;	/* Next descriptor pointer              */
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field                 */
	u16 reserved;
	u16 byte_cnt;		/* buffer byte count                    */
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
	u32 next_desc_ptr;	/* Pointer to next descriptor           */
};

struct pxa168_eth_private {
	struct platform_device *pdev;
	int port_num;		/* User Ethernet port number    */
	int phy_addr;
	int phy_speed;
	int phy_duplex;
	phy_interface_t phy_intf;

	int rx_resource_err;	/* Rx ring resource error flag */

	/* Next available and first returning Rx resource */
	int rx_curr_desc_q, rx_used_desc_q;

	/* Next available and first returning Tx resource */
	int tx_curr_desc_q, tx_used_desc_q;

	struct rx_desc *p_rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct tx_desc *p_tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;

	struct work_struct tx_timeout_task;

	struct net_device *dev;
	struct napi_struct napi;
	u8 work_todo;
	int skb_size;

	/* Size of Tx Ring per queue */
	int tx_ring_size;
	/* Number of tx descriptors in use */
	int tx_desc_count;
	/* Size of Rx Ring per queue */
	int rx_ring_size;
	/* Number of rx descriptors in use */
	int rx_desc_count;

	/*
	 * Used in case RX Ring is empty, which can occur when
	 * system does not have resources (skb's)
	 */
	struct timer_list timeout;
	struct mii_bus *smi_bus;

	/* clock */
	struct clk *clk;
	struct pxa168_eth_platform_data *pd;
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/* Pointer to the hardware address filter table */
	void *htpr;
	dma_addr_t htpr_dma;
};

struct addr_table_entry {
	__le32 lo;
	__le32 hi;
};

/* Bit fields of a Hash Table Entry */
enum hash_table_entry {
	HASH_ENTRY_VALID = 1,
	SKIP = 2,
	HASH_ENTRY_RECEIVE_DISCARD = 4,
	HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
};

static int pxa168_init_hw(struct pxa168_eth_private *pep);
static int pxa168_init_phy(struct net_device *dev);
static void eth_port_reset(struct net_device *dev);
static void eth_port_start(struct net_device *dev);
static int pxa168_eth_open(struct net_device *dev);
static int pxa168_eth_stop(struct net_device *dev);

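/* MMIO register accessors; the relaxed variants imply no memory barriers. */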
static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
{
	return readl_relaxed(pep->base + offset);
}

static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
{
	writel_relaxed(data, pep->base + offset);
}

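/*
 * Abort any in-flight transmit and receive DMA: set the abort bits in
 * SDMA_CMD and poll until the engine clears them, retrying the abort up
 * to 40 times before reporting the DMA as stuck.
 */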
static void abort_dma(struct pxa168_eth_private *pep)
{
	int delay;
	int max_retries = 40;

	do {
		wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
		udelay(100);

		delay = 10;
		while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
		       && delay-- > 0) {
			udelay(10);
		}
	} while (max_retries-- > 0 && delay <= 0);

	if (max_retries <= 0)
		netdev_err(pep->dev, "%s : DMA Stuck\n", __func__);
}

static void rxq_refill(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct sk_buff *skb;
	struct rx_desc *p_used_rx_desc;
	int used_rx_desc;

	while (pep->rx_desc_count < pep->rx_ring_size) {
		int size;

		skb = netdev_alloc_skb(dev, pep->skb_size);
		if (!skb)
			break;
		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);
		pep->rx_desc_count++;
		/* Get 'used' Rx descriptor */
		used_rx_desc = pep->rx_used_desc_q;
		p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
		size = skb_end_pointer(skb) - skb->data;
		p_used_rx_desc->buf_ptr = dma_map_single(&pep->pdev->dev,
							 skb->data,
							 size,
							 DMA_FROM_DEVICE);
		p_used_rx_desc->buf_size = size;
		pep->rx_skb[used_rx_desc] = skb;

		/* Return the descriptor to DMA ownership */
		dma_wmb();
		p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
		dma_wmb();

		/* Move the used descriptor pointer to the next descriptor */
		pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;

		/* Any Rx return cancels the Rx resource error status */
		pep->rx_resource_err = 0;

		skb_reserve(skb, ETH_HW_IP_ALIGN);
	}

	/*
	 * If the RX ring is empty of SKBs, set a timer to try allocating
	 * again at a later time.
	 */
	if (pep->rx_desc_count == 0) {
		pep->timeout.expires = jiffies + (HZ / 10);
		add_timer(&pep->timeout);
	}
}

static inline void rxq_refill_timer_wrapper(struct timer_list *t)
{
	struct pxa168_eth_private *pep = from_timer(pep, t, timeout);
	napi_schedule(&pep->napi);
}

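/*
 * Reverse the bit order within each nibble of x
 * (bits 0<->3 and 1<->2; likewise bits 4<->7 and 5<->6).
 */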
static inline u8 flip_8_bits(u8 x)
{
	return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
	    | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
	    | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
	    | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
}

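/* Swap the high and low nibbles of every byte of the MAC address in place. */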
static void nibble_swap_every_byte(unsigned char *mac_addr)
{
	int i;
	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
				((mac_addr[i] & 0xf0) >> 4);
	}
}

static void inverse_every_nibble(unsigned char *mac_addr)
{
	int i;
	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = flip_8_bits(mac_addr[i]);
}

/*
 * ----------------------------------------------------------------------------
 * This function calculates the hash table index for the given address.
 * Inputs
 * mac_addr_orig    - MAC address.
 * Outputs
 * return the calculated entry.
 */
static u32 hash_function(unsigned char *mac_addr_orig)
{
	u32 hash_result;
	u32 addr0;
	u32 addr1;
	u32 addr2;
	u32 addr3;
	unsigned char mac_addr[ETH_ALEN];

	/* Make a copy of the MAC address since we are going to perform bit
	 * operations on it
	 */
	memcpy(mac_addr, mac_addr_orig, ETH_ALEN);

	nibble_swap_every_byte(mac_addr);
	inverse_every_nibble(mac_addr);

	addr0 = (mac_addr[5] >> 2) & 0x3f;
	addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
	addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
	addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);

	hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
	hash_result = hash_result & 0x07ff;
	return hash_result;
}

/*
 * ----------------------------------------------------------------------------
 * This function adds/deletes an entry in the address table.
 * Inputs
 * pep - ETHERNET port private data.
 * mac_addr - MAC address.
 * skip - if 1, skip this address. Used when deleting an entry which is
 *	  part of a chain in the hash table. We can't just delete the
 *	  entry since that would break the chain. We need to defragment
 *	  the tables from time to time.
 * rd   - 0 Discard packet upon match.
 *	- 1 Receive packet upon match.
 * Outputs
 * address table entry is added/deleted.
 * 0 if success.
 * -ENOSPC if table full
 */
static int add_del_hash_entry(struct pxa168_eth_private *pep,
			      unsigned char *mac_addr,
			      u32 rd, u32 skip, int del)
{
	struct addr_table_entry *entry, *start;
	u32 new_high;
	u32 new_low;
	u32 i;

	new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
	    | (((mac_addr[1] >> 0) & 0xf) << 11)
	    | (((mac_addr[0] >> 4) & 0xf) << 7)
	    | (((mac_addr[0] >> 0) & 0xf) << 3)
	    | (((mac_addr[3] >> 4) & 0x1) << 31)
	    | (((mac_addr[3] >> 0) & 0xf) << 27)
	    | (((mac_addr[2] >> 4) & 0xf) << 23)
	    | (((mac_addr[2] >> 0) & 0xf) << 19)
	    | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
	    | HASH_ENTRY_VALID;

	new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
	    | (((mac_addr[5] >> 0) & 0xf) << 11)
	    | (((mac_addr[4] >> 4) & 0xf) << 7)
	    | (((mac_addr[4] >> 0) & 0xf) << 3)
	    | (((mac_addr[3] >> 5) & 0x7) << 0);

	/*
	 * Pick the appropriate table, start scanning for free/reusable
	 * entries at the index obtained by hashing the specified MAC address
	 */
	start = pep->htpr;
	entry = start + hash_function(mac_addr);
	for (i = 0; i < HOP_NUMBER; i++) {
		if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
			break;
		} else {
			/* if same address put in same position */
			if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
				(new_low & 0xfffffff8)) &&
				(le32_to_cpu(entry->hi) == new_high)) {
				break;
			}
		}
		if (entry == start + 0x7ff)
			entry = start;
		else
			entry++;
	}

	if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
	    (le32_to_cpu(entry->hi) != new_high) && del)
		return 0;

	if (i == HOP_NUMBER) {
		if (!del) {
			netdev_info(pep->dev,
				    "%s: table section is full, need to "
				    "move to 16kB implementation?\n",
				    __FILE__);
			return -ENOSPC;
		} else
			return 0;
	}

	/*
	 * Update the selected entry
	 */
	if (del) {
		entry->hi = 0;
		entry->lo = 0;
	} else {
		entry->hi = cpu_to_le32(new_high);
		entry->lo = cpu_to_le32(new_low);
	}

	return 0;
}

/*
 * ----------------------------------------------------------------------------
 *  Create an addressTable entry from MAC address info
 *  found in the specified net_device struct
 *
 *  Input : pointer to ethernet interface network device structure
 *  Output : N/A
 */
static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
					  unsigned char *oaddr,
					  unsigned char *addr)
{
	/* Delete old entry */
	if (oaddr)
		add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
	/* Add new entry */
	add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
}

static int init_hash_table(struct pxa168_eth_private *pep)
{
	/*
	 * Hardware expects CPU to build a hash table based on a predefined
	 * hash function and populate it based on hardware address. The
	 * location of the hash table is identified by a 32-bit pointer
	 * stored in the HTPR internal register. Two possible sizes exist
	 * for the hash table: 8kB (256kB of DRAM required (4 x 64 kB banks))
	 * and 1/2kB (16kB of DRAM required (4 x 4 kB banks)). We currently
	 * only support the 1/2kB table.
	 */
	/* TODO: Add support for the 8kB hash table and alternative hash
	 * function. The driver can dynamically switch to them if the 1/2kB
	 * hash table is full.
	 */
	if (!pep->htpr) {
		pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
					       HASH_ADDR_TABLE_SIZE,
					       &pep->htpr_dma, GFP_KERNEL);
		if (!pep->htpr)
			return -ENOMEM;
	} else {
		memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
	}
	wrl(pep, HTPR, pep->htpr_dma);
	return 0;
}

static void pxa168_eth_set_rx_mode(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u32 val;

	val = rdl(pep, PORT_CONFIG);
	if (dev->flags & IFF_PROMISC)
		val |= PCR_PM;
	else
		val &= ~PCR_PM;
	wrl(pep, PORT_CONFIG, val);

	/*
	 * Remove the old list of MAC addresses and add dev->addr
	 * and the multicast addresses.
	 */
	memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
	update_hash_table_mac_address(pep, NULL, dev->dev_addr);

	netdev_for_each_mc_addr(ha, dev)
		update_hash_table_mac_address(pep, NULL, ha->addr);
}

static void pxa168_eth_get_mac_address(struct net_device *dev,
				       unsigned char *addr)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	unsigned int mac_h = rdl(pep, MAC_ADDR_HIGH);
	unsigned int mac_l = rdl(pep, MAC_ADDR_LOW);

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
}

static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;
	struct pxa168_eth_private *pep = netdev_priv(dev);
	unsigned char oldMac[ETH_ALEN];
	u32 mac_h, mac_l;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(oldMac, dev->dev_addr, ETH_ALEN);
	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);

	mac_h = dev->dev_addr[0] << 24;
	mac_h |= dev->dev_addr[1] << 16;
	mac_h |= dev->dev_addr[2] << 8;
	mac_h |= dev->dev_addr[3];
	mac_l = dev->dev_addr[4] << 8;
	mac_l |= dev->dev_addr[5];
	wrl(pep, MAC_ADDR_HIGH, mac_h);
	wrl(pep, MAC_ADDR_LOW, mac_l);

	netif_addr_lock_bh(dev);
	update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
	netif_addr_unlock_bh(dev);
	return 0;
}

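/*
 * Bring the port up: program the current TX/RX descriptor pointers,
 * unmask all interrupts, enable the port and start the RX DMA engine.
 */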
static void eth_port_start(struct net_device *dev)
{
	unsigned int val = 0;
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int tx_curr_desc, rx_curr_desc;

	phy_start(dev->phydev);

	/* Assignment of Tx CTRP of given queue */
	tx_curr_desc = pep->tx_curr_desc_q;
	wrl(pep, ETH_C_TX_DESC_1,
	    (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));

	/* Assignment of Rx CRDP of given queue */
	rx_curr_desc = pep->rx_curr_desc_q;
	wrl(pep, ETH_C_RX_DESC_0,
	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));

	wrl(pep, ETH_F_RX_DESC_0,
	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));

	/* Clear all interrupts */
	wrl(pep, INT_CAUSE, 0);

	/* Enable all interrupts for receive, transmit and error. */
	wrl(pep, INT_MASK, ALL_INTS);

	val = rdl(pep, PORT_CONFIG);
	val |= PCR_EN;
	wrl(pep, PORT_CONFIG, val);

	/* Start RX DMA engine */
	val = rdl(pep, SDMA_CMD);
	val |= SDMA_CMD_ERD;
	wrl(pep, SDMA_CMD, val);
}

static void eth_port_reset(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	unsigned int val = 0;

	/* Stop all interrupts for receive, transmit and error. */
	wrl(pep, INT_MASK, 0);

	/* Clear all interrupts */
	wrl(pep, INT_CAUSE, 0);

	/* Stop RX DMA */
	val = rdl(pep, SDMA_CMD);
	val &= ~SDMA_CMD_ERD;	/* abort dma command */

	/* Abort any transmit and receive operations and put DMA
	 * in idle state.
	 */
	abort_dma(pep);

	/* Disable port */
	val = rdl(pep, PORT_CONFIG);
	val &= ~PCR_EN;
	wrl(pep, PORT_CONFIG, val);

	phy_stop(dev->phydev);
}

/*
 * txq_reclaim - Free the tx desc data for completed descriptors
 * If force is non-zero, frees uncompleted descriptors as well
 */
static int txq_reclaim(struct net_device *dev, int force)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct tx_desc *desc;
	u32 cmd_sts;
	struct sk_buff *skb;
	int tx_index;
	dma_addr_t addr;
	int count;
	int released = 0;

	netif_tx_lock(dev);

	pep->work_todo &= ~WORK_TX_DONE;
	while (pep->tx_desc_count > 0) {
		tx_index = pep->tx_used_desc_q;
		desc = &pep->p_tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;
		if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
			if (released > 0) {
				goto txq_reclaim_end;
			} else {
				released = -1;
				goto txq_reclaim_end;
			}
		}
		pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
		pep->tx_desc_count--;
		addr = desc->buf_ptr;
		count = desc->byte_cnt;
		skb = pep->tx_skb[tx_index];
		if (skb)
			pep->tx_skb[tx_index] = NULL;

		if (cmd_sts & TX_ERROR) {
			if (net_ratelimit())
				netdev_err(dev, "Error in TX\n");
			dev->stats.tx_errors++;
		}
		dma_unmap_single(&pep->pdev->dev, addr, count, DMA_TO_DEVICE);
		if (skb)
			dev_kfree_skb_irq(skb);
		released++;
	}
txq_reclaim_end:
	netif_tx_unlock(dev);
	return released;
}

static void pxa168_eth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	netdev_info(dev, "TX timeout  desc_count %d\n", pep->tx_desc_count);

	schedule_work(&pep->tx_timeout_task);
}

static void pxa168_eth_tx_timeout_task(struct work_struct *work)
{
	struct pxa168_eth_private *pep = container_of(work,
						 struct pxa168_eth_private,
						 tx_timeout_task);
	struct net_device *dev = pep->dev;
	pxa168_eth_stop(dev);
	pxa168_eth_open(dev);
}

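/*
 * NAPI receive path: walk the RX ring handing completed buffers to the
 * stack, stopping at the first descriptor still owned by DMA or when the
 * budget is exhausted, then refill the ring.
 */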
static int rxq_process(struct net_device *dev, int budget)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned int received_packets = 0;
	struct sk_buff *skb;

	while (budget-- > 0) {
		int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;

		/* Do not process Rx ring in case of Rx ring resource error */
		if (pep->rx_resource_err)
			break;
		rx_curr_desc = pep->rx_curr_desc_q;
		rx_used_desc = pep->rx_used_desc_q;
		rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
		cmd_sts = rx_desc->cmd_sts;
		dma_rmb();
		if (cmd_sts & (BUF_OWNED_BY_DMA))
			break;
		skb = pep->rx_skb[rx_curr_desc];
		pep->rx_skb[rx_curr_desc] = NULL;

		rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
		pep->rx_curr_desc_q = rx_next_curr_desc;

		/* Rx descriptors exhausted. */
		/* Set the Rx ring resource error flag */
		if (rx_next_curr_desc == rx_used_desc)
			pep->rx_resource_err = 1;
		pep->rx_desc_count--;
		dma_unmap_single(&pep->pdev->dev, rx_desc->buf_ptr,
				 rx_desc->buf_size,
				 DMA_FROM_DEVICE);
		received_packets++;
		/*
		 * Update statistics.
		 * Note byte count includes 4 byte CRC count
		 */
		stats->rx_packets++;
		stats->rx_bytes += rx_desc->byte_cnt;
		/*
		 * If we received a packet without the first / last bits set,
		 * or with the error summary bit set, the packet needs to be
		 * dropped.
		 */
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
		     (RX_FIRST_DESC | RX_LAST_DESC))
		    || (cmd_sts & RX_ERROR)) {

			stats->rx_dropped++;
			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
			    (RX_FIRST_DESC | RX_LAST_DESC)) {
				if (net_ratelimit())
					netdev_err(dev,
						   "Rx pkt on multiple desc\n");
			}
			if (cmd_sts & RX_ERROR)
				stats->rx_errors++;
			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, rx_desc->byte_cnt - 4);
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
		}
	}
	/* Fill RX ring with skb's */
	rxq_refill(dev);
	return received_packets;
}

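/*
 * Read and acknowledge the interrupt cause register; returns nonzero when
 * there is RX or TX completion work for the NAPI poll loop to pick up.
 */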
static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
				     struct net_device *dev)
{
	u32 icr;
	int ret = 0;

	icr = rdl(pep, INT_CAUSE);
	if (icr == 0)
		return IRQ_NONE;

	wrl(pep, INT_CAUSE, ~icr);
	if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
		pep->work_todo |= WORK_TX_DONE;
		ret = 1;
	}
	if (icr & ICR_RXBUF)
		ret = 1;
	return ret;
}

static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (unlikely(!pxa168_eth_collect_events(pep, dev)))
		return IRQ_NONE;
	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	napi_schedule(&pep->napi);
	return IRQ_HANDLED;
}

static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
{
	int skb_size;

	/*
	 * Reserve 2+14 bytes for an ethernet header (the hardware
	 * automatically prepends 2 bytes of dummy data to each
	 * received packet), 16 bytes for up to four VLAN tags, and
	 * 4 bytes for the trailing FCS -- 36 bytes total.
	 */
	skb_size = pep->dev->mtu + 36;

	/*
	 * Make sure that the skb size is a multiple of 8 bytes, as
	 * the lower three bits of the receive descriptor's buffer
	 * size field are ignored by the hardware.
	 */
	pep->skb_size = (skb_size + 7) & ~7;

	/*
	 * If NET_SKB_PAD is smaller than a cache line,
	 * netdev_alloc_skb() will cause skb->data to be misaligned
	 * to a cache line boundary.  If this is the case, include
	 * some extra space to allow re-aligning the data area.
	 */
	pep->skb_size += SKB_DMA_REALIGN;
}

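/*
 * Pick the maximum-frame-length field that covers the recalculated skb
 * size and program the extended port configuration accordingly.
 */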
static int set_port_config_ext(struct pxa168_eth_private *pep)
{
	int skb_size;

	pxa168_eth_recalc_skb_size(pep);
	if (pep->skb_size <= 1518)
		skb_size = PCXR_MFL_1518;
	else if (pep->skb_size <= 1536)
		skb_size = PCXR_MFL_1536;
	else if (pep->skb_size <= 2048)
		skb_size = PCXR_MFL_2048;
	else
		skb_size = PCXR_MFL_64K;

	/* Extended Port Configuration */
	wrl(pep, PORT_CONFIG_EXT,
	    PCXR_AN_SPEED_DIS |		 /* Disable HW AN */
	    PCXR_AN_DUPLEX_DIS |
	    PCXR_AN_FLOWCTL_DIS |
	    PCXR_2BSM |			 /* Two byte prefix aligns IP hdr */
	    PCXR_DSCP_EN |		 /* Enable DSCP in IP */
	    skb_size | PCXR_FLP |	 /* do not force link pass */
	    PCXR_TX_HIGH_PRI);		 /* Transmit - high priority queue */

	return 0;
}

static void pxa168_eth_adjust_link(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct phy_device *phy = dev->phydev;
	u32 cfg, cfg_o = rdl(pep, PORT_CONFIG);
	u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT);

	cfg = cfg_o & ~PCR_DUPLEX_FULL;
	cfgext = cfgext_o & ~(PCXR_SPEED_100 | PCXR_FLOWCTL_DIS | PCXR_RMII_EN);

	if (phy->interface == PHY_INTERFACE_MODE_RMII)
		cfgext |= PCXR_RMII_EN;
	if (phy->speed == SPEED_100)
		cfgext |= PCXR_SPEED_100;
	if (phy->duplex)
		cfg |= PCR_DUPLEX_FULL;
	if (!phy->pause)
		cfgext |= PCXR_FLOWCTL_DIS;

	/* Bail out if nothing has changed */
	if (cfg == cfg_o && cfgext == cfgext_o)
		return;

	wrl(pep, PORT_CONFIG, cfg);
	wrl(pep, PORT_CONFIG_EXT, cfgext);

	phy_print_status(phy);
}

pxa168_init_phy(struct net_device * dev)958*4882a593Smuzhiyun static int pxa168_init_phy(struct net_device *dev)
959*4882a593Smuzhiyun {
960*4882a593Smuzhiyun 	struct pxa168_eth_private *pep = netdev_priv(dev);
961*4882a593Smuzhiyun 	struct ethtool_link_ksettings cmd;
962*4882a593Smuzhiyun 	struct phy_device *phy = NULL;
963*4882a593Smuzhiyun 	int err;
964*4882a593Smuzhiyun 
965*4882a593Smuzhiyun 	if (dev->phydev)
966*4882a593Smuzhiyun 		return 0;
967*4882a593Smuzhiyun 
968*4882a593Smuzhiyun 	phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
969*4882a593Smuzhiyun 	if (IS_ERR(phy))
970*4882a593Smuzhiyun 		return PTR_ERR(phy);
971*4882a593Smuzhiyun 
972*4882a593Smuzhiyun 	err = phy_connect_direct(dev, phy, pxa168_eth_adjust_link,
973*4882a593Smuzhiyun 				 pep->phy_intf);
974*4882a593Smuzhiyun 	if (err)
975*4882a593Smuzhiyun 		return err;
976*4882a593Smuzhiyun 
977*4882a593Smuzhiyun 	cmd.base.phy_address = pep->phy_addr;
978*4882a593Smuzhiyun 	cmd.base.speed = pep->phy_speed;
979*4882a593Smuzhiyun 	cmd.base.duplex = pep->phy_duplex;
980*4882a593Smuzhiyun 	bitmap_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES,
981*4882a593Smuzhiyun 		    __ETHTOOL_LINK_MODE_MASK_NBITS);
982*4882a593Smuzhiyun 	cmd.base.autoneg = AUTONEG_ENABLE;
983*4882a593Smuzhiyun 
984*4882a593Smuzhiyun 	if (cmd.base.speed != 0)
985*4882a593Smuzhiyun 		cmd.base.autoneg = AUTONEG_DISABLE;
986*4882a593Smuzhiyun 
987*4882a593Smuzhiyun 	return phy_ethtool_set_link_ksettings(dev, &cmd);
988*4882a593Smuzhiyun }
989*4882a593Smuzhiyun 
pxa168_init_hw(struct pxa168_eth_private * pep)990*4882a593Smuzhiyun static int pxa168_init_hw(struct pxa168_eth_private *pep)
991*4882a593Smuzhiyun {
992*4882a593Smuzhiyun 	int err = 0;
993*4882a593Smuzhiyun 
994*4882a593Smuzhiyun 	/* Disable interrupts */
995*4882a593Smuzhiyun 	wrl(pep, INT_MASK, 0);
996*4882a593Smuzhiyun 	wrl(pep, INT_CAUSE, 0);
997*4882a593Smuzhiyun 	/* Write to ICR to clear interrupts. */
998*4882a593Smuzhiyun 	wrl(pep, INT_W_CLEAR, 0);
999*4882a593Smuzhiyun 	/* Abort any transmit and receive operations and put DMA
1000*4882a593Smuzhiyun 	 * in idle state.
1001*4882a593Smuzhiyun 	 */
1002*4882a593Smuzhiyun 	abort_dma(pep);
1003*4882a593Smuzhiyun 	/* Initialize address hash table */
1004*4882a593Smuzhiyun 	err = init_hash_table(pep);
1005*4882a593Smuzhiyun 	if (err)
1006*4882a593Smuzhiyun 		return err;
1007*4882a593Smuzhiyun 	/* SDMA configuration */
1008*4882a593Smuzhiyun 	wrl(pep, SDMA_CONFIG, SDCR_BSZ8 |	/* Burst size = 32 bytes */
1009*4882a593Smuzhiyun 	    SDCR_RIFB |				/* Rx interrupt on frame */
1010*4882a593Smuzhiyun 	    SDCR_BLMT |				/* Little endian transmit */
1011*4882a593Smuzhiyun 	    SDCR_BLMR |				/* Little endian receive */
1012*4882a593Smuzhiyun 	    SDCR_RC_MAX_RETRANS);		/* Max retransmit count */
1013*4882a593Smuzhiyun 	/* Port Configuration */
1014*4882a593Smuzhiyun 	wrl(pep, PORT_CONFIG, PCR_HS);		/* Hash size is 1/2kb */
1015*4882a593Smuzhiyun 	set_port_config_ext(pep);
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun 	return err;
1018*4882a593Smuzhiyun }
1019*4882a593Smuzhiyun 
rxq_init(struct net_device * dev)1020*4882a593Smuzhiyun static int rxq_init(struct net_device *dev)
1021*4882a593Smuzhiyun {
1022*4882a593Smuzhiyun 	struct pxa168_eth_private *pep = netdev_priv(dev);
1023*4882a593Smuzhiyun 	struct rx_desc *p_rx_desc;
1024*4882a593Smuzhiyun 	int size = 0, i = 0;
1025*4882a593Smuzhiyun 	int rx_desc_num = pep->rx_ring_size;
1026*4882a593Smuzhiyun 
1027*4882a593Smuzhiyun 	/* Allocate RX skb rings */
1028*4882a593Smuzhiyun 	pep->rx_skb = kcalloc(rx_desc_num, sizeof(*pep->rx_skb), GFP_KERNEL);
1029*4882a593Smuzhiyun 	if (!pep->rx_skb)
1030*4882a593Smuzhiyun 		return -ENOMEM;
1031*4882a593Smuzhiyun 
1032*4882a593Smuzhiyun 	/* Allocate RX ring */
1033*4882a593Smuzhiyun 	pep->rx_desc_count = 0;
1034*4882a593Smuzhiyun 	size = pep->rx_ring_size * sizeof(struct rx_desc);
1035*4882a593Smuzhiyun 	pep->rx_desc_area_size = size;
1036*4882a593Smuzhiyun 	pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
1037*4882a593Smuzhiyun 						 &pep->rx_desc_dma,
1038*4882a593Smuzhiyun 						 GFP_KERNEL);
1039*4882a593Smuzhiyun 	if (!pep->p_rx_desc_area)
1040*4882a593Smuzhiyun 		goto out;
1041*4882a593Smuzhiyun 
1042*4882a593Smuzhiyun 	/* initialize the next_desc_ptr links in the Rx descriptors ring */
1043*4882a593Smuzhiyun 	p_rx_desc = pep->p_rx_desc_area;
1044*4882a593Smuzhiyun 	for (i = 0; i < rx_desc_num; i++) {
1045*4882a593Smuzhiyun 		p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
1046*4882a593Smuzhiyun 		    ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
1047*4882a593Smuzhiyun 	}
1048*4882a593Smuzhiyun 	/* Save Rx desc pointer to driver struct. */
1049*4882a593Smuzhiyun 	pep->rx_curr_desc_q = 0;
1050*4882a593Smuzhiyun 	pep->rx_used_desc_q = 0;
1051*4882a593Smuzhiyun 	pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
1052*4882a593Smuzhiyun 	return 0;
1053*4882a593Smuzhiyun out:
1054*4882a593Smuzhiyun 	kfree(pep->rx_skb);
1055*4882a593Smuzhiyun 	return -ENOMEM;
1056*4882a593Smuzhiyun }
1057*4882a593Smuzhiyun 
rxq_deinit(struct net_device * dev)1058*4882a593Smuzhiyun static void rxq_deinit(struct net_device *dev)
1059*4882a593Smuzhiyun {
1060*4882a593Smuzhiyun 	struct pxa168_eth_private *pep = netdev_priv(dev);
1061*4882a593Smuzhiyun 	int curr;
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun 	/* Free preallocated skb's on RX rings */
1064*4882a593Smuzhiyun 	for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
1065*4882a593Smuzhiyun 		if (pep->rx_skb[curr]) {
1066*4882a593Smuzhiyun 			dev_kfree_skb(pep->rx_skb[curr]);
1067*4882a593Smuzhiyun 			pep->rx_desc_count--;
1068*4882a593Smuzhiyun 		}
1069*4882a593Smuzhiyun 	}
1070*4882a593Smuzhiyun 	if (pep->rx_desc_count)
1071*4882a593Smuzhiyun 		netdev_err(dev, "Error in freeing Rx Ring. %d skb's still\n",
1072*4882a593Smuzhiyun 			   pep->rx_desc_count);
1073*4882a593Smuzhiyun 	/* Free RX ring */
1074*4882a593Smuzhiyun 	if (pep->p_rx_desc_area)
1075*4882a593Smuzhiyun 		dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
1076*4882a593Smuzhiyun 				  pep->p_rx_desc_area, pep->rx_desc_dma);
1077*4882a593Smuzhiyun 	kfree(pep->rx_skb);
1078*4882a593Smuzhiyun }
1079*4882a593Smuzhiyun 
txq_init(struct net_device * dev)1080*4882a593Smuzhiyun static int txq_init(struct net_device *dev)
1081*4882a593Smuzhiyun {
1082*4882a593Smuzhiyun 	struct pxa168_eth_private *pep = netdev_priv(dev);
1083*4882a593Smuzhiyun 	struct tx_desc *p_tx_desc;
1084*4882a593Smuzhiyun 	int size = 0, i = 0;
1085*4882a593Smuzhiyun 	int tx_desc_num = pep->tx_ring_size;
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun 	pep->tx_skb = kcalloc(tx_desc_num, sizeof(*pep->tx_skb), GFP_KERNEL);
1088*4882a593Smuzhiyun 	if (!pep->tx_skb)
1089*4882a593Smuzhiyun 		return -ENOMEM;
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun 	/* Allocate TX ring */
1092*4882a593Smuzhiyun 	pep->tx_desc_count = 0;
1093*4882a593Smuzhiyun 	size = pep->tx_ring_size * sizeof(struct tx_desc);
1094*4882a593Smuzhiyun 	pep->tx_desc_area_size = size;
1095*4882a593Smuzhiyun 	pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
1096*4882a593Smuzhiyun 						 &pep->tx_desc_dma,
1097*4882a593Smuzhiyun 						 GFP_KERNEL);
1098*4882a593Smuzhiyun 	if (!pep->p_tx_desc_area)
1099*4882a593Smuzhiyun 		goto out;
1100*4882a593Smuzhiyun 	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
1101*4882a593Smuzhiyun 	p_tx_desc = pep->p_tx_desc_area;
1102*4882a593Smuzhiyun 	for (i = 0; i < tx_desc_num; i++) {
1103*4882a593Smuzhiyun 		p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
1104*4882a593Smuzhiyun 		    ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
1105*4882a593Smuzhiyun 	}
1106*4882a593Smuzhiyun 	pep->tx_curr_desc_q = 0;
1107*4882a593Smuzhiyun 	pep->tx_used_desc_q = 0;
1108*4882a593Smuzhiyun 	pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
1109*4882a593Smuzhiyun 	return 0;
1110*4882a593Smuzhiyun out:
1111*4882a593Smuzhiyun 	kfree(pep->tx_skb);
1112*4882a593Smuzhiyun 	return -ENOMEM;
1113*4882a593Smuzhiyun }
1114*4882a593Smuzhiyun 
txq_deinit(struct net_device * dev)1115*4882a593Smuzhiyun static void txq_deinit(struct net_device *dev)
1116*4882a593Smuzhiyun {
1117*4882a593Smuzhiyun 	struct pxa168_eth_private *pep = netdev_priv(dev);
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun 	/* Free outstanding skb's on TX ring */
1120*4882a593Smuzhiyun 	txq_reclaim(dev, 1);
1121*4882a593Smuzhiyun 	BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
1122*4882a593Smuzhiyun 	/* Free TX ring */
1123*4882a593Smuzhiyun 	if (pep->p_tx_desc_area)
1124*4882a593Smuzhiyun 		dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
1125*4882a593Smuzhiyun 				  pep->p_tx_desc_area, pep->tx_desc_dma);
1126*4882a593Smuzhiyun 	kfree(pep->tx_skb);
1127*4882a593Smuzhiyun }
1128*4882a593Smuzhiyun 
static int pxa168_eth_open(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int err;

	err = pxa168_init_phy(dev);
	if (err)
		return err;

	err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev);
	if (err) {
		dev_err(&dev->dev, "can't assign irq\n");
		return -EAGAIN;
	}
	pep->rx_resource_err = 0;
	err = rxq_init(dev);
	if (err != 0)
		goto out_free_irq;
	err = txq_init(dev);
	if (err != 0)
		goto out_free_rx_skb;
	pep->rx_used_desc_q = 0;
	pep->rx_curr_desc_q = 0;

	/* Fill RX ring with skbs */
	rxq_refill(dev);
	pep->rx_used_desc_q = 0;
	pep->rx_curr_desc_q = 0;
	netif_carrier_off(dev);
	napi_enable(&pep->napi);
	eth_port_start(dev);
	return 0;
out_free_rx_skb:
	rxq_deinit(dev);
out_free_irq:
	free_irq(dev->irq, dev);
	return err;
}

static int pxa168_eth_stop(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	eth_port_reset(dev);

	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	wrl(pep, INT_CAUSE, 0);
	/* Write to ICR to clear interrupts. */
	wrl(pep, INT_W_CLEAR, 0);
	napi_disable(&pep->napi);
	del_timer_sync(&pep->timeout);
	netif_carrier_off(dev);
	free_irq(dev->irq, dev);
	rxq_deinit(dev);
	txq_deinit(dev);

	return 0;
}

static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	dev->mtu = mtu;
	set_port_config_ext(pep);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface.  This allocates RX skbs
	 * sized for the new MTU.  The re-open can fail if memory is
	 * exhausted, in which case the device is left down.
	 */
	pxa168_eth_stop(dev);
	if (pxa168_eth_open(dev)) {
		dev_err(&dev->dev,
			"fatal error on re-opening device after MTU change\n");
	}

	return 0;
}

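/*
 * eth_alloc_tx_desc_index() - claim the next free TX descriptor.
 *
 * tx_curr_desc_q is the producer index; tx_used_desc_q is the consumer
 * index advanced by txq_reclaim().  The caller is expected to have
 * ensured at least one descriptor is free, and the BUG_ON() catches
 * the case where the producer would lap the consumer.
 */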
static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
{
	int tx_desc_curr;

	tx_desc_curr = pep->tx_curr_desc_q;
	pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
	BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
	pep->tx_desc_count++;

	return tx_desc_curr;
}

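/*
 * pxa168_rx_poll() - NAPI poll callback.
 *
 * Standard NAPI contract: process at most @budget RX packets and
 * return the number handled.  Only when less than the full budget was
 * consumed is the poll completed and the interrupt mask restored;
 * otherwise the core calls us again with interrupts still masked.
 */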
static int pxa168_rx_poll(struct napi_struct *napi, int budget)
{
	struct pxa168_eth_private *pep =
	    container_of(napi, struct pxa168_eth_private, napi);
	struct net_device *dev = pep->dev;
	int work_done = 0;

	/*
	 * We call txq_reclaim() on every poll, since interrupts are
	 * disabled while NAPI is active and we would otherwise miss the
	 * TX_DONE interrupt, which is not updated in the interrupt
	 * status register.
	 */
	txq_reclaim(dev, 0);
	if (netif_queue_stopped(dev)
	    && pep->tx_ring_size - pep->tx_desc_count > 1) {
		netif_wake_queue(dev);
	}
	work_done = rxq_process(dev, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		wrl(pep, INT_MASK, ALL_INTS);
	}

	return work_done;
}

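/*
 * pxa168_eth_start_xmit() - queue one frame for transmission.
 *
 * Each frame occupies exactly one descriptor (TX_FIRST_DESC and
 * TX_LAST_DESC are both set).  The dma_wmb() makes the buffer pointer
 * and byte count visible to the device before ownership is handed over
 * via BUF_OWNED_BY_DMA, and the subsequent wmb() orders the descriptor
 * update against the SDMA_CMD doorbell write that kicks the DMA engine.
 */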
static netdev_tx_t
pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct tx_desc *desc;
	int tx_index;
	int length;

	tx_index = eth_alloc_tx_desc_index(pep);
	desc = &pep->p_tx_desc_area[tx_index];
	length = skb->len;
	pep->tx_skb[tx_index] = skb;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, length,
					DMA_TO_DEVICE);

	skb_tx_timestamp(skb);

	dma_wmb();
	desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
			TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
	wmb();
	wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);

	stats->tx_bytes += length;
	stats->tx_packets++;
	netif_trans_update(dev);
	if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
		/* We handled the current skb, but now we are out of space. */
		netif_stop_queue(dev);
	}

	return NETDEV_TX_OK;
}

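/*
 * SMI (MDIO) access helpers.
 *
 * The SMI register multiplexes command and status fields: the PHY
 * address sits in bits 16-20, the register number in bits 21-25, the
 * opcode in bit 26, READ_VALID in bit 27 and BUSY in bit 28, with the
 * 16-bit data in the low half.  Reads wait for BUSY to clear, issue
 * the command, then poll for SMI_R_VALID; writes wait for BUSY both
 * before and after posting the command.
 */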
static int smi_wait_ready(struct pxa168_eth_private *pep)
{
	int i = 0;

	/* wait for the SMI register to become available */
	for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
		if (i == PHY_WAIT_ITERATIONS)
			return -ETIMEDOUT;
		msleep(10);
	}

	return 0;
}

static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct pxa168_eth_private *pep = bus->priv;
	int i = 0;
	int val;

	if (smi_wait_ready(pep)) {
		netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}
	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
	/* now wait for the data to be valid */
	for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			netdev_warn(pep->dev,
				    "pxa168_eth: SMI bus read not valid\n");
			return -ENODEV;
		}
		msleep(10);
	}

	return val & 0xffff;
}

static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
			    u16 value)
{
	struct pxa168_eth_private *pep = bus->priv;

	if (smi_wait_ready(pep)) {
		netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
	    SMI_OP_W | (value & 0xffff));

	if (smi_wait_ready(pep)) {
		netdev_err(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

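/*
 * With CONFIG_NET_POLL_CONTROLLER the interrupt handler can be invoked
 * synchronously, with the IRQ temporarily disabled, so facilities such
 * as netconsole can make progress even when normal interrupt delivery
 * is unavailable.
 */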
#ifdef CONFIG_NET_POLL_CONTROLLER
static void pxa168_eth_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	pxa168_eth_int_handler(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void pxa168_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}

static const struct ethtool_ops pxa168_ethtool_ops = {
	.get_drvinfo	= pxa168_get_drvinfo,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops pxa168_eth_netdev_ops = {
	.ndo_open		= pxa168_eth_open,
	.ndo_stop		= pxa168_eth_stop,
	.ndo_start_xmit		= pxa168_eth_start_xmit,
	.ndo_set_rx_mode	= pxa168_eth_set_rx_mode,
	.ndo_set_mac_address	= pxa168_eth_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= phy_do_ioctl,
	.ndo_change_mtu		= pxa168_eth_change_mtu,
	.ndo_tx_timeout		= pxa168_eth_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = pxa168_eth_netpoll,
#endif
};

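/*
 * pxa168_eth_probe() - platform probe.
 *
 * Resources are acquired in order (clock, netdev, MMIO region, IRQ,
 * MAC address, ring sizes from platform data or DT, MDIO bus) and the
 * error-handling goto ladder releases them in reverse order, so each
 * label undoes exactly one acquisition step.
 */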
static int pxa168_eth_probe(struct platform_device *pdev)
{
	struct pxa168_eth_private *pep = NULL;
	struct net_device *dev = NULL;
	struct resource *res;
	struct clk *clk;
	struct device_node *np;
	const unsigned char *mac_addr = NULL;
	int err;

	printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n");
		return -ENODEV;
	}
	clk_prepare_enable(clk);

	dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_clk;
	}

	platform_set_drvdata(pdev, dev);
	pep = netdev_priv(dev);
	pep->dev = dev;
	pep->clk = clk;

	pep->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pep->base)) {
		err = PTR_ERR(pep->base);
		goto err_netdev;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;
	dev->netdev_ops = &pxa168_eth_netdev_ops;
	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;
	dev->ethtool_ops = &pxa168_ethtool_ops;

	/* MTU range: 68 - 9500 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = 9500;

	INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);

	if (pdev->dev.of_node)
		mac_addr = of_get_mac_address(pdev->dev.of_node);

	if (!IS_ERR_OR_NULL(mac_addr)) {
		ether_addr_copy(dev->dev_addr, mac_addr);
	} else {
		/* try reading the mac address, if set by the bootloader */
		pxa168_eth_get_mac_address(dev, dev->dev_addr);
		if (!is_valid_ether_addr(dev->dev_addr)) {
			dev_info(&pdev->dev, "Using random mac address\n");
			eth_hw_addr_random(dev);
		}
	}

	pep->rx_ring_size = NUM_RX_DESCS;
	pep->tx_ring_size = NUM_TX_DESCS;

	pep->pd = dev_get_platdata(&pdev->dev);
	if (pep->pd) {
		if (pep->pd->rx_queue_size)
			pep->rx_ring_size = pep->pd->rx_queue_size;

		if (pep->pd->tx_queue_size)
			pep->tx_ring_size = pep->pd->tx_queue_size;

		pep->port_num = pep->pd->port_number;
		pep->phy_addr = pep->pd->phy_addr;
		pep->phy_speed = pep->pd->speed;
		pep->phy_duplex = pep->pd->duplex;
		pep->phy_intf = pep->pd->intf;

		if (pep->pd->init)
			pep->pd->init();
	} else if (pdev->dev.of_node) {
		of_property_read_u32(pdev->dev.of_node, "port-id",
				     &pep->port_num);

		np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
		if (!np) {
			dev_err(&pdev->dev, "missing phy-handle\n");
			err = -EINVAL;
			goto err_netdev;
		}
		of_property_read_u32(np, "reg", &pep->phy_addr);
		of_node_put(np);
		err = of_get_phy_mode(pdev->dev.of_node, &pep->phy_intf);
		if (err && err != -ENODEV)
			goto err_netdev;
	}

	/* Hardware supports only 3 ports */
	BUG_ON(pep->port_num > 2);
	netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);

	memset(&pep->timeout, 0, sizeof(struct timer_list));
	timer_setup(&pep->timeout, rxq_refill_timer_wrapper, 0);

	pep->smi_bus = mdiobus_alloc();
	if (!pep->smi_bus) {
		err = -ENOMEM;
		goto err_netdev;
	}
	pep->smi_bus->priv = pep;
	pep->smi_bus->name = "pxa168_eth smi";
	pep->smi_bus->read = pxa168_smi_read;
	pep->smi_bus->write = pxa168_smi_write;
	snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
		pdev->name, pdev->id);
	pep->smi_bus->parent = &pdev->dev;
	pep->smi_bus->phy_mask = 0xffffffff;
	err = mdiobus_register(pep->smi_bus);
	if (err)
		goto err_free_mdio;

	pep->pdev = pdev;
	SET_NETDEV_DEV(dev, &pdev->dev);
	pxa168_init_hw(pep);
	err = register_netdev(dev);
	if (err)
		goto err_mdiobus;
	return 0;

err_mdiobus:
	mdiobus_unregister(pep->smi_bus);
err_free_mdio:
	mdiobus_free(pep->smi_bus);
err_netdev:
	free_netdev(dev);
err_clk:
	clk_disable_unprepare(clk);
	return err;
}

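/*
 * pxa168_eth_remove() - platform remove.
 *
 * Resources are released roughly in reverse order of probe.  Note that
 * in this revision unregister_netdev() runs near the end, so the hash
 * table, PHY, clock and MDIO bus are torn down while the netdev is
 * still registered; the ordering in this path is worth auditing when
 * changing the teardown.
 */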
static int pxa168_eth_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (pep->htpr) {
		dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
				  pep->htpr, pep->htpr_dma);
		pep->htpr = NULL;
	}
	if (dev->phydev)
		phy_disconnect(dev->phydev);

	clk_disable_unprepare(pep->clk);
	mdiobus_unregister(pep->smi_bus);
	mdiobus_free(pep->smi_bus);
	cancel_work_sync(&pep->tx_timeout_task);
	unregister_netdev(dev);
	free_netdev(dev);
	return 0;
}

static void pxa168_eth_shutdown(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	eth_port_reset(dev);
}

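/*
 * Power management is not implemented in this driver: the stubs below
 * return -ENOSYS so that a suspend attempt is refused outright rather
 * than silently losing controller state.
 */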
#ifdef CONFIG_PM
static int pxa168_eth_resume(struct platform_device *pdev)
{
	return -ENOSYS;
}

static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
{
	return -ENOSYS;
}

#else
#define pxa168_eth_resume NULL
#define pxa168_eth_suspend NULL
#endif

static const struct of_device_id pxa168_eth_of_match[] = {
	{ .compatible = "marvell,pxa168-eth" },
	{ },
};
MODULE_DEVICE_TABLE(of, pxa168_eth_of_match);

static struct platform_driver pxa168_eth_driver = {
	.probe = pxa168_eth_probe,
	.remove = pxa168_eth_remove,
	.shutdown = pxa168_eth_shutdown,
	.resume = pxa168_eth_resume,
	.suspend = pxa168_eth_suspend,
	.driver = {
		.name		= DRIVER_NAME,
		.of_match_table	= of_match_ptr(pxa168_eth_of_match),
	},
};

module_platform_driver(pxa168_eth_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
MODULE_ALIAS("platform:pxa168_eth");