xref: /OK3568_Linux_fs/u-boot/drivers/net/ks8851_mll.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Micrel KS8851_MLL 16bit Network driver
3*4882a593Smuzhiyun  * Copyright (c) 2011 Roberto Cerati <roberto.cerati@bticino.it>
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * SPDX-License-Identifier:	GPL-2.0+
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <asm/io.h>
9*4882a593Smuzhiyun #include <common.h>
10*4882a593Smuzhiyun #include <command.h>
11*4882a593Smuzhiyun #include <malloc.h>
12*4882a593Smuzhiyun #include <net.h>
13*4882a593Smuzhiyun #include <miiphy.h>
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun #include "ks8851_mll.h"
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun #define DRIVERNAME			"ks8851_mll"
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun #define MAX_RECV_FRAMES			32
20*4882a593Smuzhiyun #define MAX_BUF_SIZE			2048
21*4882a593Smuzhiyun #define TX_BUF_SIZE			2000
22*4882a593Smuzhiyun #define RX_BUF_SIZE			2000
23*4882a593Smuzhiyun 
/* Supported chip revisions; zero-ID entry terminates the table. */
static const struct chip_id chip_ids[] =  {
	{CIDER_ID, "KSZ8851"},
	{0, NULL},
};
28*4882a593Smuzhiyun 
/*
 * union ks_tx_hdr - tx header data
 * @txb: The header as bytes
 * @txw: The header as 16bit, little-endian words
 *
 * A dual representation of the tx header data to allow
 * access to individual bytes, and to allow 16bit accesses
 * with 16bit alignment.
 */
union ks_tx_hdr {
	u8      txb[4];	/* byte view of the 4-byte TX header */
	__le16  txw[2];	/* word view: txw[0] = control, txw[1] = frame length
			 * (see ks_write_qmu()) */
};
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun /*
44*4882a593Smuzhiyun  * struct ks_net - KS8851 driver private data
45*4882a593Smuzhiyun  * @net_device	: The network device we're bound to
 * @txh		: temporary buffer to save status/length.
47*4882a593Smuzhiyun  * @frame_head_info	: frame header information for multi-pkt rx.
48*4882a593Smuzhiyun  * @statelock	: Lock on this structure for tx list.
49*4882a593Smuzhiyun  * @msg_enable	: The message flags controlling driver output (see ethtool).
50*4882a593Smuzhiyun  * @frame_cnt	: number of frames received.
51*4882a593Smuzhiyun  * @bus_width	: i/o bus width.
52*4882a593Smuzhiyun  * @irq		: irq number assigned to this device.
53*4882a593Smuzhiyun  * @rc_rxqcr	: Cached copy of KS_RXQCR.
54*4882a593Smuzhiyun  * @rc_txcr	: Cached copy of KS_TXCR.
55*4882a593Smuzhiyun  * @rc_ier	: Cached copy of KS_IER.
 * @sharedbus	: Multiplex (addr and data bus) mode indicator.
57*4882a593Smuzhiyun  * @cmd_reg_cache	: command register cached.
58*4882a593Smuzhiyun  * @cmd_reg_cache_int	: command register cached. Used in the irq handler.
59*4882a593Smuzhiyun  * @promiscuous	: promiscuous mode indicator.
 * @all_mcast	: multicast indicator.
61*4882a593Smuzhiyun  * @mcast_lst_size	: size of multicast list.
62*4882a593Smuzhiyun  * @mcast_lst		: multicast list.
 * @mcast_bits		: multicast enabled.
64*4882a593Smuzhiyun  * @mac_addr		: MAC address assigned to this device.
65*4882a593Smuzhiyun  * @fid			: frame id.
66*4882a593Smuzhiyun  * @extra_byte		: number of extra byte prepended rx pkt.
67*4882a593Smuzhiyun  * @enabled		: indicator this device works.
68*4882a593Smuzhiyun  */
69*4882a593Smuzhiyun 
/* Receive multiplex framer header info */
struct type_frame_head {
	u16	sts;         /* Frame status */
	u16	len;         /* Byte count */
} fr_h_i[MAX_RECV_FRAMES];	/* file-scope scratch array for per-frame rx
				 * headers; NOTE(review): not declared static
				 * — presumably safe, but confirm nothing else
				 * links against it */
75*4882a593Smuzhiyun 
/* Driver private state; one static instance (ks_str) shared via ks. */
struct ks_net {
	struct net_device	*netdev;	/* bound network device */
	union ks_tx_hdr		txh;		/* scratch tx status/length header */
	struct type_frame_head	*frame_head_info; /* rx header array (points at fr_h_i) */
	u32			msg_enable;	/* ethtool-style message flags */
	u32			frame_cnt;	/* frames pending in RXQ (from RXFCTR) */
	int			bus_width;	/* ENUM_BUS_8/16/32BIT (see ks_read_config) */
	int			irq;		/* assigned irq number */
	u16			rc_rxqcr;	/* cached copy of KS_RXQCR */
	u16			rc_txcr;	/* cached copy of KS_TXCR */
	u16			rc_ier;		/* cached copy of KS_IER */
	u16			sharedbus;	/* multiplexed addr/data bus indicator */
	u16			cmd_reg_cache;	/* cached command register */
	u16			cmd_reg_cache_int; /* cached command reg (irq context) */
	u16			promiscuous;	/* promiscuous mode indicator */
	u16			all_mcast;	/* all-multicast indicator */
	u16			mcast_lst_size;	/* size of multicast list */
	u8			mcast_lst[MAX_MCAST_LST][MAC_ADDR_LEN]; /* multicast list */
	u8			mcast_bits[HW_MCAST_SIZE]; /* multicast hash bits */
	u8			mac_addr[6];	/* MAC address of this device */
	u8                      fid;		/* frame id */
	u8			extra_byte;	/* dummy bytes prepended to rx pkt */
	u8			enabled;	/* device-operational indicator */
} ks_str, *ks;
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun #define BE3             0x8000      /* Byte Enable 3 */
102*4882a593Smuzhiyun #define BE2             0x4000      /* Byte Enable 2 */
103*4882a593Smuzhiyun #define BE1             0x2000      /* Byte Enable 1 */
104*4882a593Smuzhiyun #define BE0             0x1000      /* Byte Enable 0 */
105*4882a593Smuzhiyun 
ks_rdreg8(struct eth_device * dev,u16 offset)106*4882a593Smuzhiyun static u8 ks_rdreg8(struct eth_device *dev, u16 offset)
107*4882a593Smuzhiyun {
108*4882a593Smuzhiyun 	u8 shift_bit = offset & 0x03;
109*4882a593Smuzhiyun 	u8 shift_data = (offset & 1) << 3;
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun 	writew(offset | (BE0 << shift_bit), dev->iobase + 2);
112*4882a593Smuzhiyun 
113*4882a593Smuzhiyun 	return (u8)(readw(dev->iobase) >> shift_data);
114*4882a593Smuzhiyun }
115*4882a593Smuzhiyun 
/*
 * ks_rdreg16 - read a 16-bit register over the 16-bit bus
 * @dev: The network device
 * @offset: register address (even alignment selects the byte lanes)
 */
static u16 ks_rdreg16(struct eth_device *dev, u16 offset)
{
	u16 lanes = (BE1 | BE0) << (offset & 0x02);

	/* address phase on iobase+2, then data phase on iobase */
	writew(offset | lanes, dev->iobase + 2);

	return readw(dev->iobase);
}
122*4882a593Smuzhiyun 
/*
 * ks_wrreg8 - write a single register byte over the 16-bit bus
 * @dev: The network device
 * @offset: register address
 * @val: byte value to write
 */
static void ks_wrreg8(struct eth_device *dev, u16 offset, u8 val)
{
	u8 byte_lane = offset & 0x03;
	u16 data = (u16)(val << ((offset & 1) << 3));

	writew(offset | (BE0 << byte_lane), dev->iobase + 2);
	writew(data, dev->iobase);
}
131*4882a593Smuzhiyun 
/*
 * ks_wrreg16 - write a 16-bit register over the 16-bit bus
 * @dev: The network device
 * @offset: register address
 * @val: value to write
 */
static void ks_wrreg16(struct eth_device *dev, u16 offset, u16 val)
{
	u16 lanes = (BE1 | BE0) << (offset & 0x02);

	writew(offset | lanes, dev->iobase + 2);
	writew(val, dev->iobase);
}
137*4882a593Smuzhiyun 
138*4882a593Smuzhiyun /*
139*4882a593Smuzhiyun  * ks_inblk - read a block of data from QMU. This is called after sudo DMA mode
140*4882a593Smuzhiyun  * enabled.
141*4882a593Smuzhiyun  * @ks: The chip state
142*4882a593Smuzhiyun  * @wptr: buffer address to save data
143*4882a593Smuzhiyun  * @len: length in byte to read
144*4882a593Smuzhiyun  */
ks_inblk(struct eth_device * dev,u16 * wptr,u32 len)145*4882a593Smuzhiyun static inline void ks_inblk(struct eth_device *dev, u16 *wptr, u32 len)
146*4882a593Smuzhiyun {
147*4882a593Smuzhiyun 	len >>= 1;
148*4882a593Smuzhiyun 
149*4882a593Smuzhiyun 	while (len--)
150*4882a593Smuzhiyun 		*wptr++ = readw(dev->iobase);
151*4882a593Smuzhiyun }
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun /*
154*4882a593Smuzhiyun  * ks_outblk - write data to QMU. This is called after sudo DMA mode enabled.
155*4882a593Smuzhiyun  * @ks: The chip information
156*4882a593Smuzhiyun  * @wptr: buffer address
157*4882a593Smuzhiyun  * @len: length in byte to write
158*4882a593Smuzhiyun  */
ks_outblk(struct eth_device * dev,u16 * wptr,u32 len)159*4882a593Smuzhiyun static inline void ks_outblk(struct eth_device *dev, u16 *wptr, u32 len)
160*4882a593Smuzhiyun {
161*4882a593Smuzhiyun 	len >>= 1;
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun 	while (len--)
164*4882a593Smuzhiyun 		writew(*wptr++, dev->iobase);
165*4882a593Smuzhiyun }
166*4882a593Smuzhiyun 
/* Program the cached interrupt-enable mask (ks->rc_ier) into KS_IER. */
static void ks_enable_int(struct eth_device *dev)
{
	ks_wrreg16(dev, KS_IER, ks->rc_ier);
}
171*4882a593Smuzhiyun 
ks_set_powermode(struct eth_device * dev,unsigned pwrmode)172*4882a593Smuzhiyun static void ks_set_powermode(struct eth_device *dev, unsigned pwrmode)
173*4882a593Smuzhiyun {
174*4882a593Smuzhiyun 	unsigned pmecr;
175*4882a593Smuzhiyun 
176*4882a593Smuzhiyun 	ks_rdreg16(dev, KS_GRR);
177*4882a593Smuzhiyun 	pmecr = ks_rdreg16(dev, KS_PMECR);
178*4882a593Smuzhiyun 	pmecr &= ~PMECR_PM_MASK;
179*4882a593Smuzhiyun 	pmecr |= pwrmode;
180*4882a593Smuzhiyun 
181*4882a593Smuzhiyun 	ks_wrreg16(dev, KS_PMECR, pmecr);
182*4882a593Smuzhiyun }
183*4882a593Smuzhiyun 
/*
 * ks_read_config - read chip configuration of bus width.
 * @dev: The network device
 *
 * Reads CCR one byte at a time (8-bit reads work on any bus width),
 * then records the shared-bus flag and the detected bus width in the
 * driver state.  extra_byte is the number of dummy bytes the chip
 * prepends to each received packet for that bus width (consumed in
 * ks_read_qmu()).
 */
static void ks_read_config(struct eth_device *dev)
{
	u16 reg_data = 0;

	/* Regardless of bus width, 8 bit read should always work. */
	reg_data = ks_rdreg8(dev, KS_CCR) & 0x00FF;
	reg_data |= ks_rdreg8(dev, KS_CCR + 1) << 8;

	/* addr/data bus are multiplexed */
	ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;

	/*
	 * There are garbage data when reading data from QMU,
	 * depending on bus-width.
	 */
	if (reg_data & CCR_8BIT) {
		ks->bus_width = ENUM_BUS_8BIT;
		ks->extra_byte = 1;
	} else if (reg_data & CCR_16BIT) {
		ks->bus_width = ENUM_BUS_16BIT;
		ks->extra_byte = 2;
	} else {
		ks->bus_width = ENUM_BUS_32BIT;
		ks->extra_byte = 4;
	}
}
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun /*
216*4882a593Smuzhiyun  * ks_soft_reset - issue one of the soft reset to the device
217*4882a593Smuzhiyun  * @ks: The device state.
218*4882a593Smuzhiyun  * @op: The bit(s) to set in the GRR
219*4882a593Smuzhiyun  *
220*4882a593Smuzhiyun  * Issue the relevant soft-reset command to the device's GRR register
221*4882a593Smuzhiyun  * specified by @op.
222*4882a593Smuzhiyun  *
223*4882a593Smuzhiyun  * Note, the delays are in there as a caution to ensure that the reset
224*4882a593Smuzhiyun  * has time to take effect and then complete. Since the datasheet does
225*4882a593Smuzhiyun  * not currently specify the exact sequence, we have chosen something
226*4882a593Smuzhiyun  * that seems to work with our device.
227*4882a593Smuzhiyun  */
ks_soft_reset(struct eth_device * dev,unsigned op)228*4882a593Smuzhiyun static void ks_soft_reset(struct eth_device *dev, unsigned op)
229*4882a593Smuzhiyun {
230*4882a593Smuzhiyun 	/* Disable interrupt first */
231*4882a593Smuzhiyun 	ks_wrreg16(dev, KS_IER, 0x0000);
232*4882a593Smuzhiyun 	ks_wrreg16(dev, KS_GRR, op);
233*4882a593Smuzhiyun 	mdelay(10);	/* wait a short time to effect reset */
234*4882a593Smuzhiyun 	ks_wrreg16(dev, KS_GRR, 0);
235*4882a593Smuzhiyun 	mdelay(1);	/* wait for condition to clear */
236*4882a593Smuzhiyun }
237*4882a593Smuzhiyun 
ks_enable_qmu(struct eth_device * dev)238*4882a593Smuzhiyun void ks_enable_qmu(struct eth_device *dev)
239*4882a593Smuzhiyun {
240*4882a593Smuzhiyun 	u16 w;
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun 	w = ks_rdreg16(dev, KS_TXCR);
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun 	/* Enables QMU Transmit (TXCR). */
245*4882a593Smuzhiyun 	ks_wrreg16(dev, KS_TXCR, w | TXCR_TXE);
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun 	/* Enable RX Frame Count Threshold and Auto-Dequeue RXQ Frame */
248*4882a593Smuzhiyun 	w = ks_rdreg16(dev, KS_RXQCR);
249*4882a593Smuzhiyun 	ks_wrreg16(dev, KS_RXQCR, w | RXQCR_RXFCTE);
250*4882a593Smuzhiyun 
251*4882a593Smuzhiyun 	/* Enables QMU Receive (RXCR1). */
252*4882a593Smuzhiyun 	w = ks_rdreg16(dev, KS_RXCR1);
253*4882a593Smuzhiyun 	ks_wrreg16(dev, KS_RXCR1, w | RXCR1_RXE);
254*4882a593Smuzhiyun }
255*4882a593Smuzhiyun 
ks_disable_qmu(struct eth_device * dev)256*4882a593Smuzhiyun static void ks_disable_qmu(struct eth_device *dev)
257*4882a593Smuzhiyun {
258*4882a593Smuzhiyun 	u16 w;
259*4882a593Smuzhiyun 
260*4882a593Smuzhiyun 	w = ks_rdreg16(dev, KS_TXCR);
261*4882a593Smuzhiyun 
262*4882a593Smuzhiyun 	/* Disables QMU Transmit (TXCR). */
263*4882a593Smuzhiyun 	w &= ~TXCR_TXE;
264*4882a593Smuzhiyun 	ks_wrreg16(dev, KS_TXCR, w);
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 	/* Disables QMU Receive (RXCR1). */
267*4882a593Smuzhiyun 	w = ks_rdreg16(dev, KS_RXCR1);
268*4882a593Smuzhiyun 	w &= ~RXCR1_RXE;
269*4882a593Smuzhiyun 	ks_wrreg16(dev, KS_RXCR1, w);
270*4882a593Smuzhiyun }
271*4882a593Smuzhiyun 
/*
 * ks_read_qmu - pull one received frame out of the QMU RX buffer
 * @dev: The network device
 * @buf: destination buffer
 * @len: frame length in bytes as reported by the chip
 *
 * Enters pseudo-DMA mode, discards the bus-width-dependent prepend
 * (dummy + status + length words), reads the packet data, then leaves
 * pseudo-DMA mode.  Note the prepend words are read INTO @buf and then
 * overwritten by the packet data — @buf is used as scratch for them.
 */
static inline void ks_read_qmu(struct eth_device *dev, u16 *buf, u32 len)
{
	/* split extra_byte into an odd leftover byte (r) and whole words (w) */
	u32 r = ks->extra_byte & 0x1;
	u32 w = ks->extra_byte - r;

	/* 1. set pseudo DMA mode */
	ks_wrreg16(dev, KS_RXFDPR, RXFDPR_RXFPAI);
	ks_wrreg8(dev, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);

	/*
	 * 2. read prepend data
	 *
	 * read 4 + extra bytes and discard them.
	 * extra bytes for dummy, 2 for status, 2 for len
	 */

	if (r)
		ks_rdreg8(dev, 0);

	ks_inblk(dev, buf, w + 2 + 2);

	/* 3. read pkt data (rounded up to a 4-byte boundary) */
	ks_inblk(dev, buf, ALIGN(len, 4));

	/* 4. reset pseudo DMA Mode */
	ks_wrreg8(dev, KS_RXQCR, (ks->rc_rxqcr & ~RXQCR_SDA) & 0xff);
}
299*4882a593Smuzhiyun 
/*
 * ks_rcv - drain all pending frames from the RXQ
 * @dev: The network device
 * @pv_data: array of receive buffers (net_rx_packets)
 *
 * First snapshots the status/length header of every pending frame,
 * then reads each valid frame out of the QMU and hands it to the
 * network stack.  Invalid or oversized frames are released in place
 * via the RRXEF (release error frame) bit.
 */
static void ks_rcv(struct eth_device *dev, uchar **pv_data)
{
	struct type_frame_head *frame_hdr = ks->frame_head_info;
	int i;

	/* upper byte of RXFCTR is the pending-frame count */
	ks->frame_cnt = ks_rdreg16(dev, KS_RXFCTR) >> 8;

	/* read all header information */
	for (i = 0; i < ks->frame_cnt; i++) {
		/* Checking Received packet status */
		frame_hdr->sts = ks_rdreg16(dev, KS_RXFHSR);
		/* Get packet len from hardware */
		frame_hdr->len = ks_rdreg16(dev, KS_RXFHBCR);
		frame_hdr++;
	}

	frame_hdr = ks->frame_head_info;
	/* NOTE(review): frame_cnt is consumed by this loop (u32 post-
	 * decrement); it is re-read on every call, so the wrap after
	 * the final iteration is harmless. */
	while (ks->frame_cnt--) {
		if ((frame_hdr->sts & RXFSHR_RXFV) &&
		    (frame_hdr->len < RX_BUF_SIZE) &&
		    frame_hdr->len) {
			/* read data block including CRC 4 bytes */
			ks_read_qmu(dev, (u16 *)(*pv_data), frame_hdr->len);

			/* net_rx_packets buffer size is ok (*pv_data) */
			net_process_received_packet(*pv_data, frame_hdr->len);
			pv_data++;
		} else {
			/* release the bad frame from the RXQ */
			ks_wrreg16(dev, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
			printf(DRIVERNAME ": bad packet\n");
		}
		frame_hdr++;
	}
}
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun /*
336*4882a593Smuzhiyun  * ks_read_selftest - read the selftest memory info.
337*4882a593Smuzhiyun  * @ks: The device state
338*4882a593Smuzhiyun  *
339*4882a593Smuzhiyun  * Read and check the TX/RX memory selftest information.
340*4882a593Smuzhiyun  */
ks_read_selftest(struct eth_device * dev)341*4882a593Smuzhiyun static int ks_read_selftest(struct eth_device *dev)
342*4882a593Smuzhiyun {
343*4882a593Smuzhiyun 	u16 both_done = MBIR_TXMBF | MBIR_RXMBF;
344*4882a593Smuzhiyun 	u16 mbir;
345*4882a593Smuzhiyun 	int ret = 0;
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun 	mbir = ks_rdreg16(dev, KS_MBIR);
348*4882a593Smuzhiyun 
349*4882a593Smuzhiyun 	if ((mbir & both_done) != both_done) {
350*4882a593Smuzhiyun 		printf(DRIVERNAME ": Memory selftest not finished\n");
351*4882a593Smuzhiyun 		return 0;
352*4882a593Smuzhiyun 	}
353*4882a593Smuzhiyun 
354*4882a593Smuzhiyun 	if (mbir & MBIR_TXMBFA) {
355*4882a593Smuzhiyun 		printf(DRIVERNAME ": TX memory selftest fails\n");
356*4882a593Smuzhiyun 		ret |= 1;
357*4882a593Smuzhiyun 	}
358*4882a593Smuzhiyun 
359*4882a593Smuzhiyun 	if (mbir & MBIR_RXMBFA) {
360*4882a593Smuzhiyun 		printf(DRIVERNAME ": RX memory selftest fails\n");
361*4882a593Smuzhiyun 		ret |= 2;
362*4882a593Smuzhiyun 	}
363*4882a593Smuzhiyun 
364*4882a593Smuzhiyun 	debug(DRIVERNAME ": the selftest passes\n");
365*4882a593Smuzhiyun 
366*4882a593Smuzhiyun 	return ret;
367*4882a593Smuzhiyun }
368*4882a593Smuzhiyun 
/*
 * ks_setup - program the QMU and MAC configuration registers
 * @dev: The network device
 *
 * Configures frame-data-pointer auto-increment for both queues, the
 * RX frame threshold, the cached RXQCR value, PHY duplex fallback,
 * and the TX/RX control registers.
 */
static void ks_setup(struct eth_device *dev)
{
	u16 w;

	/* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
	ks_wrreg16(dev, KS_TXFDPR, TXFDPR_TXFPAI);

	/* Setup Receive Frame Data Pointer Auto-Increment */
	ks_wrreg16(dev, KS_RXFDPR, RXFDPR_RXFPAI);

	/* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
	ks_wrreg16(dev, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);

	/* Setup RxQ Command Control (RXQCR); cached for later SDA toggling */
	ks->rc_rxqcr = RXQCR_CMD_CNTL;
	ks_wrreg16(dev, KS_RXQCR, ks->rc_rxqcr);

	/*
	 * set the force mode to half duplex, default is full duplex
	 * because if the auto-negotiation fails, most switch uses
	 * half-duplex.
	 */
	w = ks_rdreg16(dev, KS_P1MBCR);
	w &= ~P1MBCR_FORCE_FDX;
	ks_wrreg16(dev, KS_P1MBCR, w);

	/* TX: flow control, padding, CRC generation, IP checksum */
	w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
	ks_wrreg16(dev, KS_TXCR, w);

	/* RX: flow control, broadcast/unicast/multicast, IP checksum check */
	w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;

	/* Normal mode */
	w |= RXCR1_RXPAFMA;

	ks_wrreg16(dev, KS_RXCR1, w);
}
405*4882a593Smuzhiyun 
ks_setup_int(struct eth_device * dev)406*4882a593Smuzhiyun static void ks_setup_int(struct eth_device *dev)
407*4882a593Smuzhiyun {
408*4882a593Smuzhiyun 	ks->rc_ier = 0x00;
409*4882a593Smuzhiyun 
410*4882a593Smuzhiyun 	/* Clear the interrupts status of the hardware. */
411*4882a593Smuzhiyun 	ks_wrreg16(dev, KS_ISR, 0xffff);
412*4882a593Smuzhiyun 
413*4882a593Smuzhiyun 	/* Enables the interrupts of the hardware. */
414*4882a593Smuzhiyun 	ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
415*4882a593Smuzhiyun }
416*4882a593Smuzhiyun 
ks8851_mll_detect_chip(struct eth_device * dev)417*4882a593Smuzhiyun static int ks8851_mll_detect_chip(struct eth_device *dev)
418*4882a593Smuzhiyun {
419*4882a593Smuzhiyun 	unsigned short val, i;
420*4882a593Smuzhiyun 
421*4882a593Smuzhiyun 	ks_read_config(dev);
422*4882a593Smuzhiyun 
423*4882a593Smuzhiyun 	val = ks_rdreg16(dev, KS_CIDER);
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun 	if (val == 0xffff) {
426*4882a593Smuzhiyun 		/* Special case -- no chip present */
427*4882a593Smuzhiyun 		printf(DRIVERNAME ":  is chip mounted ?\n");
428*4882a593Smuzhiyun 		return -1;
429*4882a593Smuzhiyun 	} else if ((val & 0xfff0) != CIDER_ID) {
430*4882a593Smuzhiyun 		printf(DRIVERNAME ": Invalid chip id 0x%04x\n", val);
431*4882a593Smuzhiyun 		return -1;
432*4882a593Smuzhiyun 	}
433*4882a593Smuzhiyun 
434*4882a593Smuzhiyun 	debug("Read back KS8851 id 0x%x\n", val);
435*4882a593Smuzhiyun 
436*4882a593Smuzhiyun 	/* only one entry in the table */
437*4882a593Smuzhiyun 	val &= 0xfff0;
438*4882a593Smuzhiyun 	for (i = 0; chip_ids[i].id != 0; i++) {
439*4882a593Smuzhiyun 		if (chip_ids[i].id == val)
440*4882a593Smuzhiyun 			break;
441*4882a593Smuzhiyun 	}
442*4882a593Smuzhiyun 	if (!chip_ids[i].id) {
443*4882a593Smuzhiyun 		printf(DRIVERNAME ": Unknown chip ID %04x\n", val);
444*4882a593Smuzhiyun 		return -1;
445*4882a593Smuzhiyun 	}
446*4882a593Smuzhiyun 
447*4882a593Smuzhiyun 	dev->priv = (void *)&chip_ids[i];
448*4882a593Smuzhiyun 
449*4882a593Smuzhiyun 	return 0;
450*4882a593Smuzhiyun }
451*4882a593Smuzhiyun 
/*
 * ks8851_mll_reset - bring the chip to a known idle state
 * @dev: The network device
 *
 * Wakes the chip, issues a global soft reset, masks and acknowledges
 * all interrupts, then disables the QMU.  Order matters here.
 */
static void ks8851_mll_reset(struct eth_device *dev)
{
	/* wake up powermode to normal mode */
	ks_set_powermode(dev, PMECR_PM_NORMAL);
	mdelay(1);	/* wait for normal mode to take effect */

	/* Disable interrupt and reset */
	ks_soft_reset(dev, GRR_GSR);

	/* turn off the IRQs and ack any outstanding */
	ks_wrreg16(dev, KS_IER, 0x0000);
	ks_wrreg16(dev, KS_ISR, 0xffff);

	/* shutdown RX/TX QMU */
	ks_disable_qmu(dev);
}
468*4882a593Smuzhiyun 
ks8851_mll_phy_configure(struct eth_device * dev)469*4882a593Smuzhiyun static void ks8851_mll_phy_configure(struct eth_device *dev)
470*4882a593Smuzhiyun {
471*4882a593Smuzhiyun 	u16 data;
472*4882a593Smuzhiyun 
473*4882a593Smuzhiyun 	ks_setup(dev);
474*4882a593Smuzhiyun 	ks_setup_int(dev);
475*4882a593Smuzhiyun 
476*4882a593Smuzhiyun 	/* Probing the phy */
477*4882a593Smuzhiyun 	data = ks_rdreg16(dev, KS_OBCR);
478*4882a593Smuzhiyun 	ks_wrreg16(dev, KS_OBCR, data | OBCR_ODS_16MA);
479*4882a593Smuzhiyun 
480*4882a593Smuzhiyun 	debug(DRIVERNAME ": phy initialized\n");
481*4882a593Smuzhiyun }
482*4882a593Smuzhiyun 
/*
 * ks8851_mll_enable - ack pending interrupts, then enable IRQs and QMU
 * @dev: The network device
 */
static void ks8851_mll_enable(struct eth_device *dev)
{
	ks_wrreg16(dev, KS_ISR, 0xffff);
	ks_enable_int(dev);
	ks_enable_qmu(dev);
}
489*4882a593Smuzhiyun 
/*
 * ks8851_mll_init - U-Boot eth init hook: bring the interface up
 * @dev: The network device
 * @bd: board information (unused)
 *
 * Checks the memory selftest, resets the chip, configures PHY/QMU,
 * attaches the static rx-header scratch array, and enables RX/TX.
 *
 * Return: 0 on success, -1 if the memory selftest failed.
 */
static int ks8851_mll_init(struct eth_device *dev, bd_t *bd)
{
	/*
	 * dev->priv was pointed at an entry of the const chip_ids[]
	 * table by ks8851_mll_detect_chip(); keep the pointer
	 * const-qualified so we never write through it.
	 */
	const struct chip_id *id = dev->priv;

	debug(DRIVERNAME ": detected %s controller\n", id->name);

	if (ks_read_selftest(dev)) {
		printf(DRIVERNAME ": Selftest failed\n");
		return -1;
	}

	ks8851_mll_reset(dev);

	/* Configure the PHY, initialize the link state */
	ks8851_mll_phy_configure(dev);

	/* static allocation of private informations */
	ks->frame_head_info = fr_h_i;

	/* Turn on Tx + Rx */
	ks8851_mll_enable(dev);

	return 0;
}
514*4882a593Smuzhiyun 
/*
 * ks_write_qmu - push one frame into the QMU TX buffer and enqueue it
 * @dev: The network device
 * @pdata: frame data
 * @len: frame length in bytes
 *
 * Builds the 4-byte status/length header, writes header + data in
 * pseudo-DMA mode, then triggers manual enqueue (METFE) and waits for
 * the hardware to clear it.
 */
static void ks_write_qmu(struct eth_device *dev, u8 *pdata, u16 len)
{
	/* start header at txb[0] to align txw entries */
	ks->txh.txw[0] = 0;
	ks->txh.txw[1] = cpu_to_le16(len);

	/* 1. set pseudo-DMA mode */
	ks_wrreg16(dev, KS_TXFDPR, TXFDPR_TXFPAI);
	ks_wrreg8(dev, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
	/* 2. write status/length info */
	ks_outblk(dev, ks->txh.txw, 4);
	/* 3. write pkt data (rounded up to a 4-byte boundary) */
	ks_outblk(dev, (u16 *)pdata, ALIGN(len, 4));
	/* 4. reset pseudo-DMA mode */
	ks_wrreg8(dev, KS_RXQCR, (ks->rc_rxqcr & ~RXQCR_SDA) & 0xff);
	/* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
	ks_wrreg16(dev, KS_TXQCR, TXQCR_METFE);
	/*
	 * 6. wait until TXQCR_METFE is auto-cleared
	 * NOTE(review): unbounded busy-wait — hangs forever if the chip
	 * never clears METFE; consider a timeout.
	 */
	do { } while (ks_rdreg16(dev, KS_TXQCR) & TXQCR_METFE);
}
535*4882a593Smuzhiyun 
ks8851_mll_send(struct eth_device * dev,void * packet,int length)536*4882a593Smuzhiyun static int ks8851_mll_send(struct eth_device *dev, void *packet, int length)
537*4882a593Smuzhiyun {
538*4882a593Smuzhiyun 	u8 *data = (u8 *)packet;
539*4882a593Smuzhiyun 	u16 tmplen = (u16)length;
540*4882a593Smuzhiyun 	u16 retv;
541*4882a593Smuzhiyun 
542*4882a593Smuzhiyun 	/*
543*4882a593Smuzhiyun 	 * Extra space are required:
544*4882a593Smuzhiyun 	 * 4 byte for alignment, 4 for status/length, 4 for CRC
545*4882a593Smuzhiyun 	 */
546*4882a593Smuzhiyun 	retv = ks_rdreg16(dev, KS_TXMIR) & 0x1fff;
547*4882a593Smuzhiyun 	if (retv >= tmplen + 12) {
548*4882a593Smuzhiyun 		ks_write_qmu(dev, data, tmplen);
549*4882a593Smuzhiyun 		return 0;
550*4882a593Smuzhiyun 	} else {
551*4882a593Smuzhiyun 		printf(DRIVERNAME ": failed to send packet: No buffer\n");
552*4882a593Smuzhiyun 		return -1;
553*4882a593Smuzhiyun 	}
554*4882a593Smuzhiyun }
555*4882a593Smuzhiyun 
/* U-Boot eth halt hook: quiesce the chip via a full reset. */
static void ks8851_mll_halt(struct eth_device *dev)
{
	ks8851_mll_reset(dev);
}
560*4882a593Smuzhiyun 
561*4882a593Smuzhiyun /*
562*4882a593Smuzhiyun  * Maximum receive ring size; that is, the number of packets
563*4882a593Smuzhiyun  * we can buffer before overflow happens. Basically, this just
564*4882a593Smuzhiyun  * needs to be enough to prevent a packet being discarded while
565*4882a593Smuzhiyun  * we are processing the previous one.
566*4882a593Smuzhiyun  */
ks8851_mll_recv(struct eth_device * dev)567*4882a593Smuzhiyun static int ks8851_mll_recv(struct eth_device *dev)
568*4882a593Smuzhiyun {
569*4882a593Smuzhiyun 	u16 status;
570*4882a593Smuzhiyun 
571*4882a593Smuzhiyun 	status = ks_rdreg16(dev, KS_ISR);
572*4882a593Smuzhiyun 
573*4882a593Smuzhiyun 	ks_wrreg16(dev, KS_ISR, status);
574*4882a593Smuzhiyun 
575*4882a593Smuzhiyun 	if ((status & IRQ_RXI))
576*4882a593Smuzhiyun 		ks_rcv(dev, (uchar **)net_rx_packets);
577*4882a593Smuzhiyun 
578*4882a593Smuzhiyun 	if ((status & IRQ_LDI)) {
579*4882a593Smuzhiyun 		u16 pmecr = ks_rdreg16(dev, KS_PMECR);
580*4882a593Smuzhiyun 		pmecr &= ~PMECR_WKEVT_MASK;
581*4882a593Smuzhiyun 		ks_wrreg16(dev, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
582*4882a593Smuzhiyun 	}
583*4882a593Smuzhiyun 
584*4882a593Smuzhiyun 	return 0;
585*4882a593Smuzhiyun }
586*4882a593Smuzhiyun 
ks8851_mll_write_hwaddr(struct eth_device * dev)587*4882a593Smuzhiyun static int ks8851_mll_write_hwaddr(struct eth_device *dev)
588*4882a593Smuzhiyun {
589*4882a593Smuzhiyun 	u16 addrl, addrm, addrh;
590*4882a593Smuzhiyun 
591*4882a593Smuzhiyun 	addrh = (dev->enetaddr[0] << 8) | dev->enetaddr[1];
592*4882a593Smuzhiyun 	addrm = (dev->enetaddr[2] << 8) | dev->enetaddr[3];
593*4882a593Smuzhiyun 	addrl = (dev->enetaddr[4] << 8) | dev->enetaddr[5];
594*4882a593Smuzhiyun 
595*4882a593Smuzhiyun 	ks_wrreg16(dev, KS_MARH, addrh);
596*4882a593Smuzhiyun 	ks_wrreg16(dev, KS_MARM, addrm);
597*4882a593Smuzhiyun 	ks_wrreg16(dev, KS_MARL, addrl);
598*4882a593Smuzhiyun 
599*4882a593Smuzhiyun 	return 0;
600*4882a593Smuzhiyun }
601*4882a593Smuzhiyun 
/*
 * ks8851_mll_initialize - register a KS8851-MLL device with U-Boot
 * @dev_num: instance number, used to build the device name
 * @base_addr: I/O base address of the chip
 *
 * Allocates the eth_device, probes for the chip, fills in the
 * driver's entry points and registers the device.
 *
 * Return: 0 on success, -1 on allocation failure or when no
 * supported chip is detected.
 */
int ks8851_mll_initialize(u8 dev_num, int base_addr)
{
	struct eth_device *dev = malloc(sizeof(*dev));

	if (!dev) {
		printf("Error: Failed to allocate memory\n");
		return -1;
	}
	memset(dev, 0, sizeof(*dev));

	dev->iobase = base_addr;

	/* the driver keeps a single static private-state instance */
	ks = &ks_str;

	/* Try to detect chip. Will fail if not present. */
	if (ks8851_mll_detect_chip(dev)) {
		free(dev);
		return -1;
	}

	dev->init = ks8851_mll_init;
	dev->halt = ks8851_mll_halt;
	dev->send = ks8851_mll_send;
	dev->recv = ks8851_mll_recv;
	dev->write_hwaddr = ks8851_mll_write_hwaddr;
	sprintf(dev->name, "%s-%hu", DRIVERNAME, dev_num);

	eth_register(dev);

	return 0;
}
634