// SPDX-License-Identifier: GPL-2.0-only
/* drivers/net/ethernet/micrel/ks8851.c
 *
 * Copyright 2009 Simtec Electronics
 *	http://www.simtec.co.uk/
 *	Ben Dooks <ben@simtec.co.uk>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DEBUG

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/cache.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/regulator/consumer.h>

#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>

#include "ks8851.h"

/**
 * ks8851_lock - register access lock
 * @ks: The chip state
 * @flags: Spinlock flags
 *
 * Claim chip register access lock
 */
static void ks8851_lock(struct ks8851_net *ks, unsigned long *flags)
{
	ks->lock(ks, flags);
}

/**
 * ks8851_unlock - register access unlock
 * @ks: The chip state
 * @flags: Spinlock flags
 *
 * Release chip register access lock
 */
static void ks8851_unlock(struct ks8851_net *ks, unsigned long *flags)
{
	ks->unlock(ks, flags);
}

/**
 * ks8851_wrreg16 - write 16bit register value to chip
 * @ks: The chip state
 * @reg: The register address
 * @val: The value to write
 *
 * Issue a write to put the value @val into the register specified in @reg.
 */
static void ks8851_wrreg16(struct ks8851_net *ks, unsigned int reg,
			   unsigned int val)
{
	ks->wrreg16(ks, reg, val);
}

/**
 * ks8851_rdreg16 - read 16 bit register from device
 * @ks: The chip information
 * @reg: The register address
 *
 * Read a 16bit register from the chip, returning the result
 */
static unsigned int ks8851_rdreg16(struct ks8851_net *ks,
				   unsigned int reg)
{
	return ks->rdreg16(ks, reg);
}

/**
 * ks8851_soft_reset - issue one of the soft resets to the device
 * @ks: The device state.
 * @op: The bit(s) to set in the GRR
 *
 * Issue the relevant soft-reset command to the device's GRR register
 * specified by @op.
 *
 * Note, the delays are in there as a caution to ensure that the reset
 * has time to take effect and then complete. Since the datasheet does
 * not currently specify the exact sequence, we have chosen something
 * that seems to work with our device.
 */
static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
{
	ks8851_wrreg16(ks, KS_GRR, op);
	mdelay(1);	/* wait a short time to effect reset */
	ks8851_wrreg16(ks, KS_GRR, 0);
	mdelay(1);	/* wait for condition to clear */
}

/**
 * ks8851_set_powermode - set power mode of the device
 * @ks: The device state
 * @pwrmode: The power mode value to write to KS_PMECR.
 *
 * Change the power mode of the chip.
 */
static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
{
	unsigned pmecr;

	netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);

	pmecr = ks8851_rdreg16(ks, KS_PMECR);
	pmecr &= ~PMECR_PM_MASK;
	pmecr |= pwrmode;

	ks8851_wrreg16(ks, KS_PMECR, pmecr);
}

/**
 * ks8851_write_mac_addr - write mac address to device registers
 * @dev: The network device
 *
 * Update the KS8851 MAC address registers from the address in @dev.
 *
 * This call assumes that the chip is not running, so there is no need to
 * shutdown the RXQ process whilst setting this.
 */
static int ks8851_write_mac_addr(struct net_device *dev)
{
	struct ks8851_net *ks = netdev_priv(dev);
	unsigned long flags;
	u16 val;
	int i;

	ks8851_lock(ks, &flags);

	/*
	 * Wake up chip in case it was powered off when stopped; otherwise,
	 * the first write to the MAC address does not take effect.
	 */
	ks8851_set_powermode(ks, PMECR_PM_NORMAL);

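	/* pack two consecutive address bytes into each 16-bit word, most
	 * significant byte first, as the KS_MAR(i) registers take the
	 * address two bytes at a time
	 */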
	for (i = 0; i < ETH_ALEN; i += 2) {
		val = (dev->dev_addr[i] << 8) | dev->dev_addr[i + 1];
		ks8851_wrreg16(ks, KS_MAR(i), val);
	}

	if (!netif_running(dev))
		ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);

	ks8851_unlock(ks, &flags);

	return 0;
}

/**
 * ks8851_read_mac_addr - read mac address from device registers
 * @dev: The network device
 *
 * Update our copy of the KS8851 MAC address from the registers of @dev.
 */
static void ks8851_read_mac_addr(struct net_device *dev)
{
	struct ks8851_net *ks = netdev_priv(dev);
	unsigned long flags;
	u16 reg;
	int i;

	ks8851_lock(ks, &flags);

	for (i = 0; i < ETH_ALEN; i += 2) {
		reg = ks8851_rdreg16(ks, KS_MAR(i));
		dev->dev_addr[i] = reg >> 8;
		dev->dev_addr[i + 1] = reg & 0xff;
	}

	ks8851_unlock(ks, &flags);
}

/**
 * ks8851_init_mac - initialise the mac address
 * @ks: The device structure
 * @np: The device node pointer
 *
 * Get or create the initial mac address for the device and then set that
 * into the station address register. A mac address supplied in the device
 * tree takes precedence. Otherwise, if there is an EEPROM present, then
 * we try that. If no valid mac address is found we use eth_random_addr()
 * to create a new one.
 */
static void ks8851_init_mac(struct ks8851_net *ks, struct device_node *np)
{
	struct net_device *dev = ks->netdev;
	const u8 *mac_addr;

	mac_addr = of_get_mac_address(np);
	if (!IS_ERR(mac_addr)) {
		ether_addr_copy(dev->dev_addr, mac_addr);
		ks8851_write_mac_addr(dev);
		return;
	}

	if (ks->rc_ccr & CCR_EEPROM) {
		ks8851_read_mac_addr(dev);
		if (is_valid_ether_addr(dev->dev_addr))
			return;

		netdev_err(ks->netdev, "invalid mac address read %pM\n",
			   dev->dev_addr);
	}

	eth_hw_addr_random(dev);
	ks8851_write_mac_addr(dev);
}

/**
 * ks8851_dbg_dumpkkt - dump initial packet contents to debug
 * @ks: The device state
 * @rxpkt: The data for the received packet
 *
 * Dump the initial data from the packet to netdev_dbg().
 */
static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
{
	netdev_dbg(ks->netdev,
		   "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
		   rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7],
		   rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11],
		   rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
}

/**
 * ks8851_rx_skb - receive skbuff
 * @ks: The device state.
 * @skb: The skbuff
 */
static void ks8851_rx_skb(struct ks8851_net *ks, struct sk_buff *skb)
{
	ks->rx_skb(ks, skb);
}

/**
 * ks8851_rx_pkts - receive packets from the host
 * @ks: The device information.
 *
 * This is called from the IRQ work queue when the system detects that there
 * are packets in the receive queue. Find out how many packets there are and
 * read them from the FIFO.
 */
static void ks8851_rx_pkts(struct ks8851_net *ks)
{
	struct sk_buff *skb;
	unsigned rxfc;
	unsigned rxlen;
	unsigned rxstat;
	u8 *rxpkt;

	rxfc = (ks8851_rdreg16(ks, KS_RXFCTR) >> 8) & 0xff;

	netif_dbg(ks, rx_status, ks->netdev,
		  "%s: %d packets\n", __func__, rxfc);

	/* Currently we're issuing a read per packet, but we could possibly
	 * improve the code by issuing a single read, getting the receive
	 * header, allocating the packet and then reading the packet data
	 * out in one go.
	 *
	 * This form of operation would require us to hold the SPI bus'
	 * chipselect low during the entire transaction to avoid any
	 * reset to the data stream coming from the chip.
	 */

	for (; rxfc != 0; rxfc--) {
		rxstat = ks8851_rdreg16(ks, KS_RXFHSR);
		rxlen = ks8851_rdreg16(ks, KS_RXFHBCR) & RXFHBCR_CNT_MASK;

		netif_dbg(ks, rx_status, ks->netdev,
			  "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);

		/* the length of the packet includes the 32bit CRC */

		/* set dma read address */
		ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00);

		/* start DMA access */
		ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);

		if (rxlen > 4) {
			unsigned int rxalign;

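			/* drop the trailing 4-byte CRC from the reported
			 * length and round the FIFO read up to a 4-byte
			 * boundary for the transfer
			 */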
			rxlen -= 4;
			rxalign = ALIGN(rxlen, 4);
			skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign);
			if (skb) {

				/* 4 bytes of status header + 4 bytes of
				 * garbage: we put them before ethernet
				 * header, so that they are copied,
				 * but ignored.
				 */

				rxpkt = skb_put(skb, rxlen) - 8;

				ks->rdfifo(ks, rxpkt, rxalign + 8);

				if (netif_msg_pktdata(ks))
					ks8851_dbg_dumpkkt(ks, rxpkt);

				skb->protocol = eth_type_trans(skb, ks->netdev);
				ks8851_rx_skb(ks, skb);

				ks->netdev->stats.rx_packets++;
				ks->netdev->stats.rx_bytes += rxlen;
			}
		}

		/* end DMA access and dequeue packet */
		ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_RRXEF);
	}
}

/**
 * ks8851_irq - IRQ handler for dealing with interrupt requests
 * @irq: IRQ number
 * @_ks: cookie
 *
 * This handler is invoked when the IRQ line asserts to find out what happened.
 * As we cannot allow ourselves to sleep in HARDIRQ context, this handler runs
 * in thread context.
 *
 * Read the interrupt status, work out what needs to be done and then clear
 * any of the interrupts that are not needed.
 */
static irqreturn_t ks8851_irq(int irq, void *_ks)
{
	struct ks8851_net *ks = _ks;
	unsigned handled = 0;
	unsigned long flags;
	unsigned int status;

	ks8851_lock(ks, &flags);

	status = ks8851_rdreg16(ks, KS_ISR);

	netif_dbg(ks, intr, ks->netdev,
		  "%s: status 0x%04x\n", __func__, status);

	if (status & IRQ_LCI)
		handled |= IRQ_LCI;

	if (status & IRQ_LDI) {
		u16 pmecr = ks8851_rdreg16(ks, KS_PMECR);
		pmecr &= ~PMECR_WKEVT_MASK;
		ks8851_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);

		handled |= IRQ_LDI;
	}

	if (status & IRQ_RXPSI)
		handled |= IRQ_RXPSI;

	if (status & IRQ_TXI) {
		handled |= IRQ_TXI;

		/* no lock here, tx queue should have been stopped */

		/* update our idea of how much tx space is available to the
		 * system */
		ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);

		netif_dbg(ks, intr, ks->netdev,
			  "%s: txspace %d\n", __func__, ks->tx_space);
	}

	if (status & IRQ_RXI)
		handled |= IRQ_RXI;

	if (status & IRQ_SPIBEI) {
		netdev_err(ks->netdev, "%s: spi bus error\n", __func__);
		handled |= IRQ_SPIBEI;
	}

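	/* acknowledge (clear) only the interrupt sources handled above */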
	ks8851_wrreg16(ks, KS_ISR, handled);

	if (status & IRQ_RXI) {
		/* the datasheet says to disable the rx interrupt during
		 * packet read-out, however we're masking the interrupt
		 * from the device so do not bother masking just the RX
		 * from the device. */

		ks8851_rx_pkts(ks);
	}

	/* if something stopped the rx process, probably due to wanting
	 * to change the rx settings, then do something about restarting
	 * it. */
	if (status & IRQ_RXPSI) {
		struct ks8851_rxctrl *rxc = &ks->rxctrl;

		/* update the multicast hash table */
		ks8851_wrreg16(ks, KS_MAHTR0, rxc->mchash[0]);
		ks8851_wrreg16(ks, KS_MAHTR1, rxc->mchash[1]);
		ks8851_wrreg16(ks, KS_MAHTR2, rxc->mchash[2]);
		ks8851_wrreg16(ks, KS_MAHTR3, rxc->mchash[3]);

		ks8851_wrreg16(ks, KS_RXCR2, rxc->rxcr2);
		ks8851_wrreg16(ks, KS_RXCR1, rxc->rxcr1);
	}

	ks8851_unlock(ks, &flags);

	if (status & IRQ_LCI)
		mii_check_link(&ks->mii);

	if (status & IRQ_TXI)
		netif_wake_queue(ks->netdev);

	return IRQ_HANDLED;
}

/**
 * ks8851_flush_tx_work - flush outstanding TX work
 * @ks: The device state
 */
static void ks8851_flush_tx_work(struct ks8851_net *ks)
{
	if (ks->flush_tx_work)
		ks->flush_tx_work(ks);
}

/**
 * ks8851_net_open - open network device
 * @dev: The network device being opened.
 *
 * Called when the network device is marked active, such as a user executing
 * 'ifconfig up' on the device.
 */
static int ks8851_net_open(struct net_device *dev)
{
	struct ks8851_net *ks = netdev_priv(dev);
	unsigned long flags;
	int ret;

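	/* register access may sleep (e.g. bus transfers), so the interrupt
	 * is serviced from a thread; IRQF_ONESHOT keeps the line masked
	 * until the threaded handler has finished
	 */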
	ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				   dev->name, ks);
	if (ret < 0) {
		netdev_err(dev, "failed to get irq\n");
		return ret;
	}

	/* lock the card, even if we may not actually be doing anything
	 * else at the moment */
	ks8851_lock(ks, &flags);

	netif_dbg(ks, ifup, ks->netdev, "opening\n");

	/* bring chip out of any power saving mode it was in */
	ks8851_set_powermode(ks, PMECR_PM_NORMAL);

	/* issue a soft reset to the RX/TX QMU to put it into a known
	 * state. */
	ks8851_soft_reset(ks, GRR_QMU);

	/* setup transmission parameters */

	ks8851_wrreg16(ks, KS_TXCR, (TXCR_TXE |     /* enable transmit process */
				     TXCR_TXPE |    /* pad to min length */
				     TXCR_TXCRC |   /* add CRC */
				     TXCR_TXFCE));  /* enable flow control */

	/* auto-increment tx data, reset tx pointer */
	ks8851_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);

	/* setup receiver control */

	ks8851_wrreg16(ks, KS_RXCR1, (RXCR1_RXPAFMA | /* from mac filter */
				      RXCR1_RXFCE |   /* enable flow control */
				      RXCR1_RXBE |    /* broadcast enable */
				      RXCR1_RXUE |    /* unicast enable */
				      RXCR1_RXE));    /* enable rx block */

	/* transfer entire frames out in one go */
	ks8851_wrreg16(ks, KS_RXCR2, RXCR2_SRDBL_FRAME);

	/* set receive counter timeouts */
	ks8851_wrreg16(ks, KS_RXDTTR, 1000); /* 1ms after first frame to IRQ */
	ks8851_wrreg16(ks, KS_RXDBCTR, 4096); /* >4Kbytes in buffer to IRQ */
	ks8851_wrreg16(ks, KS_RXFCTR, 10);  /* 10 frames to IRQ */

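	/* cache the RXQCR setup so the receive path can OR in the per-packet
	 * RXQCR_SDA/RXQCR_RRXEF command bits without re-reading the register
	 */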
	ks->rc_rxqcr = (RXQCR_RXFCTE |  /* IRQ on frame count exceeded */
			RXQCR_RXDBCTE | /* IRQ on byte count exceeded */
			RXQCR_RXDTTE);  /* IRQ on time exceeded */

	ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);

	/* clear then enable interrupts */
	ks8851_wrreg16(ks, KS_ISR, ks->rc_ier);
	ks8851_wrreg16(ks, KS_IER, ks->rc_ier);

	netif_start_queue(ks->netdev);

	netif_dbg(ks, ifup, ks->netdev, "network device up\n");

	ks8851_unlock(ks, &flags);
	mii_check_link(&ks->mii);
	return 0;
}

/**
 * ks8851_net_stop - close network device
 * @dev: The device being closed.
 *
 * Called to close down a network device which has been active. Cancel any
 * work, shutdown the RX and TX process and then place the chip into a low
 * power state whilst it is not being used.
 */
static int ks8851_net_stop(struct net_device *dev)
{
	struct ks8851_net *ks = netdev_priv(dev);
	unsigned long flags;

	netif_info(ks, ifdown, dev, "shutting down\n");

	netif_stop_queue(dev);

	ks8851_lock(ks, &flags);
	/* turn off the IRQs and ack any outstanding */
	ks8851_wrreg16(ks, KS_IER, 0x0000);
	ks8851_wrreg16(ks, KS_ISR, 0xffff);
	ks8851_unlock(ks, &flags);

	/* stop any outstanding work */
	ks8851_flush_tx_work(ks);
	flush_work(&ks->rxctrl_work);

	ks8851_lock(ks, &flags);
	/* shutdown RX process */
	ks8851_wrreg16(ks, KS_RXCR1, 0x0000);

	/* shutdown TX process */
	ks8851_wrreg16(ks, KS_TXCR, 0x0000);

	/* set powermode to soft power down to save power */
	ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
	ks8851_unlock(ks, &flags);

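	/* the interface is going down, so packets still sitting in the
	 * driver's software TX queue can never be sent; free them here
	 * rather than leak them
	 */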
	/* ensure any queued tx buffers are dumped */
	while (!skb_queue_empty(&ks->txq)) {
		struct sk_buff *txb = skb_dequeue(&ks->txq);

		netif_dbg(ks, ifdown, ks->netdev,
			  "%s: freeing txb %p\n", __func__, txb);

		dev_kfree_skb(txb);
	}

	free_irq(dev->irq, ks);

	return 0;
}

/**
 * ks8851_start_xmit - transmit packet
 * @skb: The buffer to transmit
 * @dev: The device used to transmit the packet.
 *
 * Called by the network layer to transmit the @skb. Queue the packet for
 * the device and schedule the necessary work to transmit the packet when
 * it is free.
 *
 * We do this to firstly avoid sleeping with the network device locked,
 * and secondly so we can round up more than one packet to transmit which
 * means we can try and avoid generating too many transmit done interrupts.
 */
static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct ks8851_net *ks = netdev_priv(dev);

	return ks->start_xmit(skb, dev);
}

/**
 * ks8851_rxctrl_work - work handler to change rx mode
 * @work: The work structure this belongs to.
 *
 * Lock the device and issue the necessary changes to the receive mode from
 * the network device layer. This is done so that we can do this without
 * having to sleep whilst holding the network device lock.
 *
 * Since the recommendation from Micrel is that the RXQ is shutdown whilst the
 * receive parameters are programmed, we issue a write to disable the RXQ and
 * then wait for the interrupt handler to be triggered once the RXQ shutdown is
 * complete. The interrupt handler then writes the new values into the chip.
 */
static void ks8851_rxctrl_work(struct work_struct *work)
{
	struct ks8851_net *ks = container_of(work, struct ks8851_net, rxctrl_work);
	unsigned long flags;

	ks8851_lock(ks, &flags);

	/* need to shutdown RXQ before modifying filter parameters */
	ks8851_wrreg16(ks, KS_RXCR1, 0x00);
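	/* the resulting IRQ_RXPSI interrupt is handled in ks8851_irq(),
	 * which programs ks->rxctrl into the chip and re-enables RXCR1
	 */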

	ks8851_unlock(ks, &flags);
}

static void ks8851_set_rx_mode(struct net_device *dev)
{
	struct ks8851_net *ks = netdev_priv(dev);
	struct ks8851_rxctrl rxctrl;

	memset(&rxctrl, 0, sizeof(rxctrl));

	if (dev->flags & IFF_PROMISC) {
		/* interface to receive everything */

		rxctrl.rxcr1 = RXCR1_RXAE | RXCR1_RXINVF;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* accept all multicast packets */

		rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE |
				RXCR1_RXPAFMA | RXCR1_RXMAFMA);
	} else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		u32 crc;

		/* accept some multicast */

		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc(ETH_ALEN, ha->addr);
			crc >>= (32 - 6); /* get top six bits */

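			/* the six CRC bits select one of 64 hash-table bits:
			 * the upper two pick one of the four MAHTR registers,
			 * the lower four pick the bit within that register
			 */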
			rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf));
		}

		rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA;
	} else {
		/* just accept broadcast / unicast */
		rxctrl.rxcr1 = RXCR1_RXPAFMA;
	}

	rxctrl.rxcr1 |= (RXCR1_RXUE | /* unicast enable */
			 RXCR1_RXBE | /* broadcast enable */
			 RXCR1_RXE |  /* RX process enable */
			 RXCR1_RXFCE); /* enable flow control */

	rxctrl.rxcr2 |= RXCR2_SRDBL_FRAME;

	/* schedule work to do the actual set of the data if needed */

	spin_lock(&ks->statelock);

	if (memcmp(&rxctrl, &ks->rxctrl, sizeof(rxctrl)) != 0) {
		memcpy(&ks->rxctrl, &rxctrl, sizeof(ks->rxctrl));
		schedule_work(&ks->rxctrl_work);
	}

	spin_unlock(&ks->statelock);
}

static int ks8851_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;

	if (netif_running(dev))
		return -EBUSY;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
	return ks8851_write_mac_addr(dev);
}

static int ks8851_net_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct ks8851_net *ks = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
}

static const struct net_device_ops ks8851_netdev_ops = {
	.ndo_open		= ks8851_net_open,
	.ndo_stop		= ks8851_net_stop,
	.ndo_do_ioctl		= ks8851_net_ioctl,
	.ndo_start_xmit		= ks8851_start_xmit,
	.ndo_set_mac_address	= ks8851_set_mac_address,
	.ndo_set_rx_mode	= ks8851_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
};

/* ethtool support */

static void ks8851_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *di)
{
	strlcpy(di->driver, "KS8851", sizeof(di->driver));
	strlcpy(di->version, "1.00", sizeof(di->version));
	strlcpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info));
}

static u32 ks8851_get_msglevel(struct net_device *dev)
{
	struct ks8851_net *ks = netdev_priv(dev);
	return ks->msg_enable;
}

static void ks8851_set_msglevel(struct net_device *dev, u32 to)
{
	struct ks8851_net *ks = netdev_priv(dev);
	ks->msg_enable = to;
}

static int ks8851_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct ks8851_net *ks = netdev_priv(dev);

	mii_ethtool_get_link_ksettings(&ks->mii, cmd);

	return 0;
}

static int ks8851_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct ks8851_net *ks = netdev_priv(dev);
	return mii_ethtool_set_link_ksettings(&ks->mii, cmd);
}

static u32 ks8851_get_link(struct net_device *dev)
{
	struct ks8851_net *ks = netdev_priv(dev);
	return mii_link_ok(&ks->mii);
}

static int ks8851_nway_reset(struct net_device *dev)
{
	struct ks8851_net *ks = netdev_priv(dev);
	return mii_nway_restart(&ks->mii);
}

/* EEPROM support */

static void ks8851_eeprom_regread(struct eeprom_93cx6 *ee)
{
	struct ks8851_net *ks = ee->data;
	unsigned val;

	val = ks8851_rdreg16(ks, KS_EEPCR);

	ee->reg_data_out = (val & EEPCR_EESB) ? 1 : 0;
	ee->reg_data_clock = (val & EEPCR_EESCK) ? 1 : 0;
	ee->reg_chip_select = (val & EEPCR_EECS) ? 1 : 0;
}

static void ks8851_eeprom_regwrite(struct eeprom_93cx6 *ee)
{
	struct ks8851_net *ks = ee->data;
	unsigned val = EEPCR_EESA; /* default - eeprom access on */

	if (ee->drive_data)
		val |= EEPCR_EESRWA;
	if (ee->reg_data_in)
		val |= EEPCR_EEDO;
	if (ee->reg_data_clock)
		val |= EEPCR_EESCK;
	if (ee->reg_chip_select)
		val |= EEPCR_EECS;

	ks8851_wrreg16(ks, KS_EEPCR, val);
}

/**
 * ks8851_eeprom_claim - claim device EEPROM and activate the interface
 * @ks: The network device state.
 *
 * Check for the presence of an EEPROM, and then activate software access
 * to the device.
 */
static int ks8851_eeprom_claim(struct ks8851_net *ks)
{
	/* start with clock low, cs high */
	ks8851_wrreg16(ks, KS_EEPCR, EEPCR_EESA | EEPCR_EECS);
	return 0;
}

/**
 * ks8851_eeprom_release - release the EEPROM interface
 * @ks: The device state
 *
 * Release the software access to the device EEPROM
 */
static void ks8851_eeprom_release(struct ks8851_net *ks)
{
	unsigned val = ks8851_rdreg16(ks, KS_EEPCR);

	ks8851_wrreg16(ks, KS_EEPCR, val & ~EEPCR_EESA);
}

#define KS_EEPROM_MAGIC	(0x00008851)

static int ks8851_set_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	struct ks8851_net *ks = netdev_priv(dev);
	int offset = ee->offset;
	unsigned long flags;
	int len = ee->len;
	u16 tmp;

	/* currently only support byte writing */
	if (len != 1)
		return -EINVAL;

	if (ee->magic != KS_EEPROM_MAGIC)
		return -EINVAL;

	if (!(ks->rc_ccr & CCR_EEPROM))
		return -ENOENT;

	ks8851_lock(ks, &flags);

	ks8851_eeprom_claim(ks);

	eeprom_93cx6_wren(&ks->eeprom, true);

	/* ethtool currently only supports writing bytes, which means
	 * we have to read/modify/write our 16bit EEPROMs */

	eeprom_93cx6_read(&ks->eeprom, offset/2, &tmp);

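	/* odd byte offsets live in the high byte of the 16-bit word,
	 * even offsets in the low byte
	 */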
	if (offset & 1) {
		tmp &= 0xff;
		tmp |= *data << 8;
	} else {
		tmp &= 0xff00;
		tmp |= *data;
	}

	eeprom_93cx6_write(&ks->eeprom, offset/2, tmp);
	eeprom_93cx6_wren(&ks->eeprom, false);

	ks8851_eeprom_release(ks);
	ks8851_unlock(ks, &flags);

	return 0;
}

static int ks8851_get_eeprom(struct net_device *dev,
			     struct ethtool_eeprom *ee, u8 *data)
{
	struct ks8851_net *ks = netdev_priv(dev);
	int offset = ee->offset;
	unsigned long flags;
	int len = ee->len;

	/* must be 2 byte aligned */
	if (len & 1 || offset & 1)
		return -EINVAL;

	if (!(ks->rc_ccr & CCR_EEPROM))
		return -ENOENT;

	ks8851_lock(ks, &flags);

	ks8851_eeprom_claim(ks);

	ee->magic = KS_EEPROM_MAGIC;

	eeprom_93cx6_multiread(&ks->eeprom, offset/2, (__le16 *)data, len/2);
	ks8851_eeprom_release(ks);
	ks8851_unlock(ks, &flags);

	return 0;
}

static int ks8851_get_eeprom_len(struct net_device *dev)
{
	struct ks8851_net *ks = netdev_priv(dev);

	/* currently, we assume a 93C46 is attached, so return 128 */
	return ks->rc_ccr & CCR_EEPROM ? 128 : 0;
}

static const struct ethtool_ops ks8851_ethtool_ops = {
	.get_drvinfo	= ks8851_get_drvinfo,
	.get_msglevel	= ks8851_get_msglevel,
	.set_msglevel	= ks8851_set_msglevel,
	.get_link	= ks8851_get_link,
	.nway_reset	= ks8851_nway_reset,
	.get_eeprom_len	= ks8851_get_eeprom_len,
	.get_eeprom	= ks8851_get_eeprom,
	.set_eeprom	= ks8851_set_eeprom,
	.get_link_ksettings = ks8851_get_link_ksettings,
	.set_link_ksettings = ks8851_set_link_ksettings,
};

/* MII interface controls */

/**
 * ks8851_phy_reg - convert MII register into a KS8851 register
 * @reg: MII register number.
 *
 * Return the KS8851 register number for the corresponding MII PHY register
 * if possible. Return zero if the MII register has no direct mapping to the
 * KS8851 register set.
 */
static int ks8851_phy_reg(int reg)
{
	switch (reg) {
	case MII_BMCR:
		return KS_P1MBCR;
	case MII_BMSR:
		return KS_P1MBSR;
	case MII_PHYSID1:
		return KS_PHY1ILR;
	case MII_PHYSID2:
		return KS_PHY1IHR;
	case MII_ADVERTISE:
		return KS_P1ANAR;
	case MII_LPA:
		return KS_P1ANLPR;
	}

	return 0x0;
}

/**
 * ks8851_phy_read - MII interface PHY register read.
 * @dev: The network device the PHY is on.
 * @phy_addr: Address of PHY (ignored as we only have one)
 * @reg: The register to read.
 *
 * This call reads data from the PHY register specified in @reg. Since the
 * device does not support all the MII registers, the non-existent values
 * are always returned as zero.
 *
 * We return zero for unsupported registers as the MII code does not check
 * the value returned for any error status, and simply returns it to the
 * caller. The mii-tool that the driver was tested with takes any -ve error
 * as real PHY capabilities, thus displaying incorrect data to the user.
 */
static int ks8851_phy_read(struct net_device *dev, int phy_addr, int reg)
{
	struct ks8851_net *ks = netdev_priv(dev);
	unsigned long flags;
	int ksreg;
	int result;

	ksreg = ks8851_phy_reg(reg);
	if (!ksreg)
		return 0x0; /* no error return allowed, so use zero */

	ks8851_lock(ks, &flags);
	result = ks8851_rdreg16(ks, ksreg);
	ks8851_unlock(ks, &flags);

	return result;
}

static void ks8851_phy_write(struct net_device *dev,
			     int phy, int reg, int value)
{
	struct ks8851_net *ks = netdev_priv(dev);
	unsigned long flags;
	int ksreg;

	ksreg = ks8851_phy_reg(reg);
	if (ksreg) {
		ks8851_lock(ks, &flags);
		ks8851_wrreg16(ks, ksreg, value);
		ks8851_unlock(ks, &flags);
	}
}

/**
 * ks8851_read_selftest - read the selftest memory info.
 * @ks: The device state
 *
 * Read and check the TX/RX memory selftest information.
 */
static int ks8851_read_selftest(struct ks8851_net *ks)
{
	unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
	int ret = 0;
	unsigned rd;

	rd = ks8851_rdreg16(ks, KS_MBIR);

	if ((rd & both_done) != both_done) {
		netdev_warn(ks->netdev, "Memory selftest not finished\n");
		return 0;
	}

	if (rd & MBIR_TXMBFA) {
		netdev_err(ks->netdev, "TX memory selftest fail\n");
		ret |= 1;
	}

	if (rd & MBIR_RXMBFA) {
		netdev_err(ks->netdev, "RX memory selftest fail\n");
		ret |= 2;
	}

	return ret;
}

/* driver bus management functions */

#ifdef CONFIG_PM_SLEEP

int ks8851_suspend(struct device *dev)
{
	struct ks8851_net *ks = dev_get_drvdata(dev);
	struct net_device *netdev = ks->netdev;

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		ks8851_net_stop(netdev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ks8851_suspend);

int ks8851_resume(struct device *dev)
{
	struct ks8851_net *ks = dev_get_drvdata(dev);
	struct net_device *netdev = ks->netdev;

	if (netif_running(netdev)) {
		ks8851_net_open(netdev);
		netif_device_attach(netdev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ks8851_resume);
#endif

int ks8851_probe_common(struct net_device *netdev, struct device *dev,
			int msg_en)
{
	struct ks8851_net *ks = netdev_priv(netdev);
	unsigned cider;
	int gpio;
	int ret;

	ks->netdev = netdev;
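	/* start with an optimistic estimate of the free TX buffer space;
	 * the real value is read back from KS_TXMIR whenever a TXI
	 * interrupt is handled
	 */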
	ks->tx_space = 6144;

	gpio = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0, NULL);
	if (gpio == -EPROBE_DEFER)
		return gpio;

	ks->gpio = gpio;
	if (gpio_is_valid(gpio)) {
		ret = devm_gpio_request_one(dev, gpio,
					    GPIOF_OUT_INIT_LOW, "ks8851_rst_n");
		if (ret) {
			dev_err(dev, "reset gpio request failed\n");
			return ret;
		}
	}

	ks->vdd_io = devm_regulator_get(dev, "vdd-io");
	if (IS_ERR(ks->vdd_io)) {
		ret = PTR_ERR(ks->vdd_io);
		goto err_reg_io;
	}

	ret = regulator_enable(ks->vdd_io);
	if (ret) {
		dev_err(dev, "regulator vdd_io enable fail: %d\n", ret);
		goto err_reg_io;
	}

	ks->vdd_reg = devm_regulator_get(dev, "vdd");
	if (IS_ERR(ks->vdd_reg)) {
		ret = PTR_ERR(ks->vdd_reg);
		goto err_reg;
	}

	ret = regulator_enable(ks->vdd_reg);
	if (ret) {
		dev_err(dev, "regulator vdd enable fail: %d\n", ret);
		goto err_reg;
	}

	if (gpio_is_valid(gpio)) {
		usleep_range(10000, 11000);
		gpio_set_value(gpio, 1);
	}

	spin_lock_init(&ks->statelock);

	INIT_WORK(&ks->rxctrl_work, ks8851_rxctrl_work);

	/* setup EEPROM state */
	ks->eeprom.data = ks;
	ks->eeprom.width = PCI_EEPROM_WIDTH_93C46;
	ks->eeprom.register_read = ks8851_eeprom_regread;
	ks->eeprom.register_write = ks8851_eeprom_regwrite;

	/* setup mii state */
	ks->mii.dev		= netdev;
	ks->mii.phy_id		= 1;
	ks->mii.phy_id_mask	= 1;
	ks->mii.reg_num_mask	= 0xf;
	ks->mii.mdio_read	= ks8851_phy_read;
	ks->mii.mdio_write	= ks8851_phy_write;

	dev_info(dev, "message enable is %d\n", msg_en);

	/* set the default message enable */
	ks->msg_enable = netif_msg_init(msg_en, NETIF_MSG_DRV |
						NETIF_MSG_PROBE |
						NETIF_MSG_LINK);

	skb_queue_head_init(&ks->txq);

	netdev->ethtool_ops = &ks8851_ethtool_ops;
	SET_NETDEV_DEV(netdev, dev);

	dev_set_drvdata(dev, ks);

	netif_carrier_off(ks->netdev);
	netdev->if_port = IF_PORT_100BASET;
	netdev->netdev_ops = &ks8851_netdev_ops;

	/* issue a global soft reset to reset the device. */
	ks8851_soft_reset(ks, GRR_GSR);

	/* simple check for a valid chip being connected to the bus */
	cider = ks8851_rdreg16(ks, KS_CIDER);
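	/* mask off the revision field so any silicon revision of the
	 * KS8851 is accepted
	 */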
	if ((cider & ~CIDER_REV_MASK) != CIDER_ID) {
		dev_err(dev, "failed to read device ID\n");
		ret = -ENODEV;
		goto err_id;
	}

	/* cache the contents of the CCR register for EEPROM, etc. */
	ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR);

	ks8851_read_selftest(ks);
	ks8851_init_mac(ks, dev->of_node);

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(dev, "failed to register network device\n");
		goto err_netdev;
	}

	netdev_info(netdev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
		    CIDER_REV_GET(cider), netdev->dev_addr, netdev->irq,
		    ks->rc_ccr & CCR_EEPROM ? "has" : "no");

	return 0;

err_netdev:
err_id:
	if (gpio_is_valid(gpio))
		gpio_set_value(gpio, 0);
	regulator_disable(ks->vdd_reg);
err_reg:
	regulator_disable(ks->vdd_io);
err_reg_io:
	return ret;
}
EXPORT_SYMBOL_GPL(ks8851_probe_common);

int ks8851_remove_common(struct device *dev)
{
	struct ks8851_net *priv = dev_get_drvdata(dev);

	if (netif_msg_drv(priv))
		dev_info(dev, "remove\n");

	unregister_netdev(priv->netdev);
	if (gpio_is_valid(priv->gpio))
		gpio_set_value(priv->gpio, 0);
	regulator_disable(priv->vdd_reg);
	regulator_disable(priv->vdd_io);

	return 0;
}
EXPORT_SYMBOL_GPL(ks8851_remove_common);

MODULE_DESCRIPTION("KS8851 Network driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_LICENSE("GPL");