/* MOXA ART Ethernet (RTL8201CP) driver.
 *
 * Copyright (C) 2013 Jonas Jensen
 *
 * Jonas Jensen <jonas.jensen@gmail.com>
 *
 * Based on code from
 * Moxa Technology Co., Ltd. <www.moxa.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/circ_buf.h>

#include "moxart_ether.h"

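/* Descriptors live in coherent DMA memory; judging by these helpers
 * the DMA engine expects them little-endian, so all descriptor
 * accesses go through cpu_to_le32()/le32_to_cpu() conversions.
 */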
static inline void moxart_desc_write(u32 data, u32 *desc)
{
	*desc = cpu_to_le32(data);
}

static inline u32 moxart_desc_read(u32 *desc)
{
	return le32_to_cpu(*desc);
}

static inline void moxart_emac_write(struct net_device *ndev,
				     unsigned int reg, unsigned long value)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	writel(value, priv->base + reg);
}

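/* The station address is split across two registers: the two most
 * significant bytes of the MAC address go into REG_MAC_MS_ADDRESS,
 * the remaining four bytes into the following word.
 */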
static void moxart_update_mac_address(struct net_device *ndev)
{
	moxart_emac_write(ndev, REG_MAC_MS_ADDRESS,
			  ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])));
	moxart_emac_write(ndev, REG_MAC_MS_ADDRESS + 4,
			  ((ndev->dev_addr[2] << 24) |
			   (ndev->dev_addr[3] << 16) |
			   (ndev->dev_addr[4] << 8) |
			   (ndev->dev_addr[5])));
}

static int moxart_set_mac_address(struct net_device *ndev, void *addr)
{
	struct sockaddr *address = addr;

	if (!is_valid_ether_addr(address->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
	moxart_update_mac_address(ndev);

	return 0;
}

static void moxart_mac_free_memory(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	if (priv->tx_desc_base)
		dma_free_coherent(&priv->pdev->dev,
				  TX_REG_DESC_SIZE * TX_DESC_NUM,
				  priv->tx_desc_base, priv->tx_base);

	if (priv->rx_desc_base)
		dma_free_coherent(&priv->pdev->dev,
				  RX_REG_DESC_SIZE * RX_DESC_NUM,
				  priv->rx_desc_base, priv->rx_base);

	kfree(priv->tx_buf_base);
	kfree(priv->rx_buf_base);
}

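/* Soft-reset the controller, busy-wait until the hardware clears
 * SW_RST, mask all interrupts, and set the baseline MAC control
 * bits that the later enables build on.
 */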
static void moxart_mac_reset(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	writel(SW_RST, priv->base + REG_MAC_CTRL);
	while (readl(priv->base + REG_MAC_CTRL) & SW_RST)
		mdelay(10);

	writel(0, priv->base + REG_INTERRUPT_MASK);

	priv->reg_maccr = RX_BROADPKT | FULLDUP | CRC_APD | RX_FTL;
}

static void moxart_mac_enable(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	writel(0x00001010, priv->base + REG_INT_TIMER_CTRL);
	writel(0x00000001, priv->base + REG_APOLL_TIMER_CTRL);
	writel(0x00000390, priv->base + REG_DMA_BLEN_CTRL);

	priv->reg_imr |= (RPKT_FINISH_M | XPKT_FINISH_M);
	writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);

	priv->reg_maccr |= (RCV_EN | XMT_EN | RDMA_EN | XDMA_EN);
	writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);
}

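/* (Re)build both descriptor rings: TX descriptors are zeroed and
 * left owned by the CPU, RX descriptors get a freshly mapped buffer
 * and are handed to the DMA engine.  The last descriptor of each
 * ring carries the END flag, which presumably tells the hardware to
 * wrap back to the ring base.
 */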
static void moxart_mac_setup_desc_ring(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	void *desc;
	int i;

	for (i = 0; i < TX_DESC_NUM; i++) {
		desc = priv->tx_desc_base + i * TX_REG_DESC_SIZE;
		memset(desc, 0, TX_REG_DESC_SIZE);

		priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i;
	}
	moxart_desc_write(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);

	priv->tx_head = 0;
	priv->tx_tail = 0;

	for (i = 0; i < RX_DESC_NUM; i++) {
		desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE;
		memset(desc, 0, RX_REG_DESC_SIZE);
		moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
		moxart_desc_write(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
				  desc + RX_REG_OFFSET_DESC1);

		priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
		priv->rx_mapping[i] = dma_map_single(&priv->pdev->dev,
						     priv->rx_buf[i],
						     priv->rx_buf_size,
						     DMA_FROM_DEVICE);
		if (dma_mapping_error(&priv->pdev->dev, priv->rx_mapping[i]))
			netdev_err(ndev, "DMA mapping error\n");

		moxart_desc_write(priv->rx_mapping[i],
				  desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS);
		moxart_desc_write((uintptr_t)priv->rx_buf[i],
				  desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT);
	}
	moxart_desc_write(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);

	priv->rx_head = 0;

	/* reset the MAC controller TX/RX descriptor base address */
	writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS);
	writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS);
}

static int moxart_mac_open(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	if (!is_valid_ether_addr(ndev->dev_addr))
		return -EADDRNOTAVAIL;

	napi_enable(&priv->napi);

	moxart_mac_reset(ndev);
	moxart_update_mac_address(ndev);
	moxart_mac_setup_desc_ring(ndev);
	moxart_mac_enable(ndev);
	netif_start_queue(ndev);

	netdev_dbg(ndev, "%s: IMR=0x%x, MACCR=0x%x\n",
		   __func__, readl(priv->base + REG_INTERRUPT_MASK),
		   readl(priv->base + REG_MAC_CTRL));

	return 0;
}

static int moxart_mac_stop(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	int i;

	napi_disable(&priv->napi);

	netif_stop_queue(ndev);

	/* disable all interrupts */
	writel(0, priv->base + REG_INTERRUPT_MASK);

	/* disable all functions */
	writel(0, priv->base + REG_MAC_CTRL);

	/* unmap areas mapped in moxart_mac_setup_desc_ring() */
	for (i = 0; i < RX_DESC_NUM; i++)
		dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
				 priv->rx_buf_size, DMA_FROM_DEVICE);

	return 0;
}

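/* NAPI poll: reap received frames until the budget is exhausted or
 * the hardware still owns the next descriptor.  The rmb() orders the
 * buffer reads after the ownership check; the wmb() before handing a
 * descriptor back orders them before it.
 */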
static int moxart_rx_poll(struct napi_struct *napi, int budget)
{
	struct moxart_mac_priv_t *priv = container_of(napi,
						      struct moxart_mac_priv_t,
						      napi);
	struct net_device *ndev = priv->ndev;
	struct sk_buff *skb;
	void *desc;
	unsigned int desc0, len;
	int rx_head = priv->rx_head;
	int rx = 0;

	while (rx < budget) {
		desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
		desc0 = moxart_desc_read(desc + RX_REG_OFFSET_DESC0);
		rmb(); /* ensure desc0 is up to date */

		if (desc0 & RX_DESC0_DMA_OWN)
			break;

		if (desc0 & (RX_DESC0_ERR | RX_DESC0_CRC_ERR | RX_DESC0_FTL |
			     RX_DESC0_RUNT | RX_DESC0_ODD_NB)) {
			net_dbg_ratelimited("packet error\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto rx_next;
		}

		len = desc0 & RX_DESC0_FRAME_LEN_MASK;

		if (len > RX_BUF_SIZE)
			len = RX_BUF_SIZE;

		dma_sync_single_for_cpu(&priv->pdev->dev,
					priv->rx_mapping[rx_head],
					priv->rx_buf_size, DMA_FROM_DEVICE);
		skb = netdev_alloc_skb_ip_align(ndev, len);

		if (unlikely(!skb)) {
			net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto rx_next;
		}

		memcpy(skb->data, priv->rx_buf[rx_head], len);
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&priv->napi, skb);
		rx++;

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		if (desc0 & RX_DESC0_MULTICAST)
			ndev->stats.multicast++;

rx_next:
		wmb(); /* prevent setting ownership back too early */
		moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);

		rx_head = RX_NEXT(rx_head);
		priv->rx_head = rx_head;
	}

	if (rx < budget)
		napi_complete_done(napi, rx);

	priv->reg_imr |= RPKT_FINISH_M;
	writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);

	return rx;
}

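/* Free slots in the TX ring.  CIRC_SPACE() always keeps one slot
 * unused so that a full ring can be told apart from an empty one.
 */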
static int moxart_tx_queue_space(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	return CIRC_SPACE(priv->tx_head, priv->tx_tail, TX_DESC_NUM);
}

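/* Runs from the TX-complete interrupt: unmap and release every
 * transmitted skb between tail and head, then wake the queue once
 * enough slots are free again.
 */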
static void moxart_tx_finished(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	unsigned int tx_head = priv->tx_head;
	unsigned int tx_tail = priv->tx_tail;

	while (tx_tail != tx_head) {
		dma_unmap_single(&priv->pdev->dev, priv->tx_mapping[tx_tail],
				 priv->tx_len[tx_tail], DMA_TO_DEVICE);

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += priv->tx_skb[tx_tail]->len;

		dev_consume_skb_irq(priv->tx_skb[tx_tail]);
		priv->tx_skb[tx_tail] = NULL;

		tx_tail = TX_NEXT(tx_tail);
	}
	priv->tx_tail = tx_tail;
	if (netif_queue_stopped(ndev) &&
	    moxart_tx_queue_space(ndev) >= TX_WAKE_THRESHOLD)
		netif_wake_queue(ndev);
}

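/* Hard interrupt handler: TX completions are reaped directly here,
 * while RX work is deferred to NAPI with the RX interrupt masked
 * until moxart_rx_poll() re-enables it.
 */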
static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	unsigned int ists = readl(priv->base + REG_INTERRUPT_STATUS);

	if (ists & XPKT_OK_INT_STS)
		moxart_tx_finished(ndev);

	if (ists & RPKT_FINISH) {
		if (napi_schedule_prep(&priv->napi)) {
			priv->reg_imr &= ~RPKT_FINISH_M;
			writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
			__napi_schedule(&priv->napi);
		}
	}

	return IRQ_HANDLED;
}

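/* Queue one frame for transmission: map the data for DMA, zero-pad
 * short frames to ETH_ZLEN, fill in the descriptor, hand ownership
 * to the hardware, and write the poll-demand register, which (as the
 * name suggests) makes the TX DMA engine re-scan the ring.
 */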
static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	void *desc;
	unsigned int len;
	unsigned int tx_head;
	u32 txdes1;
	netdev_tx_t ret = NETDEV_TX_BUSY;

	spin_lock_irq(&priv->txlock);

	tx_head = priv->tx_head;
	desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);

	if (moxart_tx_queue_space(ndev) == 1)
		netif_stop_queue(ndev);

	if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
		net_dbg_ratelimited("no TX space for packet\n");
		ndev->stats.tx_dropped++;
		goto out_unlock;
	}
	rmb(); /* ensure data is only read that had TX_DESC0_DMA_OWN cleared */

	len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;

	priv->tx_mapping[tx_head] = dma_map_single(&priv->pdev->dev, skb->data,
						   len, DMA_TO_DEVICE);
	if (dma_mapping_error(&priv->pdev->dev, priv->tx_mapping[tx_head])) {
		netdev_err(ndev, "DMA mapping error\n");
		goto out_unlock;
	}

	priv->tx_len[tx_head] = len;
	priv->tx_skb[tx_head] = skb;

	moxart_desc_write(priv->tx_mapping[tx_head],
			  desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS);
	moxart_desc_write((uintptr_t)skb->data,
			  desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT);

	if (skb->len < ETH_ZLEN) {
		memset(&skb->data[skb->len],
		       0, ETH_ZLEN - skb->len);
		len = ETH_ZLEN;
	}

	dma_sync_single_for_device(&priv->pdev->dev, priv->tx_mapping[tx_head],
				   priv->tx_buf_size, DMA_TO_DEVICE);

	txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
	if (tx_head == TX_DESC_NUM_MASK)
		txdes1 |= TX_DESC1_END;
	moxart_desc_write(txdes1, desc + TX_REG_OFFSET_DESC1);
	wmb(); /* flush descriptor before transferring ownership */
	moxart_desc_write(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);

	/* start to send packet */
	writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND);

	priv->tx_head = TX_NEXT(tx_head);

	netif_trans_update(ndev);
	ret = NETDEV_TX_OK;
out_unlock:
	spin_unlock_irq(&priv->txlock);

	return ret;
}

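/* Program the 64-bit multicast hash filter: the top six bits of the
 * little-endian CRC-32 of each address select one bit across the two
 * 32-bit hash-table registers.
 */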
static void moxart_mac_setmulticast(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int crc_val;

	netdev_for_each_mc_addr(ha, ndev) {
		crc_val = crc32_le(~0, ha->addr, ETH_ALEN);
		crc_val = (crc_val >> 26) & 0x3f;
		if (crc_val >= 32) {
			writel(readl(priv->base + REG_MCAST_HASH_TABLE1) |
			       (1UL << (crc_val - 32)),
			       priv->base + REG_MCAST_HASH_TABLE1);
		} else {
			writel(readl(priv->base + REG_MCAST_HASH_TABLE0) |
			       (1UL << crc_val),
			       priv->base + REG_MCAST_HASH_TABLE0);
		}
	}
}

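/* Translate interface flags into MAC control bits: IFF_PROMISC
 * accepts everything, IFF_ALLMULTI accepts all multicast, and
 * otherwise multicast reception is filtered through the hash table.
 */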
static void moxart_mac_set_rx_mode(struct net_device *ndev)
{
	struct moxart_mac_priv_t *priv = netdev_priv(ndev);

	spin_lock_irq(&priv->txlock);

	if (ndev->flags & IFF_PROMISC)
		priv->reg_maccr |= RCV_ALL;
	else
		priv->reg_maccr &= ~RCV_ALL;

	if (ndev->flags & IFF_ALLMULTI)
		priv->reg_maccr |= RX_MULTIPKT;
	else
		priv->reg_maccr &= ~RX_MULTIPKT;

	if ((ndev->flags & IFF_MULTICAST) && netdev_mc_count(ndev)) {
		priv->reg_maccr |= HT_MULTI_EN;
		moxart_mac_setmulticast(ndev);
	} else {
		priv->reg_maccr &= ~HT_MULTI_EN;
	}

	writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);

	spin_unlock_irq(&priv->txlock);
}

static const struct net_device_ops moxart_netdev_ops = {
	.ndo_open		= moxart_mac_open,
	.ndo_stop		= moxart_mac_stop,
	.ndo_start_xmit		= moxart_mac_start_xmit,
	.ndo_set_rx_mode	= moxart_mac_set_rx_mode,
	.ndo_set_mac_address	= moxart_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
};

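/* Probe: descriptor rings come from coherent DMA memory, while the
 * fixed per-slot packet buffers are plain kmalloc memory that is
 * streaming-mapped (the RX buffers at ring setup; TX frames are
 * mapped straight from the skb).
 */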
static int moxart_mac_probe(struct platform_device *pdev)
{
	struct device *p_dev = &pdev->dev;
	struct device_node *node = p_dev->of_node;
	struct net_device *ndev;
	struct moxart_mac_priv_t *priv;
	struct resource *res;
	unsigned int irq;
	int ret;

	ndev = alloc_etherdev(sizeof(struct moxart_mac_priv_t));
	if (!ndev)
		return -ENOMEM;

	irq = irq_of_parse_and_map(node, 0);
	if (!irq) {
		netdev_err(ndev, "irq_of_parse_and_map failed\n");
		ret = -EINVAL;
		goto irq_map_fail;
	}

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->pdev = pdev;

	priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(priv->base)) {
		dev_err(p_dev, "devm_platform_get_and_ioremap_resource failed\n");
		ret = PTR_ERR(priv->base);
		goto init_fail;
	}
	ndev->base_addr = res->start;

	spin_lock_init(&priv->txlock);

	priv->tx_buf_size = TX_BUF_SIZE;
	priv->rx_buf_size = RX_BUF_SIZE;

	priv->tx_desc_base = dma_alloc_coherent(p_dev, TX_REG_DESC_SIZE *
						TX_DESC_NUM, &priv->tx_base,
						GFP_DMA | GFP_KERNEL);
	if (!priv->tx_desc_base) {
		ret = -ENOMEM;
		goto init_fail;
	}

	priv->rx_desc_base = dma_alloc_coherent(p_dev, RX_REG_DESC_SIZE *
						RX_DESC_NUM, &priv->rx_base,
						GFP_DMA | GFP_KERNEL);
	if (!priv->rx_desc_base) {
		ret = -ENOMEM;
		goto init_fail;
	}

	priv->tx_buf_base = kmalloc_array(priv->tx_buf_size, TX_DESC_NUM,
					  GFP_ATOMIC);
	if (!priv->tx_buf_base) {
		ret = -ENOMEM;
		goto init_fail;
	}

	priv->rx_buf_base = kmalloc_array(priv->rx_buf_size, RX_DESC_NUM,
					  GFP_ATOMIC);
	if (!priv->rx_buf_base) {
		ret = -ENOMEM;
		goto init_fail;
	}

	platform_set_drvdata(pdev, ndev);

	ret = devm_request_irq(p_dev, irq, moxart_mac_interrupt, 0,
			       pdev->name, ndev);
	if (ret) {
		netdev_err(ndev, "devm_request_irq failed\n");
		goto init_fail;
	}

	ndev->netdev_ops = &moxart_netdev_ops;
	netif_napi_add(ndev, &priv->napi, moxart_rx_poll, RX_DESC_NUM);
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->irq = irq;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	ret = register_netdev(ndev);
	if (ret)
		goto init_fail;

	netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n",
		   __func__, ndev->irq, ndev->dev_addr);

	return 0;

init_fail:
	netdev_err(ndev, "init failed\n");
	moxart_mac_free_memory(ndev);
irq_map_fail:
	free_netdev(ndev);
	return ret;
}

static int moxart_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	unregister_netdev(ndev);
	devm_free_irq(&pdev->dev, ndev->irq, ndev);
	moxart_mac_free_memory(ndev);
	free_netdev(ndev);

	return 0;
}

static const struct of_device_id moxart_mac_match[] = {
	{ .compatible = "moxa,moxart-mac" },
	{ }
};
MODULE_DEVICE_TABLE(of, moxart_mac_match);

static struct platform_driver moxart_mac_driver = {
	.probe	= moxart_mac_probe,
	.remove	= moxart_remove,
	.driver	= {
		.name		= "moxart-ethernet",
		.of_match_table	= moxart_mac_match,
	},
};
module_platform_driver(moxart_mac_driver);

MODULE_DESCRIPTION("MOXART RTL8201CP Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");