// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		128
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096
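
/* Note: TX_BD_NUM_MIN ensures the ring can always describe one maximally
 * fragmented skb (MAX_SKB_FRAGS fragments plus the linear head);
 * axienet_check_tx_bd_space() probes MAX_SKB_FRAGS + 1 slots ahead before
 * waking the queue for the same reason.
 */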

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		40

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 * @value:	Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_local *lp,
				     off_t reg, u32 value)
{
	iowrite32(value, lp->dma_regs + reg);
}

static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
				 dma_addr_t addr)
{
	axienet_dma_out32(lp, reg, lower_32_bits(addr));

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
}
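
/* With 64-bit DMA addressing, each address register is a pair: the LSB
 * word at @reg and the MSB word at @reg + 4, written LSB first. For the
 * tail-pointer registers the AXI DMA programming model is understood to
 * treat the MSB write as the one that commits the update, so this
 * ordering avoids kicking the channel with a torn address.
 */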

static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

	return ret;
}
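
/* The MSB half above is folded in with two 16-bit shifts instead of a
 * single "<< 32" so the expression stays well-defined (and warning-free)
 * even when dma_addr_t is only 32 bits wide: shifting a value by its full
 * width is undefined behaviour in C. In that configuration the MSB term
 * is simply discarded.
 */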

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev:	Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi
 * Ethernet driver stop API is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(ndev->dev.parent,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(ndev->dev.parent, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(ndev->dev.parent,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev:	Pointer to the net_device structure
 *
 * Return: 0 on success; -ENOMEM on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	u32 cr;
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(ndev->dev.parent, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}
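
/* Sketch of the handshake programmed above: CDESC points each channel at
 * the head of its ring, the RS (run-stop) bit starts the channel, and each
 * later TDESC write tells the engine how far into the ring it may process.
 * Because every BD's next pointer was linked to slot (i + 1) % ring size,
 * the rings are circular, and steady-state operation only ever touches the
 * tail-pointer registers.
 */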

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev:	Pointer to the net_device structure
 * @address:	6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up the unicast MAC address filter with the given MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}
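
/* Worked example of the packing above, for the hypothetical MAC address
 * 00:0a:35:01:02:03 (bytes are packed LSB first):
 *
 *	UAW0            = 0x01350a00	(address bytes 3..0)
 *	UAW1 bits 15..0 = 0x0302	(address bytes 5 and 4)
 *
 * The read-modify-write on UAW1 preserves the register bits outside
 * XAE_UAW1_UNICASTADDR_MASK.
 */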

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev:	Pointer to the net_device structure
 * @p:		6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}
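
/* The address-filter CAM is programmed indirectly: the low byte of FMI
 * selects which of the XAE_MULTICAST_CAM_TABLE_NUM entries the following
 * AF0/AF1 writes land in, with the MAC packed LSB first just like
 * UAW0/UAW1. The disable path walks all entries and zeroes them so no
 * stale filter survives leaving multicast mode.
 */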

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev:	Pointer to the net_device structure
 * @options:	Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}
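
/* Each table entry is an independent read-modify-write; e.g. passing
 * XAE_OPTION_JUMBO sets XAE_TC_JUM_MASK in the Tx configuration register
 * and XAE_RCW1_JUM_MASK in the Rx one, while any mask whose option bit is
 * absent from @options gets cleared. Note the final OR only ever adds bits
 * to lp->options; callers wanting to drop an option clear it from
 * lp->options first (see axienet_device_reset()).
 */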

static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 value;
	int ret;

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	ret = read_poll_timeout(axienet_dma_in32, value,
				!(value & XAXIDMA_CR_RESET_MASK),
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAXIDMA_TX_CR_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
		return ret;
	}

	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
	ret = read_poll_timeout(axienet_ior, value,
				value & XAE_INT_PHYRSTCMPLT_MASK,
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAE_IS_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
		return ret;
	}

	return 0;
}
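
/* read_poll_timeout(op, val, cond, sleep_us, timeout_us, sleep_before_read,
 * args...) re-reads op(args...) into val until cond holds or timeout_us
 * elapses, sleeping sleep_us between reads. Each wait above therefore polls
 * roughly once per millisecond (assuming DELAY_OF_ONE_MILLISEC is 1000
 * microseconds) for up to 50 ms before failing with -ETIMEDOUT.
 */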

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 *
 * Return: 0 on success or a negative error number otherwise.
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	ret = __axienet_device_reset(lp);
	if (ret)
		return ret;

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU)) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
					XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	ret = axienet_dma_bd_init(ndev);
	if (ret) {
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);
		return ret;
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}

/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @ndev:	Pointer to the net_device structure
 * @first_bd:	Index of first descriptor to clean up
 * @nr_bds:	Number of descriptors to clean up, can be -1 if unknown.
 * @sizep:	Pointer to a u32 filled with the total sum of all bytes
 *		in all cleaned-up descriptors. Ignored if NULL.
 *
 * Would either be called after a successful transmit operation, or after
 * there was an error when setting up the chain.
 * Returns the number of descriptors handled.
 */
static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
				 int nr_bds, u32 *sizep)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	int max_bds = nr_bds;
	unsigned int status;
	dma_addr_t phys;
	int i;

	if (max_bds == -1)
		max_bds = lp->tx_bd_num;

	for (i = 0; i < max_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If no number is given, clean up *all* descriptors that have
		 * been completed by the MAC.
		 */
		if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		/* Ensure we see complete descriptor update */
		dma_rmb();
		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(ndev->dev.parent, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
			dev_consume_skb_irq(cur_p->skb);

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
		/* ensure our transmit path and device don't prematurely see status cleared */
		wmb();
		cur_p->cntrl = 0;
		cur_p->status = 0;

		if (sizep)
			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
	}

	return i;
}
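
/* Barrier pairing in the loop above: dma_rmb() orders the read of the
 * COMPLETE bit before the dependent reads of cntrl and the app words, and
 * wmb() makes the skb/app clears visible before cntrl is zeroed, because a
 * zero cntrl is exactly what axienet_check_tx_bd_space() takes to mean
 * "this BD may be reused".
 */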

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp:		Pointer to the axienet_local structure
 * @num_frag:	The number of BDs to check for
 *
 * Return: 0, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status. This is invoked from axienet_start_xmit.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	/* Ensure we see all descriptor updates from device or TX IRQ path */
	rmb();
	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
	if (cur_p->cntrl)
		return NETDEV_TX_BUSY;
	return 0;
}
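
/* Only the slot num_frag entries ahead of the tail is probed: BDs are
 * consumed and cleaned strictly in ring order, so if that furthest slot
 * has a zero cntrl then every slot between the tail and it must be free
 * too, making a single read sufficient to reserve space for a whole
 * num_frag-fragment frame.
 */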

/**
 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is invoked from the Axi DMA Tx isr to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static void axienet_start_xmit_done(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	u32 packets = 0;
	u32 size = 0;

	packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size);

	lp->tx_bd_ci += packets;
	if (lp->tx_bd_ci >= lp->tx_bd_num)
		lp->tx_bd_ci -= lp->tx_bd_num;

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;

	/* Matches barrier in axienet_start_xmit */
	smp_mb();

	if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
		netif_wake_queue(ndev);
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb:	sk_buff pointer that contains data to be Txed.
 * @ndev:	Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	u32 orig_tail_ptr = lp->tx_bd_tail;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
		/* Should not happen as last start_xmit call should have
		 * checked for sufficient space and queue should only be
		 * woken when sufficient space is available.
		 */
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(ndev->dev.parent, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	for (ii = 0; ii < num_frag; ii++) {
		if (++lp->tx_bd_tail >= lp->tx_bd_num)
			lp->tx_bd_tail = 0;
		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(ndev->dev.parent,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
					      NULL);
			lp->tx_bd_tail = orig_tail_ptr;

			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	if (++lp->tx_bd_tail >= lp->tx_bd_num)
		lp->tx_bd_tail = 0;

	/* Stop queue if next transmit may not have space */
	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(ndev);

		/* Matches barrier in axienet_start_xmit_done */
		smp_mb();

		/* Space might have just been freed - check again */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	return NETDEV_TX_OK;
}
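
/* The stop/wake sequence above is the usual lockless TX ring protocol:
 * stop the queue when the next worst-case frame might not fit, then
 * re-check after smp_mb() in case the completion path freed descriptors
 * between the check and netif_stop_queue(). The matching smp_mb() in
 * axienet_start_xmit_done() orders its cleanup against its wake check, so
 * one side always observes the other and the queue cannot stall forever.
 */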

/**
 * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
 *		  BD processing.
 * @ndev:	Pointer to net_device structure.
 *
 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 * does minimal processing and invokes "netif_rx" to complete further
 * processing.
 */
static void axienet_recv(struct net_device *ndev)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	u32 packets = 0;
	dma_addr_t tail_p = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct axidma_bd *cur_p;

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		/* Ensure we see complete descriptor update */
		dma_rmb();

		skb = cur_p->skb;
		cur_p->skb = NULL;

		/* skb could be NULL if a previous pass already received the
		 * packet for this slot in the ring, but failed to refill it
		 * with a newly allocated buffer. In this case, don't try to
		 * receive it again.
		 */
		if (likely(skb)) {
			length = cur_p->app4 & 0x0000FFFF;

			phys = desc_get_phys_addr(lp, cur_p);
			dma_unmap_single(ndev->dev.parent, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, ndev);
			/*skb_checksum_none_assert(skb);*/
			skb->ip_summed = CHECKSUM_NONE;

			/* if we're doing Rx csum offload, set it up */
			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
				csumstatus = (cur_p->app2 &
					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
				   skb->protocol == htons(ETH_P_IP) &&
				   skb->len > 64) {
				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}

			netif_rx(skb);

			size += length;
			packets++;
		}

		new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!new_skb)
			break;

		phys = dma_map_single(ndev->dev.parent, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			break;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		/* Only update tail_p to mark this slot as usable after it has
		 * been successfully refilled.
		 */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}
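
/* Refill failures leave the slot parked rather than handing the hardware a
 * buffer-less descriptor: on allocation or mapping failure the loop breaks
 * with cur_p->skb already NULL and tail_p still at the last successfully
 * refilled slot. The next pass sees the NULL skb, skips the
 * already-delivered packet, and retries the refill.
 */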

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 * to complete the BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
		axienet_start_xmit_done(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
			(lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			(lp->tx_bd_v[lp->tx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		schedule_work(&lp->dma_err_task);
		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
		axienet_recv(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
			(lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			(lp->rx_bd_v[lp->rx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		schedule_work(&lp->dma_err_task);
		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned int pending;

	pending = axienet_ior(lp, XAE_IP_OFFSET);
	if (!pending)
		return IRQ_NONE;

	if (pending & XAE_INT_RXFIFOOVR_MASK)
		ndev->stats.rx_missed_errors++;

	if (pending & XAE_INT_RXRJECT_MASK)
		ndev->stats.rx_frame_errors++;

	axienet_iow(lp, XAE_IS_OFFSET, pending);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(struct work_struct *work);

/**
 * axienet_open - Driver open routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *	    non-zero error value on failure
 *
 * This is the driver open routine. It calls phylink_start to start the
 * PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_open()\n");

	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting
	 * and re-enabled afterwards.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	mutex_lock(&lp->mii_bus->mdio_lock);
	axienet_mdio_disable(lp);
	ret = axienet_device_reset(ndev);
	if (ret == 0)
		ret = axienet_mdio_enable(lp);
	mutex_unlock(&lp->mii_bus->mdio_lock);
	if (ret < 0)
		return ret;

	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
	if (ret) {
		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
		return ret;
	}

	phylink_start(lp->phylink);

	/* Enable worker thread for Axi DMA error handling */
	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable interrupts for Axi Ethernet core (if defined) */
	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}
1148*4882a593Smuzhiyun
/**
 * axienet_stop - Driver stop routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0 always
 *
 * This is the driver stop routine. It calls phylink_stop and
 * phylink_disconnect_phy to stop the PHY device. It also removes the
 * interrupt handlers and disables the interrupts. The Axi DMA Tx/Rx BDs
 * are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	u32 cr, sr;
	int count;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

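	/* Disable all interrupts from the Axi Ethernet core */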
	axienet_iow(lp, XAE_IE_OFFSET, 0);

	/* Give DMAs a chance to halt gracefully */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	mutex_lock(&lp->mii_bus->mdio_lock);
	axienet_mdio_disable(lp);
	__axienet_device_reset(lp);
	axienet_mdio_enable(lp);
	mutex_unlock(&lp->mii_bus->mdio_lock);

	cancel_work_sync(&lp->dma_err_task);

	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	axienet_dma_bd_release(ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev:	Pointer to net_device structure
 * @new_mtu:	New mtu value to be applied
 *
 * Return: 0 on success, -EBUSY if the device is running, or -EINVAL if the
 * new MTU does not fit in the Rx memory configured in hardware.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

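	/* The resulting frame (MTU plus VLAN Ethernet header and trailer)
	 * must fit in the Rx memory configured in hardware.
	 */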
	if (new_mtu + VLAN_ETH_HLEN + XAE_TRL_SIZE > lp->rxmem)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev:	Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled
 * prior to polling the ISRs and are re-enabled after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

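	/* The irq argument is unused by the DMA handlers, which identify
	 * the device through the net_device pointer instead.
	 */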
	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->rx_irq, ndev);
	axienet_tx_irq(lp->tx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

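/**
 * axienet_ioctl - Driver ioctl routine.
 * @dev:	Pointer to net_device structure
 * @rq:		Pointer to the interface request structure
 * @cmd:	ioctl command
 *
 * Passes MII ioctl requests through to phylink. The device must be running.
 *
 * Return: 0 on success, -EINVAL if the device is down, or a negative error
 * value from phylink on failure.
 */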
static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct axienet_local *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phylink_mii_ioctl(lp->phylink, rq, cmd);
}

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev:	Pointer to net_device structure
 * @ed:		Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under the Linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 * @regs:	Pointer to ethtool_regs structure
 * @ret:	Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *)ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
	data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
	data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
	data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
	data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
}

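/**
 * axienet_ethtools_get_ringparam - Get the DMA ring parameters.
 * @ndev:	Pointer to net_device structure
 * @ering:	Pointer to ethtool_ringparam structure
 *
 * This implements ethtool command for getting the current and maximum Tx/Rx
 * buffer descriptor ring sizes. Issue "ethtool -g ethX" under the Linux
 * prompt to execute this function.
 */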
static void axienet_ethtools_get_ringparam(struct net_device *ndev,
					   struct ethtool_ringparam *ering)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}

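/**
 * axienet_ethtools_set_ringparam - Set the DMA ring parameters.
 * @ndev:	Pointer to net_device structure
 * @ering:	Pointer to ethtool_ringparam structure
 *
 * This implements ethtool command for setting the Tx/Rx buffer descriptor
 * ring sizes. Issue "ethtool -G ethX rx 512 tx 64" under the Linux prompt
 * to execute this function. This can be called only when the device is down.
 *
 * Return: 0 on success, -EINVAL on invalid ring sizes, -EBUSY if the device
 * is running.
 */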
static int axienet_ethtools_set_ringparam(struct net_device *ndev,
					  struct ethtool_ringparam *ering)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending < TX_BD_NUM_MIN ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

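	/* The new ring sizes are latched here and take effect the next time
	 * the interface is opened, when the BD rings are reallocated.
	 */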
	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}

/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting the Axi Ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under the Linux prompt to execute
 * this function.
 *
 * Return: 0 on success, or a negative error value from phylink on failure.
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under the Linux prompt
 * to execute this function.
 *
 * Return: 0 always
 */
static int axienet_ethtools_get_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);

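	/* The coalesce count is held in a bit field of each DMA channel's
	 * control register.
	 */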
	regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under the
 * Linux prompt to execute this function.
 *
 * Return: 0 on success, -EFAULT if the device is running.
 */
static int axienet_ethtools_set_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static int
axienet_ethtools_get_link_ksettings(struct net_device *ndev,
				    struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
}

static int
axienet_ethtools_set_link_ksettings(struct net_device *ndev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
}

static const struct ethtool_ops axienet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo = axienet_ethtools_get_drvinfo,
	.get_regs_len = axienet_ethtools_get_regs_len,
	.get_regs = axienet_ethtools_get_regs,
	.get_link = ethtool_op_get_link,
	.get_ringparam = axienet_ethtools_get_ringparam,
	.set_ringparam = axienet_ethtools_set_ringparam,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce = axienet_ethtools_get_coalesce,
	.set_coalesce = axienet_ethtools_set_coalesce,
	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
};

static void axienet_validate(struct phylink_config *config,
			     unsigned long *supported,
			     struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* Only support the mode we are configured for */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != lp->phy_mode) {
		netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
			    phy_modes(state->interface),
			    phy_modes(lp->phy_mode));
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);

	phylink_set(mask, Asym_Pause);
	phylink_set(mask, Pause);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_NA:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 1000baseT_Full);
		if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
			break;
		fallthrough;
	case PHY_INTERFACE_MODE_MII:
		phylink_set(mask, 100baseT_Full);
		phylink_set(mask, 10baseT_Full);
		fallthrough;
	default:
		break;
	}

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static void axienet_mac_pcs_get_state(struct phylink_config *config,
				      struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		phylink_mii_c22_pcs_get_state(lp->pcs_phy, state);
		break;
	default:
		break;
	}
}

static void axienet_mac_an_restart(struct phylink_config *config)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);

	phylink_mii_c22_pcs_an_restart(lp->pcs_phy);
}

static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
			       const struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	switch (state->interface) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		ret = phylink_mii_c22_pcs_config(lp->pcs_phy, mode,
						 state->interface,
						 state->advertising);
		if (ret < 0)
			netdev_warn(ndev, "Failed to configure PCS: %d\n",
				    ret);
		break;

	default:
		break;
	}
}

static void axienet_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	/* nothing meaningful to do */
}

static void axienet_mac_link_up(struct phylink_config *config,
				struct phy_device *phy,
				unsigned int mode, phy_interface_t interface,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	u32 emmc_reg, fcc_reg;

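	/* Program the negotiated link speed into the MAC */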
	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

	switch (speed) {
	case SPEED_1000:
		emmc_reg |= XAE_EMMC_LINKSPD_1000;
		break;
	case SPEED_100:
		emmc_reg |= XAE_EMMC_LINKSPD_100;
		break;
	case SPEED_10:
		emmc_reg |= XAE_EMMC_LINKSPD_10;
		break;
	default:
		dev_err(&ndev->dev,
			"Speeds other than 10, 100 or 1000 Mbps are not supported\n");
		break;
	}

	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);

	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
	if (tx_pause)
		fcc_reg |= XAE_FCC_FCTX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCTX_MASK;
	if (rx_pause)
		fcc_reg |= XAE_FCC_FCRX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
}

static const struct phylink_mac_ops axienet_phylink_ops = {
	.validate = axienet_validate,
	.mac_pcs_get_state = axienet_mac_pcs_get_state,
	.mac_an_restart = axienet_mac_an_restart,
	.mac_config = axienet_mac_config,
	.mac_link_down = axienet_mac_link_down,
	.mac_link_up = axienet_mac_link_up,
};

/**
 * axienet_dma_err_handler - Work queue task for Axi DMA Error
 * @work:	pointer to work_struct
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
 */
static void axienet_dma_err_handler(struct work_struct *work)
{
	u32 axienet_status;
	u32 cr, i;
	struct axienet_local *lp = container_of(work, struct axienet_local,
						dma_err_task);
	struct net_device *ndev = lp->ndev;
	struct axidma_bd *cur_p;

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting
	 * and re-enabled afterwards.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	mutex_lock(&lp->mii_bus->mdio_lock);
	axienet_mdio_disable(lp);
	__axienet_device_reset(lp);
	axienet_mdio_enable(lp);
	mutex_unlock(&lp->mii_bus->mdio_lock);

	for (i = 0; i < lp->tx_bd_num; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->cntrl) {
			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);

			dma_unmap_single(ndev->dev.parent, addr,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		}
		if (cur_p->skb)
			dev_kfree_skb_irq(cur_p->skb);
		cur_p->phys = 0;
		cur_p->phys_msb = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

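	/* Rewind the software ring cursors to the start of the rings */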
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);
}

/**
 * axienet_probe - Axi Ethernet probe function.
 * @pdev:	Pointer to platform device structure.
 *
 * Return: 0 on success.
 *	   Non-zero error value on failure.
 *
 * This is the probe routine for Axi Ethernet driver. This is called before
 * any other driver routines are invoked. It allocates and sets up the
 * Ethernet device, parses the device tree to populate the fields of
 * axienet_local, and registers the Ethernet device.
 */
static int axienet_probe(struct platform_device *pdev)
{
	int ret;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	const void *mac_addr;
	struct resource *ethres;
	int addr_width = 32;
	u32 value;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &axienet_netdev_ops;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = XAE_JUMBO_MTU;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
	lp->tx_bd_num = TX_BD_NUM_DEFAULT;

	lp->clk = devm_clk_get_optional(&pdev->dev, NULL);
	if (IS_ERR(lp->clk)) {
		ret = PTR_ERR(lp->clk);
		goto free_netdev;
	}
	ret = clk_prepare_enable(lp->clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable clock: %d\n", ret);
		goto free_netdev;
	}

	/* Map device registers */
	ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
	if (IS_ERR(lp->regs)) {
		dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
		ret = PTR_ERR(lp->regs);
		goto cleanup_clk;
	}
	lp->regs_start = ethres->start;

	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_PARTIAL_TX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		case 2:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_FULL_TX_CSUM;
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		default:
			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_PARTIAL_RX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			break;
		case 2:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_FULL_RX_CSUM;
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			break;
		default:
			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	/* For supporting jumbo frames, the Axi Ethernet hardware must have
	 * a larger Rx/Tx Memory. Typically, the size must be large so that
	 * we can enable jumbo option and start supporting jumbo frames.
	 * Here we check for memory allocated for Rx/Tx in the hardware from
	 * the device-tree and accordingly set flags.
	 */
	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);

	/* Start with the proprietary, and broken phy_type */
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
	if (!ret) {
		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode\n");
		switch (value) {
		case XAE_PHY_TYPE_MII:
			lp->phy_mode = PHY_INTERFACE_MODE_MII;
			break;
		case XAE_PHY_TYPE_GMII:
			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
			break;
		case XAE_PHY_TYPE_RGMII_2_0:
			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
			break;
		case XAE_PHY_TYPE_SGMII:
			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
			break;
		case XAE_PHY_TYPE_1000BASE_X:
			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
			break;
		default:
			ret = -EINVAL;
			goto cleanup_clk;
		}
	} else {
		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
		if (ret)
			goto cleanup_clk;
	}

	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
	if (np) {
		struct resource dmares;

		ret = of_address_to_resource(np, 0, &dmares);
		if (ret) {
			dev_err(&pdev->dev,
				"unable to get DMA resource\n");
			of_node_put(np);
			goto cleanup_clk;
		}
		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
						     &dmares);
		lp->rx_irq = irq_of_parse_and_map(np, 1);
		lp->tx_irq = irq_of_parse_and_map(np, 0);
		of_node_put(np);
		lp->eth_irq = platform_get_irq_optional(pdev, 0);
	} else {
		/* Check for these resources directly on the Ethernet node. */
		struct resource *res = platform_get_resource(pdev,
							     IORESOURCE_MEM, 1);
		lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
		lp->rx_irq = platform_get_irq(pdev, 1);
		lp->tx_irq = platform_get_irq(pdev, 0);
		lp->eth_irq = platform_get_irq_optional(pdev, 2);
	}
	if (IS_ERR(lp->dma_regs)) {
		dev_err(&pdev->dev, "could not map DMA regs\n");
		ret = PTR_ERR(lp->dma_regs);
		goto cleanup_clk;
	}
	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
		dev_err(&pdev->dev, "could not determine irqs\n");
		ret = -ENOMEM;
		goto cleanup_clk;
	}

	/* Autodetect the need for 64-bit DMA pointers.
	 * When the IP is configured for a bus width bigger than 32 bits,
	 * writing the MSB registers is mandatory, even if they are all 0.
	 * We can detect this case by writing all 1's to one such register
	 * and see if that sticks: when the IP is configured for 32 bits
	 * only, those registers are RES0.
	 * Those MSB registers were introduced in IP v7.1, which we check first.
	 */
	if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
		void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;

		iowrite32(0x0, desc);
		if (ioread32(desc) == 0) {	/* sanity check */
			iowrite32(0xffffffff, desc);
			if (ioread32(desc) > 0) {
				lp->features |= XAE_FEATURE_DMA_64BIT;
				addr_width = 64;
				dev_info(&pdev->dev,
					 "autodetected 64-bit DMA range\n");
			}
			iowrite32(0x0, desc);
		}
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
	if (ret) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto cleanup_clk;
	}

	/* Check for Ethernet core IRQ (optional) */
	if (lp->eth_irq <= 0)
		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");

	/* Retrieve the MAC address */
	mac_addr = of_get_mac_address(pdev->dev.of_node);
	if (IS_ERR(mac_addr)) {
		dev_warn(&pdev->dev, "could not find MAC address property: %ld\n",
			 PTR_ERR(mac_addr));
		mac_addr = NULL;
	}
	axienet_set_mac_address(ndev, mac_addr);

	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	/* Reset core now that clocks are enabled, prior to accessing MDIO */
	ret = __axienet_device_reset(lp);
	if (ret)
		goto cleanup_clk;

	ret = axienet_mdio_setup(lp);
	if (ret)
		dev_warn(&pdev->dev,
			 "error registering MDIO bus: %d\n", ret);

	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
		lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
		if (!lp->phy_node) {
			dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
			ret = -EINVAL;
			goto cleanup_mdio;
		}
		lp->pcs_phy = of_mdio_find_device(lp->phy_node);
		if (!lp->pcs_phy) {
			ret = -EPROBE_DEFER;
			goto cleanup_mdio;
		}
		lp->phylink_config.pcs_poll = true;
	}

	lp->phylink_config.dev = &ndev->dev;
	lp->phylink_config.type = PHYLINK_NETDEV;

	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
				     lp->phy_mode,
				     &axienet_phylink_ops);
	if (IS_ERR(lp->phylink)) {
		ret = PTR_ERR(lp->phylink);
		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
		goto cleanup_mdio;
	}

	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto cleanup_phylink;
	}

	return 0;

cleanup_phylink:
	phylink_destroy(lp->phylink);

cleanup_mdio:
	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);
	if (lp->mii_bus)
		axienet_mdio_teardown(lp);
	of_node_put(lp->phy_node);

cleanup_clk:
	clk_disable_unprepare(lp->clk);

free_netdev:
	free_netdev(ndev);

	return ret;
}

static int axienet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct axienet_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);

	if (lp->phylink)
		phylink_destroy(lp->phylink);

	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);

	axienet_mdio_teardown(lp);

	clk_disable_unprepare(lp->clk);

	of_node_put(lp->phy_node);
	lp->phy_node = NULL;

	free_netdev(ndev);

	return 0;
}

static void axienet_shutdown(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

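	/* Detach the interface and close it under the rtnl lock */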
	rtnl_lock();
	netif_device_detach(ndev);

	if (netif_running(ndev))
		dev_close(ndev);

	rtnl_unlock();
}

static struct platform_driver axienet_driver = {
	.probe = axienet_probe,
	.remove = axienet_remove,
	.shutdown = axienet_shutdown,
	.driver = {
		.name = "xilinx_axienet",
		.of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");