1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Hisilicon Fast Ethernet MAC Driver
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <linux/circ_buf.h>
9*4882a593Smuzhiyun #include <linux/clk.h>
10*4882a593Smuzhiyun #include <linux/etherdevice.h>
11*4882a593Smuzhiyun #include <linux/interrupt.h>
12*4882a593Smuzhiyun #include <linux/module.h>
13*4882a593Smuzhiyun #include <linux/of_mdio.h>
14*4882a593Smuzhiyun #include <linux/of_net.h>
15*4882a593Smuzhiyun #include <linux/platform_device.h>
16*4882a593Smuzhiyun #include <linux/reset.h>
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun /* MAC control register list */
19*4882a593Smuzhiyun #define MAC_PORTSEL 0x0200
20*4882a593Smuzhiyun #define MAC_PORTSEL_STAT_CPU BIT(0)
21*4882a593Smuzhiyun #define MAC_PORTSEL_RMII BIT(1)
22*4882a593Smuzhiyun #define MAC_PORTSET 0x0208
23*4882a593Smuzhiyun #define MAC_PORTSET_DUPLEX_FULL BIT(0)
24*4882a593Smuzhiyun #define MAC_PORTSET_LINKED BIT(1)
25*4882a593Smuzhiyun #define MAC_PORTSET_SPEED_100M BIT(2)
26*4882a593Smuzhiyun #define MAC_SET 0x0210
27*4882a593Smuzhiyun #define MAX_FRAME_SIZE 1600
28*4882a593Smuzhiyun #define MAX_FRAME_SIZE_MASK GENMASK(10, 0)
29*4882a593Smuzhiyun #define BIT_PAUSE_EN BIT(18)
30*4882a593Smuzhiyun #define RX_COALESCE_SET 0x0340
31*4882a593Smuzhiyun #define RX_COALESCED_FRAME_OFFSET 24
32*4882a593Smuzhiyun #define RX_COALESCED_FRAMES 8
33*4882a593Smuzhiyun #define RX_COALESCED_TIMER 0x74
34*4882a593Smuzhiyun #define QLEN_SET 0x0344
35*4882a593Smuzhiyun #define RX_DEPTH_OFFSET 8
36*4882a593Smuzhiyun #define MAX_HW_FIFO_DEPTH 64
37*4882a593Smuzhiyun #define HW_TX_FIFO_DEPTH 12
38*4882a593Smuzhiyun #define HW_RX_FIFO_DEPTH (MAX_HW_FIFO_DEPTH - HW_TX_FIFO_DEPTH)
39*4882a593Smuzhiyun #define IQFRM_DES 0x0354
40*4882a593Smuzhiyun #define RX_FRAME_LEN_MASK GENMASK(11, 0)
41*4882a593Smuzhiyun #define IQ_ADDR 0x0358
42*4882a593Smuzhiyun #define EQ_ADDR 0x0360
43*4882a593Smuzhiyun #define EQFRM_LEN 0x0364
44*4882a593Smuzhiyun #define ADDRQ_STAT 0x036C
45*4882a593Smuzhiyun #define TX_CNT_INUSE_MASK GENMASK(5, 0)
46*4882a593Smuzhiyun #define BIT_TX_READY BIT(24)
47*4882a593Smuzhiyun #define BIT_RX_READY BIT(25)
48*4882a593Smuzhiyun /* global control register list */
49*4882a593Smuzhiyun #define GLB_HOSTMAC_L32 0x0000
50*4882a593Smuzhiyun #define GLB_HOSTMAC_H16 0x0004
51*4882a593Smuzhiyun #define GLB_SOFT_RESET 0x0008
52*4882a593Smuzhiyun #define SOFT_RESET_ALL BIT(0)
53*4882a593Smuzhiyun #define GLB_FWCTRL 0x0010
54*4882a593Smuzhiyun #define FWCTRL_VLAN_ENABLE BIT(0)
55*4882a593Smuzhiyun #define FWCTRL_FW2CPU_ENA BIT(5)
56*4882a593Smuzhiyun #define FWCTRL_FWALL2CPU BIT(7)
57*4882a593Smuzhiyun #define GLB_MACTCTRL 0x0014
58*4882a593Smuzhiyun #define MACTCTRL_UNI2CPU BIT(1)
59*4882a593Smuzhiyun #define MACTCTRL_MULTI2CPU BIT(3)
60*4882a593Smuzhiyun #define MACTCTRL_BROAD2CPU BIT(5)
61*4882a593Smuzhiyun #define MACTCTRL_MACT_ENA BIT(7)
62*4882a593Smuzhiyun #define GLB_IRQ_STAT 0x0030
63*4882a593Smuzhiyun #define GLB_IRQ_ENA 0x0034
64*4882a593Smuzhiyun #define IRQ_ENA_PORT0_MASK GENMASK(7, 0)
65*4882a593Smuzhiyun #define IRQ_ENA_PORT0 BIT(18)
66*4882a593Smuzhiyun #define IRQ_ENA_ALL BIT(19)
67*4882a593Smuzhiyun #define GLB_IRQ_RAW 0x0038
68*4882a593Smuzhiyun #define IRQ_INT_RX_RDY BIT(0)
69*4882a593Smuzhiyun #define IRQ_INT_TX_PER_PACKET BIT(1)
70*4882a593Smuzhiyun #define IRQ_INT_TX_FIFO_EMPTY BIT(6)
71*4882a593Smuzhiyun #define IRQ_INT_MULTI_RXRDY BIT(7)
72*4882a593Smuzhiyun #define DEF_INT_MASK (IRQ_INT_MULTI_RXRDY | \
73*4882a593Smuzhiyun IRQ_INT_TX_PER_PACKET | \
74*4882a593Smuzhiyun IRQ_INT_TX_FIFO_EMPTY)
75*4882a593Smuzhiyun #define GLB_MAC_L32_BASE 0x0100
76*4882a593Smuzhiyun #define GLB_MAC_H16_BASE 0x0104
77*4882a593Smuzhiyun #define MACFLT_HI16_MASK GENMASK(15, 0)
78*4882a593Smuzhiyun #define BIT_MACFLT_ENA BIT(17)
79*4882a593Smuzhiyun #define BIT_MACFLT_FW2CPU BIT(21)
80*4882a593Smuzhiyun #define GLB_MAC_H16(reg) (GLB_MAC_H16_BASE + ((reg) * 0x8))
81*4882a593Smuzhiyun #define GLB_MAC_L32(reg) (GLB_MAC_L32_BASE + ((reg) * 0x8))
82*4882a593Smuzhiyun #define MAX_MAC_FILTER_NUM 8
83*4882a593Smuzhiyun #define MAX_UNICAST_ADDRESSES 2
84*4882a593Smuzhiyun #define MAX_MULTICAST_ADDRESSES (MAX_MAC_FILTER_NUM - \
85*4882a593Smuzhiyun MAX_UNICAST_ADDRESSES)
86*4882a593Smuzhiyun /* software tx and rx queue number, should be power of 2 */
87*4882a593Smuzhiyun #define TXQ_NUM 64
88*4882a593Smuzhiyun #define RXQ_NUM 128
89*4882a593Smuzhiyun #define FEMAC_POLL_WEIGHT 16
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun #define PHY_RESET_DELAYS_PROPERTY "hisilicon,phy-reset-delays-us"
92*4882a593Smuzhiyun
/* Indices into the "hisilicon,phy-reset-delays-us" DT property array. */
enum phy_reset_delays {
	PRE_DELAY,	/* delay before asserting the PHY reset line */
	PULSE,		/* width of the reset pulse itself */
	POST_DELAY,	/* delay after deasserting reset */
	DELAYS_NUM,	/* number of entries expected in the property */
};
99*4882a593Smuzhiyun
/* Software ring of skbs and their DMA addresses; head is the producer
 * index, tail the consumer index (both wrap modulo num).
 */
struct hisi_femac_queue {
	struct sk_buff **skb;		/* per-slot queued skb, NULL if free */
	dma_addr_t *dma_phys;		/* per-slot streaming DMA address */
	int num;			/* ring size (power of 2) */
	unsigned int head;		/* next slot to fill */
	unsigned int tail;		/* next slot to drain */
};
107*4882a593Smuzhiyun
/* Per-device driver state. */
struct hisi_femac_priv {
	void __iomem *port_base;	/* MAC port register window */
	void __iomem *glb_base;		/* global control register window */
	struct clk *clk;
	struct reset_control *mac_rst;	/* MAC core reset line */
	struct reset_control *phy_rst;	/* external PHY reset line */
	u32 phy_reset_delays[DELAYS_NUM];	/* from DT, in microseconds */
	u32 link_status;		/* last MAC_PORTSET value written */

	struct device *dev;
	struct net_device *ndev;

	struct hisi_femac_queue txq;
	struct hisi_femac_queue rxq;
	u32 tx_fifo_used_cnt;		/* frames believed to be in HW TX FIFO */
	struct napi_struct napi;
};
125*4882a593Smuzhiyun
hisi_femac_irq_enable(struct hisi_femac_priv * priv,int irqs)126*4882a593Smuzhiyun static void hisi_femac_irq_enable(struct hisi_femac_priv *priv, int irqs)
127*4882a593Smuzhiyun {
128*4882a593Smuzhiyun u32 val;
129*4882a593Smuzhiyun
130*4882a593Smuzhiyun val = readl(priv->glb_base + GLB_IRQ_ENA);
131*4882a593Smuzhiyun writel(val | irqs, priv->glb_base + GLB_IRQ_ENA);
132*4882a593Smuzhiyun }
133*4882a593Smuzhiyun
hisi_femac_irq_disable(struct hisi_femac_priv * priv,int irqs)134*4882a593Smuzhiyun static void hisi_femac_irq_disable(struct hisi_femac_priv *priv, int irqs)
135*4882a593Smuzhiyun {
136*4882a593Smuzhiyun u32 val;
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun val = readl(priv->glb_base + GLB_IRQ_ENA);
139*4882a593Smuzhiyun writel(val & (~irqs), priv->glb_base + GLB_IRQ_ENA);
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun
hisi_femac_tx_dma_unmap(struct hisi_femac_priv * priv,struct sk_buff * skb,unsigned int pos)142*4882a593Smuzhiyun static void hisi_femac_tx_dma_unmap(struct hisi_femac_priv *priv,
143*4882a593Smuzhiyun struct sk_buff *skb, unsigned int pos)
144*4882a593Smuzhiyun {
145*4882a593Smuzhiyun dma_addr_t dma_addr;
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun dma_addr = priv->txq.dma_phys[pos];
148*4882a593Smuzhiyun dma_unmap_single(priv->dev, dma_addr, skb->len, DMA_TO_DEVICE);
149*4882a593Smuzhiyun }
150*4882a593Smuzhiyun
/* Reclaim completed TX buffers.
 *
 * ADDRQ_STAT reports how many frames the hardware still holds in its TX
 * FIFO; every queued skb beyond that count has left the MAC and can be
 * unmapped and freed.  Runs under the xmit lock so it cannot race with
 * hisi_femac_net_xmit() updating the ring indices.
 */
static void hisi_femac_xmit_reclaim(struct net_device *dev)
{
	struct sk_buff *skb;
	struct hisi_femac_priv *priv = netdev_priv(dev);
	struct hisi_femac_queue *txq = &priv->txq;
	unsigned int bytes_compl = 0, pkts_compl = 0;
	u32 val;

	netif_tx_lock(dev);

	/* number of frames still owned by the MAC hardware */
	val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
	while (val < priv->tx_fifo_used_cnt) {
		skb = txq->skb[txq->tail];
		if (unlikely(!skb)) {
			/* ring/counter mismatch - stop rather than free NULL */
			netdev_err(dev, "xmitq_cnt_inuse=%d, tx_fifo_used=%d\n",
				   val, priv->tx_fifo_used_cnt);
			break;
		}
		hisi_femac_tx_dma_unmap(priv, skb, txq->tail);
		pkts_compl++;
		bytes_compl += skb->len;
		dev_kfree_skb_any(skb);

		priv->tx_fifo_used_cnt--;

		/* re-read: hardware may have drained more frames meanwhile */
		val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
		txq->skb[txq->tail] = NULL;
		txq->tail = (txq->tail + 1) % txq->num;
	}

	netdev_completed_queue(dev, pkts_compl, bytes_compl);

	/* restart the queue if xmit stopped it on a full FIFO/ring */
	if (unlikely(netif_queue_stopped(dev)) && pkts_compl)
		netif_wake_queue(dev);

	netif_tx_unlock(dev);
}
188*4882a593Smuzhiyun
hisi_femac_adjust_link(struct net_device * dev)189*4882a593Smuzhiyun static void hisi_femac_adjust_link(struct net_device *dev)
190*4882a593Smuzhiyun {
191*4882a593Smuzhiyun struct hisi_femac_priv *priv = netdev_priv(dev);
192*4882a593Smuzhiyun struct phy_device *phy = dev->phydev;
193*4882a593Smuzhiyun u32 status = 0;
194*4882a593Smuzhiyun
195*4882a593Smuzhiyun if (phy->link)
196*4882a593Smuzhiyun status |= MAC_PORTSET_LINKED;
197*4882a593Smuzhiyun if (phy->duplex == DUPLEX_FULL)
198*4882a593Smuzhiyun status |= MAC_PORTSET_DUPLEX_FULL;
199*4882a593Smuzhiyun if (phy->speed == SPEED_100)
200*4882a593Smuzhiyun status |= MAC_PORTSET_SPEED_100M;
201*4882a593Smuzhiyun
202*4882a593Smuzhiyun if ((status != priv->link_status) &&
203*4882a593Smuzhiyun ((status | priv->link_status) & MAC_PORTSET_LINKED)) {
204*4882a593Smuzhiyun writel(status, priv->port_base + MAC_PORTSET);
205*4882a593Smuzhiyun priv->link_status = status;
206*4882a593Smuzhiyun phy_print_status(phy);
207*4882a593Smuzhiyun }
208*4882a593Smuzhiyun }
209*4882a593Smuzhiyun
/* Post fresh RX buffers to the hardware input queue.
 *
 * While the MAC reports room for more RX addresses (BIT_RX_READY) and
 * the software ring has free slots, allocate and DMA-map one
 * MAX_FRAME_SIZE skb per slot and hand its bus address to IQ_ADDR.
 * Stops early on allocation or mapping failure; a later call retries.
 */
static void hisi_femac_rx_refill(struct hisi_femac_priv *priv)
{
	struct hisi_femac_queue *rxq = &priv->rxq;
	struct sk_buff *skb;
	u32 pos;
	u32 len = MAX_FRAME_SIZE;
	dma_addr_t addr;

	pos = rxq->head;
	while (readl(priv->port_base + ADDRQ_STAT) & BIT_RX_READY) {
		if (!CIRC_SPACE(pos, rxq->tail, rxq->num))
			break;
		if (unlikely(rxq->skb[pos])) {
			/* slot unexpectedly occupied - ring inconsistency */
			netdev_err(priv->ndev, "err skb[%d]=%p\n",
				   pos, rxq->skb[pos]);
			break;
		}
		skb = netdev_alloc_skb_ip_align(priv->ndev, len);
		if (unlikely(!skb))
			break;

		addr = dma_map_single(priv->dev, skb->data, len,
				      DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, addr)) {
			dev_kfree_skb_any(skb);
			break;
		}
		/* record the slot before handing the buffer to hardware */
		rxq->dma_phys[pos] = addr;
		rxq->skb[pos] = skb;
		writel(addr, priv->port_base + IQ_ADDR);
		pos = (pos + 1) % rxq->num;
	}
	rxq->head = pos;
}
244*4882a593Smuzhiyun
/* Receive up to @limit frames from the hardware input queue.
 *
 * Called from NAPI poll.  For each frame flagged ready in GLB_IRQ_RAW,
 * the pre-posted skb is unmapped, trimmed to the reported length minus
 * FCS, and passed up through GRO.  Consumed slots are replenished at
 * the end via hisi_femac_rx_refill().  Returns frames processed.
 */
static int hisi_femac_rx(struct net_device *dev, int limit)
{
	struct hisi_femac_priv *priv = netdev_priv(dev);
	struct hisi_femac_queue *rxq = &priv->rxq;
	struct sk_buff *skb;
	dma_addr_t addr;
	u32 rx_pkt_info, pos, len, rx_pkts_num = 0;

	pos = rxq->tail;
	while (readl(priv->glb_base + GLB_IRQ_RAW) & IRQ_INT_RX_RDY) {
		rx_pkt_info = readl(priv->port_base + IQFRM_DES);
		len = rx_pkt_info & RX_FRAME_LEN_MASK;
		len -= ETH_FCS_LEN;

		/* tell hardware we will deal with this packet */
		writel(IRQ_INT_RX_RDY, priv->glb_base + GLB_IRQ_RAW);

		rx_pkts_num++;

		skb = rxq->skb[pos];
		if (unlikely(!skb)) {
			netdev_err(dev, "rx skb NULL. pos=%d\n", pos);
			break;
		}
		rxq->skb[pos] = NULL;

		addr = rxq->dma_phys[pos];
		dma_unmap_single(priv->dev, addr, MAX_FRAME_SIZE,
				 DMA_FROM_DEVICE);
		skb_put(skb, len);
		if (unlikely(skb->len > MAX_FRAME_SIZE)) {
			/* oversized frame: count the error and drop it */
			netdev_err(dev, "rcv len err, len = %d\n", skb->len);
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb->protocol = eth_type_trans(skb, dev);
		napi_gro_receive(&priv->napi, skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
next:
		pos = (pos + 1) % rxq->num;
		if (rx_pkts_num >= limit)
			break;
	}
	rxq->tail = pos;

	hisi_femac_rx_refill(priv);

	return rx_pkts_num;
}
298*4882a593Smuzhiyun
/* NAPI poll handler.
 *
 * Reclaims TX completions and receives frames until the budget is
 * exhausted or no raw interrupt sources remain pending.  When work
 * finishes under budget, completes NAPI and re-enables interrupts -
 * except the per-packet TX one, which xmit turns on only when the TX
 * FIFO fills up.
 */
static int hisi_femac_poll(struct napi_struct *napi, int budget)
{
	struct hisi_femac_priv *priv = container_of(napi,
					struct hisi_femac_priv, napi);
	struct net_device *dev = priv->ndev;
	int work_done = 0, task = budget;
	int ints, num;

	do {
		hisi_femac_xmit_reclaim(dev);
		num = hisi_femac_rx(dev, task);
		work_done += num;
		task -= num;
		if (work_done >= budget)
			break;

		/* ack whatever is still raised; loop if more work arrived */
		ints = readl(priv->glb_base + GLB_IRQ_RAW);
		writel(ints & DEF_INT_MASK,
		       priv->glb_base + GLB_IRQ_RAW);
	} while (ints & DEF_INT_MASK);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		hisi_femac_irq_enable(priv, DEF_INT_MASK &
					(~IRQ_INT_TX_PER_PACKET));
	}

	return work_done;
}
328*4882a593Smuzhiyun
hisi_femac_interrupt(int irq,void * dev_id)329*4882a593Smuzhiyun static irqreturn_t hisi_femac_interrupt(int irq, void *dev_id)
330*4882a593Smuzhiyun {
331*4882a593Smuzhiyun int ints;
332*4882a593Smuzhiyun struct net_device *dev = (struct net_device *)dev_id;
333*4882a593Smuzhiyun struct hisi_femac_priv *priv = netdev_priv(dev);
334*4882a593Smuzhiyun
335*4882a593Smuzhiyun ints = readl(priv->glb_base + GLB_IRQ_RAW);
336*4882a593Smuzhiyun
337*4882a593Smuzhiyun if (likely(ints & DEF_INT_MASK)) {
338*4882a593Smuzhiyun writel(ints & DEF_INT_MASK,
339*4882a593Smuzhiyun priv->glb_base + GLB_IRQ_RAW);
340*4882a593Smuzhiyun hisi_femac_irq_disable(priv, DEF_INT_MASK);
341*4882a593Smuzhiyun napi_schedule(&priv->napi);
342*4882a593Smuzhiyun }
343*4882a593Smuzhiyun
344*4882a593Smuzhiyun return IRQ_HANDLED;
345*4882a593Smuzhiyun }
346*4882a593Smuzhiyun
hisi_femac_init_queue(struct device * dev,struct hisi_femac_queue * queue,unsigned int num)347*4882a593Smuzhiyun static int hisi_femac_init_queue(struct device *dev,
348*4882a593Smuzhiyun struct hisi_femac_queue *queue,
349*4882a593Smuzhiyun unsigned int num)
350*4882a593Smuzhiyun {
351*4882a593Smuzhiyun queue->skb = devm_kcalloc(dev, num, sizeof(struct sk_buff *),
352*4882a593Smuzhiyun GFP_KERNEL);
353*4882a593Smuzhiyun if (!queue->skb)
354*4882a593Smuzhiyun return -ENOMEM;
355*4882a593Smuzhiyun
356*4882a593Smuzhiyun queue->dma_phys = devm_kcalloc(dev, num, sizeof(dma_addr_t),
357*4882a593Smuzhiyun GFP_KERNEL);
358*4882a593Smuzhiyun if (!queue->dma_phys)
359*4882a593Smuzhiyun return -ENOMEM;
360*4882a593Smuzhiyun
361*4882a593Smuzhiyun queue->num = num;
362*4882a593Smuzhiyun queue->head = 0;
363*4882a593Smuzhiyun queue->tail = 0;
364*4882a593Smuzhiyun
365*4882a593Smuzhiyun return 0;
366*4882a593Smuzhiyun }
367*4882a593Smuzhiyun
hisi_femac_init_tx_and_rx_queues(struct hisi_femac_priv * priv)368*4882a593Smuzhiyun static int hisi_femac_init_tx_and_rx_queues(struct hisi_femac_priv *priv)
369*4882a593Smuzhiyun {
370*4882a593Smuzhiyun int ret;
371*4882a593Smuzhiyun
372*4882a593Smuzhiyun ret = hisi_femac_init_queue(priv->dev, &priv->txq, TXQ_NUM);
373*4882a593Smuzhiyun if (ret)
374*4882a593Smuzhiyun return ret;
375*4882a593Smuzhiyun
376*4882a593Smuzhiyun ret = hisi_femac_init_queue(priv->dev, &priv->rxq, RXQ_NUM);
377*4882a593Smuzhiyun if (ret)
378*4882a593Smuzhiyun return ret;
379*4882a593Smuzhiyun
380*4882a593Smuzhiyun priv->tx_fifo_used_cnt = 0;
381*4882a593Smuzhiyun
382*4882a593Smuzhiyun return 0;
383*4882a593Smuzhiyun }
384*4882a593Smuzhiyun
hisi_femac_free_skb_rings(struct hisi_femac_priv * priv)385*4882a593Smuzhiyun static void hisi_femac_free_skb_rings(struct hisi_femac_priv *priv)
386*4882a593Smuzhiyun {
387*4882a593Smuzhiyun struct hisi_femac_queue *txq = &priv->txq;
388*4882a593Smuzhiyun struct hisi_femac_queue *rxq = &priv->rxq;
389*4882a593Smuzhiyun struct sk_buff *skb;
390*4882a593Smuzhiyun dma_addr_t dma_addr;
391*4882a593Smuzhiyun u32 pos;
392*4882a593Smuzhiyun
393*4882a593Smuzhiyun pos = rxq->tail;
394*4882a593Smuzhiyun while (pos != rxq->head) {
395*4882a593Smuzhiyun skb = rxq->skb[pos];
396*4882a593Smuzhiyun if (unlikely(!skb)) {
397*4882a593Smuzhiyun netdev_err(priv->ndev, "NULL rx skb. pos=%d, head=%d\n",
398*4882a593Smuzhiyun pos, rxq->head);
399*4882a593Smuzhiyun continue;
400*4882a593Smuzhiyun }
401*4882a593Smuzhiyun
402*4882a593Smuzhiyun dma_addr = rxq->dma_phys[pos];
403*4882a593Smuzhiyun dma_unmap_single(priv->dev, dma_addr, MAX_FRAME_SIZE,
404*4882a593Smuzhiyun DMA_FROM_DEVICE);
405*4882a593Smuzhiyun
406*4882a593Smuzhiyun dev_kfree_skb_any(skb);
407*4882a593Smuzhiyun rxq->skb[pos] = NULL;
408*4882a593Smuzhiyun pos = (pos + 1) % rxq->num;
409*4882a593Smuzhiyun }
410*4882a593Smuzhiyun rxq->tail = pos;
411*4882a593Smuzhiyun
412*4882a593Smuzhiyun pos = txq->tail;
413*4882a593Smuzhiyun while (pos != txq->head) {
414*4882a593Smuzhiyun skb = txq->skb[pos];
415*4882a593Smuzhiyun if (unlikely(!skb)) {
416*4882a593Smuzhiyun netdev_err(priv->ndev, "NULL tx skb. pos=%d, head=%d\n",
417*4882a593Smuzhiyun pos, txq->head);
418*4882a593Smuzhiyun continue;
419*4882a593Smuzhiyun }
420*4882a593Smuzhiyun hisi_femac_tx_dma_unmap(priv, skb, pos);
421*4882a593Smuzhiyun dev_kfree_skb_any(skb);
422*4882a593Smuzhiyun txq->skb[pos] = NULL;
423*4882a593Smuzhiyun pos = (pos + 1) % txq->num;
424*4882a593Smuzhiyun }
425*4882a593Smuzhiyun txq->tail = pos;
426*4882a593Smuzhiyun priv->tx_fifo_used_cnt = 0;
427*4882a593Smuzhiyun }
428*4882a593Smuzhiyun
hisi_femac_set_hw_mac_addr(struct hisi_femac_priv * priv,unsigned char * mac)429*4882a593Smuzhiyun static int hisi_femac_set_hw_mac_addr(struct hisi_femac_priv *priv,
430*4882a593Smuzhiyun unsigned char *mac)
431*4882a593Smuzhiyun {
432*4882a593Smuzhiyun u32 reg;
433*4882a593Smuzhiyun
434*4882a593Smuzhiyun reg = mac[1] | (mac[0] << 8);
435*4882a593Smuzhiyun writel(reg, priv->glb_base + GLB_HOSTMAC_H16);
436*4882a593Smuzhiyun
437*4882a593Smuzhiyun reg = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
438*4882a593Smuzhiyun writel(reg, priv->glb_base + GLB_HOSTMAC_L32);
439*4882a593Smuzhiyun
440*4882a593Smuzhiyun return 0;
441*4882a593Smuzhiyun }
442*4882a593Smuzhiyun
/* Soft-reset the MAC port logic via the global soft-reset register. */
static int hisi_femac_port_reset(struct hisi_femac_priv *priv)
{
	u32 val;

	val = readl(priv->glb_base + GLB_SOFT_RESET);
	val |= SOFT_RESET_ALL;
	writel(val, priv->glb_base + GLB_SOFT_RESET);

	/* hold reset long enough for the core to latch it */
	usleep_range(500, 800);

	val &= ~SOFT_RESET_ALL;
	writel(val, priv->glb_base + GLB_SOFT_RESET);

	return 0;
}
458*4882a593Smuzhiyun
/* ndo_open: bring the interface up.
 *
 * Resets the port, reprograms the MAC address, pre-posts RX buffers,
 * enables the queue/NAPI/PHY, then finally unmasks interrupts so no
 * event can arrive before the software state is ready.
 */
static int hisi_femac_net_open(struct net_device *dev)
{
	struct hisi_femac_priv *priv = netdev_priv(dev);

	hisi_femac_port_reset(priv);
	hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);
	hisi_femac_rx_refill(priv);

	netif_carrier_off(dev);
	netdev_reset_queue(dev);
	netif_start_queue(dev);
	napi_enable(&priv->napi);

	/* force adjust_link to reprogram the port on the next PHY event */
	priv->link_status = 0;
	if (dev->phydev)
		phy_start(dev->phydev);

	/* clear stale status, then unmask port and summary interrupts */
	writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
	hisi_femac_irq_enable(priv, IRQ_ENA_ALL | IRQ_ENA_PORT0 | DEF_INT_MASK);

	return 0;
}
481*4882a593Smuzhiyun
/* ndo_stop: take the interface down.
 *
 * Masks the port interrupt first so no new events fire, stops the PHY,
 * queue and NAPI, then drains and frees all ring buffers.
 */
static int hisi_femac_net_close(struct net_device *dev)
{
	struct hisi_femac_priv *priv = netdev_priv(dev);

	hisi_femac_irq_disable(priv, IRQ_ENA_PORT0);

	if (dev->phydev)
		phy_stop(dev->phydev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	hisi_femac_free_skb_rings(priv);

	return 0;
}
498*4882a593Smuzhiyun
hisi_femac_net_xmit(struct sk_buff * skb,struct net_device * dev)499*4882a593Smuzhiyun static netdev_tx_t hisi_femac_net_xmit(struct sk_buff *skb,
500*4882a593Smuzhiyun struct net_device *dev)
501*4882a593Smuzhiyun {
502*4882a593Smuzhiyun struct hisi_femac_priv *priv = netdev_priv(dev);
503*4882a593Smuzhiyun struct hisi_femac_queue *txq = &priv->txq;
504*4882a593Smuzhiyun dma_addr_t addr;
505*4882a593Smuzhiyun u32 val;
506*4882a593Smuzhiyun
507*4882a593Smuzhiyun val = readl(priv->port_base + ADDRQ_STAT);
508*4882a593Smuzhiyun val &= BIT_TX_READY;
509*4882a593Smuzhiyun if (!val) {
510*4882a593Smuzhiyun hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
511*4882a593Smuzhiyun dev->stats.tx_dropped++;
512*4882a593Smuzhiyun dev->stats.tx_fifo_errors++;
513*4882a593Smuzhiyun netif_stop_queue(dev);
514*4882a593Smuzhiyun return NETDEV_TX_BUSY;
515*4882a593Smuzhiyun }
516*4882a593Smuzhiyun
517*4882a593Smuzhiyun if (unlikely(!CIRC_SPACE(txq->head, txq->tail,
518*4882a593Smuzhiyun txq->num))) {
519*4882a593Smuzhiyun hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
520*4882a593Smuzhiyun dev->stats.tx_dropped++;
521*4882a593Smuzhiyun dev->stats.tx_fifo_errors++;
522*4882a593Smuzhiyun netif_stop_queue(dev);
523*4882a593Smuzhiyun return NETDEV_TX_BUSY;
524*4882a593Smuzhiyun }
525*4882a593Smuzhiyun
526*4882a593Smuzhiyun addr = dma_map_single(priv->dev, skb->data,
527*4882a593Smuzhiyun skb->len, DMA_TO_DEVICE);
528*4882a593Smuzhiyun if (unlikely(dma_mapping_error(priv->dev, addr))) {
529*4882a593Smuzhiyun dev_kfree_skb_any(skb);
530*4882a593Smuzhiyun dev->stats.tx_dropped++;
531*4882a593Smuzhiyun return NETDEV_TX_OK;
532*4882a593Smuzhiyun }
533*4882a593Smuzhiyun txq->dma_phys[txq->head] = addr;
534*4882a593Smuzhiyun
535*4882a593Smuzhiyun txq->skb[txq->head] = skb;
536*4882a593Smuzhiyun txq->head = (txq->head + 1) % txq->num;
537*4882a593Smuzhiyun
538*4882a593Smuzhiyun writel(addr, priv->port_base + EQ_ADDR);
539*4882a593Smuzhiyun writel(skb->len + ETH_FCS_LEN, priv->port_base + EQFRM_LEN);
540*4882a593Smuzhiyun
541*4882a593Smuzhiyun priv->tx_fifo_used_cnt++;
542*4882a593Smuzhiyun
543*4882a593Smuzhiyun dev->stats.tx_packets++;
544*4882a593Smuzhiyun dev->stats.tx_bytes += skb->len;
545*4882a593Smuzhiyun netdev_sent_queue(dev, skb->len);
546*4882a593Smuzhiyun
547*4882a593Smuzhiyun return NETDEV_TX_OK;
548*4882a593Smuzhiyun }
549*4882a593Smuzhiyun
/* ndo_set_mac_address: validate and apply a new station address,
 * updating both the netdev copy and the hardware HOSTMAC registers.
 */
static int hisi_femac_set_mac_address(struct net_device *dev, void *p)
{
	struct hisi_femac_priv *priv = netdev_priv(dev);
	struct sockaddr *skaddr = p;

	if (!is_valid_ether_addr(skaddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, skaddr->sa_data, dev->addr_len);
	/* address is now explicitly set, no longer randomly generated */
	dev->addr_assign_type &= ~NET_ADDR_RANDOM;

	hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);

	return 0;
}
565*4882a593Smuzhiyun
/* Enable or disable MAC address filter entry @reg_n. */
static void hisi_femac_enable_hw_addr_filter(struct hisi_femac_priv *priv,
					     unsigned int reg_n, bool enable)
{
	void __iomem *reg = priv->glb_base + GLB_MAC_H16(reg_n);
	u32 val = readl(reg);

	if (enable)
		val |= BIT_MACFLT_ENA;
	else
		val &= ~BIT_MACFLT_ENA;
	writel(val, reg);
}
578*4882a593Smuzhiyun
hisi_femac_set_hw_addr_filter(struct hisi_femac_priv * priv,unsigned char * addr,unsigned int reg_n)579*4882a593Smuzhiyun static void hisi_femac_set_hw_addr_filter(struct hisi_femac_priv *priv,
580*4882a593Smuzhiyun unsigned char *addr,
581*4882a593Smuzhiyun unsigned int reg_n)
582*4882a593Smuzhiyun {
583*4882a593Smuzhiyun unsigned int high, low;
584*4882a593Smuzhiyun u32 val;
585*4882a593Smuzhiyun
586*4882a593Smuzhiyun high = GLB_MAC_H16(reg_n);
587*4882a593Smuzhiyun low = GLB_MAC_L32(reg_n);
588*4882a593Smuzhiyun
589*4882a593Smuzhiyun val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
590*4882a593Smuzhiyun writel(val, priv->glb_base + low);
591*4882a593Smuzhiyun
592*4882a593Smuzhiyun val = readl(priv->glb_base + high);
593*4882a593Smuzhiyun val &= ~MACFLT_HI16_MASK;
594*4882a593Smuzhiyun val |= ((addr[0] << 8) | addr[1]);
595*4882a593Smuzhiyun val |= (BIT_MACFLT_ENA | BIT_MACFLT_FW2CPU);
596*4882a593Smuzhiyun writel(val, priv->glb_base + high);
597*4882a593Smuzhiyun }
598*4882a593Smuzhiyun
/* Toggle forwarding of all frames to the CPU (promiscuous mode). */
static void hisi_femac_set_promisc_mode(struct hisi_femac_priv *priv,
					bool promisc_mode)
{
	u32 fwctrl = readl(priv->glb_base + GLB_FWCTRL);

	if (promisc_mode)
		fwctrl |= FWCTRL_FWALL2CPU;
	else
		fwctrl &= ~FWCTRL_FWALL2CPU;
	writel(fwctrl, priv->glb_base + GLB_FWCTRL);
}
611*4882a593Smuzhiyun
612*4882a593Smuzhiyun /* Handle multiple multicast addresses (perfect filtering)*/
/* Handle multiple multicast addresses (perfect filtering).
 *
 * If the multicast list fits in the hardware filter entries reserved
 * for multicast, program each address into a filter slot; otherwise
 * (or with IFF_ALLMULTI) fall back to forwarding all multicast frames
 * to the CPU.
 */
static void hisi_femac_set_mc_addr_filter(struct hisi_femac_priv *priv)
{
	struct net_device *dev = priv->ndev;
	u32 val;

	val = readl(priv->glb_base + GLB_MACTCTRL);
	if ((netdev_mc_count(dev) > MAX_MULTICAST_ADDRESSES) ||
	    (dev->flags & IFF_ALLMULTI)) {
		val |= MACTCTRL_MULTI2CPU;
	} else {
		/* multicast entries start after the unicast slots */
		int reg = MAX_UNICAST_ADDRESSES;
		int i;
		struct netdev_hw_addr *ha;

		/* clear stale entries before reprogramming */
		for (i = reg; i < MAX_MAC_FILTER_NUM; i++)
			hisi_femac_enable_hw_addr_filter(priv, i, false);

		netdev_for_each_mc_addr(ha, dev) {
			hisi_femac_set_hw_addr_filter(priv, ha->addr, reg);
			reg++;
		}
		val &= ~MACTCTRL_MULTI2CPU;
	}
	writel(val, priv->glb_base + GLB_MACTCTRL);
}
638*4882a593Smuzhiyun
639*4882a593Smuzhiyun /* Handle multiple unicast addresses (perfect filtering)*/
/* Handle multiple unicast addresses (perfect filtering).
 *
 * If the unicast list fits in the hardware filter slots reserved for
 * unicast, program each address; otherwise fall back to forwarding all
 * unicast frames to the CPU.
 */
static void hisi_femac_set_uc_addr_filter(struct hisi_femac_priv *priv)
{
	struct net_device *dev = priv->ndev;
	u32 val;

	val = readl(priv->glb_base + GLB_MACTCTRL);
	if (netdev_uc_count(dev) > MAX_UNICAST_ADDRESSES) {
		val |= MACTCTRL_UNI2CPU;
	} else {
		/* unicast entries occupy the first filter slots */
		int reg = 0;
		int i;
		struct netdev_hw_addr *ha;

		/* clear stale entries before reprogramming */
		for (i = reg; i < MAX_UNICAST_ADDRESSES; i++)
			hisi_femac_enable_hw_addr_filter(priv, i, false);

		netdev_for_each_uc_addr(ha, dev) {
			hisi_femac_set_hw_addr_filter(priv, ha->addr, reg);
			reg++;
		}
		val &= ~MACTCTRL_UNI2CPU;
	}
	writel(val, priv->glb_base + GLB_MACTCTRL);
}
664*4882a593Smuzhiyun
hisi_femac_net_set_rx_mode(struct net_device * dev)665*4882a593Smuzhiyun static void hisi_femac_net_set_rx_mode(struct net_device *dev)
666*4882a593Smuzhiyun {
667*4882a593Smuzhiyun struct hisi_femac_priv *priv = netdev_priv(dev);
668*4882a593Smuzhiyun
669*4882a593Smuzhiyun if (dev->flags & IFF_PROMISC) {
670*4882a593Smuzhiyun hisi_femac_set_promisc_mode(priv, true);
671*4882a593Smuzhiyun } else {
672*4882a593Smuzhiyun hisi_femac_set_promisc_mode(priv, false);
673*4882a593Smuzhiyun hisi_femac_set_mc_addr_filter(priv);
674*4882a593Smuzhiyun hisi_femac_set_uc_addr_filter(priv);
675*4882a593Smuzhiyun }
676*4882a593Smuzhiyun }
677*4882a593Smuzhiyun
/* ethtool: link state and settings are delegated to phylib helpers. */
static const struct ethtool_ops hisi_femac_ethtools_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
683*4882a593Smuzhiyun
/* net_device callbacks; ioctls are delegated to the attached PHY */
static const struct net_device_ops hisi_femac_netdev_ops = {
	.ndo_open = hisi_femac_net_open,
	.ndo_stop = hisi_femac_net_close,
	.ndo_start_xmit = hisi_femac_net_xmit,
	.ndo_do_ioctl = phy_do_ioctl_running,
	.ndo_set_mac_address = hisi_femac_set_mac_address,
	.ndo_set_rx_mode = hisi_femac_net_set_rx_mode,
};
692*4882a593Smuzhiyun
/* Pulse the MAC reset line to bring the MAC core to a known state */
static void hisi_femac_core_reset(struct hisi_femac_priv *priv)
{
	reset_control_assert(priv->mac_rst);
	reset_control_deassert(priv->mac_rst);
}
698*4882a593Smuzhiyun
/* Sleep for roughly @time_us microseconds.  A zero delay is a no-op.
 * Delays of 20 ms or more use msleep(); shorter ones use
 * usleep_range() for finer timer granularity.
 */
static void hisi_femac_sleep_us(u32 time_us)
{
	u32 time_ms;

	if (!time_us)
		return;

	time_ms = DIV_ROUND_UP(time_us, 1000);
	if (time_ms >= 20)
		msleep(time_ms);
	else
		usleep_range(time_us, time_us + 500);
}
712*4882a593Smuzhiyun
/* Hard-reset the external PHY via its reset control line.
 *
 * To make sure the PHY hardware reset succeeds, we must keep the PHY
 * in the deasserted state first and then perform the actual
 * assert/deassert reset pulse.  All three delays (pre, pulse, post)
 * come from the device tree and depend on the PHY hardware.
 */
static void hisi_femac_phy_reset(struct hisi_femac_priv *priv)
{
	/* establish a defined (deasserted) starting state */
	reset_control_deassert(priv->phy_rst);
	hisi_femac_sleep_us(priv->phy_reset_delays[PRE_DELAY]);

	reset_control_assert(priv->phy_rst);
	/* hold the reset pulse long enough for the PHY to latch it;
	 * the required width depends on the PHY hardware
	 */
	hisi_femac_sleep_us(priv->phy_reset_delays[PULSE]);
	reset_control_deassert(priv->phy_rst);
	/* give the PHY time to come up before the first MDIO access */
	hisi_femac_sleep_us(priv->phy_reset_delays[POST_DELAY]);
}
731*4882a593Smuzhiyun
/* Configure the MAC port for operation: port/PHY mode selection,
 * interrupt state, forwarding control, MAC address table, maximum
 * frame size, RX interrupt coalescing and HW FIFO queue depths.
 */
static void hisi_femac_port_init(struct hisi_femac_priv *priv)
{
	u32 val;

	/* MAC gets link status info and phy mode by software config */
	val = MAC_PORTSEL_STAT_CPU;
	if (priv->ndev->phydev->interface == PHY_INTERFACE_MODE_RMII)
		val |= MAC_PORTSEL_RMII;
	writel(val, priv->port_base + MAC_PORTSEL);

	/* clear all interrupt status and disable port interrupts */
	writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
	hisi_femac_irq_disable(priv, IRQ_ENA_PORT0_MASK | IRQ_ENA_PORT0);

	/* disable VLAN and forward-all; enable forwarding to the CPU */
	val = readl(priv->glb_base + GLB_FWCTRL);
	val &= ~(FWCTRL_VLAN_ENABLE | FWCTRL_FWALL2CPU);
	val |= FWCTRL_FW2CPU_ENA;
	writel(val, priv->glb_base + GLB_FWCTRL);

	/* enable the MAC address table and broadcast-to-CPU */
	val = readl(priv->glb_base + GLB_MACTCTRL);
	val |= (MACTCTRL_BROAD2CPU | MACTCTRL_MACT_ENA);
	writel(val, priv->glb_base + GLB_MACTCTRL);

	/* limit the maximum accepted frame size */
	val = readl(priv->port_base + MAC_SET);
	val &= ~MAX_FRAME_SIZE_MASK;
	val |= MAX_FRAME_SIZE;
	writel(val, priv->port_base + MAC_SET);

	/* RX interrupt coalescing: frame-count and timer thresholds */
	val = RX_COALESCED_TIMER |
		(RX_COALESCED_FRAMES << RX_COALESCED_FRAME_OFFSET);
	writel(val, priv->port_base + RX_COALESCE_SET);

	/* split the shared HW FIFO between the TX and RX queues */
	val = (HW_RX_FIFO_DEPTH << RX_DEPTH_OFFSET) | HW_TX_FIFO_DEPTH;
	writel(val, priv->port_base + QLEN_SET);
}
767*4882a593Smuzhiyun
hisi_femac_drv_probe(struct platform_device * pdev)768*4882a593Smuzhiyun static int hisi_femac_drv_probe(struct platform_device *pdev)
769*4882a593Smuzhiyun {
770*4882a593Smuzhiyun struct device *dev = &pdev->dev;
771*4882a593Smuzhiyun struct device_node *node = dev->of_node;
772*4882a593Smuzhiyun struct net_device *ndev;
773*4882a593Smuzhiyun struct hisi_femac_priv *priv;
774*4882a593Smuzhiyun struct phy_device *phy;
775*4882a593Smuzhiyun const char *mac_addr;
776*4882a593Smuzhiyun int ret;
777*4882a593Smuzhiyun
778*4882a593Smuzhiyun ndev = alloc_etherdev(sizeof(*priv));
779*4882a593Smuzhiyun if (!ndev)
780*4882a593Smuzhiyun return -ENOMEM;
781*4882a593Smuzhiyun
782*4882a593Smuzhiyun platform_set_drvdata(pdev, ndev);
783*4882a593Smuzhiyun SET_NETDEV_DEV(ndev, &pdev->dev);
784*4882a593Smuzhiyun
785*4882a593Smuzhiyun priv = netdev_priv(ndev);
786*4882a593Smuzhiyun priv->dev = dev;
787*4882a593Smuzhiyun priv->ndev = ndev;
788*4882a593Smuzhiyun
789*4882a593Smuzhiyun priv->port_base = devm_platform_ioremap_resource(pdev, 0);
790*4882a593Smuzhiyun if (IS_ERR(priv->port_base)) {
791*4882a593Smuzhiyun ret = PTR_ERR(priv->port_base);
792*4882a593Smuzhiyun goto out_free_netdev;
793*4882a593Smuzhiyun }
794*4882a593Smuzhiyun
795*4882a593Smuzhiyun priv->glb_base = devm_platform_ioremap_resource(pdev, 1);
796*4882a593Smuzhiyun if (IS_ERR(priv->glb_base)) {
797*4882a593Smuzhiyun ret = PTR_ERR(priv->glb_base);
798*4882a593Smuzhiyun goto out_free_netdev;
799*4882a593Smuzhiyun }
800*4882a593Smuzhiyun
801*4882a593Smuzhiyun priv->clk = devm_clk_get(&pdev->dev, NULL);
802*4882a593Smuzhiyun if (IS_ERR(priv->clk)) {
803*4882a593Smuzhiyun dev_err(dev, "failed to get clk\n");
804*4882a593Smuzhiyun ret = -ENODEV;
805*4882a593Smuzhiyun goto out_free_netdev;
806*4882a593Smuzhiyun }
807*4882a593Smuzhiyun
808*4882a593Smuzhiyun ret = clk_prepare_enable(priv->clk);
809*4882a593Smuzhiyun if (ret) {
810*4882a593Smuzhiyun dev_err(dev, "failed to enable clk %d\n", ret);
811*4882a593Smuzhiyun goto out_free_netdev;
812*4882a593Smuzhiyun }
813*4882a593Smuzhiyun
814*4882a593Smuzhiyun priv->mac_rst = devm_reset_control_get(dev, "mac");
815*4882a593Smuzhiyun if (IS_ERR(priv->mac_rst)) {
816*4882a593Smuzhiyun ret = PTR_ERR(priv->mac_rst);
817*4882a593Smuzhiyun goto out_disable_clk;
818*4882a593Smuzhiyun }
819*4882a593Smuzhiyun hisi_femac_core_reset(priv);
820*4882a593Smuzhiyun
821*4882a593Smuzhiyun priv->phy_rst = devm_reset_control_get(dev, "phy");
822*4882a593Smuzhiyun if (IS_ERR(priv->phy_rst)) {
823*4882a593Smuzhiyun priv->phy_rst = NULL;
824*4882a593Smuzhiyun } else {
825*4882a593Smuzhiyun ret = of_property_read_u32_array(node,
826*4882a593Smuzhiyun PHY_RESET_DELAYS_PROPERTY,
827*4882a593Smuzhiyun priv->phy_reset_delays,
828*4882a593Smuzhiyun DELAYS_NUM);
829*4882a593Smuzhiyun if (ret)
830*4882a593Smuzhiyun goto out_disable_clk;
831*4882a593Smuzhiyun hisi_femac_phy_reset(priv);
832*4882a593Smuzhiyun }
833*4882a593Smuzhiyun
834*4882a593Smuzhiyun phy = of_phy_get_and_connect(ndev, node, hisi_femac_adjust_link);
835*4882a593Smuzhiyun if (!phy) {
836*4882a593Smuzhiyun dev_err(dev, "connect to PHY failed!\n");
837*4882a593Smuzhiyun ret = -ENODEV;
838*4882a593Smuzhiyun goto out_disable_clk;
839*4882a593Smuzhiyun }
840*4882a593Smuzhiyun
841*4882a593Smuzhiyun phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
842*4882a593Smuzhiyun (unsigned long)phy->phy_id,
843*4882a593Smuzhiyun phy_modes(phy->interface));
844*4882a593Smuzhiyun
845*4882a593Smuzhiyun mac_addr = of_get_mac_address(node);
846*4882a593Smuzhiyun if (!IS_ERR(mac_addr))
847*4882a593Smuzhiyun ether_addr_copy(ndev->dev_addr, mac_addr);
848*4882a593Smuzhiyun if (!is_valid_ether_addr(ndev->dev_addr)) {
849*4882a593Smuzhiyun eth_hw_addr_random(ndev);
850*4882a593Smuzhiyun dev_warn(dev, "using random MAC address %pM\n",
851*4882a593Smuzhiyun ndev->dev_addr);
852*4882a593Smuzhiyun }
853*4882a593Smuzhiyun
854*4882a593Smuzhiyun ndev->watchdog_timeo = 6 * HZ;
855*4882a593Smuzhiyun ndev->priv_flags |= IFF_UNICAST_FLT;
856*4882a593Smuzhiyun ndev->netdev_ops = &hisi_femac_netdev_ops;
857*4882a593Smuzhiyun ndev->ethtool_ops = &hisi_femac_ethtools_ops;
858*4882a593Smuzhiyun netif_napi_add(ndev, &priv->napi, hisi_femac_poll, FEMAC_POLL_WEIGHT);
859*4882a593Smuzhiyun
860*4882a593Smuzhiyun hisi_femac_port_init(priv);
861*4882a593Smuzhiyun
862*4882a593Smuzhiyun ret = hisi_femac_init_tx_and_rx_queues(priv);
863*4882a593Smuzhiyun if (ret)
864*4882a593Smuzhiyun goto out_disconnect_phy;
865*4882a593Smuzhiyun
866*4882a593Smuzhiyun ndev->irq = platform_get_irq(pdev, 0);
867*4882a593Smuzhiyun if (ndev->irq <= 0) {
868*4882a593Smuzhiyun ret = -ENODEV;
869*4882a593Smuzhiyun goto out_disconnect_phy;
870*4882a593Smuzhiyun }
871*4882a593Smuzhiyun
872*4882a593Smuzhiyun ret = devm_request_irq(dev, ndev->irq, hisi_femac_interrupt,
873*4882a593Smuzhiyun IRQF_SHARED, pdev->name, ndev);
874*4882a593Smuzhiyun if (ret) {
875*4882a593Smuzhiyun dev_err(dev, "devm_request_irq %d failed!\n", ndev->irq);
876*4882a593Smuzhiyun goto out_disconnect_phy;
877*4882a593Smuzhiyun }
878*4882a593Smuzhiyun
879*4882a593Smuzhiyun ret = register_netdev(ndev);
880*4882a593Smuzhiyun if (ret) {
881*4882a593Smuzhiyun dev_err(dev, "register_netdev failed!\n");
882*4882a593Smuzhiyun goto out_disconnect_phy;
883*4882a593Smuzhiyun }
884*4882a593Smuzhiyun
885*4882a593Smuzhiyun return ret;
886*4882a593Smuzhiyun
887*4882a593Smuzhiyun out_disconnect_phy:
888*4882a593Smuzhiyun netif_napi_del(&priv->napi);
889*4882a593Smuzhiyun phy_disconnect(phy);
890*4882a593Smuzhiyun out_disable_clk:
891*4882a593Smuzhiyun clk_disable_unprepare(priv->clk);
892*4882a593Smuzhiyun out_free_netdev:
893*4882a593Smuzhiyun free_netdev(ndev);
894*4882a593Smuzhiyun
895*4882a593Smuzhiyun return ret;
896*4882a593Smuzhiyun }
897*4882a593Smuzhiyun
/* Remove: tear down what probe set up (devm handles irq and iomaps).
 *
 * NOTE(review): netif_napi_del() runs before unregister_netdev();
 * the conventional order is to unregister the netdev first.  Kept
 * as-is — confirm against the NAPI lifetime rules before changing.
 */
static int hisi_femac_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hisi_femac_priv *priv = netdev_priv(ndev);

	netif_napi_del(&priv->napi);
	unregister_netdev(ndev);

	phy_disconnect(ndev->phydev);
	clk_disable_unprepare(priv->clk);
	free_netdev(ndev);

	return 0;
}
912*4882a593Smuzhiyun
913*4882a593Smuzhiyun #ifdef CONFIG_PM
/* Legacy platform suspend hook: quiesce the interface and gate its
 * clock.  Mirrored by hisi_femac_drv_resume().
 */
static int hisi_femac_drv_suspend(struct platform_device *pdev,
				  pm_message_t state)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hisi_femac_priv *priv = netdev_priv(ndev);

	/* stop servicing interrupts before shutting the MAC down */
	disable_irq(ndev->irq);
	if (netif_running(ndev)) {
		hisi_femac_net_close(ndev);
		netif_device_detach(ndev);
	}

	clk_disable_unprepare(priv->clk);

	return 0;
}
930*4882a593Smuzhiyun
hisi_femac_drv_resume(struct platform_device * pdev)931*4882a593Smuzhiyun static int hisi_femac_drv_resume(struct platform_device *pdev)
932*4882a593Smuzhiyun {
933*4882a593Smuzhiyun struct net_device *ndev = platform_get_drvdata(pdev);
934*4882a593Smuzhiyun struct hisi_femac_priv *priv = netdev_priv(ndev);
935*4882a593Smuzhiyun
936*4882a593Smuzhiyun clk_prepare_enable(priv->clk);
937*4882a593Smuzhiyun if (priv->phy_rst)
938*4882a593Smuzhiyun hisi_femac_phy_reset(priv);
939*4882a593Smuzhiyun
940*4882a593Smuzhiyun if (netif_running(ndev)) {
941*4882a593Smuzhiyun hisi_femac_port_init(priv);
942*4882a593Smuzhiyun hisi_femac_net_open(ndev);
943*4882a593Smuzhiyun netif_device_attach(ndev);
944*4882a593Smuzhiyun }
945*4882a593Smuzhiyun enable_irq(ndev->irq);
946*4882a593Smuzhiyun
947*4882a593Smuzhiyun return 0;
948*4882a593Smuzhiyun }
949*4882a593Smuzhiyun #endif
950*4882a593Smuzhiyun
/* Device-tree compatibles this driver binds to */
static const struct of_device_id hisi_femac_match[] = {
	{.compatible = "hisilicon,hisi-femac-v1",},
	{.compatible = "hisilicon,hisi-femac-v2",},
	{.compatible = "hisilicon,hi3516cv300-femac",},
	{ /* sentinel */ },
};

MODULE_DEVICE_TABLE(of, hisi_femac_match);
959*4882a593Smuzhiyun
/* Platform driver glue; legacy suspend/resume hooks only with CONFIG_PM */
static struct platform_driver hisi_femac_driver = {
	.driver = {
		.name = "hisi-femac",
		.of_match_table = hisi_femac_match,
	},
	.probe = hisi_femac_drv_probe,
	.remove = hisi_femac_drv_remove,
#ifdef CONFIG_PM
	.suspend = hisi_femac_drv_suspend,
	.resume = hisi_femac_drv_resume,
#endif
};
972*4882a593Smuzhiyun
/* Module registration and metadata */
module_platform_driver(hisi_femac_driver);

MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC driver");
MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:hisi-femac");
979