1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun
3*4882a593Smuzhiyun /* Copyright (c) 2014 Linaro Ltd.
4*4882a593Smuzhiyun * Copyright (c) 2014 Hisilicon Limited.
5*4882a593Smuzhiyun */
6*4882a593Smuzhiyun
7*4882a593Smuzhiyun #include <linux/module.h>
8*4882a593Smuzhiyun #include <linux/etherdevice.h>
9*4882a593Smuzhiyun #include <linux/platform_device.h>
10*4882a593Smuzhiyun #include <linux/interrupt.h>
11*4882a593Smuzhiyun #include <linux/ktime.h>
12*4882a593Smuzhiyun #include <linux/of_address.h>
13*4882a593Smuzhiyun #include <linux/phy.h>
14*4882a593Smuzhiyun #include <linux/of_mdio.h>
15*4882a593Smuzhiyun #include <linux/of_net.h>
16*4882a593Smuzhiyun #include <linux/mfd/syscon.h>
17*4882a593Smuzhiyun #include <linux/regmap.h>
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun #define SC_PPE_RESET_DREQ 0x026C
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun #define PPE_CFG_RX_ADDR 0x100
22*4882a593Smuzhiyun #define PPE_CFG_POOL_GRP 0x300
23*4882a593Smuzhiyun #define PPE_CFG_RX_BUF_SIZE 0x400
24*4882a593Smuzhiyun #define PPE_CFG_RX_FIFO_SIZE 0x500
25*4882a593Smuzhiyun #define PPE_CURR_BUF_CNT 0xa200
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun #define GE_DUPLEX_TYPE 0x08
28*4882a593Smuzhiyun #define GE_MAX_FRM_SIZE_REG 0x3c
29*4882a593Smuzhiyun #define GE_PORT_MODE 0x40
30*4882a593Smuzhiyun #define GE_PORT_EN 0x44
31*4882a593Smuzhiyun #define GE_SHORT_RUNTS_THR_REG 0x50
32*4882a593Smuzhiyun #define GE_TX_LOCAL_PAGE_REG 0x5c
33*4882a593Smuzhiyun #define GE_TRANSMIT_CONTROL_REG 0x60
34*4882a593Smuzhiyun #define GE_CF_CRC_STRIP_REG 0x1b0
35*4882a593Smuzhiyun #define GE_MODE_CHANGE_REG 0x1b4
36*4882a593Smuzhiyun #define GE_RECV_CONTROL_REG 0x1e0
37*4882a593Smuzhiyun #define GE_STATION_MAC_ADDRESS 0x210
38*4882a593Smuzhiyun
39*4882a593Smuzhiyun #define PPE_CFG_BUS_CTRL_REG 0x424
40*4882a593Smuzhiyun #define PPE_CFG_RX_CTRL_REG 0x428
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun #if defined(CONFIG_HI13X1_GMAC)
43*4882a593Smuzhiyun #define PPE_CFG_CPU_ADD_ADDR 0x6D0
44*4882a593Smuzhiyun #define PPE_CFG_MAX_FRAME_LEN_REG 0x500
45*4882a593Smuzhiyun #define PPE_CFG_RX_PKT_MODE_REG 0x504
46*4882a593Smuzhiyun #define PPE_CFG_QOS_VMID_GEN 0x520
47*4882a593Smuzhiyun #define PPE_CFG_RX_PKT_INT 0x740
48*4882a593Smuzhiyun #define PPE_INTEN 0x700
49*4882a593Smuzhiyun #define PPE_INTSTS 0x708
50*4882a593Smuzhiyun #define PPE_RINT 0x704
51*4882a593Smuzhiyun #define PPE_CFG_STS_MODE 0x880
52*4882a593Smuzhiyun #else
53*4882a593Smuzhiyun #define PPE_CFG_CPU_ADD_ADDR 0x580
54*4882a593Smuzhiyun #define PPE_CFG_MAX_FRAME_LEN_REG 0x408
55*4882a593Smuzhiyun #define PPE_CFG_RX_PKT_MODE_REG 0x438
56*4882a593Smuzhiyun #define PPE_CFG_QOS_VMID_GEN 0x500
57*4882a593Smuzhiyun #define PPE_CFG_RX_PKT_INT 0x538
58*4882a593Smuzhiyun #define PPE_INTEN 0x600
59*4882a593Smuzhiyun #define PPE_INTSTS 0x608
60*4882a593Smuzhiyun #define PPE_RINT 0x604
61*4882a593Smuzhiyun #define PPE_CFG_STS_MODE 0x700
62*4882a593Smuzhiyun #endif /* CONFIG_HI13X1_GMAC */
63*4882a593Smuzhiyun
64*4882a593Smuzhiyun #define PPE_HIS_RX_PKT_CNT 0x804
65*4882a593Smuzhiyun
66*4882a593Smuzhiyun #define RESET_DREQ_ALL 0xffffffff
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun /* REG_INTERRUPT */
69*4882a593Smuzhiyun #define RCV_INT BIT(10)
70*4882a593Smuzhiyun #define RCV_NOBUF BIT(8)
71*4882a593Smuzhiyun #define RCV_DROP BIT(7)
72*4882a593Smuzhiyun #define TX_DROP BIT(6)
73*4882a593Smuzhiyun #define DEF_INT_ERR (RCV_NOBUF | RCV_DROP | TX_DROP)
74*4882a593Smuzhiyun #define DEF_INT_MASK (RCV_INT | DEF_INT_ERR)
75*4882a593Smuzhiyun
76*4882a593Smuzhiyun /* TX descriptor config */
77*4882a593Smuzhiyun #define TX_FREE_MEM BIT(0)
78*4882a593Smuzhiyun #define TX_READ_ALLOC_L3 BIT(1)
79*4882a593Smuzhiyun #if defined(CONFIG_HI13X1_GMAC)
80*4882a593Smuzhiyun #define TX_CLEAR_WB BIT(7)
81*4882a593Smuzhiyun #define TX_RELEASE_TO_PPE BIT(4)
82*4882a593Smuzhiyun #define TX_FINISH_CACHE_INV BIT(6)
83*4882a593Smuzhiyun #define TX_POOL_SHIFT 16
84*4882a593Smuzhiyun #else
85*4882a593Smuzhiyun #define TX_CLEAR_WB BIT(4)
86*4882a593Smuzhiyun #define TX_FINISH_CACHE_INV BIT(2)
87*4882a593Smuzhiyun #endif
88*4882a593Smuzhiyun #define TX_L3_CHECKSUM BIT(5)
89*4882a593Smuzhiyun #define TX_LOOP_BACK BIT(11)
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun /* RX error */
92*4882a593Smuzhiyun #define RX_PKT_DROP BIT(0)
93*4882a593Smuzhiyun #define RX_L2_ERR BIT(1)
94*4882a593Smuzhiyun #define RX_PKT_ERR (RX_PKT_DROP | RX_L2_ERR)
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun #define SGMII_SPEED_1000 0x08
97*4882a593Smuzhiyun #define SGMII_SPEED_100 0x07
98*4882a593Smuzhiyun #define SGMII_SPEED_10 0x06
99*4882a593Smuzhiyun #define MII_SPEED_100 0x01
100*4882a593Smuzhiyun #define MII_SPEED_10 0x00
101*4882a593Smuzhiyun
102*4882a593Smuzhiyun #define GE_DUPLEX_FULL BIT(0)
103*4882a593Smuzhiyun #define GE_DUPLEX_HALF 0x00
104*4882a593Smuzhiyun #define GE_MODE_CHANGE_EN BIT(0)
105*4882a593Smuzhiyun
106*4882a593Smuzhiyun #define GE_TX_AUTO_NEG BIT(5)
107*4882a593Smuzhiyun #define GE_TX_ADD_CRC BIT(6)
108*4882a593Smuzhiyun #define GE_TX_SHORT_PAD_THROUGH BIT(7)
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun #define GE_RX_STRIP_CRC BIT(0)
111*4882a593Smuzhiyun #define GE_RX_STRIP_PAD BIT(3)
112*4882a593Smuzhiyun #define GE_RX_PAD_EN BIT(4)
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun #define GE_AUTO_NEG_CTL BIT(0)
115*4882a593Smuzhiyun
116*4882a593Smuzhiyun #define GE_RX_INT_THRESHOLD BIT(6)
117*4882a593Smuzhiyun #define GE_RX_TIMEOUT 0x04
118*4882a593Smuzhiyun
119*4882a593Smuzhiyun #define GE_RX_PORT_EN BIT(1)
120*4882a593Smuzhiyun #define GE_TX_PORT_EN BIT(2)
121*4882a593Smuzhiyun
122*4882a593Smuzhiyun #define PPE_CFG_RX_PKT_ALIGN BIT(18)
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun #if defined(CONFIG_HI13X1_GMAC)
125*4882a593Smuzhiyun #define PPE_CFG_QOS_VMID_GRP_SHIFT 4
126*4882a593Smuzhiyun #define PPE_CFG_RX_CTRL_ALIGN_SHIFT 7
127*4882a593Smuzhiyun #define PPE_CFG_STS_RX_PKT_CNT_RC BIT(0)
128*4882a593Smuzhiyun #define PPE_CFG_QOS_VMID_MODE BIT(15)
129*4882a593Smuzhiyun #define PPE_CFG_BUS_LOCAL_REL (BIT(9) | BIT(15) | BIT(19) | BIT(23))
130*4882a593Smuzhiyun
131*4882a593Smuzhiyun /* buf unit size is cache_line_size, which is 64, so the shift is 6 */
132*4882a593Smuzhiyun #define PPE_BUF_SIZE_SHIFT 6
133*4882a593Smuzhiyun #define PPE_TX_BUF_HOLD BIT(31)
134*4882a593Smuzhiyun #define SOC_CACHE_LINE_MASK 0x3F
135*4882a593Smuzhiyun #else
136*4882a593Smuzhiyun #define PPE_CFG_QOS_VMID_GRP_SHIFT 8
137*4882a593Smuzhiyun #define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11
138*4882a593Smuzhiyun #define PPE_CFG_STS_RX_PKT_CNT_RC BIT(12)
139*4882a593Smuzhiyun #define PPE_CFG_QOS_VMID_MODE BIT(14)
140*4882a593Smuzhiyun #define PPE_CFG_BUS_LOCAL_REL BIT(14)
141*4882a593Smuzhiyun
/* buf unit size is 1, so the shift is 0 */
143*4882a593Smuzhiyun #define PPE_BUF_SIZE_SHIFT 0
144*4882a593Smuzhiyun #define PPE_TX_BUF_HOLD 0
145*4882a593Smuzhiyun #endif /* CONFIG_HI13X1_GMAC */
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun #define PPE_CFG_RX_FIFO_FSFU BIT(11)
148*4882a593Smuzhiyun #define PPE_CFG_RX_DEPTH_SHIFT 16
149*4882a593Smuzhiyun #define PPE_CFG_RX_START_SHIFT 0
150*4882a593Smuzhiyun
151*4882a593Smuzhiyun #define PPE_CFG_BUS_BIG_ENDIEN BIT(0)
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun #define RX_DESC_NUM 128
154*4882a593Smuzhiyun #define TX_DESC_NUM 256
155*4882a593Smuzhiyun #define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM-1))
156*4882a593Smuzhiyun #define RX_NEXT(N) (((N) + 1) & (RX_DESC_NUM-1))
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun #define GMAC_PPE_RX_PKT_MAX_LEN 379
159*4882a593Smuzhiyun #define GMAC_MAX_PKT_LEN 1516
160*4882a593Smuzhiyun #define GMAC_MIN_PKT_LEN 31
161*4882a593Smuzhiyun #define RX_BUF_SIZE 1600
162*4882a593Smuzhiyun #define RESET_TIMEOUT 1000
163*4882a593Smuzhiyun #define TX_TIMEOUT (6 * HZ)
164*4882a593Smuzhiyun
165*4882a593Smuzhiyun #define DRV_NAME "hip04-ether"
166*4882a593Smuzhiyun #define DRV_VERSION "v1.0"
167*4882a593Smuzhiyun
168*4882a593Smuzhiyun #define HIP04_MAX_TX_COALESCE_USECS 200
169*4882a593Smuzhiyun #define HIP04_MIN_TX_COALESCE_USECS 100
170*4882a593Smuzhiyun #define HIP04_MAX_TX_COALESCE_FRAMES 200
171*4882a593Smuzhiyun #define HIP04_MIN_TX_COALESCE_FRAMES 100
172*4882a593Smuzhiyun
/* Hardware TX descriptor.  The HI13X1 variant uses a different field
 * layout than the original hip04 PPE.  All fields are written in
 * big-endian form by the xmit path; the 64-byte alignment matches the
 * descriptor stride used when computing writeback addresses.
 */
struct tx_desc {
#if defined(CONFIG_HI13X1_GMAC)
	u32 reserved1[2];
	u32 send_addr;		/* cache-line-aligned bus address of payload */
	u16 send_size;		/* payload length in bytes */
	u16 data_offset;	/* payload offset within the cache line */
	u32 reserved2[7];
	u32 cfg;		/* TX_* control flags */
	u32 wb_addr;		/* completion writeback address */
	u32 reserved3[3];
#else
	u32 send_addr;		/* bus address of payload */
	u32 send_size;		/* payload length in bytes */
	u32 next_addr;		/* next descriptor; not set up by this driver */
	u32 cfg;		/* TX_* control flags */
	u32 wb_addr;		/* completion writeback address */
#endif
} __aligned(64);
191*4882a593Smuzhiyun
/* Hardware RX descriptor, written by the PPE at the head of each RX
 * buffer.  Fields are read back with be16/be32 conversions in
 * hip04_rx_poll(), i.e. the device stores them big-endian.
 */
struct rx_desc {
#if defined(CONFIG_HI13X1_GMAC)
	u32 reserved1[3];
	u16 pkt_len;		/* received frame length; 0 marks end of batch */
	u16 reserved_16;
	u32 reserved2[6];
	u32 pkt_err;		/* RX_PKT_ERR error flags */
	u32 reserved3[5];
#else
	u16 reserved_16;
	u16 pkt_len;		/* received frame length; 0 marks end of batch */
	u32 reserve1[3];
	u32 pkt_err;		/* RX_PKT_ERR error flags */
	u32 reserve2[4];
#endif
};
208*4882a593Smuzhiyun
/* Per-device driver state.  The TX ring is lock-free: xmit advances
 * tx_head, NAPI cleanup advances tx_tail, paired smp_wmb()/smp_rmb()
 * order the updates; tx_tail sits on its own cache line.
 */
struct hip04_priv {
	void __iomem *base;		/* GMAC/PPE register window */
#if defined(CONFIG_HI13X1_GMAC)
	void __iomem *sysctrl_base;	/* system controller (reset de-assert) */
#endif
	phy_interface_t phy_mode;	/* MII or SGMII */
	int chan;			/* RX FIFO start channel */
	unsigned int port;		/* PPE port index (regmap stride is 4) */
	unsigned int group;		/* buffer pool group for this port */
	unsigned int speed;		/* last speed programmed into the MAC */
	unsigned int duplex;		/* last duplex programmed into the MAC */
	unsigned int reg_inten;		/* software shadow of PPE_INTEN */

	struct napi_struct napi;
	struct device *dev;
	struct net_device *ndev;

	struct tx_desc *tx_desc;	/* TX descriptor ring */
	dma_addr_t tx_desc_dma;		/* bus address of the TX ring */
	struct sk_buff *tx_skb[TX_DESC_NUM];	/* in-flight skb per slot */
	dma_addr_t tx_phys[TX_DESC_NUM];	/* mapped payload per slot */
	unsigned int tx_head;		/* producer index (xmit only) */

	int tx_coalesce_frames;		/* reclaim after this many frames ... */
	int tx_coalesce_usecs;		/* ... or after this many usecs */
	struct hrtimer tx_coalesce_timer;

	unsigned char *rx_buf[RX_DESC_NUM];	/* RX buffer per slot */
	dma_addr_t rx_phys[RX_DESC_NUM];	/* mapped RX buffer per slot */
	unsigned int rx_head;		/* next RX slot to process */
	unsigned int rx_buf_size;	/* frag size passed to build_skb() */
	unsigned int rx_cnt_remaining;	/* packets reported by hw, not yet polled */

	struct device_node *phy_node;
	struct phy_device *phy;
	struct regmap *map;		/* per-port PPE register map */
	struct work_struct tx_timeout_task;

	/* written only by tx cleanup */
	unsigned int tx_tail ____cacheline_aligned_in_smp;
};
250*4882a593Smuzhiyun
tx_count(unsigned int head,unsigned int tail)251*4882a593Smuzhiyun static inline unsigned int tx_count(unsigned int head, unsigned int tail)
252*4882a593Smuzhiyun {
253*4882a593Smuzhiyun return (head - tail) % TX_DESC_NUM;
254*4882a593Smuzhiyun }
255*4882a593Smuzhiyun
/* Program the MAC for a new link state: port mode register according
 * to the PHY interface and speed, then duplex, then latch the change
 * via GE_MODE_CHANGE_REG.
 */
static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 mode;

	priv->speed = speed;
	priv->duplex = duplex;

	if (priv->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		if (speed == SPEED_1000)
			mode = SGMII_SPEED_1000;
		else
			mode = (speed == SPEED_100) ? SGMII_SPEED_100
						    : SGMII_SPEED_10;
	} else if (priv->phy_mode == PHY_INTERFACE_MODE_MII) {
		mode = (speed == SPEED_100) ? MII_SPEED_100 : MII_SPEED_10;
	} else {
		netdev_warn(ndev, "not supported mode\n");
		mode = MII_SPEED_10;
	}
	writel_relaxed(mode, priv->base + GE_PORT_MODE);

	writel_relaxed(duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF,
		       priv->base + GE_DUPLEX_TYPE);

	/* tell the MAC to pick up the new mode/duplex settings */
	writel_relaxed(GE_MODE_CHANGE_EN, priv->base + GE_MODE_CHANGE_REG);
}
292*4882a593Smuzhiyun
/* De-assert ("de-request") the PPE reset via the system controller.
 * Only the HI13X1 variant exposes this register; elsewhere it's a no-op.
 */
static void hip04_reset_dreq(struct hip04_priv *priv)
{
#if defined(CONFIG_HI13X1_GMAC)
	writel_relaxed(RESET_DREQ_ALL, priv->sysctrl_base + SC_PPE_RESET_DREQ);
#endif
}
299*4882a593Smuzhiyun
/* Drain this port's PPE RX buffer pool: loop until the current-buffer
 * counter reads zero, giving up after RESET_TIMEOUT iterations.
 */
static void hip04_reset_ppe(struct hip04_priv *priv)
{
	u32 val, tmp, timeout = 0;

	do {
		regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
		/* NOTE(review): reading PPE_CFG_RX_ADDR appears to pop one
		 * buffer from the pool; the returned value is discarded. */
		regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
		if (timeout++ > RESET_TIMEOUT)
			break;
	} while (val & 0xfff);
}
311*4882a593Smuzhiyun
/* One-time PPE/GMAC datapath setup: RX counter mode, buffer pool
 * binding, RX buffer/FIFO geometry, payload alignment, bus endianness,
 * frame-length limits and CRC/padding policy.  All MMIO is relaxed;
 * nothing here races with DMA traffic.
 */
static void hip04_config_fifo(struct hip04_priv *priv)
{
	u32 val;

	/* make the RX packet counter read-clear (see hip04_recv_cnt()) */
	val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
	val |= PPE_CFG_STS_RX_PKT_CNT_RC;
	writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);

	/* bind this port to its buffer pool group */
	val = BIT(priv->group);
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);

	val = priv->group << PPE_CFG_QOS_VMID_GRP_SHIFT;
	val |= PPE_CFG_QOS_VMID_MODE;
	writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);

	/* RX buffer size, expressed in the pool's buffer units */
	val = RX_BUF_SIZE >> PPE_BUF_SIZE_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);

	/* RX FIFO depth and starting channel */
	val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
	val |= PPE_CFG_RX_FIFO_FSFU;
	val |= priv->chan << PPE_CFG_RX_START_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);

	/* offset RX payload by NET_IP_ALIGN so the IP header lands aligned */
	val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);

	val = PPE_CFG_RX_PKT_ALIGN;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);

	val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
	writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);

	/* frame length limits, PPE side and GMAC side */
	val = GMAC_PPE_RX_PKT_MAX_LEN;
	writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);

	val = GMAC_MAX_PKT_LEN;
	writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);

	val = GMAC_MIN_PKT_LEN;
	writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);

	/* TX path: autoneg, hardware CRC append, pass short padded frames */
	val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
	val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
	writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);

	/* RX path: strip CRC and padding */
	val = GE_RX_STRIP_CRC;
	writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);

	val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
	val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
	writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);

#ifndef CONFIG_HI13X1_GMAC
	val = GE_AUTO_NEG_CTL;
	writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
#endif
}
369*4882a593Smuzhiyun
/* Bring the datapath up: enable RX/TX at the MAC, ack any stale RX
 * interrupt, program RX interrupt coalescing, then unmask the default
 * interrupt set (reg_inten shadows PPE_INTEN).
 */
static void hip04_mac_enable(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	u32 val;

	/* enable tx & rx */
	val = readl_relaxed(priv->base + GE_PORT_EN);
	val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
	writel_relaxed(val, priv->base + GE_PORT_EN);

	/* clear rx int */
	val = RCV_INT;
	writel_relaxed(val, priv->base + PPE_RINT);

	/* config recv int (packet-count threshold + timeout) */
	val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);

	/* enable interrupt */
	priv->reg_inten = DEF_INT_MASK;
	writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
}
392*4882a593Smuzhiyun
hip04_mac_disable(struct net_device * ndev)393*4882a593Smuzhiyun static void hip04_mac_disable(struct net_device *ndev)
394*4882a593Smuzhiyun {
395*4882a593Smuzhiyun struct hip04_priv *priv = netdev_priv(ndev);
396*4882a593Smuzhiyun u32 val;
397*4882a593Smuzhiyun
398*4882a593Smuzhiyun /* disable int */
399*4882a593Smuzhiyun priv->reg_inten &= ~(DEF_INT_MASK);
400*4882a593Smuzhiyun writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
401*4882a593Smuzhiyun
402*4882a593Smuzhiyun /* disable tx & rx */
403*4882a593Smuzhiyun val = readl_relaxed(priv->base + GE_PORT_EN);
404*4882a593Smuzhiyun val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
405*4882a593Smuzhiyun writel_relaxed(val, priv->base + GE_PORT_EN);
406*4882a593Smuzhiyun }
407*4882a593Smuzhiyun
/* Ring the TX doorbell: post the descriptor's scaled bus address to
 * the PPE.  Deliberately a non-relaxed writel so all prior descriptor
 * stores are ordered before the device is told to fetch it.
 */
static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	u32 val;

	val = phys >> PPE_BUF_SIZE_SHIFT | PPE_TX_BUF_HOLD;
	writel(val, priv->base + PPE_CFG_CPU_ADD_ADDR);
}
415*4882a593Smuzhiyun
/* Hand one RX buffer (scaled bus address) back to this port's PPE
 * receive pool.
 */
static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	u32 val;

	val = phys >> PPE_BUF_SIZE_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, val);
}
423*4882a593Smuzhiyun
/* Number of packets received since the last read.  The counter is put
 * in PPE_CFG_STS_RX_PKT_CNT_RC mode by hip04_config_fifo(), which
 * presumably makes this register read-to-clear.
 */
static u32 hip04_recv_cnt(struct hip04_priv *priv)
{
	return readl(priv->base + PPE_HIS_RX_PKT_CNT);
}
428*4882a593Smuzhiyun
hip04_update_mac_address(struct net_device * ndev)429*4882a593Smuzhiyun static void hip04_update_mac_address(struct net_device *ndev)
430*4882a593Smuzhiyun {
431*4882a593Smuzhiyun struct hip04_priv *priv = netdev_priv(ndev);
432*4882a593Smuzhiyun
433*4882a593Smuzhiyun writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
434*4882a593Smuzhiyun priv->base + GE_STATION_MAC_ADDRESS);
435*4882a593Smuzhiyun writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
436*4882a593Smuzhiyun (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
437*4882a593Smuzhiyun priv->base + GE_STATION_MAC_ADDRESS + 4);
438*4882a593Smuzhiyun }
439*4882a593Smuzhiyun
/* ndo_set_mac_address callback.
 *
 * Fix: the return value of eth_mac_addr() was ignored.  It validates
 * the new address and returns -EADDRNOTAVAIL for an invalid one, in
 * which case we must not program the hardware and must propagate the
 * error to the caller.
 */
static int hip04_set_mac_address(struct net_device *ndev, void *addr)
{
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	hip04_update_mac_address(ndev);
	return 0;
}
446*4882a593Smuzhiyun
/* Reclaim completed TX slots between tx_tail and tx_head.
 *
 * A slot counts as complete once the hardware writeback has cleared
 * its send_addr (the xmit path points wb_addr at that field).  With
 * @force, outstanding slots are reclaimed unconditionally (teardown /
 * timeout paths).
 *
 * Returns the number of slots still outstanding when the scan stopped
 * (0 means the ring is fully drained).
 */
static int hip04_tx_reclaim(struct net_device *ndev, bool force)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	unsigned tx_tail = priv->tx_tail;
	struct tx_desc *desc;
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int count;

	/* pairs with the smp_wmb() after tx_head update in xmit */
	smp_rmb();
	count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
	if (count == 0)
		goto out;

	while (count) {
		desc = &priv->tx_desc[tx_tail];
		if (desc->send_addr != 0) {
			/* hardware still owns this slot */
			if (force)
				desc->send_addr = 0;
			else
				break;
		}

		if (priv->tx_phys[tx_tail]) {
			dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
					 priv->tx_skb[tx_tail]->len,
					 DMA_TO_DEVICE);
			priv->tx_phys[tx_tail] = 0;
		}
		pkts_compl++;
		bytes_compl += priv->tx_skb[tx_tail]->len;
		dev_kfree_skb(priv->tx_skb[tx_tail]);
		priv->tx_skb[tx_tail] = NULL;
		tx_tail = TX_NEXT(tx_tail);
		count--;
	}

	priv->tx_tail = tx_tail;
	smp_wmb(); /* Ensure tx_tail visible to xmit */

out:
	if (pkts_compl || bytes_compl)
		netdev_completed_queue(ndev, pkts_compl, bytes_compl);

	/* wake the queue once there is room again */
	if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
		netif_wake_queue(ndev);

	return count;
}
495*4882a593Smuzhiyun
/* Arm the TX coalesce hrtimer: target is half of tx_coalesce_usecs
 * with an equal slack, so it fires between half and the full coalesce
 * interval from now.
 */
static void hip04_start_tx_timer(struct hip04_priv *priv)
{
	unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2;

	/* allow timer to fire after half the time at the earliest */
	hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns),
			       ns, HRTIMER_MODE_REL);
}
504*4882a593Smuzhiyun
/* ndo_start_xmit: place @skb into the next free TX slot, fill its
 * descriptor (big-endian fields) and ring the PPE doorbell.  TX
 * completion runs from NAPI, triggered either by the coalesce frame
 * threshold or by the coalesce hrtimer.
 */
static netdev_tx_t
hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int tx_head = priv->tx_head, count;
	struct tx_desc *desc = &priv->tx_desc[tx_head];
	dma_addr_t phys;

	/* pairs with the smp_wmb() after tx_tail update in reclaim */
	smp_rmb();
	count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
	if (count == (TX_DESC_NUM - 1)) {
		/* ring full: one slot is kept empty to tell full from empty */
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, phys)) {
		/* drop on mapping failure; nothing to retry */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	priv->tx_skb[tx_head] = skb;
	priv->tx_phys[tx_head] = phys;

	desc->send_size = (__force u32)cpu_to_be32(skb->len);
#if defined(CONFIG_HI13X1_GMAC)
	desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV
		| TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT);
	/* HI13X1 takes a cache-line-aligned address plus a byte offset */
	desc->data_offset = (__force u32)cpu_to_be32(phys & SOC_CACHE_LINE_MASK);
	desc->send_addr = (__force u32)cpu_to_be32(phys & ~SOC_CACHE_LINE_MASK);
#else
	desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
	desc->send_addr = (__force u32)cpu_to_be32(phys);
#endif
	/* point writeback at send_addr: reclaim treats a cleared
	 * send_addr as the completion signal */
	phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
	desc->wb_addr = (__force u32)cpu_to_be32(phys +
		offsetof(struct tx_desc, send_addr));
	skb_tx_timestamp(skb);

	hip04_set_xmit_desc(priv, phys);
	count++;
	netdev_sent_queue(ndev, skb->len);
	priv->tx_head = TX_NEXT(tx_head);

	stats->tx_bytes += skb->len;
	stats->tx_packets++;

	/* Ensure tx_head update visible to tx reclaim */
	smp_wmb();

	/* queue is getting full, better start cleaning up now */
	if (count >= priv->tx_coalesce_frames) {
		if (napi_schedule_prep(&priv->napi)) {
			/* disable rx interrupt and timer */
			priv->reg_inten &= ~(RCV_INT);
			writel_relaxed(DEF_INT_MASK & ~RCV_INT,
				       priv->base + PPE_INTEN);
			hrtimer_cancel(&priv->tx_coalesce_timer);
			__napi_schedule(&priv->napi);
		}
	} else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
		/* cleanup not pending yet, start a new timer */
		hip04_start_tx_timer(priv);
	}

	return NETDEV_TX_OK;
}
573*4882a593Smuzhiyun
/* NAPI poll handler.
 *
 * First reclaims completed TX descriptors, then receives up to
 * @budget packets.  Each consumed RX buffer is immediately replaced
 * with a freshly mapped frag and handed back to the PPE.  Returns the
 * number of packets received; re-enables the RX interrupt only when
 * polling completed under budget.
 */
static int hip04_rx_poll(struct napi_struct *napi, int budget)
{
	struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
	struct net_device *ndev = priv->ndev;
	struct net_device_stats *stats = &ndev->stats;
	struct rx_desc *desc;
	struct sk_buff *skb;
	unsigned char *buf;
	bool last = false;
	dma_addr_t phys;
	int rx = 0;
	int tx_remaining;
	u16 len;
	u32 err;

	/* clean up tx descriptors */
	tx_remaining = hip04_tx_reclaim(ndev, false);
	priv->rx_cnt_remaining += hip04_recv_cnt(priv);
	while (priv->rx_cnt_remaining && !last) {
		buf = priv->rx_buf[priv->rx_head];
		skb = build_skb(buf, priv->rx_buf_size);
		if (unlikely(!skb)) {
			net_dbg_ratelimited("build_skb failed\n");
			goto refill;
		}

		dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
				 RX_BUF_SIZE, DMA_FROM_DEVICE);
		priv->rx_phys[priv->rx_head] = 0;

		/* PPE writes its rx_desc at the start of the buffer */
		desc = (struct rx_desc *)skb->data;
		len = be16_to_cpu((__force __be16)desc->pkt_len);
		err = be32_to_cpu((__force __be32)desc->pkt_err);

		if (0 == len) {
			/* zero length marks end of this batch */
			dev_kfree_skb_any(skb);
			last = true;
		} else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
			/* hardware-flagged error or oversized frame */
			dev_kfree_skb_any(skb);
			stats->rx_dropped++;
			stats->rx_errors++;
		} else {
			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, ndev);
			napi_gro_receive(&priv->napi, skb);
			stats->rx_packets++;
			stats->rx_bytes += len;
			rx++;
		}

refill:
		/* replace the consumed buffer before advancing the ring */
		buf = netdev_alloc_frag(priv->rx_buf_size);
		if (!buf)
			goto done;
		phys = dma_map_single(priv->dev, buf,
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, phys))
			goto done;
		priv->rx_buf[priv->rx_head] = buf;
		priv->rx_phys[priv->rx_head] = phys;
		hip04_set_recv_desc(priv, phys);

		priv->rx_head = RX_NEXT(priv->rx_head);
		if (rx >= budget) {
			/* out of budget: leave NAPI scheduled, skip
			 * napi_complete_done() and irq re-enable */
			--priv->rx_cnt_remaining;
			goto done;
		}

		if (--priv->rx_cnt_remaining == 0)
			priv->rx_cnt_remaining += hip04_recv_cnt(priv);
	}

	if (!(priv->reg_inten & RCV_INT)) {
		/* enable rx interrupt */
		priv->reg_inten |= RCV_INT;
		writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
	}
	napi_complete_done(napi, rx);
done:
	/* start a new timer if necessary */
	if (rx < budget && tx_remaining)
		hip04_start_tx_timer(priv);

	return rx;
}
660*4882a593Smuzhiyun
/* Hard IRQ handler: ack all handled causes, account error events, and
 * on RX hand control to NAPI with the RX interrupt masked.
 */
static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	u32 ists = readl_relaxed(priv->base + PPE_INTSTS);

	/* nothing pending: not our interrupt */
	if (!ists)
		return IRQ_NONE;

	/* ack everything we may handle below */
	writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);

	if (unlikely(ists & DEF_INT_ERR)) {
		if (ists & (RCV_NOBUF | RCV_DROP)) {
			stats->rx_errors++;
			stats->rx_dropped++;
			netdev_err(ndev, "rx drop\n");
		}
		if (ists & TX_DROP) {
			stats->tx_dropped++;
			netdev_err(ndev, "tx drop\n");
		}
	}

	if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
		/* disable rx interrupt; NAPI poll re-enables it */
		priv->reg_inten &= ~(RCV_INT);
		writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
		hrtimer_cancel(&priv->tx_coalesce_timer);
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}
695*4882a593Smuzhiyun
tx_done(struct hrtimer * hrtimer)696*4882a593Smuzhiyun static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
697*4882a593Smuzhiyun {
698*4882a593Smuzhiyun struct hip04_priv *priv;
699*4882a593Smuzhiyun
700*4882a593Smuzhiyun priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);
701*4882a593Smuzhiyun
702*4882a593Smuzhiyun if (napi_schedule_prep(&priv->napi)) {
703*4882a593Smuzhiyun /* disable rx interrupt */
704*4882a593Smuzhiyun priv->reg_inten &= ~(RCV_INT);
705*4882a593Smuzhiyun writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
706*4882a593Smuzhiyun __napi_schedule(&priv->napi);
707*4882a593Smuzhiyun }
708*4882a593Smuzhiyun
709*4882a593Smuzhiyun return HRTIMER_NORESTART;
710*4882a593Smuzhiyun }
711*4882a593Smuzhiyun
hip04_adjust_link(struct net_device * ndev)712*4882a593Smuzhiyun static void hip04_adjust_link(struct net_device *ndev)
713*4882a593Smuzhiyun {
714*4882a593Smuzhiyun struct hip04_priv *priv = netdev_priv(ndev);
715*4882a593Smuzhiyun struct phy_device *phy = priv->phy;
716*4882a593Smuzhiyun
717*4882a593Smuzhiyun if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
718*4882a593Smuzhiyun hip04_config_port(ndev, phy->speed, phy->duplex);
719*4882a593Smuzhiyun phy_print_status(phy);
720*4882a593Smuzhiyun }
721*4882a593Smuzhiyun }
722*4882a593Smuzhiyun
/* ndo_open: reset ring indices and the PPE, map and post every
 * preallocated RX frag buffer to the hardware, then bring up PHY,
 * queueing, MAC and NAPI — in that order.
 *
 * Returns 0 on success, -EIO if an RX buffer cannot be DMA-mapped.
 */
static int hip04_mac_open(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	priv->rx_head = 0;
	priv->rx_cnt_remaining = 0;
	priv->tx_head = 0;
	priv->tx_tail = 0;
	hip04_reset_ppe(priv);

	/* hand all RX buffers (allocated in hip04_alloc_ring) to the PPE */
	for (i = 0; i < RX_DESC_NUM; i++) {
		dma_addr_t phys;

		phys = dma_map_single(priv->dev, priv->rx_buf[i],
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		/* NOTE(review): on mapping failure, buffers mapped in earlier
		 * iterations remain mapped here — presumably unwound by a
		 * subsequent hip04_mac_stop(); confirm the callers
		 */
		if (dma_mapping_error(priv->dev, phys))
			return -EIO;

		priv->rx_phys[i] = phys;
		hip04_set_recv_desc(priv, phys);
	}

	if (priv->phy)
		phy_start(priv->phy);

	/* reset BQL accounting before the MAC starts moving packets */
	netdev_reset_queue(ndev);
	netif_start_queue(ndev);
	hip04_mac_enable(ndev);
	napi_enable(&priv->napi);

	return 0;
}
756*4882a593Smuzhiyun
/* ndo_stop: quiesce NAPI and the TX queue, disable the MAC, reclaim
 * outstanding TX descriptors, reset the PPE, stop the PHY and unmap the
 * RX DMA buffers.  The RX frag buffers themselves stay allocated so the
 * next hip04_mac_open() can reuse them.  Always returns 0.
 */
static int hip04_mac_stop(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	napi_disable(&priv->napi);
	netif_stop_queue(ndev);
	hip04_mac_disable(ndev);
	/* 'true' presumably forces a full drain — see hip04_tx_reclaim() */
	hip04_tx_reclaim(ndev, true);
	hip04_reset_ppe(priv);

	if (priv->phy)
		phy_stop(priv->phy);

	/* unmap RX DMA; rx_phys[i] == 0 marks the slot as unmapped */
	for (i = 0; i < RX_DESC_NUM; i++) {
		if (priv->rx_phys[i]) {
			dma_unmap_single(priv->dev, priv->rx_phys[i],
					 RX_BUF_SIZE, DMA_FROM_DEVICE);
			priv->rx_phys[i] = 0;
		}
	}

	return 0;
}
781*4882a593Smuzhiyun
/* ndo_tx_timeout (watchdog): defer the recovery to process context via
 * the tx_timeout_task workqueue item; the actual stop/open cycle happens
 * in hip04_tx_timeout_task().
 */
static void hip04_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct hip04_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->tx_timeout_task);
}
788*4882a593Smuzhiyun
hip04_tx_timeout_task(struct work_struct * work)789*4882a593Smuzhiyun static void hip04_tx_timeout_task(struct work_struct *work)
790*4882a593Smuzhiyun {
791*4882a593Smuzhiyun struct hip04_priv *priv;
792*4882a593Smuzhiyun
793*4882a593Smuzhiyun priv = container_of(work, struct hip04_priv, tx_timeout_task);
794*4882a593Smuzhiyun hip04_mac_stop(priv->ndev);
795*4882a593Smuzhiyun hip04_mac_open(priv->ndev);
796*4882a593Smuzhiyun }
797*4882a593Smuzhiyun
hip04_get_coalesce(struct net_device * netdev,struct ethtool_coalesce * ec)798*4882a593Smuzhiyun static int hip04_get_coalesce(struct net_device *netdev,
799*4882a593Smuzhiyun struct ethtool_coalesce *ec)
800*4882a593Smuzhiyun {
801*4882a593Smuzhiyun struct hip04_priv *priv = netdev_priv(netdev);
802*4882a593Smuzhiyun
803*4882a593Smuzhiyun ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
804*4882a593Smuzhiyun ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;
805*4882a593Smuzhiyun
806*4882a593Smuzhiyun return 0;
807*4882a593Smuzhiyun }
808*4882a593Smuzhiyun
hip04_set_coalesce(struct net_device * netdev,struct ethtool_coalesce * ec)809*4882a593Smuzhiyun static int hip04_set_coalesce(struct net_device *netdev,
810*4882a593Smuzhiyun struct ethtool_coalesce *ec)
811*4882a593Smuzhiyun {
812*4882a593Smuzhiyun struct hip04_priv *priv = netdev_priv(netdev);
813*4882a593Smuzhiyun
814*4882a593Smuzhiyun if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
815*4882a593Smuzhiyun ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
816*4882a593Smuzhiyun (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
817*4882a593Smuzhiyun ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
818*4882a593Smuzhiyun return -EINVAL;
819*4882a593Smuzhiyun
820*4882a593Smuzhiyun priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
821*4882a593Smuzhiyun priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;
822*4882a593Smuzhiyun
823*4882a593Smuzhiyun return 0;
824*4882a593Smuzhiyun }
825*4882a593Smuzhiyun
hip04_get_drvinfo(struct net_device * netdev,struct ethtool_drvinfo * drvinfo)826*4882a593Smuzhiyun static void hip04_get_drvinfo(struct net_device *netdev,
827*4882a593Smuzhiyun struct ethtool_drvinfo *drvinfo)
828*4882a593Smuzhiyun {
829*4882a593Smuzhiyun strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
830*4882a593Smuzhiyun strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
831*4882a593Smuzhiyun }
832*4882a593Smuzhiyun
/* ethtool operations: only TX interrupt coalescing (usecs + max frames)
 * is tunable on this hardware.
 */
static const struct ethtool_ops hip04_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES,
	.get_coalesce = hip04_get_coalesce,
	.set_coalesce = hip04_set_coalesce,
	.get_drvinfo = hip04_get_drvinfo,
};
840*4882a593Smuzhiyun
/* net_device callbacks wired up for this MAC */
static const struct net_device_ops hip04_netdev_ops = {
	.ndo_open = hip04_mac_open,
	.ndo_stop = hip04_mac_stop,
	.ndo_start_xmit = hip04_mac_start_xmit,
	.ndo_set_mac_address = hip04_set_mac_address,
	.ndo_tx_timeout = hip04_timeout,
	.ndo_validate_addr = eth_validate_addr,
};
849*4882a593Smuzhiyun
hip04_alloc_ring(struct net_device * ndev,struct device * d)850*4882a593Smuzhiyun static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
851*4882a593Smuzhiyun {
852*4882a593Smuzhiyun struct hip04_priv *priv = netdev_priv(ndev);
853*4882a593Smuzhiyun int i;
854*4882a593Smuzhiyun
855*4882a593Smuzhiyun priv->tx_desc = dma_alloc_coherent(d,
856*4882a593Smuzhiyun TX_DESC_NUM * sizeof(struct tx_desc),
857*4882a593Smuzhiyun &priv->tx_desc_dma, GFP_KERNEL);
858*4882a593Smuzhiyun if (!priv->tx_desc)
859*4882a593Smuzhiyun return -ENOMEM;
860*4882a593Smuzhiyun
861*4882a593Smuzhiyun priv->rx_buf_size = RX_BUF_SIZE +
862*4882a593Smuzhiyun SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
863*4882a593Smuzhiyun for (i = 0; i < RX_DESC_NUM; i++) {
864*4882a593Smuzhiyun priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
865*4882a593Smuzhiyun if (!priv->rx_buf[i])
866*4882a593Smuzhiyun return -ENOMEM;
867*4882a593Smuzhiyun }
868*4882a593Smuzhiyun
869*4882a593Smuzhiyun return 0;
870*4882a593Smuzhiyun }
871*4882a593Smuzhiyun
hip04_free_ring(struct net_device * ndev,struct device * d)872*4882a593Smuzhiyun static void hip04_free_ring(struct net_device *ndev, struct device *d)
873*4882a593Smuzhiyun {
874*4882a593Smuzhiyun struct hip04_priv *priv = netdev_priv(ndev);
875*4882a593Smuzhiyun int i;
876*4882a593Smuzhiyun
877*4882a593Smuzhiyun for (i = 0; i < RX_DESC_NUM; i++)
878*4882a593Smuzhiyun if (priv->rx_buf[i])
879*4882a593Smuzhiyun skb_free_frag(priv->rx_buf[i]);
880*4882a593Smuzhiyun
881*4882a593Smuzhiyun for (i = 0; i < TX_DESC_NUM; i++)
882*4882a593Smuzhiyun if (priv->tx_skb[i])
883*4882a593Smuzhiyun dev_kfree_skb_any(priv->tx_skb[i]);
884*4882a593Smuzhiyun
885*4882a593Smuzhiyun dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
886*4882a593Smuzhiyun priv->tx_desc, priv->tx_desc_dma);
887*4882a593Smuzhiyun }
888*4882a593Smuzhiyun
/* Probe: map the register window(s), parse DT resources ("port-handle"
 * triplet, syscon regmap, phy-mode, phy-handle, IRQ), set up the TX
 * coalesce timer, program hardware defaults with a random MAC address,
 * allocate the rings and register the net device.
 *
 * Errors unwind through alloc_fail/init_fail, freeing the ring, the
 * phy node reference and the netdev.
 */
static int hip04_mac_probe(struct platform_device *pdev)
{
	struct device *d = &pdev->dev;
	struct device_node *node = d->of_node;
	struct of_phandle_args arg;
	struct net_device *ndev;
	struct hip04_priv *priv;
	int irq;
	int ret;

	ndev = alloc_etherdev(sizeof(struct hip04_priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->dev = d;
	priv->ndev = ndev;
	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto init_fail;
	}

#if defined(CONFIG_HI13X1_GMAC)
	/* HI13X1 exposes a second window for system control registers */
	priv->sysctrl_base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(priv->sysctrl_base)) {
		ret = PTR_ERR(priv->sysctrl_base);
		goto init_fail;
	}
#endif

	/* "port-handle" = <&ppe port channel group> */
	ret = of_parse_phandle_with_fixed_args(node, "port-handle", 3, 0, &arg);
	if (ret < 0) {
		dev_warn(d, "no port-handle\n");
		goto init_fail;
	}

	priv->port = arg.args[0];
	priv->chan = arg.args[1] * RX_DESC_NUM;
	priv->group = arg.args[2];

	hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	/* BQL will try to keep the TX queue as short as possible, but it can't
	 * be faster than tx_coalesce_usecs, so we need a fast timeout here,
	 * but also long enough to gather up enough frames to ensure we don't
	 * get more interrupts than necessary.
	 * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
	 */
	priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
	priv->tx_coalesce_usecs = 200;
	priv->tx_coalesce_timer.function = tx_done;

	priv->map = syscon_node_to_regmap(arg.np);
	if (IS_ERR(priv->map)) {
		dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
		ret = PTR_ERR(priv->map);
		goto init_fail;
	}

	ret = of_get_phy_mode(node, &priv->phy_mode);
	if (ret) {
		dev_warn(d, "not find phy-mode\n");
		goto init_fail;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		/* NOTE(review): the negative code from platform_get_irq() is
		 * discarded in favour of -EINVAL — consider propagating it
		 */
		ret = -EINVAL;
		goto init_fail;
	}

	ret = devm_request_irq(d, irq, hip04_mac_interrupt,
			       0, pdev->name, ndev);
	if (ret) {
		netdev_err(ndev, "devm_request_irq failed\n");
		goto init_fail;
	}

	/* phy-handle is optional: without it the port runs fixed-config */
	priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
	if (priv->phy_node) {
		priv->phy = of_phy_connect(ndev, priv->phy_node,
					   &hip04_adjust_link,
					   0, priv->phy_mode);
		if (!priv->phy) {
			ret = -EPROBE_DEFER;
			goto init_fail;
		}
	}

	/* NOTE(review): failures after this point never phy_disconnect() a
	 * PHY connected above — verify whether that leaks the attachment
	 */
	INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);

	ndev->netdev_ops = &hip04_netdev_ops;
	ndev->ethtool_ops = &hip04_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->irq = irq;
	netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);

	hip04_reset_dreq(priv);
	hip04_reset_ppe(priv);
	if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
		hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);

	hip04_config_fifo(priv);
	/* no MAC address source in DT/hardware here: start with a random one */
	eth_random_addr(ndev->dev_addr);
	hip04_update_mac_address(ndev);

	ret = hip04_alloc_ring(ndev, d);
	if (ret) {
		netdev_err(ndev, "alloc ring fail\n");
		goto alloc_fail;
	}

	ret = register_netdev(ndev);
	if (ret)
		goto alloc_fail;

	return 0;

alloc_fail:
	/* safe on a partially-allocated ring: NULL slots are skipped */
	hip04_free_ring(ndev, d);
init_fail:
	of_node_put(priv->phy_node);	/* no-op when phy_node is NULL */
	free_netdev(ndev);
	return ret;
}
1019*4882a593Smuzhiyun
hip04_remove(struct platform_device * pdev)1020*4882a593Smuzhiyun static int hip04_remove(struct platform_device *pdev)
1021*4882a593Smuzhiyun {
1022*4882a593Smuzhiyun struct net_device *ndev = platform_get_drvdata(pdev);
1023*4882a593Smuzhiyun struct hip04_priv *priv = netdev_priv(ndev);
1024*4882a593Smuzhiyun struct device *d = &pdev->dev;
1025*4882a593Smuzhiyun
1026*4882a593Smuzhiyun if (priv->phy)
1027*4882a593Smuzhiyun phy_disconnect(priv->phy);
1028*4882a593Smuzhiyun
1029*4882a593Smuzhiyun hip04_free_ring(ndev, d);
1030*4882a593Smuzhiyun unregister_netdev(ndev);
1031*4882a593Smuzhiyun of_node_put(priv->phy_node);
1032*4882a593Smuzhiyun cancel_work_sync(&priv->tx_timeout_task);
1033*4882a593Smuzhiyun free_netdev(ndev);
1034*4882a593Smuzhiyun
1035*4882a593Smuzhiyun return 0;
1036*4882a593Smuzhiyun }
1037*4882a593Smuzhiyun
/* Bind this driver to "hisilicon,hip04-mac" device-tree nodes */
static const struct of_device_id hip04_mac_match[] = {
	{ .compatible = "hisilicon,hip04-mac" },
	{ }
};

MODULE_DEVICE_TABLE(of, hip04_mac_match);

static struct platform_driver hip04_mac_driver = {
	.probe	= hip04_mac_probe,
	.remove	= hip04_remove,
	.driver	= {
		.name		= DRV_NAME,
		.of_match_table	= hip04_mac_match,
	},
};
module_platform_driver(hip04_mac_driver);

MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");
MODULE_LICENSE("GPL");
1057