xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/mediatek/mtk_star_emac.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2020 MediaTek Corporation
4*4882a593Smuzhiyun  * Copyright (c) 2020 BayLibre SAS
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/bits.h>
10*4882a593Smuzhiyun #include <linux/clk.h>
11*4882a593Smuzhiyun #include <linux/compiler.h>
12*4882a593Smuzhiyun #include <linux/dma-mapping.h>
13*4882a593Smuzhiyun #include <linux/etherdevice.h>
14*4882a593Smuzhiyun #include <linux/kernel.h>
15*4882a593Smuzhiyun #include <linux/mfd/syscon.h>
16*4882a593Smuzhiyun #include <linux/mii.h>
17*4882a593Smuzhiyun #include <linux/module.h>
18*4882a593Smuzhiyun #include <linux/netdevice.h>
19*4882a593Smuzhiyun #include <linux/of.h>
20*4882a593Smuzhiyun #include <linux/of_mdio.h>
21*4882a593Smuzhiyun #include <linux/of_net.h>
22*4882a593Smuzhiyun #include <linux/platform_device.h>
23*4882a593Smuzhiyun #include <linux/pm.h>
24*4882a593Smuzhiyun #include <linux/regmap.h>
25*4882a593Smuzhiyun #include <linux/skbuff.h>
26*4882a593Smuzhiyun #include <linux/spinlock.h>
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun #define MTK_STAR_DRVNAME			"mtk_star_emac"
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun #define MTK_STAR_WAIT_TIMEOUT			300
31*4882a593Smuzhiyun #define MTK_STAR_MAX_FRAME_SIZE			1514
32*4882a593Smuzhiyun #define MTK_STAR_SKB_ALIGNMENT			16
33*4882a593Smuzhiyun #define MTK_STAR_NAPI_WEIGHT			64
34*4882a593Smuzhiyun #define MTK_STAR_HASHTABLE_MC_LIMIT		256
35*4882a593Smuzhiyun #define MTK_STAR_HASHTABLE_SIZE_MAX		512
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun /* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
38*4882a593Smuzhiyun  * work for this controller.
39*4882a593Smuzhiyun  */
40*4882a593Smuzhiyun #define MTK_STAR_IP_ALIGN			2
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
43*4882a593Smuzhiyun #define MTK_STAR_NCLKS ARRAY_SIZE(mtk_star_clk_names)
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun /* PHY Control Register 0 */
46*4882a593Smuzhiyun #define MTK_STAR_REG_PHY_CTRL0			0x0000
47*4882a593Smuzhiyun #define MTK_STAR_BIT_PHY_CTRL0_WTCMD		BIT(13)
48*4882a593Smuzhiyun #define MTK_STAR_BIT_PHY_CTRL0_RDCMD		BIT(14)
49*4882a593Smuzhiyun #define MTK_STAR_BIT_PHY_CTRL0_RWOK		BIT(15)
50*4882a593Smuzhiyun #define MTK_STAR_MSK_PHY_CTRL0_PREG		GENMASK(12, 8)
51*4882a593Smuzhiyun #define MTK_STAR_OFF_PHY_CTRL0_PREG		8
52*4882a593Smuzhiyun #define MTK_STAR_MSK_PHY_CTRL0_RWDATA		GENMASK(31, 16)
53*4882a593Smuzhiyun #define MTK_STAR_OFF_PHY_CTRL0_RWDATA		16
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun /* PHY Control Register 1 */
56*4882a593Smuzhiyun #define MTK_STAR_REG_PHY_CTRL1			0x0004
57*4882a593Smuzhiyun #define MTK_STAR_BIT_PHY_CTRL1_LINK_ST		BIT(0)
58*4882a593Smuzhiyun #define MTK_STAR_BIT_PHY_CTRL1_AN_EN		BIT(8)
59*4882a593Smuzhiyun #define MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD	9
60*4882a593Smuzhiyun #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M	0x00
61*4882a593Smuzhiyun #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M	0x01
62*4882a593Smuzhiyun #define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M	0x02
63*4882a593Smuzhiyun #define MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX	BIT(11)
64*4882a593Smuzhiyun #define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX	BIT(12)
65*4882a593Smuzhiyun #define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX	BIT(13)
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun /* MAC Configuration Register */
68*4882a593Smuzhiyun #define MTK_STAR_REG_MAC_CFG			0x0008
69*4882a593Smuzhiyun #define MTK_STAR_OFF_MAC_CFG_IPG		10
70*4882a593Smuzhiyun #define MTK_STAR_VAL_MAC_CFG_IPG_96BIT		GENMASK(4, 0)
71*4882a593Smuzhiyun #define MTK_STAR_BIT_MAC_CFG_MAXLEN_1522	BIT(16)
72*4882a593Smuzhiyun #define MTK_STAR_BIT_MAC_CFG_AUTO_PAD		BIT(19)
73*4882a593Smuzhiyun #define MTK_STAR_BIT_MAC_CFG_CRC_STRIP		BIT(20)
74*4882a593Smuzhiyun #define MTK_STAR_BIT_MAC_CFG_VLAN_STRIP		BIT(22)
75*4882a593Smuzhiyun #define MTK_STAR_BIT_MAC_CFG_NIC_PD		BIT(31)
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun /* Flow-Control Configuration Register */
78*4882a593Smuzhiyun #define MTK_STAR_REG_FC_CFG			0x000c
79*4882a593Smuzhiyun #define MTK_STAR_BIT_FC_CFG_BP_EN		BIT(7)
80*4882a593Smuzhiyun #define MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR	BIT(8)
81*4882a593Smuzhiyun #define MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH	16
82*4882a593Smuzhiyun #define MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH	GENMASK(27, 16)
83*4882a593Smuzhiyun #define MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K	0x800
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun /* ARL Configuration Register */
86*4882a593Smuzhiyun #define MTK_STAR_REG_ARL_CFG			0x0010
87*4882a593Smuzhiyun #define MTK_STAR_BIT_ARL_CFG_HASH_ALG		BIT(0)
88*4882a593Smuzhiyun #define MTK_STAR_BIT_ARL_CFG_MISC_MODE		BIT(4)
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun /* MAC High and Low Bytes Registers */
91*4882a593Smuzhiyun #define MTK_STAR_REG_MY_MAC_H			0x0014
92*4882a593Smuzhiyun #define MTK_STAR_REG_MY_MAC_L			0x0018
93*4882a593Smuzhiyun 
94*4882a593Smuzhiyun /* Hash Table Control Register */
95*4882a593Smuzhiyun #define MTK_STAR_REG_HASH_CTRL			0x001c
96*4882a593Smuzhiyun #define MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR	GENMASK(8, 0)
97*4882a593Smuzhiyun #define MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA	BIT(12)
98*4882a593Smuzhiyun #define MTK_STAR_BIT_HASH_CTRL_ACC_CMD		BIT(13)
99*4882a593Smuzhiyun #define MTK_STAR_BIT_HASH_CTRL_CMD_START	BIT(14)
100*4882a593Smuzhiyun #define MTK_STAR_BIT_HASH_CTRL_BIST_OK		BIT(16)
101*4882a593Smuzhiyun #define MTK_STAR_BIT_HASH_CTRL_BIST_DONE	BIT(17)
102*4882a593Smuzhiyun #define MTK_STAR_BIT_HASH_CTRL_BIST_EN		BIT(31)
103*4882a593Smuzhiyun 
104*4882a593Smuzhiyun /* TX DMA Control Register */
105*4882a593Smuzhiyun #define MTK_STAR_REG_TX_DMA_CTRL		0x0034
106*4882a593Smuzhiyun #define MTK_STAR_BIT_TX_DMA_CTRL_START		BIT(0)
107*4882a593Smuzhiyun #define MTK_STAR_BIT_TX_DMA_CTRL_STOP		BIT(1)
108*4882a593Smuzhiyun #define MTK_STAR_BIT_TX_DMA_CTRL_RESUME		BIT(2)
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun /* RX DMA Control Register */
111*4882a593Smuzhiyun #define MTK_STAR_REG_RX_DMA_CTRL		0x0038
112*4882a593Smuzhiyun #define MTK_STAR_BIT_RX_DMA_CTRL_START		BIT(0)
113*4882a593Smuzhiyun #define MTK_STAR_BIT_RX_DMA_CTRL_STOP		BIT(1)
114*4882a593Smuzhiyun #define MTK_STAR_BIT_RX_DMA_CTRL_RESUME		BIT(2)
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun /* DMA Address Registers */
117*4882a593Smuzhiyun #define MTK_STAR_REG_TX_DPTR			0x003c
118*4882a593Smuzhiyun #define MTK_STAR_REG_RX_DPTR			0x0040
119*4882a593Smuzhiyun #define MTK_STAR_REG_TX_BASE_ADDR		0x0044
120*4882a593Smuzhiyun #define MTK_STAR_REG_RX_BASE_ADDR		0x0048
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun /* Interrupt Status Register */
123*4882a593Smuzhiyun #define MTK_STAR_REG_INT_STS			0x0050
124*4882a593Smuzhiyun #define MTK_STAR_REG_INT_STS_PORT_STS_CHG	BIT(2)
125*4882a593Smuzhiyun #define MTK_STAR_REG_INT_STS_MIB_CNT_TH		BIT(3)
126*4882a593Smuzhiyun #define MTK_STAR_BIT_INT_STS_FNRC		BIT(6)
127*4882a593Smuzhiyun #define MTK_STAR_BIT_INT_STS_TNTC		BIT(8)
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun /* Interrupt Mask Register */
130*4882a593Smuzhiyun #define MTK_STAR_REG_INT_MASK			0x0054
131*4882a593Smuzhiyun #define MTK_STAR_BIT_INT_MASK_FNRC		BIT(6)
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun /* Misc. Config Register */
134*4882a593Smuzhiyun #define MTK_STAR_REG_TEST1			0x005c
135*4882a593Smuzhiyun #define MTK_STAR_BIT_TEST1_RST_HASH_MBIST	BIT(31)
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun /* Extended Configuration Register */
138*4882a593Smuzhiyun #define MTK_STAR_REG_EXT_CFG			0x0060
139*4882a593Smuzhiyun #define MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS	16
140*4882a593Smuzhiyun #define MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS	GENMASK(26, 16)
141*4882a593Smuzhiyun #define MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K	0x400
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun /* EthSys Configuration Register */
144*4882a593Smuzhiyun #define MTK_STAR_REG_SYS_CONF			0x0094
145*4882a593Smuzhiyun #define MTK_STAR_BIT_MII_PAD_OUT_ENABLE		BIT(0)
146*4882a593Smuzhiyun #define MTK_STAR_BIT_EXT_MDC_MODE		BIT(1)
147*4882a593Smuzhiyun #define MTK_STAR_BIT_SWC_MII_MODE		BIT(2)
148*4882a593Smuzhiyun 
149*4882a593Smuzhiyun /* MAC Clock Configuration Register */
150*4882a593Smuzhiyun #define MTK_STAR_REG_MAC_CLK_CONF		0x00ac
151*4882a593Smuzhiyun #define MTK_STAR_MSK_MAC_CLK_CONF		GENMASK(7, 0)
152*4882a593Smuzhiyun #define MTK_STAR_BIT_CLK_DIV_10			0x0a
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun /* Counter registers. */
155*4882a593Smuzhiyun #define MTK_STAR_REG_C_RXOKPKT			0x0100
156*4882a593Smuzhiyun #define MTK_STAR_REG_C_RXOKBYTE			0x0104
157*4882a593Smuzhiyun #define MTK_STAR_REG_C_RXRUNT			0x0108
158*4882a593Smuzhiyun #define MTK_STAR_REG_C_RXLONG			0x010c
159*4882a593Smuzhiyun #define MTK_STAR_REG_C_RXDROP			0x0110
160*4882a593Smuzhiyun #define MTK_STAR_REG_C_RXCRC			0x0114
161*4882a593Smuzhiyun #define MTK_STAR_REG_C_RXARLDROP		0x0118
162*4882a593Smuzhiyun #define MTK_STAR_REG_C_RXVLANDROP		0x011c
163*4882a593Smuzhiyun #define MTK_STAR_REG_C_RXCSERR			0x0120
164*4882a593Smuzhiyun #define MTK_STAR_REG_C_RXPAUSE			0x0124
165*4882a593Smuzhiyun #define MTK_STAR_REG_C_TXOKPKT			0x0128
166*4882a593Smuzhiyun #define MTK_STAR_REG_C_TXOKBYTE			0x012c
167*4882a593Smuzhiyun #define MTK_STAR_REG_C_TXPAUSECOL		0x0130
168*4882a593Smuzhiyun #define MTK_STAR_REG_C_TXRTY			0x0134
169*4882a593Smuzhiyun #define MTK_STAR_REG_C_TXSKIP			0x0138
170*4882a593Smuzhiyun #define MTK_STAR_REG_C_TX_ARP			0x013c
171*4882a593Smuzhiyun #define MTK_STAR_REG_C_RX_RERR			0x01d8
172*4882a593Smuzhiyun #define MTK_STAR_REG_C_RX_UNI			0x01dc
173*4882a593Smuzhiyun #define MTK_STAR_REG_C_RX_MULTI			0x01e0
174*4882a593Smuzhiyun #define MTK_STAR_REG_C_RX_BROAD			0x01e4
175*4882a593Smuzhiyun #define MTK_STAR_REG_C_RX_ALIGNERR		0x01e8
176*4882a593Smuzhiyun #define MTK_STAR_REG_C_TX_UNI			0x01ec
177*4882a593Smuzhiyun #define MTK_STAR_REG_C_TX_MULTI			0x01f0
178*4882a593Smuzhiyun #define MTK_STAR_REG_C_TX_BROAD			0x01f4
179*4882a593Smuzhiyun #define MTK_STAR_REG_C_TX_TIMEOUT		0x01f8
180*4882a593Smuzhiyun #define MTK_STAR_REG_C_TX_LATECOL		0x01fc
181*4882a593Smuzhiyun #define MTK_STAR_REG_C_RX_LENGTHERR		0x0214
182*4882a593Smuzhiyun #define MTK_STAR_REG_C_RX_TWIST			0x0218
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun /* Ethernet CFG Control */
185*4882a593Smuzhiyun #define MTK_PERICFG_REG_NIC_CFG_CON		0x03c4
186*4882a593Smuzhiyun #define MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII	GENMASK(3, 0)
187*4882a593Smuzhiyun #define MTK_PERICFG_BIT_NIC_CFG_CON_RMII	BIT(0)
188*4882a593Smuzhiyun 
189*4882a593Smuzhiyun /* Represents the actual structure of descriptors used by the MAC. We can
190*4882a593Smuzhiyun  * reuse the same structure for both TX and RX - the layout is the same, only
191*4882a593Smuzhiyun  * the flags differ slightly.
192*4882a593Smuzhiyun  */
struct mtk_star_ring_desc {
	/* Contains both the status flags as well as packet length. */
	u32 status;
	/* DMA address of the packet buffer (see mtk_star_ring_push_head). */
	u32 data_ptr;
	/* NOTE(review): name suggests a VLAN tag; not accessed in the code
	 * visible here - confirm against the datasheet before using.
	 */
	u32 vtag;
	u32 reserved;
};
200*4882a593Smuzhiyun 
201*4882a593Smuzhiyun #define MTK_STAR_DESC_MSK_LEN			GENMASK(15, 0)
202*4882a593Smuzhiyun #define MTK_STAR_DESC_BIT_RX_CRCE		BIT(24)
203*4882a593Smuzhiyun #define MTK_STAR_DESC_BIT_RX_OSIZE		BIT(25)
204*4882a593Smuzhiyun #define MTK_STAR_DESC_BIT_INT			BIT(27)
205*4882a593Smuzhiyun #define MTK_STAR_DESC_BIT_LS			BIT(28)
206*4882a593Smuzhiyun #define MTK_STAR_DESC_BIT_FS			BIT(29)
207*4882a593Smuzhiyun #define MTK_STAR_DESC_BIT_EOR			BIT(30)
208*4882a593Smuzhiyun #define MTK_STAR_DESC_BIT_COWN			BIT(31)
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun /* Helper structure for storing data read from/written to descriptors in order
211*4882a593Smuzhiyun  * to limit reads from/writes to DMA memory.
212*4882a593Smuzhiyun  */
struct mtk_star_ring_desc_data {
	unsigned int len;	/* packet length from the status word */
	unsigned int flags;	/* status word with the length bits masked out */
	dma_addr_t dma_addr;	/* mapping of the buffer backing this slot */
	struct sk_buff *skb;	/* skb backing this slot */
};
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun #define MTK_STAR_RING_NUM_DESCS			128
221*4882a593Smuzhiyun #define MTK_STAR_NUM_TX_DESCS			MTK_STAR_RING_NUM_DESCS
222*4882a593Smuzhiyun #define MTK_STAR_NUM_RX_DESCS			MTK_STAR_RING_NUM_DESCS
223*4882a593Smuzhiyun #define MTK_STAR_NUM_DESCS_TOTAL		(MTK_STAR_RING_NUM_DESCS * 2)
224*4882a593Smuzhiyun #define MTK_STAR_DMA_SIZE \
225*4882a593Smuzhiyun 		(MTK_STAR_NUM_DESCS_TOTAL * sizeof(struct mtk_star_ring_desc))
226*4882a593Smuzhiyun 
/* Software bookkeeping for one descriptor ring (same layout for TX and RX). */
struct mtk_star_ring {
	struct mtk_star_ring_desc *descs;	/* descriptor array shared with the MAC */
	struct sk_buff *skbs[MTK_STAR_RING_NUM_DESCS];	/* one skb per descriptor slot */
	dma_addr_t dma_addrs[MTK_STAR_RING_NUM_DESCS];	/* one mapping per descriptor slot */
	unsigned int head;	/* next slot handed to the hardware (push side) */
	unsigned int tail;	/* next slot reclaimed from the hardware (pop side) */
};
234*4882a593Smuzhiyun 
/* Driver-private state, stored in the net_device's private area. */
struct mtk_star_priv {
	struct net_device *ndev;

	struct regmap *regs;	/* MAC register space */
	struct regmap *pericfg;	/* peripheral-config syscon (NIC_CFG_CON) */

	struct clk_bulk_data clks[MTK_STAR_NCLKS];	/* "core", "reg", "trans" */

	void *ring_base;	/* CPU address of the descriptor DMA area */
	struct mtk_star_ring_desc *descs_base;	/* ring_base viewed as descriptors */
	dma_addr_t dma_addr;	/* device address of the descriptor DMA area */
	struct mtk_star_ring tx_ring;
	struct mtk_star_ring rx_ring;

	struct mii_bus *mii;
	struct napi_struct napi;

	struct device_node *phy_node;
	phy_interface_t phy_intf;
	struct phy_device *phydev;
	unsigned int link;
	int speed;
	int duplex;
	int pause;

	/* Protects against concurrent descriptor access. */
	spinlock_t lock;

	struct rtnl_link_stats64 stats;
};
265*4882a593Smuzhiyun 
mtk_star_get_dev(struct mtk_star_priv * priv)266*4882a593Smuzhiyun static struct device *mtk_star_get_dev(struct mtk_star_priv *priv)
267*4882a593Smuzhiyun {
268*4882a593Smuzhiyun 	return priv->ndev->dev.parent;
269*4882a593Smuzhiyun }
270*4882a593Smuzhiyun 
/* 32-bit registers at a 4-byte stride. regmap's internal locking is
 * disabled; NOTE(review): register access is presumably serialized by the
 * callers (e.g. via priv->lock) - confirm.
 */
static const struct regmap_config mtk_star_regmap_config = {
	.reg_bits		= 32,
	.val_bits		= 32,
	.reg_stride		= 4,
	.disable_locking	= true,
};
277*4882a593Smuzhiyun 
mtk_star_ring_init(struct mtk_star_ring * ring,struct mtk_star_ring_desc * descs)278*4882a593Smuzhiyun static void mtk_star_ring_init(struct mtk_star_ring *ring,
279*4882a593Smuzhiyun 			       struct mtk_star_ring_desc *descs)
280*4882a593Smuzhiyun {
281*4882a593Smuzhiyun 	memset(ring, 0, sizeof(*ring));
282*4882a593Smuzhiyun 	ring->descs = descs;
283*4882a593Smuzhiyun 	ring->head = 0;
284*4882a593Smuzhiyun 	ring->tail = 0;
285*4882a593Smuzhiyun }
286*4882a593Smuzhiyun 
/* Reclaim the descriptor at the ring's tail if the hardware has released
 * it back to the CPU.
 *
 * On success, fills @desc_data with the length, the remaining status flags,
 * the DMA address and the skb that were attached to the slot, resets the
 * descriptor for reuse (keeping only COWN and EOR) and advances the tail.
 *
 * Returns 0 on success, -1 if the descriptor is still owned by the MAC.
 */
static int mtk_star_ring_pop_tail(struct mtk_star_ring *ring,
				  struct mtk_star_ring_desc_data *desc_data)
{
	struct mtk_star_ring_desc *desc = &ring->descs[ring->tail];
	unsigned int status;

	status = READ_ONCE(desc->status);
	dma_rmb(); /* Make sure we read the status bits before checking it. */

	/* COWN set means the CPU owns the descriptor again. */
	if (!(status & MTK_STAR_DESC_BIT_COWN))
		return -1;

	desc_data->len = status & MTK_STAR_DESC_MSK_LEN;
	desc_data->flags = status & ~MTK_STAR_DESC_MSK_LEN;
	desc_data->dma_addr = ring->dma_addrs[ring->tail];
	desc_data->skb = ring->skbs[ring->tail];

	ring->dma_addrs[ring->tail] = 0;
	ring->skbs[ring->tail] = NULL;

	/* Keep only the ownership and end-of-ring markers. */
	status &= MTK_STAR_DESC_BIT_COWN | MTK_STAR_DESC_BIT_EOR;

	WRITE_ONCE(desc->data_ptr, 0);
	WRITE_ONCE(desc->status, status);

	ring->tail = (ring->tail + 1) % MTK_STAR_RING_NUM_DESCS;

	return 0;
}
316*4882a593Smuzhiyun 
mtk_star_ring_push_head(struct mtk_star_ring * ring,struct mtk_star_ring_desc_data * desc_data,unsigned int flags)317*4882a593Smuzhiyun static void mtk_star_ring_push_head(struct mtk_star_ring *ring,
318*4882a593Smuzhiyun 				    struct mtk_star_ring_desc_data *desc_data,
319*4882a593Smuzhiyun 				    unsigned int flags)
320*4882a593Smuzhiyun {
321*4882a593Smuzhiyun 	struct mtk_star_ring_desc *desc = &ring->descs[ring->head];
322*4882a593Smuzhiyun 	unsigned int status;
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun 	status = READ_ONCE(desc->status);
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun 	ring->skbs[ring->head] = desc_data->skb;
327*4882a593Smuzhiyun 	ring->dma_addrs[ring->head] = desc_data->dma_addr;
328*4882a593Smuzhiyun 
329*4882a593Smuzhiyun 	status |= desc_data->len;
330*4882a593Smuzhiyun 	if (flags)
331*4882a593Smuzhiyun 		status |= flags;
332*4882a593Smuzhiyun 
333*4882a593Smuzhiyun 	WRITE_ONCE(desc->data_ptr, desc_data->dma_addr);
334*4882a593Smuzhiyun 	WRITE_ONCE(desc->status, status);
335*4882a593Smuzhiyun 	status &= ~MTK_STAR_DESC_BIT_COWN;
336*4882a593Smuzhiyun 	/* Flush previous modifications before ownership change. */
337*4882a593Smuzhiyun 	dma_wmb();
338*4882a593Smuzhiyun 	WRITE_ONCE(desc->status, status);
339*4882a593Smuzhiyun 
340*4882a593Smuzhiyun 	ring->head = (ring->head + 1) % MTK_STAR_RING_NUM_DESCS;
341*4882a593Smuzhiyun }
342*4882a593Smuzhiyun 
/* Hand an RX buffer to the hardware; RX descriptors need no extra flags. */
static void
mtk_star_ring_push_head_rx(struct mtk_star_ring *ring,
			   struct mtk_star_ring_desc_data *desc_data)
{
	mtk_star_ring_push_head(ring, desc_data, 0);
}
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun static void
mtk_star_ring_push_head_tx(struct mtk_star_ring * ring,struct mtk_star_ring_desc_data * desc_data)351*4882a593Smuzhiyun mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
352*4882a593Smuzhiyun 			   struct mtk_star_ring_desc_data *desc_data)
353*4882a593Smuzhiyun {
354*4882a593Smuzhiyun 	static const unsigned int flags = MTK_STAR_DESC_BIT_FS |
355*4882a593Smuzhiyun 					  MTK_STAR_DESC_BIT_LS |
356*4882a593Smuzhiyun 					  MTK_STAR_DESC_BIT_INT;
357*4882a593Smuzhiyun 
358*4882a593Smuzhiyun 	mtk_star_ring_push_head(ring, desc_data, flags);
359*4882a593Smuzhiyun }
360*4882a593Smuzhiyun 
/* Number of descriptors currently in flight between head and tail.
 *
 * NOTE(review): head and tail are unsigned, so head - tail wraps to a huge
 * value when head < tail and abs() cannot negate an unsigned result; this
 * appears to rely on head never lagging behind tail - confirm the intended
 * index discipline before touching this.
 */
static unsigned int mtk_star_ring_num_used_descs(struct mtk_star_ring *ring)
{
	return abs(ring->head - ring->tail);
}
365*4882a593Smuzhiyun 
mtk_star_ring_full(struct mtk_star_ring * ring)366*4882a593Smuzhiyun static bool mtk_star_ring_full(struct mtk_star_ring *ring)
367*4882a593Smuzhiyun {
368*4882a593Smuzhiyun 	return mtk_star_ring_num_used_descs(ring) == MTK_STAR_RING_NUM_DESCS;
369*4882a593Smuzhiyun }
370*4882a593Smuzhiyun 
mtk_star_ring_descs_available(struct mtk_star_ring * ring)371*4882a593Smuzhiyun static bool mtk_star_ring_descs_available(struct mtk_star_ring *ring)
372*4882a593Smuzhiyun {
373*4882a593Smuzhiyun 	return mtk_star_ring_num_used_descs(ring) > 0;
374*4882a593Smuzhiyun }
375*4882a593Smuzhiyun 
/* Map the RX buffer of @skb for device writes and return the DMA handle.
 *
 * The mapping starts two bytes before the skb's tail pointer so that the
 * address handed to the MAC satisfies the 4N + 2 alignment rule below.
 * NOTE(review): the literal 2 presumably corresponds to MTK_STAR_IP_ALIGN
 * applied when the skb was allocated - confirm against the RX skb
 * allocation path.
 */
static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
				      struct sk_buff *skb)
{
	struct device *dev = mtk_star_get_dev(priv);

	/* Data pointer for the RX DMA descriptor must be aligned to 4N + 2. */
	return dma_map_single(dev, skb_tail_pointer(skb) - 2,
			      skb_tailroom(skb), DMA_FROM_DEVICE);
}
385*4882a593Smuzhiyun 
mtk_star_dma_unmap_rx(struct mtk_star_priv * priv,struct mtk_star_ring_desc_data * desc_data)386*4882a593Smuzhiyun static void mtk_star_dma_unmap_rx(struct mtk_star_priv *priv,
387*4882a593Smuzhiyun 				  struct mtk_star_ring_desc_data *desc_data)
388*4882a593Smuzhiyun {
389*4882a593Smuzhiyun 	struct device *dev = mtk_star_get_dev(priv);
390*4882a593Smuzhiyun 
391*4882a593Smuzhiyun 	dma_unmap_single(dev, desc_data->dma_addr,
392*4882a593Smuzhiyun 			 skb_tailroom(desc_data->skb), DMA_FROM_DEVICE);
393*4882a593Smuzhiyun }
394*4882a593Smuzhiyun 
mtk_star_dma_map_tx(struct mtk_star_priv * priv,struct sk_buff * skb)395*4882a593Smuzhiyun static dma_addr_t mtk_star_dma_map_tx(struct mtk_star_priv *priv,
396*4882a593Smuzhiyun 				      struct sk_buff *skb)
397*4882a593Smuzhiyun {
398*4882a593Smuzhiyun 	struct device *dev = mtk_star_get_dev(priv);
399*4882a593Smuzhiyun 
400*4882a593Smuzhiyun 	return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
401*4882a593Smuzhiyun }
402*4882a593Smuzhiyun 
mtk_star_dma_unmap_tx(struct mtk_star_priv * priv,struct mtk_star_ring_desc_data * desc_data)403*4882a593Smuzhiyun static void mtk_star_dma_unmap_tx(struct mtk_star_priv *priv,
404*4882a593Smuzhiyun 				  struct mtk_star_ring_desc_data *desc_data)
405*4882a593Smuzhiyun {
406*4882a593Smuzhiyun 	struct device *dev = mtk_star_get_dev(priv);
407*4882a593Smuzhiyun 
408*4882a593Smuzhiyun 	return dma_unmap_single(dev, desc_data->dma_addr,
409*4882a593Smuzhiyun 				skb_headlen(desc_data->skb), DMA_TO_DEVICE);
410*4882a593Smuzhiyun }
411*4882a593Smuzhiyun 
/* Clear the NIC_PD bit ("NIC power-down", going by the macro name) in the
 * MAC configuration register.
 */
static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
{
	regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
			  MTK_STAR_BIT_MAC_CFG_NIC_PD);
}
417*4882a593Smuzhiyun 
/* Unmask the three interrupts we care about, mask all others. */
static void mtk_star_intr_enable(struct mtk_star_priv *priv)
{
	/* TX complete, RX complete and MIB counter threshold. */
	unsigned int val = MTK_STAR_BIT_INT_STS_TNTC |
			   MTK_STAR_BIT_INT_STS_FNRC |
			   MTK_STAR_REG_INT_STS_MIB_CNT_TH;

	/* A set bit in INT_MASK masks that source, hence the negation. */
	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~val);
}
427*4882a593Smuzhiyun 
/* Mask every interrupt source (all-ones mask register). */
static void mtk_star_intr_disable(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
}
432*4882a593Smuzhiyun 
mtk_star_intr_read(struct mtk_star_priv * priv)433*4882a593Smuzhiyun static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv)
434*4882a593Smuzhiyun {
435*4882a593Smuzhiyun 	unsigned int val;
436*4882a593Smuzhiyun 
437*4882a593Smuzhiyun 	regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
438*4882a593Smuzhiyun 
439*4882a593Smuzhiyun 	return val;
440*4882a593Smuzhiyun }
441*4882a593Smuzhiyun 
mtk_star_intr_ack_all(struct mtk_star_priv * priv)442*4882a593Smuzhiyun static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
443*4882a593Smuzhiyun {
444*4882a593Smuzhiyun 	unsigned int val;
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun 	val = mtk_star_intr_read(priv);
447*4882a593Smuzhiyun 	regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);
448*4882a593Smuzhiyun 
449*4882a593Smuzhiyun 	return val;
450*4882a593Smuzhiyun }
451*4882a593Smuzhiyun 
/* Initialize the descriptor area and program it into the MAC.
 *
 * Every descriptor is zeroed and handed to the CPU (COWN); the last
 * descriptor of each ring additionally gets the end-of-ring (EOR) marker.
 * The TX ring occupies the first half of the DMA area, the RX ring the
 * second half, and the base/current pointer registers are set accordingly.
 */
static void mtk_star_dma_init(struct mtk_star_priv *priv)
{
	struct mtk_star_ring_desc *desc;
	unsigned int val;
	int i;

	priv->descs_base = (struct mtk_star_ring_desc *)priv->ring_base;

	for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++) {
		desc = &priv->descs_base[i];

		memset(desc, 0, sizeof(*desc));
		desc->status = MTK_STAR_DESC_BIT_COWN;
		/* TX ring ends at index NUM_TX_DESCS - 1, RX ring at the
		 * end of the whole area.
		 */
		if ((i == MTK_STAR_NUM_TX_DESCS - 1) ||
		    (i == MTK_STAR_NUM_DESCS_TOTAL - 1))
			desc->status |= MTK_STAR_DESC_BIT_EOR;
	}

	mtk_star_ring_init(&priv->tx_ring, priv->descs_base);
	mtk_star_ring_init(&priv->rx_ring,
			   priv->descs_base + MTK_STAR_NUM_TX_DESCS);

	/* Set DMA pointers. The registers are 32 bits wide;
	 * NOTE(review): this truncates dma_addr_t, presumably relying on a
	 * 32-bit DMA mask for the descriptor allocation - confirm.
	 */
	val = (unsigned int)priv->dma_addr;
	regmap_write(priv->regs, MTK_STAR_REG_TX_BASE_ADDR, val);
	regmap_write(priv->regs, MTK_STAR_REG_TX_DPTR, val);

	val += sizeof(struct mtk_star_ring_desc) * MTK_STAR_NUM_TX_DESCS;
	regmap_write(priv->regs, MTK_STAR_REG_RX_BASE_ADDR, val);
	regmap_write(priv->regs, MTK_STAR_REG_RX_DPTR, val);
}
483*4882a593Smuzhiyun 
/* Set the START bit of both DMA engines; set_bits preserves the other
 * control bits.
 */
static void mtk_star_dma_start(struct mtk_star_priv *priv)
{
	regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
			MTK_STAR_BIT_TX_DMA_CTRL_START);
	regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
			MTK_STAR_BIT_RX_DMA_CTRL_START);
}
491*4882a593Smuzhiyun 
/* Stop both DMA engines. Note these are plain writes (unlike the set_bits
 * used in mtk_star_dma_start()), so all other control bits are cleared.
 */
static void mtk_star_dma_stop(struct mtk_star_priv *priv)
{
	regmap_write(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
		     MTK_STAR_BIT_TX_DMA_CTRL_STOP);
	regmap_write(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
		     MTK_STAR_BIT_RX_DMA_CTRL_STOP);
}
499*4882a593Smuzhiyun 
mtk_star_dma_disable(struct mtk_star_priv * priv)500*4882a593Smuzhiyun static void mtk_star_dma_disable(struct mtk_star_priv *priv)
501*4882a593Smuzhiyun {
502*4882a593Smuzhiyun 	int i;
503*4882a593Smuzhiyun 
504*4882a593Smuzhiyun 	mtk_star_dma_stop(priv);
505*4882a593Smuzhiyun 
506*4882a593Smuzhiyun 	/* Take back all descriptors. */
507*4882a593Smuzhiyun 	for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++)
508*4882a593Smuzhiyun 		priv->descs_base[i].status |= MTK_STAR_DESC_BIT_COWN;
509*4882a593Smuzhiyun }
510*4882a593Smuzhiyun 
/* Set the RESUME bit of the RX DMA control register. */
static void mtk_star_dma_resume_rx(struct mtk_star_priv *priv)
{
	regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
			MTK_STAR_BIT_RX_DMA_CTRL_RESUME);
}
516*4882a593Smuzhiyun 
/* Set the RESUME bit of the TX DMA control register. */
static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv)
{
	regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
			MTK_STAR_BIT_TX_DMA_CTRL_RESUME);
}
522*4882a593Smuzhiyun 
mtk_star_set_mac_addr(struct net_device * ndev)523*4882a593Smuzhiyun static void mtk_star_set_mac_addr(struct net_device *ndev)
524*4882a593Smuzhiyun {
525*4882a593Smuzhiyun 	struct mtk_star_priv *priv = netdev_priv(ndev);
526*4882a593Smuzhiyun 	u8 *mac_addr = ndev->dev_addr;
527*4882a593Smuzhiyun 	unsigned int high, low;
528*4882a593Smuzhiyun 
529*4882a593Smuzhiyun 	high = mac_addr[0] << 8 | mac_addr[1] << 0;
530*4882a593Smuzhiyun 	low = mac_addr[2] << 24 | mac_addr[3] << 16 |
531*4882a593Smuzhiyun 	      mac_addr[4] << 8 | mac_addr[5];
532*4882a593Smuzhiyun 
533*4882a593Smuzhiyun 	regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_H, high);
534*4882a593Smuzhiyun 	regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_L, low);
535*4882a593Smuzhiyun }
536*4882a593Smuzhiyun 
mtk_star_reset_counters(struct mtk_star_priv * priv)537*4882a593Smuzhiyun static void mtk_star_reset_counters(struct mtk_star_priv *priv)
538*4882a593Smuzhiyun {
539*4882a593Smuzhiyun 	static const unsigned int counter_regs[] = {
540*4882a593Smuzhiyun 		MTK_STAR_REG_C_RXOKPKT,
541*4882a593Smuzhiyun 		MTK_STAR_REG_C_RXOKBYTE,
542*4882a593Smuzhiyun 		MTK_STAR_REG_C_RXRUNT,
543*4882a593Smuzhiyun 		MTK_STAR_REG_C_RXLONG,
544*4882a593Smuzhiyun 		MTK_STAR_REG_C_RXDROP,
545*4882a593Smuzhiyun 		MTK_STAR_REG_C_RXCRC,
546*4882a593Smuzhiyun 		MTK_STAR_REG_C_RXARLDROP,
547*4882a593Smuzhiyun 		MTK_STAR_REG_C_RXVLANDROP,
548*4882a593Smuzhiyun 		MTK_STAR_REG_C_RXCSERR,
549*4882a593Smuzhiyun 		MTK_STAR_REG_C_RXPAUSE,
550*4882a593Smuzhiyun 		MTK_STAR_REG_C_TXOKPKT,
551*4882a593Smuzhiyun 		MTK_STAR_REG_C_TXOKBYTE,
552*4882a593Smuzhiyun 		MTK_STAR_REG_C_TXPAUSECOL,
553*4882a593Smuzhiyun 		MTK_STAR_REG_C_TXRTY,
554*4882a593Smuzhiyun 		MTK_STAR_REG_C_TXSKIP,
555*4882a593Smuzhiyun 		MTK_STAR_REG_C_TX_ARP,
556*4882a593Smuzhiyun 		MTK_STAR_REG_C_RX_RERR,
557*4882a593Smuzhiyun 		MTK_STAR_REG_C_RX_UNI,
558*4882a593Smuzhiyun 		MTK_STAR_REG_C_RX_MULTI,
559*4882a593Smuzhiyun 		MTK_STAR_REG_C_RX_BROAD,
560*4882a593Smuzhiyun 		MTK_STAR_REG_C_RX_ALIGNERR,
561*4882a593Smuzhiyun 		MTK_STAR_REG_C_TX_UNI,
562*4882a593Smuzhiyun 		MTK_STAR_REG_C_TX_MULTI,
563*4882a593Smuzhiyun 		MTK_STAR_REG_C_TX_BROAD,
564*4882a593Smuzhiyun 		MTK_STAR_REG_C_TX_TIMEOUT,
565*4882a593Smuzhiyun 		MTK_STAR_REG_C_TX_LATECOL,
566*4882a593Smuzhiyun 		MTK_STAR_REG_C_RX_LENGTHERR,
567*4882a593Smuzhiyun 		MTK_STAR_REG_C_RX_TWIST,
568*4882a593Smuzhiyun 	};
569*4882a593Smuzhiyun 
570*4882a593Smuzhiyun 	unsigned int i, val;
571*4882a593Smuzhiyun 
572*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(counter_regs); i++)
573*4882a593Smuzhiyun 		regmap_read(priv->regs, counter_regs[i], &val);
574*4882a593Smuzhiyun }
575*4882a593Smuzhiyun 
/* Read the hardware counter at @reg and accumulate its value into @stat. */
static void mtk_star_update_stat(struct mtk_star_priv *priv,
				 unsigned int reg, u64 *stat)
{
	unsigned int delta;

	regmap_read(priv->regs, reg, &delta);
	*stat += delta;
}
584*4882a593Smuzhiyun 
585*4882a593Smuzhiyun /* Try to get as many stats as possible from the internal registers instead
586*4882a593Smuzhiyun  * of tracking them ourselves.
587*4882a593Smuzhiyun  */
/* Refresh the cached rtnl_link_stats64 from the hardware MIB counters.
 *
 * NOTE(review): mtk_star_update_stat() adds whatever it reads to the
 * running software counter, which suggests the hardware counters are
 * clear-on-read (the counter-reset helper earlier in this file resets
 * them by simply reading them all) — confirm against the datasheet.
 * The rx_errors summation at the bottom re-adds the *running totals* of
 * the individual error counters on every call, so rx_errors grows
 * faster than the sum of its parts across repeated calls — TODO confirm
 * this is intended.
 */
static void mtk_star_update_stats(struct mtk_star_priv *priv)
{
	struct rtnl_link_stats64 *stats = &priv->stats;

	/* OK packets and bytes. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKPKT, &stats->rx_packets);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKPKT, &stats->tx_packets);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKBYTE, &stats->rx_bytes);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKBYTE, &stats->tx_bytes);

	/* RX & TX multicast. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_MULTI, &stats->multicast);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_MULTI, &stats->multicast);

	/* Collisions. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TXPAUSECOL,
			     &stats->collisions);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_LATECOL,
			     &stats->collisions);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXRUNT, &stats->collisions);

	/* RX Errors. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_LENGTHERR,
			     &stats->rx_length_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXLONG,
			     &stats->rx_over_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXCRC, &stats->rx_crc_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_ALIGNERR,
			     &stats->rx_frame_errors);
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RXDROP,
			     &stats->rx_fifo_errors);
	/* Sum of the general RX error counter + all of the above. */
	mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_RERR, &stats->rx_errors);
	stats->rx_errors += stats->rx_length_errors;
	stats->rx_errors += stats->rx_over_errors;
	stats->rx_errors += stats->rx_crc_errors;
	stats->rx_errors += stats->rx_frame_errors;
	stats->rx_errors += stats->rx_fifo_errors;
}
627*4882a593Smuzhiyun 
mtk_star_alloc_skb(struct net_device * ndev)628*4882a593Smuzhiyun static struct sk_buff *mtk_star_alloc_skb(struct net_device *ndev)
629*4882a593Smuzhiyun {
630*4882a593Smuzhiyun 	uintptr_t tail, offset;
631*4882a593Smuzhiyun 	struct sk_buff *skb;
632*4882a593Smuzhiyun 
633*4882a593Smuzhiyun 	skb = dev_alloc_skb(MTK_STAR_MAX_FRAME_SIZE);
634*4882a593Smuzhiyun 	if (!skb)
635*4882a593Smuzhiyun 		return NULL;
636*4882a593Smuzhiyun 
637*4882a593Smuzhiyun 	/* Align to 16 bytes. */
638*4882a593Smuzhiyun 	tail = (uintptr_t)skb_tail_pointer(skb);
639*4882a593Smuzhiyun 	if (tail & (MTK_STAR_SKB_ALIGNMENT - 1)) {
640*4882a593Smuzhiyun 		offset = tail & (MTK_STAR_SKB_ALIGNMENT - 1);
641*4882a593Smuzhiyun 		skb_reserve(skb, MTK_STAR_SKB_ALIGNMENT - offset);
642*4882a593Smuzhiyun 	}
643*4882a593Smuzhiyun 
644*4882a593Smuzhiyun 	/* Ensure 16-byte alignment of the skb pointer: eth_type_trans() will
645*4882a593Smuzhiyun 	 * extract the Ethernet header (14 bytes) so we need two more bytes.
646*4882a593Smuzhiyun 	 */
647*4882a593Smuzhiyun 	skb_reserve(skb, MTK_STAR_IP_ALIGN);
648*4882a593Smuzhiyun 
649*4882a593Smuzhiyun 	return skb;
650*4882a593Smuzhiyun }
651*4882a593Smuzhiyun 
/* Allocate, DMA-map and attach an skb to every RX descriptor.
 *
 * Returns 0 on success or -ENOMEM on allocation/mapping failure. On
 * failure the slots populated so far are left in place; the caller is
 * expected to release them via mtk_star_free_rx_skbs() (see the
 * err_free_skbs path in mtk_star_enable()).
 */
static int mtk_star_prepare_rx_skbs(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	struct mtk_star_ring *ring = &priv->rx_ring;
	struct device *dev = mtk_star_get_dev(priv);
	struct mtk_star_ring_desc *desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int i;

	for (i = 0; i < MTK_STAR_NUM_RX_DESCS; i++) {
		skb = mtk_star_alloc_skb(ndev);
		if (!skb)
			return -ENOMEM;

		dma_addr = mtk_star_dma_map_rx(priv, skb);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb(skb);
			return -ENOMEM;
		}

		desc = &ring->descs[i];
		desc->data_ptr = dma_addr;
		/* Advertise the available buffer space to the controller. */
		desc->status |= skb_tailroom(skb) & MTK_STAR_DESC_MSK_LEN;
		/* Clearing COWN presumably hands descriptor ownership to the
		 * DMA engine — verify against the hardware documentation.
		 */
		desc->status &= ~MTK_STAR_DESC_BIT_COWN;
		ring->skbs[i] = skb;
		ring->dma_addrs[i] = dma_addr;
	}

	return 0;
}
683*4882a593Smuzhiyun 
684*4882a593Smuzhiyun static void
mtk_star_ring_free_skbs(struct mtk_star_priv * priv,struct mtk_star_ring * ring,void (* unmap_func)(struct mtk_star_priv *,struct mtk_star_ring_desc_data *))685*4882a593Smuzhiyun mtk_star_ring_free_skbs(struct mtk_star_priv *priv, struct mtk_star_ring *ring,
686*4882a593Smuzhiyun 			void (*unmap_func)(struct mtk_star_priv *,
687*4882a593Smuzhiyun 					   struct mtk_star_ring_desc_data *))
688*4882a593Smuzhiyun {
689*4882a593Smuzhiyun 	struct mtk_star_ring_desc_data desc_data;
690*4882a593Smuzhiyun 	int i;
691*4882a593Smuzhiyun 
692*4882a593Smuzhiyun 	for (i = 0; i < MTK_STAR_RING_NUM_DESCS; i++) {
693*4882a593Smuzhiyun 		if (!ring->dma_addrs[i])
694*4882a593Smuzhiyun 			continue;
695*4882a593Smuzhiyun 
696*4882a593Smuzhiyun 		desc_data.dma_addr = ring->dma_addrs[i];
697*4882a593Smuzhiyun 		desc_data.skb = ring->skbs[i];
698*4882a593Smuzhiyun 
699*4882a593Smuzhiyun 		unmap_func(priv, &desc_data);
700*4882a593Smuzhiyun 		dev_kfree_skb(desc_data.skb);
701*4882a593Smuzhiyun 	}
702*4882a593Smuzhiyun }
703*4882a593Smuzhiyun 
mtk_star_free_rx_skbs(struct mtk_star_priv * priv)704*4882a593Smuzhiyun static void mtk_star_free_rx_skbs(struct mtk_star_priv *priv)
705*4882a593Smuzhiyun {
706*4882a593Smuzhiyun 	struct mtk_star_ring *ring = &priv->rx_ring;
707*4882a593Smuzhiyun 
708*4882a593Smuzhiyun 	mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_rx);
709*4882a593Smuzhiyun }
710*4882a593Smuzhiyun 
mtk_star_free_tx_skbs(struct mtk_star_priv * priv)711*4882a593Smuzhiyun static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
712*4882a593Smuzhiyun {
713*4882a593Smuzhiyun 	struct mtk_star_ring *ring = &priv->tx_ring;
714*4882a593Smuzhiyun 
715*4882a593Smuzhiyun 	mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
716*4882a593Smuzhiyun }
717*4882a593Smuzhiyun 
718*4882a593Smuzhiyun /* All processing for TX and RX happens in the napi poll callback.
719*4882a593Smuzhiyun  *
720*4882a593Smuzhiyun  * FIXME: The interrupt handling should be more fine-grained with each
 * interrupt enabled/disabled independently when needed. Unfortunately this
722*4882a593Smuzhiyun  * turned out to impact the driver's stability and until we have something
723*4882a593Smuzhiyun  * working properly, we're disabling all interrupts during TX & RX processing
724*4882a593Smuzhiyun  * or when resetting the counter registers.
725*4882a593Smuzhiyun  */
mtk_star_handle_irq(int irq,void * data)726*4882a593Smuzhiyun static irqreturn_t mtk_star_handle_irq(int irq, void *data)
727*4882a593Smuzhiyun {
728*4882a593Smuzhiyun 	struct mtk_star_priv *priv;
729*4882a593Smuzhiyun 	struct net_device *ndev;
730*4882a593Smuzhiyun 
731*4882a593Smuzhiyun 	ndev = data;
732*4882a593Smuzhiyun 	priv = netdev_priv(ndev);
733*4882a593Smuzhiyun 
734*4882a593Smuzhiyun 	if (netif_running(ndev)) {
735*4882a593Smuzhiyun 		mtk_star_intr_disable(priv);
736*4882a593Smuzhiyun 		napi_schedule(&priv->napi);
737*4882a593Smuzhiyun 	}
738*4882a593Smuzhiyun 
739*4882a593Smuzhiyun 	return IRQ_HANDLED;
740*4882a593Smuzhiyun }
741*4882a593Smuzhiyun 
742*4882a593Smuzhiyun /* Wait for the completion of any previous command - CMD_START bit must be
743*4882a593Smuzhiyun  * cleared by hardware.
744*4882a593Smuzhiyun  */
/* Poll HASH_CTRL until hardware clears CMD_START, i.e. any previous
 * hash-table command has completed. Polls every 10 us, returning
 * -ETIMEDOUT after MTK_STAR_WAIT_TIMEOUT us, 0 on success.
 */
static int mtk_star_hash_wait_cmd_start(struct mtk_star_priv *priv)
{
	unsigned int val;

	return regmap_read_poll_timeout_atomic(priv->regs,
				MTK_STAR_REG_HASH_CTRL, val,
				!(val & MTK_STAR_BIT_HASH_CTRL_CMD_START),
				10, MTK_STAR_WAIT_TIMEOUT);
}
754*4882a593Smuzhiyun 
/* Wait for a hash-table BIST operation to finish and check its result.
 *
 * Returns 0 when BIST_DONE is set and BIST_OK reports success,
 * -ETIMEDOUT if completion never arrives, -EIO on BIST failure.
 */
static int mtk_star_hash_wait_ok(struct mtk_star_priv *priv)
{
	unsigned int val;
	int ret;

	/* Wait for BIST_DONE bit. */
	ret = regmap_read_poll_timeout_atomic(priv->regs,
					MTK_STAR_REG_HASH_CTRL, val,
					val & MTK_STAR_BIT_HASH_CTRL_BIST_DONE,
					10, MTK_STAR_WAIT_TIMEOUT);
	if (ret)
		return ret;

	/* Check the BIST_OK bit. */
	if (!regmap_test_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
			      MTK_STAR_BIT_HASH_CTRL_BIST_OK))
		return -EIO;

	return 0;
}
775*4882a593Smuzhiyun 
mtk_star_set_hashbit(struct mtk_star_priv * priv,unsigned int hash_addr)776*4882a593Smuzhiyun static int mtk_star_set_hashbit(struct mtk_star_priv *priv,
777*4882a593Smuzhiyun 				unsigned int hash_addr)
778*4882a593Smuzhiyun {
779*4882a593Smuzhiyun 	unsigned int val;
780*4882a593Smuzhiyun 	int ret;
781*4882a593Smuzhiyun 
782*4882a593Smuzhiyun 	ret = mtk_star_hash_wait_cmd_start(priv);
783*4882a593Smuzhiyun 	if (ret)
784*4882a593Smuzhiyun 		return ret;
785*4882a593Smuzhiyun 
786*4882a593Smuzhiyun 	val = hash_addr & MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR;
787*4882a593Smuzhiyun 	val |= MTK_STAR_BIT_HASH_CTRL_ACC_CMD;
788*4882a593Smuzhiyun 	val |= MTK_STAR_BIT_HASH_CTRL_CMD_START;
789*4882a593Smuzhiyun 	val |= MTK_STAR_BIT_HASH_CTRL_BIST_EN;
790*4882a593Smuzhiyun 	val |= MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA;
791*4882a593Smuzhiyun 	regmap_write(priv->regs, MTK_STAR_REG_HASH_CTRL, val);
792*4882a593Smuzhiyun 
793*4882a593Smuzhiyun 	return mtk_star_hash_wait_ok(priv);
794*4882a593Smuzhiyun }
795*4882a593Smuzhiyun 
/* Clear the entire multicast hash table via the hash-memory BIST reset.
 *
 * Waits for any pending hash command, enables BIST, triggers the reset
 * bit in TEST1 and waits for completion. Returns 0 or a negative errno.
 */
static int mtk_star_reset_hash_table(struct mtk_star_priv *priv)
{
	int ret;

	ret = mtk_star_hash_wait_cmd_start(priv);
	if (ret)
		return ret;

	regmap_set_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
			MTK_STAR_BIT_HASH_CTRL_BIST_EN);
	regmap_set_bits(priv->regs, MTK_STAR_REG_TEST1,
			MTK_STAR_BIT_TEST1_RST_HASH_MBIST);

	return mtk_star_hash_wait_ok(priv);
}
811*4882a593Smuzhiyun 
mtk_star_phy_config(struct mtk_star_priv * priv)812*4882a593Smuzhiyun static void mtk_star_phy_config(struct mtk_star_priv *priv)
813*4882a593Smuzhiyun {
814*4882a593Smuzhiyun 	unsigned int val;
815*4882a593Smuzhiyun 
816*4882a593Smuzhiyun 	if (priv->speed == SPEED_1000)
817*4882a593Smuzhiyun 		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M;
818*4882a593Smuzhiyun 	else if (priv->speed == SPEED_100)
819*4882a593Smuzhiyun 		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M;
820*4882a593Smuzhiyun 	else
821*4882a593Smuzhiyun 		val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M;
822*4882a593Smuzhiyun 	val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD;
823*4882a593Smuzhiyun 
824*4882a593Smuzhiyun 	val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN;
825*4882a593Smuzhiyun 	val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
826*4882a593Smuzhiyun 	val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
827*4882a593Smuzhiyun 	/* Only full-duplex supported for now. */
828*4882a593Smuzhiyun 	val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
829*4882a593Smuzhiyun 
830*4882a593Smuzhiyun 	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);
831*4882a593Smuzhiyun 
832*4882a593Smuzhiyun 	if (priv->pause) {
833*4882a593Smuzhiyun 		val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
834*4882a593Smuzhiyun 		val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
835*4882a593Smuzhiyun 		val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
836*4882a593Smuzhiyun 	} else {
837*4882a593Smuzhiyun 		val = 0;
838*4882a593Smuzhiyun 	}
839*4882a593Smuzhiyun 
840*4882a593Smuzhiyun 	regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG,
841*4882a593Smuzhiyun 			   MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH |
842*4882a593Smuzhiyun 			   MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val);
843*4882a593Smuzhiyun 
844*4882a593Smuzhiyun 	if (priv->pause) {
845*4882a593Smuzhiyun 		val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
846*4882a593Smuzhiyun 		val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
847*4882a593Smuzhiyun 	} else {
848*4882a593Smuzhiyun 		val = 0;
849*4882a593Smuzhiyun 	}
850*4882a593Smuzhiyun 
851*4882a593Smuzhiyun 	regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG,
852*4882a593Smuzhiyun 			   MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val);
853*4882a593Smuzhiyun }
854*4882a593Smuzhiyun 
mtk_star_adjust_link(struct net_device * ndev)855*4882a593Smuzhiyun static void mtk_star_adjust_link(struct net_device *ndev)
856*4882a593Smuzhiyun {
857*4882a593Smuzhiyun 	struct mtk_star_priv *priv = netdev_priv(ndev);
858*4882a593Smuzhiyun 	struct phy_device *phydev = priv->phydev;
859*4882a593Smuzhiyun 	bool new_state = false;
860*4882a593Smuzhiyun 
861*4882a593Smuzhiyun 	if (phydev->link) {
862*4882a593Smuzhiyun 		if (!priv->link) {
863*4882a593Smuzhiyun 			priv->link = phydev->link;
864*4882a593Smuzhiyun 			new_state = true;
865*4882a593Smuzhiyun 		}
866*4882a593Smuzhiyun 
867*4882a593Smuzhiyun 		if (priv->speed != phydev->speed) {
868*4882a593Smuzhiyun 			priv->speed = phydev->speed;
869*4882a593Smuzhiyun 			new_state = true;
870*4882a593Smuzhiyun 		}
871*4882a593Smuzhiyun 
872*4882a593Smuzhiyun 		if (priv->pause != phydev->pause) {
873*4882a593Smuzhiyun 			priv->pause = phydev->pause;
874*4882a593Smuzhiyun 			new_state = true;
875*4882a593Smuzhiyun 		}
876*4882a593Smuzhiyun 	} else {
877*4882a593Smuzhiyun 		if (priv->link) {
878*4882a593Smuzhiyun 			priv->link = phydev->link;
879*4882a593Smuzhiyun 			new_state = true;
880*4882a593Smuzhiyun 		}
881*4882a593Smuzhiyun 	}
882*4882a593Smuzhiyun 
883*4882a593Smuzhiyun 	if (new_state) {
884*4882a593Smuzhiyun 		if (phydev->link)
885*4882a593Smuzhiyun 			mtk_star_phy_config(priv);
886*4882a593Smuzhiyun 
887*4882a593Smuzhiyun 		phy_print_status(ndev->phydev);
888*4882a593Smuzhiyun 	}
889*4882a593Smuzhiyun }
890*4882a593Smuzhiyun 
mtk_star_init_config(struct mtk_star_priv * priv)891*4882a593Smuzhiyun static void mtk_star_init_config(struct mtk_star_priv *priv)
892*4882a593Smuzhiyun {
893*4882a593Smuzhiyun 	unsigned int val;
894*4882a593Smuzhiyun 
895*4882a593Smuzhiyun 	val = (MTK_STAR_BIT_MII_PAD_OUT_ENABLE |
896*4882a593Smuzhiyun 	       MTK_STAR_BIT_EXT_MDC_MODE |
897*4882a593Smuzhiyun 	       MTK_STAR_BIT_SWC_MII_MODE);
898*4882a593Smuzhiyun 
899*4882a593Smuzhiyun 	regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val);
900*4882a593Smuzhiyun 	regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF,
901*4882a593Smuzhiyun 			   MTK_STAR_MSK_MAC_CLK_CONF,
902*4882a593Smuzhiyun 			   MTK_STAR_BIT_CLK_DIV_10);
903*4882a593Smuzhiyun }
904*4882a593Smuzhiyun 
/* Select RMII in the peripheral-config NIC interface field. */
static void mtk_star_set_mode_rmii(struct mtk_star_priv *priv)
{
	regmap_update_bits(priv->pericfg, MTK_PERICFG_REG_NIC_CFG_CON,
			   MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII,
			   MTK_PERICFG_BIT_NIC_CFG_CON_RMII);
}
911*4882a593Smuzhiyun 
/* Bring the controller up: program the MAC, reset the multicast hash
 * table, set up DMA and the RX ring, request the interrupt, enable NAPI
 * and connect/start the PHY. Called from ndo_open.
 *
 * Returns 0 on success or a negative errno; resources acquired so far
 * are released on the error paths at the bottom.
 */
static int mtk_star_enable(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	unsigned int val;
	int ret;

	/* Quiesce the hardware before reconfiguring it. */
	mtk_star_nic_disable_pd(priv);
	mtk_star_intr_disable(priv);
	mtk_star_dma_stop(priv);

	mtk_star_set_mac_addr(ndev);

	/* Configure the MAC */
	val = MTK_STAR_VAL_MAC_CFG_IPG_96BIT;
	val <<= MTK_STAR_OFF_MAC_CFG_IPG;
	val |= MTK_STAR_BIT_MAC_CFG_MAXLEN_1522;
	val |= MTK_STAR_BIT_MAC_CFG_AUTO_PAD;
	val |= MTK_STAR_BIT_MAC_CFG_CRC_STRIP;
	regmap_write(priv->regs, MTK_STAR_REG_MAC_CFG, val);

	/* Enable Hash Table BIST and reset it */
	ret = mtk_star_reset_hash_table(priv);
	if (ret)
		return ret;

	/* Setup the hashing algorithm */
	regmap_clear_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
			  MTK_STAR_BIT_ARL_CFG_HASH_ALG |
			  MTK_STAR_BIT_ARL_CFG_MISC_MODE);

	/* Don't strip VLAN tags */
	regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
			  MTK_STAR_BIT_MAC_CFG_VLAN_STRIP);

	/* Setup DMA */
	mtk_star_dma_init(priv);

	ret = mtk_star_prepare_rx_skbs(ndev);
	if (ret)
		goto err_out;

	/* Request the interrupt */
	ret = request_irq(ndev->irq, mtk_star_handle_irq,
			  IRQF_TRIGGER_FALLING, ndev->name, ndev);
	if (ret)
		goto err_free_skbs;

	napi_enable(&priv->napi);

	mtk_star_intr_ack_all(priv);
	mtk_star_intr_enable(priv);

	/* Connect to and start PHY */
	priv->phydev = of_phy_connect(ndev, priv->phy_node,
				      mtk_star_adjust_link, 0, priv->phy_intf);
	if (!priv->phydev) {
		netdev_err(ndev, "failed to connect to PHY\n");
		ret = -ENODEV;
		goto err_free_irq;
	}

	mtk_star_dma_start(priv);
	phy_start(priv->phydev);
	netif_start_queue(ndev);

	return 0;

	/* NOTE(review): napi stays enabled on the err_free_irq path; a
	 * retried open would call napi_enable() again — verify this is
	 * handled (or harmless) on this kernel version.
	 */
err_free_irq:
	free_irq(ndev->irq, ndev);
err_free_skbs:
	mtk_star_free_rx_skbs(priv);
err_out:
	return ret;
}
986*4882a593Smuzhiyun 
/* Tear the interface down in reverse order of mtk_star_enable():
 * stop the queue and NAPI, mask/stop interrupts and DMA, detach the
 * PHY, release the IRQ and free all ring buffers. Called from ndo_stop.
 */
static void mtk_star_disable(struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	mtk_star_intr_disable(priv);
	mtk_star_dma_disable(priv);
	mtk_star_intr_ack_all(priv);
	phy_stop(priv->phydev);
	phy_disconnect(priv->phydev);
	free_irq(ndev->irq, ndev);
	mtk_star_free_rx_skbs(priv);
	mtk_star_free_tx_skbs(priv);
}
1002*4882a593Smuzhiyun 
/* ndo_open: thin wrapper around the full bring-up sequence. */
static int mtk_star_netdev_open(struct net_device *ndev)
{
	return mtk_star_enable(ndev);
}
1007*4882a593Smuzhiyun 
/* ndo_stop: thin wrapper around the teardown sequence; always 0. */
static int mtk_star_netdev_stop(struct net_device *ndev)
{
	mtk_star_disable(ndev);

	return 0;
}
1014*4882a593Smuzhiyun 
mtk_star_netdev_ioctl(struct net_device * ndev,struct ifreq * req,int cmd)1015*4882a593Smuzhiyun static int mtk_star_netdev_ioctl(struct net_device *ndev,
1016*4882a593Smuzhiyun 				 struct ifreq *req, int cmd)
1017*4882a593Smuzhiyun {
1018*4882a593Smuzhiyun 	if (!netif_running(ndev))
1019*4882a593Smuzhiyun 		return -EINVAL;
1020*4882a593Smuzhiyun 
1021*4882a593Smuzhiyun 	return phy_mii_ioctl(ndev->phydev, req, cmd);
1022*4882a593Smuzhiyun }
1023*4882a593Smuzhiyun 
/* ndo_start_xmit: DMA-map the skb, push it onto the TX ring under the
 * lock, account it with BQL, stop the queue when the ring fills, then
 * kick the TX DMA engine. Unmappable skbs are dropped and counted.
 *
 * NOTE(review): the return type is int rather than the conventional
 * netdev_tx_t; the returned values are NETDEV_TX_OK in both paths, so
 * behavior matches — consider converting the signature upstream.
 */
static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
				      struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	struct mtk_star_ring *ring = &priv->tx_ring;
	struct device *dev = mtk_star_get_dev(priv);
	struct mtk_star_ring_desc_data desc_data;

	desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
	if (dma_mapping_error(dev, desc_data.dma_addr))
		goto err_drop_packet;

	desc_data.skb = skb;
	desc_data.len = skb->len;

	/* The ring and queue state are shared with the completion path. */
	spin_lock_bh(&priv->lock);

	mtk_star_ring_push_head_tx(ring, &desc_data);

	netdev_sent_queue(ndev, skb->len);

	if (mtk_star_ring_full(ring))
		netif_stop_queue(ndev);

	spin_unlock_bh(&priv->lock);

	mtk_star_dma_resume_tx(priv);

	return NETDEV_TX_OK;

err_drop_packet:
	dev_kfree_skb(skb);
	ndev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
1059*4882a593Smuzhiyun 
1060*4882a593Smuzhiyun /* Returns the number of bytes sent or a negative number on the first
1061*4882a593Smuzhiyun  * descriptor owned by DMA.
1062*4882a593Smuzhiyun  */
mtk_star_tx_complete_one(struct mtk_star_priv * priv)1063*4882a593Smuzhiyun static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
1064*4882a593Smuzhiyun {
1065*4882a593Smuzhiyun 	struct mtk_star_ring *ring = &priv->tx_ring;
1066*4882a593Smuzhiyun 	struct mtk_star_ring_desc_data desc_data;
1067*4882a593Smuzhiyun 	int ret;
1068*4882a593Smuzhiyun 
1069*4882a593Smuzhiyun 	ret = mtk_star_ring_pop_tail(ring, &desc_data);
1070*4882a593Smuzhiyun 	if (ret)
1071*4882a593Smuzhiyun 		return ret;
1072*4882a593Smuzhiyun 
1073*4882a593Smuzhiyun 	mtk_star_dma_unmap_tx(priv, &desc_data);
1074*4882a593Smuzhiyun 	ret = desc_data.skb->len;
1075*4882a593Smuzhiyun 	dev_kfree_skb_irq(desc_data.skb);
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun 	return ret;
1078*4882a593Smuzhiyun }
1079*4882a593Smuzhiyun 
mtk_star_tx_complete_all(struct mtk_star_priv * priv)1080*4882a593Smuzhiyun static void mtk_star_tx_complete_all(struct mtk_star_priv *priv)
1081*4882a593Smuzhiyun {
1082*4882a593Smuzhiyun 	struct mtk_star_ring *ring = &priv->tx_ring;
1083*4882a593Smuzhiyun 	struct net_device *ndev = priv->ndev;
1084*4882a593Smuzhiyun 	int ret, pkts_compl, bytes_compl;
1085*4882a593Smuzhiyun 	bool wake = false;
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun 	spin_lock(&priv->lock);
1088*4882a593Smuzhiyun 
1089*4882a593Smuzhiyun 	for (pkts_compl = 0, bytes_compl = 0;;
1090*4882a593Smuzhiyun 	     pkts_compl++, bytes_compl += ret, wake = true) {
1091*4882a593Smuzhiyun 		if (!mtk_star_ring_descs_available(ring))
1092*4882a593Smuzhiyun 			break;
1093*4882a593Smuzhiyun 
1094*4882a593Smuzhiyun 		ret = mtk_star_tx_complete_one(priv);
1095*4882a593Smuzhiyun 		if (ret < 0)
1096*4882a593Smuzhiyun 			break;
1097*4882a593Smuzhiyun 	}
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun 	netdev_completed_queue(ndev, pkts_compl, bytes_compl);
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun 	if (wake && netif_queue_stopped(ndev))
1102*4882a593Smuzhiyun 		netif_wake_queue(ndev);
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun 	spin_unlock(&priv->lock);
1105*4882a593Smuzhiyun }
1106*4882a593Smuzhiyun 
mtk_star_netdev_get_stats64(struct net_device * ndev,struct rtnl_link_stats64 * stats)1107*4882a593Smuzhiyun static void mtk_star_netdev_get_stats64(struct net_device *ndev,
1108*4882a593Smuzhiyun 					struct rtnl_link_stats64 *stats)
1109*4882a593Smuzhiyun {
1110*4882a593Smuzhiyun 	struct mtk_star_priv *priv = netdev_priv(ndev);
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun 	mtk_star_update_stats(priv);
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun 	memcpy(stats, &priv->stats, sizeof(*stats));
1115*4882a593Smuzhiyun }
1116*4882a593Smuzhiyun 
mtk_star_set_rx_mode(struct net_device * ndev)1117*4882a593Smuzhiyun static void mtk_star_set_rx_mode(struct net_device *ndev)
1118*4882a593Smuzhiyun {
1119*4882a593Smuzhiyun 	struct mtk_star_priv *priv = netdev_priv(ndev);
1120*4882a593Smuzhiyun 	struct netdev_hw_addr *hw_addr;
1121*4882a593Smuzhiyun 	unsigned int hash_addr, i;
1122*4882a593Smuzhiyun 	int ret;
1123*4882a593Smuzhiyun 
1124*4882a593Smuzhiyun 	if (ndev->flags & IFF_PROMISC) {
1125*4882a593Smuzhiyun 		regmap_set_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
1126*4882a593Smuzhiyun 				MTK_STAR_BIT_ARL_CFG_MISC_MODE);
1127*4882a593Smuzhiyun 	} else if (netdev_mc_count(ndev) > MTK_STAR_HASHTABLE_MC_LIMIT ||
1128*4882a593Smuzhiyun 		   ndev->flags & IFF_ALLMULTI) {
1129*4882a593Smuzhiyun 		for (i = 0; i < MTK_STAR_HASHTABLE_SIZE_MAX; i++) {
1130*4882a593Smuzhiyun 			ret = mtk_star_set_hashbit(priv, i);
1131*4882a593Smuzhiyun 			if (ret)
1132*4882a593Smuzhiyun 				goto hash_fail;
1133*4882a593Smuzhiyun 		}
1134*4882a593Smuzhiyun 	} else {
1135*4882a593Smuzhiyun 		/* Clear previous settings. */
1136*4882a593Smuzhiyun 		ret = mtk_star_reset_hash_table(priv);
1137*4882a593Smuzhiyun 		if (ret)
1138*4882a593Smuzhiyun 			goto hash_fail;
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun 		netdev_for_each_mc_addr(hw_addr, ndev) {
1141*4882a593Smuzhiyun 			hash_addr = (hw_addr->addr[0] & 0x01) << 8;
1142*4882a593Smuzhiyun 			hash_addr += hw_addr->addr[5];
1143*4882a593Smuzhiyun 			ret = mtk_star_set_hashbit(priv, hash_addr);
1144*4882a593Smuzhiyun 			if (ret)
1145*4882a593Smuzhiyun 				goto hash_fail;
1146*4882a593Smuzhiyun 		}
1147*4882a593Smuzhiyun 	}
1148*4882a593Smuzhiyun 
1149*4882a593Smuzhiyun 	return;
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun hash_fail:
1152*4882a593Smuzhiyun 	if (ret == -ETIMEDOUT)
1153*4882a593Smuzhiyun 		netdev_err(ndev, "setting hash bit timed out\n");
1154*4882a593Smuzhiyun 	else
1155*4882a593Smuzhiyun 		/* Should be -EIO */
1156*4882a593Smuzhiyun 		netdev_err(ndev, "unable to set hash bit");
1157*4882a593Smuzhiyun }
1158*4882a593Smuzhiyun 
/* net_device operations implemented by this driver. */
static const struct net_device_ops mtk_star_netdev_ops = {
	.ndo_open		= mtk_star_netdev_open,
	.ndo_stop		= mtk_star_netdev_stop,
	.ndo_start_xmit		= mtk_star_netdev_start_xmit,
	.ndo_get_stats64	= mtk_star_netdev_get_stats64,
	.ndo_set_rx_mode	= mtk_star_set_rx_mode,
	.ndo_do_ioctl		= mtk_star_netdev_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
1169*4882a593Smuzhiyun 
/* ethtool .get_drvinfo: report only the driver name. */
static void mtk_star_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
}
1175*4882a593Smuzhiyun 
/* TODO Add ethtool stats. */
/* ethtool operations; link settings are delegated to phylib. */
static const struct ethtool_ops mtk_star_ethtool_ops = {
	.get_drvinfo		= mtk_star_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
1183*4882a593Smuzhiyun 
/* Receive a single packet from the RX ring.
 *
 * Pops the tail descriptor, passes a good frame up the stack and
 * refills the slot with a freshly allocated skb. On any error (bad
 * frame, allocation or mapping failure) the current skb is recycled
 * back into the ring so the ring never loses a buffer.
 *
 * Returns 0 when a descriptor was processed, -1 when the ring tail is
 * still owned by DMA (no packet available).
 */
static int mtk_star_receive_packet(struct mtk_star_priv *priv)
{
	struct mtk_star_ring *ring = &priv->rx_ring;
	struct device *dev = mtk_star_get_dev(priv);
	struct mtk_star_ring_desc_data desc_data;
	struct net_device *ndev = priv->ndev;
	struct sk_buff *curr_skb, *new_skb;
	dma_addr_t new_dma_addr;
	int ret;

	spin_lock(&priv->lock);
	ret = mtk_star_ring_pop_tail(ring, &desc_data);
	spin_unlock(&priv->lock);
	if (ret)
		return -1;

	curr_skb = desc_data.skb;

	if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
	    (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
		/* Error packet -> drop and reuse skb. */
		new_skb = curr_skb;
		goto push_new_skb;
	}

	/* Prepare new skb before receiving the current one. Reuse the current
	 * skb if we fail at any point.
	 */
	new_skb = mtk_star_alloc_skb(ndev);
	if (!new_skb) {
		ndev->stats.rx_dropped++;
		new_skb = curr_skb;
		goto push_new_skb;
	}

	new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
	if (dma_mapping_error(dev, new_dma_addr)) {
		ndev->stats.rx_dropped++;
		dev_kfree_skb(new_skb);
		new_skb = curr_skb;
		netdev_err(ndev, "DMA mapping error of RX descriptor\n");
		goto push_new_skb;
	}

	/* We can't fail anymore at this point: it's safe to unmap the skb. */
	mtk_star_dma_unmap_rx(priv, &desc_data);

	skb_put(desc_data.skb, desc_data.len);
	desc_data.skb->ip_summed = CHECKSUM_NONE;
	desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
	desc_data.skb->dev = ndev;
	netif_receive_skb(desc_data.skb);

	/* update dma_addr for new skb */
	desc_data.dma_addr = new_dma_addr;

push_new_skb:
	/* On the reuse paths new_skb is the original (untouched) skb, so
	 * its tailroom and mapping are still valid.
	 */
	desc_data.len = skb_tailroom(new_skb);
	desc_data.skb = new_skb;

	spin_lock(&priv->lock);
	mtk_star_ring_push_head_rx(ring, &desc_data);
	spin_unlock(&priv->lock);

	return 0;
}
1250*4882a593Smuzhiyun 
/* Receive up to @budget packets from the RX ring.
 *
 * Returns the number of packets actually processed. The previous loop
 * incremented the counter even on the iteration in which
 * mtk_star_receive_packet() failed, over-reporting by one on the error
 * path; the count returned here feeds napi_complete_done() and must be
 * exact.
 */
static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget)
{
	int received = 0;

	while (received < budget) {
		if (mtk_star_receive_packet(priv))
			break;

		received++;
	}

	/* Kick the RX DMA engine in case it stalled on a full ring. */
	mtk_star_dma_resume_rx(priv);

	return received;
}
1262*4882a593Smuzhiyun 
mtk_star_poll(struct napi_struct * napi,int budget)1263*4882a593Smuzhiyun static int mtk_star_poll(struct napi_struct *napi, int budget)
1264*4882a593Smuzhiyun {
1265*4882a593Smuzhiyun 	struct mtk_star_priv *priv;
1266*4882a593Smuzhiyun 	unsigned int status;
1267*4882a593Smuzhiyun 	int received = 0;
1268*4882a593Smuzhiyun 
1269*4882a593Smuzhiyun 	priv = container_of(napi, struct mtk_star_priv, napi);
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun 	status = mtk_star_intr_read(priv);
1272*4882a593Smuzhiyun 	mtk_star_intr_ack_all(priv);
1273*4882a593Smuzhiyun 
1274*4882a593Smuzhiyun 	if (status & MTK_STAR_BIT_INT_STS_TNTC)
1275*4882a593Smuzhiyun 		/* Clean-up all TX descriptors. */
1276*4882a593Smuzhiyun 		mtk_star_tx_complete_all(priv);
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun 	if (status & MTK_STAR_BIT_INT_STS_FNRC)
1279*4882a593Smuzhiyun 		/* Receive up to $budget packets. */
1280*4882a593Smuzhiyun 		received = mtk_star_process_rx(priv, budget);
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun 	if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
1283*4882a593Smuzhiyun 		mtk_star_update_stats(priv);
1284*4882a593Smuzhiyun 		mtk_star_reset_counters(priv);
1285*4882a593Smuzhiyun 	}
1286*4882a593Smuzhiyun 
1287*4882a593Smuzhiyun 	if (received < budget)
1288*4882a593Smuzhiyun 		napi_complete_done(napi, received);
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun 	mtk_star_intr_enable(priv);
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun 	return received;
1293*4882a593Smuzhiyun }
1294*4882a593Smuzhiyun 
mtk_star_mdio_rwok_clear(struct mtk_star_priv * priv)1295*4882a593Smuzhiyun static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
1296*4882a593Smuzhiyun {
1297*4882a593Smuzhiyun 	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0,
1298*4882a593Smuzhiyun 		     MTK_STAR_BIT_PHY_CTRL0_RWOK);
1299*4882a593Smuzhiyun }
1300*4882a593Smuzhiyun 
mtk_star_mdio_rwok_wait(struct mtk_star_priv * priv)1301*4882a593Smuzhiyun static int mtk_star_mdio_rwok_wait(struct mtk_star_priv *priv)
1302*4882a593Smuzhiyun {
1303*4882a593Smuzhiyun 	unsigned int val;
1304*4882a593Smuzhiyun 
1305*4882a593Smuzhiyun 	return regmap_read_poll_timeout(priv->regs, MTK_STAR_REG_PHY_CTRL0,
1306*4882a593Smuzhiyun 					val, val & MTK_STAR_BIT_PHY_CTRL0_RWOK,
1307*4882a593Smuzhiyun 					10, MTK_STAR_WAIT_TIMEOUT);
1308*4882a593Smuzhiyun }
1309*4882a593Smuzhiyun 
mtk_star_mdio_read(struct mii_bus * mii,int phy_id,int regnum)1310*4882a593Smuzhiyun static int mtk_star_mdio_read(struct mii_bus *mii, int phy_id, int regnum)
1311*4882a593Smuzhiyun {
1312*4882a593Smuzhiyun 	struct mtk_star_priv *priv = mii->priv;
1313*4882a593Smuzhiyun 	unsigned int val, data;
1314*4882a593Smuzhiyun 	int ret;
1315*4882a593Smuzhiyun 
1316*4882a593Smuzhiyun 	if (regnum & MII_ADDR_C45)
1317*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1318*4882a593Smuzhiyun 
1319*4882a593Smuzhiyun 	mtk_star_mdio_rwok_clear(priv);
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun 	val = (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG);
1322*4882a593Smuzhiyun 	val &= MTK_STAR_MSK_PHY_CTRL0_PREG;
1323*4882a593Smuzhiyun 	val |= MTK_STAR_BIT_PHY_CTRL0_RDCMD;
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun 	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun 	ret = mtk_star_mdio_rwok_wait(priv);
1328*4882a593Smuzhiyun 	if (ret)
1329*4882a593Smuzhiyun 		return ret;
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun 	regmap_read(priv->regs, MTK_STAR_REG_PHY_CTRL0, &data);
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun 	data &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
1334*4882a593Smuzhiyun 	data >>= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun 	return data;
1337*4882a593Smuzhiyun }
1338*4882a593Smuzhiyun 
/* Write @data to an MII register over the built-in MDIO interface.
 *
 * As with the read path, @phy_id is not encoded in the command word.
 * Returns 0 on success or a negative errno.
 */
static int mtk_star_mdio_write(struct mii_bus *mii, int phy_id,
			       int regnum, u16 data)
{
	struct mtk_star_priv *priv = mii->priv;
	unsigned int cmd;

	/* Clause 45 transactions are not supported by this controller. */
	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	mtk_star_mdio_rwok_clear(priv);

	/* Build the command word: data field, register number, write bit. */
	cmd = ((unsigned int)data << MTK_STAR_OFF_PHY_CTRL0_RWDATA) &
	      MTK_STAR_MSK_PHY_CTRL0_RWDATA;
	cmd |= (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG) &
	       MTK_STAR_MSK_PHY_CTRL0_PREG;
	cmd |= MTK_STAR_BIT_PHY_CTRL0_WTCMD;

	regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, cmd);

	return mtk_star_mdio_rwok_wait(priv);
}
1362*4882a593Smuzhiyun 
mtk_star_mdio_init(struct net_device * ndev)1363*4882a593Smuzhiyun static int mtk_star_mdio_init(struct net_device *ndev)
1364*4882a593Smuzhiyun {
1365*4882a593Smuzhiyun 	struct mtk_star_priv *priv = netdev_priv(ndev);
1366*4882a593Smuzhiyun 	struct device *dev = mtk_star_get_dev(priv);
1367*4882a593Smuzhiyun 	struct device_node *of_node, *mdio_node;
1368*4882a593Smuzhiyun 	int ret;
1369*4882a593Smuzhiyun 
1370*4882a593Smuzhiyun 	of_node = dev->of_node;
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun 	mdio_node = of_get_child_by_name(of_node, "mdio");
1373*4882a593Smuzhiyun 	if (!mdio_node)
1374*4882a593Smuzhiyun 		return -ENODEV;
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun 	if (!of_device_is_available(mdio_node)) {
1377*4882a593Smuzhiyun 		ret = -ENODEV;
1378*4882a593Smuzhiyun 		goto out_put_node;
1379*4882a593Smuzhiyun 	}
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun 	priv->mii = devm_mdiobus_alloc(dev);
1382*4882a593Smuzhiyun 	if (!priv->mii) {
1383*4882a593Smuzhiyun 		ret = -ENOMEM;
1384*4882a593Smuzhiyun 		goto out_put_node;
1385*4882a593Smuzhiyun 	}
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun 	snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
1388*4882a593Smuzhiyun 	priv->mii->name = "mtk-mac-mdio";
1389*4882a593Smuzhiyun 	priv->mii->parent = dev;
1390*4882a593Smuzhiyun 	priv->mii->read = mtk_star_mdio_read;
1391*4882a593Smuzhiyun 	priv->mii->write = mtk_star_mdio_write;
1392*4882a593Smuzhiyun 	priv->mii->priv = priv;
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun 	ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node);
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun out_put_node:
1397*4882a593Smuzhiyun 	of_node_put(mdio_node);
1398*4882a593Smuzhiyun 	return ret;
1399*4882a593Smuzhiyun }
1400*4882a593Smuzhiyun 
mtk_star_suspend(struct device * dev)1401*4882a593Smuzhiyun static __maybe_unused int mtk_star_suspend(struct device *dev)
1402*4882a593Smuzhiyun {
1403*4882a593Smuzhiyun 	struct mtk_star_priv *priv;
1404*4882a593Smuzhiyun 	struct net_device *ndev;
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	ndev = dev_get_drvdata(dev);
1407*4882a593Smuzhiyun 	priv = netdev_priv(ndev);
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 	if (netif_running(ndev))
1410*4882a593Smuzhiyun 		mtk_star_disable(ndev);
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun 	clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun 	return 0;
1415*4882a593Smuzhiyun }
1416*4882a593Smuzhiyun 
mtk_star_resume(struct device * dev)1417*4882a593Smuzhiyun static __maybe_unused int mtk_star_resume(struct device *dev)
1418*4882a593Smuzhiyun {
1419*4882a593Smuzhiyun 	struct mtk_star_priv *priv;
1420*4882a593Smuzhiyun 	struct net_device *ndev;
1421*4882a593Smuzhiyun 	int ret;
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun 	ndev = dev_get_drvdata(dev);
1424*4882a593Smuzhiyun 	priv = netdev_priv(ndev);
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun 	ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
1427*4882a593Smuzhiyun 	if (ret)
1428*4882a593Smuzhiyun 		return ret;
1429*4882a593Smuzhiyun 
1430*4882a593Smuzhiyun 	if (netif_running(ndev)) {
1431*4882a593Smuzhiyun 		ret = mtk_star_enable(ndev);
1432*4882a593Smuzhiyun 		if (ret)
1433*4882a593Smuzhiyun 			clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
1434*4882a593Smuzhiyun 	}
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 	return ret;
1437*4882a593Smuzhiyun }
1438*4882a593Smuzhiyun 
/* devm action callback registered in probe: releases the bulk clocks
 * when the device is unbound or a later probe step fails.
 */
static void mtk_star_clk_disable_unprepare(void *data)
{
	struct mtk_star_priv *priv = data;

	clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
}
1445*4882a593Smuzhiyun 
/* Probe the STAR EMAC: map registers, acquire clocks, validate the
 * device-tree PHY description, allocate coherent DMA ring memory,
 * register the MDIO bus and finally the network device.
 *
 * Every resource is devm-managed, which is why the driver has no
 * remove() callback. The ordering below matters: clocks must be running
 * before the NIC registers are touched (mtk_star_nic_disable_pd() and
 * later), and the devm clock-release action must be registered right
 * after clk_bulk_prepare_enable() so any subsequent failure unwinds it.
 */
static int mtk_star_probe(struct platform_device *pdev)
{
	struct device_node *of_node;
	struct mtk_star_priv *priv;
	struct net_device *ndev;
	struct device *dev;
	void __iomem *base;
	int ret, i;

	dev = &pdev->dev;
	of_node = dev->of_node;

	ndev = devm_alloc_etherdev(dev, sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, ndev);

	ndev->min_mtu = ETH_ZLEN;
	ndev->max_mtu = MTK_STAR_MAX_FRAME_SIZE;

	spin_lock_init(&priv->lock);

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* We won't be checking the return values of regmap read & write
	 * functions. They can only fail for mmio if there's a clock attached
	 * to regmap which is not the case here.
	 */
	priv->regs = devm_regmap_init_mmio(dev, base,
					   &mtk_star_regmap_config);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	/* The PERICFG syscon is needed later to switch the MAC into RMII. */
	priv->pericfg = syscon_regmap_lookup_by_phandle(of_node,
							"mediatek,pericfg");
	if (IS_ERR(priv->pericfg)) {
		dev_err(dev, "Failed to lookup the PERICFG syscon\n");
		return PTR_ERR(priv->pericfg);
	}

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0)
		return ndev->irq;

	for (i = 0; i < MTK_STAR_NCLKS; i++)
		priv->clks[i].id = mtk_star_clk_names[i];
	ret = devm_clk_bulk_get(dev, MTK_STAR_NCLKS, priv->clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
	if (ret)
		return ret;

	/* Guarantee the clocks are released again on any later failure. */
	ret = devm_add_action_or_reset(dev,
				       mtk_star_clk_disable_unprepare, priv);
	if (ret)
		return ret;

	/* Only the RMII interface mode is supported by this controller. */
	ret = of_get_phy_mode(of_node, &priv->phy_intf);
	if (ret) {
		return ret;
	} else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII) {
		dev_err(dev, "unsupported phy mode: %s\n",
			phy_modes(priv->phy_intf));
		return -EINVAL;
	}

	priv->phy_node = of_parse_phandle(of_node, "phy-handle", 0);
	if (!priv->phy_node) {
		dev_err(dev, "failed to retrieve the phy handle from device tree\n");
		return -ENODEV;
	}

	mtk_star_set_mode_rmii(priv);

	/* Descriptor rings must be reachable through 32-bit DMA addresses. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "unsupported DMA mask\n");
		return ret;
	}

	priv->ring_base = dmam_alloc_coherent(dev, MTK_STAR_DMA_SIZE,
					      &priv->dma_addr,
					      GFP_KERNEL | GFP_DMA);
	if (!priv->ring_base)
		return -ENOMEM;

	mtk_star_nic_disable_pd(priv);
	mtk_star_init_config(priv);

	ret = mtk_star_mdio_init(ndev);
	if (ret)
		return ret;

	/* Fall back to a random MAC address if firmware provides none. */
	ret = eth_platform_get_mac_address(dev, ndev->dev_addr);
	if (ret || !is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	ndev->netdev_ops = &mtk_star_netdev_ops;
	ndev->ethtool_ops = &mtk_star_ethtool_ops;

	netif_napi_add(ndev, &priv->napi, mtk_star_poll, MTK_STAR_NAPI_WEIGHT);

	return devm_register_netdev(dev, ndev);
}
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun static const struct of_device_id mtk_star_of_match[] = {
1560*4882a593Smuzhiyun 	{ .compatible = "mediatek,mt8516-eth", },
1561*4882a593Smuzhiyun 	{ .compatible = "mediatek,mt8518-eth", },
1562*4882a593Smuzhiyun 	{ .compatible = "mediatek,mt8175-eth", },
1563*4882a593Smuzhiyun 	{ }
1564*4882a593Smuzhiyun };
1565*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, mtk_star_of_match);
1566*4882a593Smuzhiyun 
1567*4882a593Smuzhiyun static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops,
1568*4882a593Smuzhiyun 			 mtk_star_suspend, mtk_star_resume);
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun static struct platform_driver mtk_star_driver = {
1571*4882a593Smuzhiyun 	.driver = {
1572*4882a593Smuzhiyun 		.name = MTK_STAR_DRVNAME,
1573*4882a593Smuzhiyun 		.pm = &mtk_star_pm_ops,
1574*4882a593Smuzhiyun 		.of_match_table = of_match_ptr(mtk_star_of_match),
1575*4882a593Smuzhiyun 	},
1576*4882a593Smuzhiyun 	.probe = mtk_star_probe,
1577*4882a593Smuzhiyun };
1578*4882a593Smuzhiyun module_platform_driver(mtk_star_driver);
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
1581*4882a593Smuzhiyun MODULE_DESCRIPTION("Mediatek STAR Ethernet MAC Driver");
1582*4882a593Smuzhiyun MODULE_LICENSE("GPL");
1583