// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>

#include "mtk_eth_soc.h"

static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
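
/* Map an ethtool statistics name to its offset, in units of u64 words,
 * within struct mtk_hw_stats.
 */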
#define MTK_ETHTOOL_STAT(x) { #x, \
			     offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
	"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
	"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
	"sgmii_ck", "eth2pll",
};
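
/* Register accessors: raw MMIO reads and writes into the frame engine
 * register window. mtk_m32() is an unlocked read-modify-write; callers
 * must serialize concurrent updates to the same register themselves.
 */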
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
{
	u32 val;

	val = mtk_r32(eth, reg);
	val &= ~mask;
	val |= set;
	mtk_w32(eth, val, reg);

	/* return the value that was written, not the register offset */
	return val;
}
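
/* MDIO access goes through the PHY indirect access control register
 * (PHY_IAC): poll the busy bit, issue a clause-22 read or write
 * command, then poll again before the result (or completion) is valid.
 */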
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		usleep_range(10, 20);
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -ETIMEDOUT;
}

static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
			   u32 phy_register, u32 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -EBUSY;

	write_data &= 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
		(phy_register << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return -EBUSY;

	return 0;
}

static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
		(phy_reg << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT),
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}
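
/* TRGMII/RGMII clock setup for GMAC0. On MT7621 the TRGMII clock
 * appears to be derived from the DDR PLL, so only a mux in
 * ETHSYS_CLKCFG0 is switched; other SoCs program the TRGPLL rate and
 * the RX/TX clock controls directly.
 */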
static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
				     phy_interface_t interface)
{
	u32 val;

	/* Check DDR memory type.
	 * Currently TRGMII mode with DDR2 memory is not supported.
	 */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
	if (interface == PHY_INTERFACE_MODE_TRGMII &&
	    val & SYSCFG_DRAM_TYPE_DDR2) {
		dev_err(eth->dev,
			"TRGMII mode with DDR2 memory is not supported!\n");
		return -EOPNOTSUPP;
	}

	val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
		ETHSYS_TRGMII_MT7621_DDR_PLL : 0;

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_MT7621_MASK, val);

	return 0;
}

static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
				   phy_interface_t interface, int speed)
{
	u32 val;
	int ret;

	if (interface == PHY_INTERFACE_MODE_TRGMII) {
		mtk_w32(eth, TRGMII_MODE, INTF_MODE);
		val = 500000000;
		ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
		if (ret)
			dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
		return;
	}

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}
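
/* phylink .mac_config callback: route the SoC pads/paths for the
 * requested PHY interface mode, program the per-MAC GE mode bits in
 * ETHSYS_SYSCFG0, bring up the SGMII block where needed, and finally
 * refresh the MAC control register defaults.
 */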
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	struct mtk_eth *eth = mac->hw;
	u32 mcr_cur, mcr_new, sid, i;
	int val, ge_mode, err = 0;

	/* MT76x8 has no hardware settings for the MAC */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
	    mac->interface != state->interface) {
		/* Setup soc pin functions */
		switch (state->interface) {
		case PHY_INTERFACE_MODE_TRGMII:
			if (mac->id)
				goto err_phy;
			if (!MTK_HAS_CAPS(mac->hw->soc->caps,
					  MTK_GMAC1_TRGMII))
				goto err_phy;
			fallthrough;
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_REVMII:
		case PHY_INTERFACE_MODE_RMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
				err = mtk_gmac_rgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_1000BASEX:
		case PHY_INTERFACE_MODE_2500BASEX:
		case PHY_INTERFACE_MODE_SGMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
				err = mtk_gmac_sgmii_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		case PHY_INTERFACE_MODE_GMII:
			if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
				err = mtk_gmac_gephy_path_setup(eth, mac->id);
				if (err)
					goto init_err;
			}
			break;
		default:
			goto err_phy;
		}

		/* Setup clock for 1st gmac */
		if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
		    !phy_interface_mode_is_8023z(state->interface) &&
		    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
			if (MTK_HAS_CAPS(mac->hw->soc->caps,
					 MTK_TRGMII_MT7621_CLK)) {
				if (mt7621_gmac0_rgmii_adjust(mac->hw,
							      state->interface))
					goto err_phy;
			} else {
				mtk_gmac0_rgmii_adjust(mac->hw,
						       state->interface,
						       state->speed);

				/* mt7623_pad_clk_setup */
				for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
					mtk_w32(mac->hw,
						TD_DM_DRVP(8) | TD_DM_DRVN(8),
						TRGMII_TD_ODT(i));

				/* Assert/release MT7623 RXC reset */
				mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
					TRGMII_RCK_CTRL);
				mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
			}
		}

		ge_mode = 0;
		switch (state->interface) {
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_GMII:
			ge_mode = 1;
			break;
		case PHY_INTERFACE_MODE_REVMII:
			ge_mode = 2;
			break;
		case PHY_INTERFACE_MODE_RMII:
			if (mac->id)
				goto err_phy;
			ge_mode = 3;
			break;
		default:
			break;
		}

		/* put the gmac into the right mode */
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
		val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

		mac->interface = state->interface;
	}

	/* SGMII */
	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface)) {
		/* The path from GMAC to SGMII will be enabled once the
		 * SGMIISYS setup is done.
		 */
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK,
				   ~(u32)SYSCFG0_SGMII_MASK);

		/* Decide how GMAC and SGMIISYS are mapped */
		sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
		       0 : mac->id;

		/* Setup SGMIISYS with the determined property */
		if (state->interface != PHY_INTERFACE_MODE_SGMII)
			err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
							 state);
		else if (phylink_autoneg_inband(mode))
			err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);

		if (err)
			goto init_err;

		regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
				   SYSCFG0_SGMII_MASK, val);
	} else if (phylink_autoneg_inband(mode)) {
		dev_err(eth->dev,
			"In-band mode not supported in non SGMII mode!\n");
		return;
	}

	/* Setup gmac */
	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
	mcr_new = mcr_cur;
	mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;

	/* Only update control register when needed! */
	if (mcr_new != mcr_cur)
		mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));

	return;

err_phy:
	dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
		mac->id, phy_modes(state->interface));
	return;

init_err:
	dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
		mac->id, phy_modes(state->interface), err);
}
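
/* phylink .mac_pcs_get_state callback: decode link, duplex, speed and
 * pause state from the MAC status register (MSR).
 */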
static void mtk_mac_pcs_get_state(struct phylink_config *config,
				  struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));

	state->link = (pmsr & MAC_MSR_LINK);
	state->duplex = (pmsr & MAC_MSR_DPX) >> 1;

	switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
	case 0:
		state->speed = SPEED_10;
		break;
	case MAC_MSR_SPEED_100:
		state->speed = SPEED_100;
		break;
	case MAC_MSR_SPEED_1000:
		state->speed = SPEED_1000;
		break;
	default:
		state->speed = SPEED_UNKNOWN;
		break;
	}

	state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
	if (pmsr & MAC_MSR_RX_FC)
		state->pause |= MLO_PAUSE_RX;
	if (pmsr & MAC_MSR_TX_FC)
		state->pause |= MLO_PAUSE_TX;
}

static void mtk_mac_an_restart(struct phylink_config *config)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);

	mtk_sgmii_restart_an(mac->hw, mac->id);
}

static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));

	mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}

static void mtk_mac_link_up(struct phylink_config *config,
			    struct phy_device *phy,
			    unsigned int mode, phy_interface_t interface,
			    int speed, int duplex, bool tx_pause, bool rx_pause)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));

	mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
		 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
		 MAC_MCR_FORCE_RX_FC);

	/* Configure speed */
	switch (speed) {
	case SPEED_2500:
	case SPEED_1000:
		mcr |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr |= MAC_MCR_SPEED_100;
		break;
	}

	/* Configure duplex */
	if (duplex == DUPLEX_FULL)
		mcr |= MAC_MCR_FORCE_DPX;

	/* Configure pause modes - phylink will avoid these for half duplex */
	if (tx_pause)
		mcr |= MAC_MCR_FORCE_TX_FC;
	if (rx_pause)
		mcr |= MAC_MCR_FORCE_RX_FC;

	mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
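
/* phylink .validate callback: reject interface modes the SoC cannot
 * route to this MAC, then restrict the supported and advertised link
 * modes to what the selected interface can carry.
 */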
static void mtk_validate(struct phylink_config *config,
			 unsigned long *supported,
			 struct phylink_link_state *state)
{
	struct mtk_mac *mac = container_of(config, struct mtk_mac,
					   phylink_config);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
	      phy_interface_mode_is_rgmii(state->interface)) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
	      !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
	    !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
	      (state->interface == PHY_INTERFACE_MODE_SGMII ||
	       phy_interface_mode_is_8023z(state->interface)))) {
		linkmode_zero(supported);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_TRGMII:
		phylink_set(mask, 1000baseT_Full);
		break;
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseX_Full);
		break;
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phylink_set(mask, 1000baseT_Half);
		fallthrough;
	case PHY_INTERFACE_MODE_SGMII:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		fallthrough;
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RMII:
	case PHY_INTERFACE_MODE_REVMII:
	case PHY_INTERFACE_MODE_NA:
	default:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		break;
	}

	if (state->interface == PHY_INTERFACE_MODE_NA) {
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseX_Full);
			phylink_set(mask, 2500baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
			phylink_set(mask, 1000baseX_Full);
		}
		if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
			phylink_set(mask, 1000baseT_Full);
			phylink_set(mask, 1000baseT_Half);
		}
	}

	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);

	/* We can only operate at 2500BaseX or 1000BaseX. If requested
	 * to advertise both, only report advertising at 2500BaseX.
	 */
	phylink_helper_basex_speed(state);
}

static const struct phylink_mac_ops mtk_phylink_ops = {
	.validate = mtk_validate,
	.mac_pcs_get_state = mtk_mac_pcs_get_state,
	.mac_an_restart = mtk_mac_an_restart,
	.mac_config = mtk_mac_config,
	.mac_link_down = mtk_mac_link_down,
	.mac_link_up = mtk_mac_link_up,
};
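
/* Register the MDIO bus described by the "mdio-bus" child node of the
 * ethernet device node, backed by the PHY_IAC accessors above.
 */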
static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}
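
/* TX and RX interrupt mask helpers. Each direction has its own mask
 * register and spinlock; updates are read-modify-write with interrupts
 * disabled so they can be used from any context.
 */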
static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->tx_int_mask_reg);
	mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, eth->tx_int_mask_reg);
	mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
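
/* Program the unicast MAC address. The hardware splits the address
 * across an ADRH register (top two bytes) and an ADRL register (low
 * four bytes); MT7628 uses the SDM registers instead of the per-GDMA
 * ones.
 */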
static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MT7628_SDM_MAC_ADRH);
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MT7628_SDM_MAC_ADRL);
	} else {
		mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
			MTK_GDMA_MAC_ADRH(mac->id));
		mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
			(macaddr[4] << 8) | macaddr[5],
			MTK_GDMA_MAC_ADRL(mac->id));
	}
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}
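
/* Fold the hardware MIB counters into the 64-bit software counters.
 * The counter registers appear to clear on read (each read is
 * accumulated rather than assigned), and the u64_stats syncp protects
 * readers of the 64-bit values on 32-bit hosts.
 */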
void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	struct mtk_eth *eth = mac->hw;

	u64_stats_update_begin(&hw_stats->syncp);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
		hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
		hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
		hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
		hw_stats->rx_checksum_errors +=
			mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
	} else {
		unsigned int offs = hw_stats->reg_offset;
		u64 stats;

		hw_stats->rx_bytes += mtk_r32(mac->hw,
					      MTK_GDM1_RX_GBCNT_L + offs);
		stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
		if (stats)
			hw_stats->rx_bytes += (stats << 32);
		hw_stats->rx_packets +=
			mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
		hw_stats->rx_overflow +=
			mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
		hw_stats->rx_fcs_errors +=
			mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
		hw_stats->rx_short_errors +=
			mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
		hw_stats->rx_long_errors +=
			mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
		hw_stats->rx_checksum_errors +=
			mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
		hw_stats->rx_flow_control_packets +=
			mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
		hw_stats->tx_skip +=
			mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
		hw_stats->tx_collisions +=
			mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
		hw_stats->tx_bytes +=
			mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
		stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
		if (stats)
			hw_stats->tx_bytes += (stats << 32);
		hw_stats->tx_packets +=
			mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
	}

	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}
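
/* RX buffer geometry helpers: the frag size is the full allocation
 * (headroom, data and struct skb_shared_info), while the buf size is
 * what remains for packet data.
 */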
static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
				   struct mtk_rx_dma *dma_rxd)
{
	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}

static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
{
	unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
	unsigned long data;

	data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
				get_order(size));

	return (void *)data;
}

/* The QDMA core needs scratch memory to be set up */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	eth->scratch_ring = dma_alloc_coherent(eth->dev,
					       cnt * sizeof(struct mtk_tx_dma),
					       &eth->phy_scratch_ring,
					       GFP_ATOMIC);
	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
				    GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	phy_ring_tail = eth->phy_scratch_ring +
			(sizeof(struct mtk_tx_dma) * (cnt - 1));

	for (i = 0; i < cnt; i++) {
		eth->scratch_ring[i].txd1 =
				(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
		if (i < cnt - 1)
			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
				((i + 1) * sizeof(struct mtk_tx_dma)));
		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
	}

	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}
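
/* Helpers to translate between a TX descriptor's DMA address, its
 * virtual address within the ring, its ring index and its tx_buf. The
 * PDMA shadow ring (dma_pdma) mirrors the QDMA ring at the same
 * offsets.
 */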
static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	void *ret = ring->dma;

	return ret + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    struct mtk_tx_dma *txd)
{
	int idx = txd - ring->dma;

	return &ring->buf[idx];
}

static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
				       struct mtk_tx_dma *dma)
{
	return ring->dma_pdma - ring->dma + dma;
}

static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
{
	return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
}
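
/* Unmap the DMA buffer(s) behind a tx_buf and free the attached skb.
 * On QDMA each tx_buf holds a single mapping; on PDMA one descriptor
 * can carry two buffers (dma_addr0/dma_addr1), so both lengths are
 * checked.
 */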
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
			dma_unmap_single(eth->dev,
					 dma_unmap_addr(tx_buf, dma_addr0),
					 dma_unmap_len(tx_buf, dma_len0),
					 DMA_TO_DEVICE);
		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}
	} else {
		if (dma_unmap_len(tx_buf, dma_len0)) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr0),
				       dma_unmap_len(tx_buf, dma_len0),
				       DMA_TO_DEVICE);
		}

		if (dma_unmap_len(tx_buf, dma_len1)) {
			dma_unmap_page(eth->dev,
				       dma_unmap_addr(tx_buf, dma_addr1),
				       dma_unmap_len(tx_buf, dma_len1),
				       DMA_TO_DEVICE);
		}
	}

	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
		dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}
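
/* Record a DMA mapping in the tx_buf for later unmapping. On PDMA,
 * even/odd buffer indices alternate between the txd1/PLEN0 and
 * txd3/PLEN1 halves of a single descriptor.
 */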
static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
			 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
			 size_t size, int idx)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
		dma_unmap_len_set(tx_buf, dma_len0, size);
	} else {
		if (idx & 1) {
			txd->txd3 = mapped_addr;
			txd->txd2 |= TX_DMA_PLEN1(size);
			dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len1, size);
		} else {
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			txd->txd1 = mapped_addr;
			txd->txd2 = TX_DMA_PLEN0(size);
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, size);
		}
	}
}
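
/* Map an skb (linear head plus page fragments) onto TX descriptors and
 * kick the DMA engine. On any mapping failure, every mapping made so
 * far is rolled back and the descriptors are handed back to the CPU.
 */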
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_dma *itxd_pdma, *txd_pdma;
	struct mtk_tx_buf *itx_buf, *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, n_desc = 1;
	u32 txd4 = 0, fport;
	int k = 0;

	itxd = ring->next_free;
	itxd_pdma = qdma_to_pdma(ring, itxd);
	if (itxd == ring->last_free)
		return -ENOMEM;

	/* set the forward port */
	fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
	txd4 |= fport;

	itx_buf = mtk_desc_to_tx_buf(ring, itxd);
	memset(itx_buf, 0, sizeof(*itx_buf));

	if (gso)
		txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb))
		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);

	mapped_addr = dma_map_single(eth->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
		return -ENOMEM;

	WRITE_ONCE(itxd->txd1, mapped_addr);
	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
			  MTK_TX_FLAGS_FPORT1;
	setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
		     k++);

	/* TX SG offload */
	txd = itxd;
	txd_pdma = qdma_to_pdma(ring, txd);
	nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool last_frag = false;
			unsigned int frag_map_size;
			bool new_desc = true;

			if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
			    (i & 0x1)) {
				txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
				txd_pdma = qdma_to_pdma(ring, txd);
				if (txd == ring->last_free)
					goto err_dma;

				n_desc++;
			} else {
				new_desc = false;
			}

			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
				goto err_dma;

			if (i == nr_frags - 1 &&
			    (frag_size - frag_map_size) == 0)
				last_frag = true;

			WRITE_ONCE(txd->txd1, mapped_addr);
			WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
					       TX_DMA_PLEN0(frag_map_size) |
					       last_frag * TX_DMA_LS0));
			WRITE_ONCE(txd->txd4, fport);

			tx_buf = mtk_desc_to_tx_buf(ring, txd);
			if (new_desc)
				memset(tx_buf, 0, sizeof(*tx_buf));
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
					 MTK_TX_FLAGS_FPORT1;

			setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
				     frag_map_size, k++);

			frag_size -= frag_map_size;
			offset += frag_map_size;
		}
	}

	/* store skb to cleanup */
	itx_buf->skb = skb;

	WRITE_ONCE(itxd->txd4, txd4);
	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (k & 0x1)
			txd_pdma->txd2 |= TX_DMA_LS0;
		else
			txd_pdma->txd2 |= TX_DMA_LS1;
	}

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
		    !netdev_xmit_more())
			mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
	} else {
		int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
					     ring->dma_size);

		mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
	}

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
			itxd_pdma->txd2 = TX_DMA_DESP2_DEF;

		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
		itxd_pdma = qdma_to_pdma(ring, itxd);
	} while (itxd != txd);

	return -ENOMEM;
}
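
/* Worst-case number of TX descriptors needed for an skb: one for the
 * linear head, and for GSO packets each fragment may span several
 * MTK_TX_DMA_BUF_LEN sized buffers.
 */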
static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	skb_frag_t *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
					       MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}

static int mtk_queue_stopped(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (netif_queue_stopped(eth->netdev[i]))
			return 1;
	}

	return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}

static void mtk_stop_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_stop_queue(eth->netdev[i]);
	}
}
1170*4882a593Smuzhiyun
static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock(&eth->page_lock);

	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		goto drop;

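	/* Reserve the worst-case descriptor count up front; stopping the
	 * queue here is cheaper than failing halfway through mtk_tx_map()
	 * and having to unwind.
	 */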
	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		mtk_stop_queue(eth);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		mtk_stop_queue(eth);

	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
	spin_unlock(&eth->page_lock);
	stats->tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

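/* Pick the RX ring to service next. Without HW LRO only ring 0 exists;
 * with HW LRO, scan all rings and return the first one whose next
 * descriptor has completed (RX_DMA_DONE).
 */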
static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
{
	int i;
	struct mtk_rx_ring *ring;
	int idx;

	if (!eth->hwlro)
		return &eth->rx_ring[0];

	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
		ring = &eth->rx_ring[i];
		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
			ring->calc_idx_update = true;
			return ring;
		}
	}

	return NULL;
}

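/* Return refilled descriptors to the hardware by writing the ring's CPU
 * index register; with HW LRO, only rings that actually advanced
 * (calc_idx_update) are written back.
 */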
static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int i;

	if (!eth->hwlro) {
		ring = &eth->rx_ring[0];
		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
	} else {
		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
			ring = &eth->rx_ring[i];
			if (ring->calc_idx_update) {
				ring->calc_idx_update = false;
				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
			}
		}
	}
}

static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma *rxd, trxd;
	int done = 0;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		int mac;

		ring = mtk_get_rx_ring(eth);
		if (unlikely(!ring))
			goto rx_done;

		idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = &ring->dma[idx];
		data = ring->data[idx];

		mtk_rx_get_desc(&trxd, rxd);
		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;

		/* find out which mac the packet comes from. values start at 1 */
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
			mac = 0;
		} else {
			mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
			      RX_DMA_FPORT_MASK;
			mac--;
		}

		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
			     !eth->netdev[mac]))
			goto release_desc;

		netdev = eth->netdev[mac];

		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
			goto release_desc;

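		/* Refill-first strategy: allocate and map a replacement
		 * buffer before the current one is handed to the stack, so
		 * the ring never ends up with an empty slot; on any failure
		 * the descriptor is simply recycled via release_desc.
		 */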
		/* alloc new buffer */
		if (ring->frag_size <= PAGE_SIZE)
			new_data = napi_alloc_frag(ring->frag_size);
		else
			new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(eth->dev,
					  new_data + NET_SKB_PAD +
					  eth->ip_align,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		dma_unmap_single(eth->dev, trxd.rxd1,
				 ring->buf_size, DMA_FROM_DEVICE);
		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);
		if (trxd.rxd4 & eth->rx_dma_l4_valid)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
		    (trxd.rxd2 & RX_DMA_VTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       RX_DMA_VID(trxd.rxd3));
		skb_record_rx_queue(skb, 0);
		napi_gro_receive(napi, skb);

		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			rxd->rxd2 = RX_DMA_LSO;
		else
			rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->calc_idx = idx;

		done++;
	}

rx_done:
	if (done) {
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_update_rx_cpu_idx(eth);
	}

	return done;
}

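/* Reclaim completed TX descriptors on QDMA hardware: walk the descriptor
 * chain from the last CPU position (QTX_CRX_PTR) towards the hardware's
 * DMA position (QTX_DRX_PTR), crediting packet/byte counts to the owning
 * MAC and unmapping the buffers.
 */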
static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
			    unsigned int *done, unsigned int *bytes)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac = 0;

		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		tx_buf = mtk_desc_to_tx_buf(ring, desc);
		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
			mac = 1;

		skb = tx_buf->skb;
		if (!skb)
			break;

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[mac] += skb->len;
			done[mac]++;
			budget--;
		}
		mtk_tx_unmap(eth, tx_buf);

		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}

	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);

	return budget;
}

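/* PDMA (MT7628) variant of the TX reclaim loop: driven by plain ring
 * indices rather than descriptor pointers, with all completions
 * accounted to MAC 0 on this SoC.
 */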
static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
			    unsigned int *done, unsigned int *bytes)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	u32 cpu, dma;

	cpu = ring->cpu_idx;
	dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);

	while ((cpu != dma) && budget) {
		tx_buf = &ring->buf[cpu];
		skb = tx_buf->skb;
		if (!skb)
			break;

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[0] += skb->len;
			done[0]++;
			budget--;
		}

		mtk_tx_unmap(eth, tx_buf);

		desc = &ring->dma[cpu];
		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
	}

	ring->cpu_idx = cpu;

	return budget;
}

static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	unsigned int done[MTK_MAX_DEVS];
	unsigned int bytes[MTK_MAX_DEVS];
	int total = 0, i;

	memset(done, 0, sizeof(done));
	memset(bytes, 0, sizeof(bytes));

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
	else
		budget = mtk_poll_tx_pdma(eth, budget, done, bytes);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i] || !done[i])
			continue;
		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
		total += done[i];
	}

	if (mtk_queue_stopped(eth) &&
	    (atomic_read(&ring->free_count) > ring->thresh))
		mtk_wake_queue(eth);

	return total;
}

static void mtk_handle_status_irq(struct mtk_eth *eth)
{
	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);

	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
		mtk_stats_update(eth);
		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
			MTK_INT_STATUS2);
	}
}

static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
	u32 status, mask;
	int tx_done = 0;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_handle_status_irq(eth);
	mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
	tx_done = mtk_poll_tx(eth, budget);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, eth->tx_int_status_reg);
		mask = mtk_r32(eth, eth->tx_int_mask_reg);
		dev_info(eth->dev,
			 "done tx %d, intr 0x%08x/0x%x\n",
			 tx_done, status, mask);
	}

	if (tx_done == budget)
		return budget;

	status = mtk_r32(eth, eth->tx_int_status_reg);
	if (status & MTK_TX_DONE_INT)
		return budget;

	napi_complete(napi);
	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);

	return tx_done;
}

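/* RX NAPI poll. The loop keeps jumping back to poll_again while the
 * hardware reasserts RX_DONE within the remaining budget; returning the
 * full budget keeps NAPI scheduled, while napi_complete() re-enables the
 * RX interrupt once the rings are drained.
 */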
static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	u32 status, mask;
	int rx_done = 0;
	int remain_budget = budget;

	mtk_handle_status_irq(eth);

poll_again:
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
	rx_done = mtk_poll_rx(napi, remain_budget, eth);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
		mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
		dev_info(eth->dev,
			 "done rx %d, intr 0x%08x/0x%x\n",
			 rx_done, status, mask);
	}
	if (rx_done == remain_budget)
		return budget;

	status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
	if (status & MTK_RX_DONE_INT) {
		remain_budget -= rx_done;
		goto poll_again;
	}
	napi_complete(napi);
	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);

	return rx_done + budget - remain_budget;
}

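/* Allocate the TX ring: QDMA descriptors are chained through txd2 (each
 * pointing at the next descriptor's physical address) with every
 * descriptor initially owned by the CPU; PDMA-only SoCs additionally get
 * a parallel ring of real hardware descriptors (dma_pdma).
 */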
static int mtk_tx_alloc(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = sizeof(*ring->dma);

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			    GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
				       &ring->phys, GFP_ATOMIC);
	if (!ring->dma)
		goto no_tx_mem;

	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		ring->dma[i].txd2 = next_ptr;
		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
	}

	/* On MT7688 (PDMA only) this driver uses the ring->dma structs
	 * only as the framework. The real HW descriptors are the PDMA
	 * descriptors in ring->dma_pdma.
	 */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
						    &ring->phys_pdma,
						    GFP_ATOMIC);
		if (!ring->dma_pdma)
			goto no_tx_mem;

		for (i = 0; i < MTK_DMA_SIZE; i++) {
			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
			ring->dma_pdma[i].txd4 = 0;
		}
	}

	ring->dma_size = MTK_DMA_SIZE;
	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = &ring->dma[0];
	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
	ring->thresh = MAX_SKB_FRAGS;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
		mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
		mtk_w32(eth,
			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
			MTK_QTX_CRX_PTR);
		mtk_w32(eth,
			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
			MTK_QTX_DRX_PTR);
		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
			MTK_QTX_CFG(0));
	} else {
		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
		mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
		mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
	}

	return 0;

no_tx_mem:
	return -ENOMEM;
}

static void mtk_tx_clean(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->buf) {
		for (i = 0; i < MTK_DMA_SIZE; i++)
			mtk_tx_unmap(eth, &ring->buf[i]);
		kfree(ring->buf);
		ring->buf = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}

	if (ring->dma_pdma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
				  ring->dma_pdma,
				  ring->phys_pdma);
		ring->dma_pdma = NULL;
	}
}

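/* Allocate and program one RX ring. The QDMA RX ring reuses the PDMA
 * register layout at a 0x1000 offset; HW LRO rings use larger buffers
 * (MTK_MAX_LRO_RX_LENGTH) than the normal ETH_DATA_LEN rings.
 */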
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
	struct mtk_rx_ring *ring;
	int rx_data_len, rx_dma_size;
	int i;
	u32 offset = 0;

	if (rx_flag == MTK_RX_FLAGS_QDMA) {
		if (ring_no)
			return -EINVAL;
		ring = &eth->rx_ring_qdma;
		offset = 0x1000;
	} else {
		ring = &eth->rx_ring[ring_no];
	}

	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
	} else {
		rx_data_len = ETH_DATA_LEN;
		rx_dma_size = MTK_DMA_SIZE;
	}

	ring->frag_size = mtk_max_frag_size(rx_data_len);
	ring->buf_size = mtk_max_buf_size(ring->frag_size);
	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
			     GFP_KERNEL);
	if (!ring->data)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		if (ring->frag_size <= PAGE_SIZE)
			ring->data[i] = netdev_alloc_frag(ring->frag_size);
		else
			ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
		if (!ring->data[i])
			return -ENOMEM;
	}

	ring->dma = dma_alloc_coherent(eth->dev,
				       rx_dma_size * sizeof(*ring->dma),
				       &ring->phys, GFP_ATOMIC);
	if (!ring->dma)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		dma_addr_t dma_addr = dma_map_single(eth->dev,
				ring->data[i] + NET_SKB_PAD + eth->ip_align,
				ring->buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
			return -ENOMEM;
		ring->dma[i].rxd1 = (unsigned int)dma_addr;

		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
			ring->dma[i].rxd2 = RX_DMA_LSO;
		else
			ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
	}
	ring->dma_size = rx_dma_size;
	ring->calc_idx_update = false;
	ring->calc_idx = rx_dma_size - 1;
	ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
	mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
	mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);

	return 0;
}

static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
{
	int i;

	if (ring->data && ring->dma) {
		for (i = 0; i < ring->dma_size; i++) {
			if (!ring->data[i])
				continue;
			if (!ring->dma[i].rxd1)
				continue;
			dma_unmap_single(eth->dev,
					 ring->dma[i].rxd1,
					 ring->buf_size,
					 DMA_FROM_DEVICE);
			skb_free_frag(ring->data[i]);
		}
		kfree(ring->data);
		ring->data = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  ring->dma_size * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}

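/* One-time HW LRO setup: rings 1..MTK_MAX_RX_RING_NUM-1 are put into
 * auto-learn mode with aggregation limits and AGE/AGG timers (in 20us
 * units), then the global LRO engine is switched on.
 */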
static int mtk_hwlro_rx_init(struct mtk_eth *eth)
{
	int i;
	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;

	/* set LRO rings to auto-learn modes */
	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;

	/* validate LRO ring */
	ring_ctrl_dw2 |= MTK_RING_VLD;

	/* set AGE timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;

	/* set max AGG timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;

	/* set max LRO AGG count */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;

	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
	}

	/* IPv4 checksum update enable */
	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;

	/* switch priority comparison to packet count mode */
	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;

	/* bandwidth threshold setting */
	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);

	/* auto-learn score delta setting */
	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);

	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
		MTK_PDMA_LRO_ALT_REFRESH_TIMER);

	/* set HW LRO mode & the max aggregation count for rx packets */
	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);

	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;

	/* enable HW LRO */
	lro_ctrl_dw0 |= MTK_LRO_EN;

	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);

	return 0;
}

static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
{
	int i;
	u32 val;

	/* relinquish lro rings, flush aggregated packets */
	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);

	/* wait for relinquishments done */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
			msleep(20);
			continue;
		}
		break;
	}

	/* invalidate lro rings */
	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));

	/* disable HW LRO */
	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
}

static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));

	/* validate the IP setting */
	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
}

static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
}

static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
{
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i])
			cnt++;
	}

	return cnt;
}

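/* LRO flows are programmed from user space through the ethtool rxnfc
 * interface. A sketch of the expected invocation (exact flags per
 * ethtool(8); the device name and IP below are just examples):
 *
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.100 loc 0
 *
 * Only TCPv4 destination-IP rules at locations 0..1 are accepted here.
 */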
static int mtk_hwlro_add_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

	if ((fsp->flow_type != TCP_V4_FLOW) ||
	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
	    (fsp->location > 1))
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);

	return 0;
}

static int mtk_hwlro_del_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

	if (fsp->location > 1)
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = 0;
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);

	return 0;
}

static void mtk_hwlro_netdev_disable(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int i, hwlro_idx;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		mac->hwlro_ip[i] = 0;
		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;

		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
	}

	mac->hwlro_ip_cnt = 0;
}

static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
				    struct ethtool_rxnfc *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
		return -EINVAL;

	/* only the TCPv4 destination IP is meaningful; every other field
	 * is reported as a wildcard
	 */
	fsp->flow_type = TCP_V4_FLOW;
	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
	fsp->m_u.tcp_ip4_spec.ip4dst = 0;

	fsp->h_u.tcp_ip4_spec.ip4src = 0;
	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
	fsp->h_u.tcp_ip4_spec.psrc = 0;
	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
	fsp->h_u.tcp_ip4_spec.pdst = 0;
	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
	fsp->h_u.tcp_ip4_spec.tos = 0;
	fsp->m_u.tcp_ip4_spec.tos = 0xff;

	return 0;
}

static int mtk_hwlro_get_fdir_all(struct net_device *dev,
				  struct ethtool_rxnfc *cmd,
				  u32 *rule_locs)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i]) {
			rule_locs[cnt] = i;
			cnt++;
		}
	}

	cmd->rule_cnt = cnt;

	return 0;
}

static netdev_features_t mtk_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	if (!(features & NETIF_F_LRO)) {
		struct mtk_mac *mac = netdev_priv(dev);
		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);

		if (ip_cnt) {
			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");

			features |= NETIF_F_LRO;
		}
	}

	return features;
}

static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
	int err = 0;

	if (!((dev->features ^ features) & NETIF_F_LRO))
		return 0;

	if (!(features & NETIF_F_LRO))
		mtk_hwlro_netdev_disable(dev);

	return err;
}

/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
			if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
			      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
				return 0;
		} else {
			if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
			      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
				return 0;
		}

		if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
			break;
	}

	dev_err(eth->dev, "DMA init timeout\n");
	return -1;
}

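/* Bring up the DMA resources in order: the QDMA scratch free-queue first
 * (QDMA reorders descriptors internally), then the TX ring, the QDMA RX
 * ring, PDMA RX ring 0 and, when HW LRO is enabled, the remaining LRO RX
 * rings.
 */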
static int mtk_dma_init(struct mtk_eth *eth)
{
	int err;
	u32 i;

	if (mtk_dma_busy_wait(eth))
		return -EBUSY;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* QDMA needs scratch memory for internal reordering of the
		 * descriptors
		 */
		err = mtk_init_fq_dma(eth);
		if (err)
			return err;
	}

	err = mtk_tx_alloc(eth);
	if (err)
		return err;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
		if (err)
			return err;
	}

	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
	if (err)
		return err;

	if (eth->hwlro) {
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
			if (err)
				return err;
		}
		err = mtk_hwlro_rx_init(eth);
		if (err)
			return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		/* Enable random early drop and set drop threshold
		 * automatically
		 */
		mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
			FC_THRES_MIN, MTK_QDMA_FC_THRES);
		mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
	}

	return 0;
}

static void mtk_dma_free(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++)
		if (eth->netdev[i])
			netdev_reset_queue(eth->netdev[i]);
	if (eth->scratch_ring) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
				  eth->scratch_ring,
				  eth->phy_scratch_ring);
		eth->scratch_ring = NULL;
		eth->phy_scratch_ring = 0;
	}
	mtk_tx_clean(eth);
	mtk_rx_clean(eth, &eth->rx_ring[0]);
	mtk_rx_clean(eth, &eth->rx_ring_qdma);

	if (eth->hwlro) {
		mtk_hwlro_rx_uninit(eth);
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
			mtk_rx_clean(eth, &eth->rx_ring[i]);
	}

	kfree(eth->scratch_head);
}

static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");
	schedule_work(&eth->pending_work);
}

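/* Hard IRQ handlers only mask the triggering interrupt and hand the real
 * work to NAPI; the interrupt is re-enabled from the poll functions once
 * the rings have been drained.
 */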
static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->rx_napi))) {
		__napi_schedule(&eth->rx_napi);
		mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->tx_napi))) {
		__napi_schedule(&eth->tx_napi);
		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
		if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
			mtk_handle_irq_rx(irq, _eth);
	}
	if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
		if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
			mtk_handle_irq_tx(irq, _eth);
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	mtk_handle_irq_rx(eth->irq[2], dev);
	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
}
#endif

static int mtk_start_dma(struct mtk_eth *eth)
{
	u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
	int err;

	err = mtk_dma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		mtk_w32(eth,
			MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
			MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
			MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
			MTK_RX_BT_32DWORDS,
			MTK_QDMA_GLO_CFG);

		mtk_w32(eth,
			MTK_RX_DMA_EN | rx_2b_offset |
			MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
			MTK_PDMA_GLO_CFG);
	} else {
		mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
			MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
			MTK_PDMA_GLO_CFG);
	}

	return 0;
}

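/* Program every GDMA forwarding port: clear the forward destination
 * field, enable IP/TCP/UDP RX checksum offload, then OR in the caller's
 * destination (e.g. MTK_GDMA_TO_PDMA on open, MTK_GDMA_DROP_ALL on stop)
 * before resetting the PSE.
 */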
static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
{
	int i;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
		return;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));

		/* default setup the forward port to send frame to PDMA */
		val &= ~0xffff;

		/* Enable RX checksum */
		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

		val |= config;

		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
	}
	/* Reset and enable PSE */
	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
	mtk_w32(eth, 0, MTK_RST_GL);
}

static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int err;

	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
	if (err) {
		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
			   err);
		return err;
	}

	/* we run 2 netdevs on the same dma ring so we only bring it up once */
	if (!refcount_read(&eth->dma_refcnt)) {
		int err = mtk_start_dma(eth);

		if (err) {
			phylink_disconnect_phy(mac->phylink);
			return err;
		}

		mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);

		napi_enable(&eth->tx_napi);
		napi_enable(&eth->rx_napi);
		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
		refcount_set(&eth->dma_refcnt, 1);
	} else {
		refcount_inc(&eth->dma_refcnt);
	}

	phylink_start(mac->phylink);
	netif_start_queue(dev);
	return 0;
}

static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_bh(&eth->page_lock);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_bh(&eth->page_lock);

	/* wait for dma stop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
			msleep(20);
			continue;
		}
		break;
	}
}

static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phylink_stop(mac->phylink);

	netif_tx_disable(dev);

	phylink_disconnect_phy(mac->phylink);

	/* only shutdown DMA if this is the last user */
	if (!refcount_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	napi_disable(&eth->tx_napi);
	napi_disable(&eth->rx_napi);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
	mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);

	mtk_dma_free(eth);

	return 0;
}

static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   reset_bits);

	usleep_range(1000, 1100);
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   ~reset_bits);
	mdelay(10);
}

static void mtk_clk_disable(struct mtk_eth *eth)
{
	int clk;

	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
		clk_disable_unprepare(eth->clks[clk]);
}

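/* Enable all frame engine clocks in order; on failure, unwind the clocks
 * already enabled so the clock framework's prepare/enable counts stay
 * balanced.
 */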
static int mtk_clk_enable(struct mtk_eth *eth)
{
	int clk, ret;

	for (clk = 0; clk < MTK_CLK_MAX; clk++) {
		ret = clk_prepare_enable(eth->clks[clk]);
		if (ret)
			goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	while (--clk >= 0)
		clk_disable_unprepare(eth->clks[clk]);

	return ret;
}

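/* One-time hardware bring-up: enable runtime PM and the clocks, reset the
 * frame engine (a device-level reset on MT7628), force every GMAC
 * link-down until phylink configures it, and program the interrupt
 * delay and grouping registers.
 */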
static int mtk_hw_init(struct mtk_eth *eth)
{
	int i, val, ret;

	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
		return 0;

	pm_runtime_enable(eth->dev);
	pm_runtime_get_sync(eth->dev);

	ret = mtk_clk_enable(eth);
	if (ret)
		goto err_disable_pm;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		ret = device_reset(eth->dev);
		if (ret) {
			dev_err(eth->dev, "MAC reset failed!\n");
			goto err_disable_pm;
		}

		/* enable interrupt delay for RX */
		mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);

		/* disable delay and normal interrupt */
		mtk_tx_irq_disable(eth, ~0);
		mtk_rx_irq_disable(eth, ~0);

		return 0;
	}

	/* Non-MT7628 handling... */
	ethsys_reset(eth, RSTCTRL_FE);
	ethsys_reset(eth, RSTCTRL_PPE);

	if (eth->pctl) {
		/* Set GE2 driving and slew rate */
		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

		/* set GE2 TDSEL */
		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

		/* set GE2 TUNE */
		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
	}

	/* Set link-down as the default for each GMAC; each MCR is set up
	 * with the appropriate value once mtk_mac_config() is invoked.
	 */
	for (i = 0; i < MTK_MAC_COUNT; i++)
		mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));

	/* Tell the CDM to parse the MTK special tag on packets coming
	 * from the CPU; this also works for untagged packets.
	 */
	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);

	/* Enable RX VLAN offloading */
	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);

	/* enable interrupt delay for RX */
	mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);

	/* disable delay and normal interrupt */
	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);

	/* FE int grouping */
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);

	return 0;

err_disable_pm:
	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return ret;
}

static int mtk_hw_deinit(struct mtk_eth *eth)
{
	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
		return 0;

	mtk_clk_disable(eth);

	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return 0;
}

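/* ndo_init: read the MAC address from the device tree, falling back to a
 * randomly generated address if none is present or the one found is
 * invalid.
 */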
static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *mac_addr;

	mac_addr = of_get_mac_address(mac->of_node);
	if (!IS_ERR(mac_addr))
		ether_addr_copy(dev->dev_addr, mac_addr);

	/* If the mac address is invalid, use random mac address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
	}

	return 0;
}

static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phylink_disconnect_phy(mac->phylink);
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);
}

static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phylink_mii_ioctl(mac->phylink, ifr, cmd);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

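/* Reset worker: under rtnl, stop every registered netdev so DMA is shut
 * down cleanly, power-cycle the hardware (PM, clocks, pinmux), then
 * reinitialize it and reopen whatever was running before the reset.
 */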
static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
	int err, i;
	unsigned long restart = 0;

	rtnl_lock();

	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);

	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
		cpu_relax();

	dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		__set_bit(i, &restart);
	}
	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);

	/* restart underlying hardware such as power, clock, pin mux
	 * and the connected phy
	 */
	mtk_hw_deinit(eth);

	if (eth->dev->pins)
		pinctrl_select_state(eth->dev->pins->p,
				     eth->dev->pins->default_state);
	mtk_hw_init(eth);

	/* restart DMA and enable IRQs */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!test_bit(i, &restart))
			continue;
		err = mtk_open(eth->netdev[i]);
		if (err) {
			netif_alert(eth, ifup, eth->netdev[i],
				    "Driver up/down cycle failed, closing device.\n");
			dev_close(eth->netdev[i]);
		}
	}

	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);

	clear_bit_unlock(MTK_RESETTING, &eth->state);

	rtnl_unlock();
}

static int mtk_free_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		free_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_unreg_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		unregister_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_cleanup(struct mtk_eth *eth)
{
	mtk_unreg_dev(eth);
	mtk_free_dev(eth);
	cancel_work_sync(&eth->pending_work);

	return 0;
}

static int mtk_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
}

static int mtk_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}

static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	if (!mac->phylink)
		return -EOPNOTSUPP;

	return phylink_ethtool_nway_reset(mac->phylink);
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}

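/* Snapshot the MIB counters into the ethtool stats array. The counters
 * are refreshed from hardware first (when the trylock succeeds), then
 * copied under the u64_stats seqcount so a concurrent writer forces a
 * retry instead of a torn read.
 */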
static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hwstats->stats_lock);
		}
	}

	data_src = (u64 *)hwstats;

	do {
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}

static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		if (dev->hw_features & NETIF_F_LRO) {
			cmd->data = MTK_MAX_RX_RING_NUM;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (dev->hw_features & NETIF_F_LRO) {
			struct mtk_mac *mac = netdev_priv(dev);

			cmd->rule_cnt = mac->hwlro_ip_cnt;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRULE:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_all(dev, cmd,
						     rule_locs);
		break;
	default:
		break;
	}

	return ret;
}

static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_add_ipaddr(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_del_ipaddr(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static const struct ethtool_ops mtk_ethtool_ops = {
	.get_link_ksettings = mtk_get_link_ksettings,
	.set_link_ksettings = mtk_set_link_ksettings,
	.get_drvinfo = mtk_get_drvinfo,
	.get_msglevel = mtk_get_msglevel,
	.set_msglevel = mtk_set_msglevel,
	.nway_reset = mtk_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_strings = mtk_get_strings,
	.get_sset_count = mtk_get_sset_count,
	.get_ethtool_stats = mtk_get_ethtool_stats,
	.get_rxnfc = mtk_get_rxnfc,
	.set_rxnfc = mtk_set_rxnfc,
};

static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init = mtk_init,
	.ndo_uninit = mtk_uninit,
	.ndo_open = mtk_open,
	.ndo_stop = mtk_stop,
	.ndo_start_xmit = mtk_start_xmit,
	.ndo_set_mac_address = mtk_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = mtk_do_ioctl,
	.ndo_tx_timeout = mtk_tx_timeout,
	.ndo_get_stats64 = mtk_get_stats64,
	.ndo_fix_features = mtk_fix_features,
	.ndo_set_features = mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = mtk_poll_controller,
#endif
};

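/* Parse one "mediatek,eth-mac" DT child node: allocate the netdev and
 * per-MAC state, create the phylink instance from the DT phy-mode, and
 * wire up the netdev/ethtool ops and feature flags.
 */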
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	const __be32 *_id = of_get_property(np, "reg", NULL);
	phy_interface_t phy_mode;
	struct phylink *phylink;
	struct mtk_mac *mac;
	int id, err;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
	mac->hwlro_ip_cnt = 0;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	u64_stats_init(&mac->hw_stats->syncp);
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	/* phylink create */
	err = of_get_phy_mode(np, &phy_mode);
	if (err) {
		dev_err(eth->dev, "incorrect phy-mode\n");
		goto free_netdev;
	}

	/* mac config is not set */
	mac->interface = PHY_INTERFACE_MODE_NA;
	mac->mode = MLO_AN_PHY;
	mac->speed = SPEED_UNKNOWN;

	mac->phylink_config.dev = &eth->netdev[id]->dev;
	mac->phylink_config.type = PHYLINK_NETDEV;

	phylink = phylink_create(&mac->phylink_config,
				 of_fwnode_handle(mac->of_node),
				 phy_mode, &mtk_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto free_netdev;
	}

	mac->phylink = phylink;

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = 5 * HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;

	eth->netdev[id]->hw_features = eth->soc->hw_features;
	if (eth->hwlro)
		eth->netdev[id]->hw_features |= NETIF_F_LRO;

	eth->netdev[id]->vlan_features = eth->soc->hw_features &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= eth->soc->hw_features;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	eth->netdev[id]->irq = eth->irq[0];
	eth->netdev[id]->dev.of_node = np;

	eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}

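/* Probe: map the frame engine registers, look up the syscon regmaps, IRQs
 * and clocks described in the device tree, initialize the hardware, create
 * a netdev per available "mediatek,eth-mac" child node, request the
 * (shared or dedicated) TX/RX interrupts, and register the netdevs.
 */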
static int mtk_probe(struct platform_device *pdev)
{
	struct device_node *mac_np;
	struct mtk_eth *eth;
	int err, i;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->soc = of_device_get_match_data(&pdev->dev);

	eth->dev = &pdev->dev;
	eth->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
		eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
	} else {
		eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
		eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
		eth->ip_align = NET_IP_ALIGN;
	} else {
		eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
	}

	spin_lock_init(&eth->page_lock);
	spin_lock_init(&eth->tx_irq_lock);
	spin_lock_init(&eth->rx_irq_lock);

	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							      "mediatek,ethsys");
		if (IS_ERR(eth->ethsys)) {
			dev_err(&pdev->dev, "no ethsys regmap found\n");
			return PTR_ERR(eth->ethsys);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							     "mediatek,infracfg");
		if (IS_ERR(eth->infra)) {
			dev_err(&pdev->dev, "no infracfg regmap found\n");
			return PTR_ERR(eth->infra);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
					  GFP_KERNEL);
		if (!eth->sgmii)
			return -ENOMEM;

		err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
				     eth->soc->ana_rgc3);

		if (err)
			return err;
	}

	if (eth->soc->required_pctl) {
		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							    "mediatek,pctl");
		if (IS_ERR(eth->pctl)) {
			dev_err(&pdev->dev, "no pctl regmap found\n");
			return PTR_ERR(eth->pctl);
		}
	}

	for (i = 0; i < 3; i++) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
			eth->irq[i] = eth->irq[0];
		else
			eth->irq[i] = platform_get_irq(pdev, i);
		if (eth->irq[i] < 0) {
			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
			return -ENXIO;
		}
	}

	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
		eth->clks[i] = devm_clk_get(eth->dev,
					    mtk_clks_source_name[i]);
		if (IS_ERR(eth->clks[i])) {
			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			if (eth->soc->required_clks & BIT(i)) {
				dev_err(&pdev->dev, "clock %s not found\n",
					mtk_clks_source_name[i]);
				return -EINVAL;
			}
			eth->clks[i] = NULL;
		}
	}

	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth);
	if (err)
		return err;

	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err) {
			of_node_put(mac_np);
			goto err_deinit_hw;
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
		err = devm_request_irq(eth->dev, eth->irq[0],
				       mtk_handle_irq, 0,
				       dev_name(eth->dev), eth);
	} else {
		err = devm_request_irq(eth->dev, eth->irq[1],
				       mtk_handle_irq_tx, 0,
				       dev_name(eth->dev), eth);
		if (err)
			goto err_free_dev;

		err = devm_request_irq(eth->dev, eth->irq[2],
				       mtk_handle_irq_rx, 0,
				       dev_name(eth->dev), eth);
	}
	if (err)
		goto err_free_dev;

	/* No MT7628/88 support yet */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		err = mtk_mdio_init(eth);
		if (err)
			goto err_free_dev;
	}

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;

		err = register_netdev(eth->netdev[i]);
		if (err) {
			dev_err(eth->dev, "error bringing up device\n");
			goto err_deinit_mdio;
		} else {
			netif_info(eth, probe, eth->netdev[i],
				   "mediatek frame engine at 0x%08lx, irq %d\n",
				   eth->netdev[i]->base_addr, eth->irq[0]);
		}
	}

	/* we run 2 devices on the same DMA ring so we need a dummy device
	 * for NAPI to work
	 */
	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
		       MTK_NAPI_WEIGHT);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
		       MTK_NAPI_WEIGHT);

	platform_set_drvdata(pdev, eth);

	return 0;

err_deinit_mdio:
	mtk_mdio_cleanup(eth);
err_free_dev:
	mtk_free_dev(eth);
err_deinit_hw:
	mtk_hw_deinit(eth);

	return err;
}

static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);
	struct mtk_mac *mac;
	int i;

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		mac = netdev_priv(eth->netdev[i]);
		phylink_disconnect_phy(mac->phylink);
	}

	mtk_hw_deinit(eth);

	netif_napi_del(&eth->tx_napi);
	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	mtk_mdio_cleanup(eth);

	return 0;
}

static const struct mtk_soc_data mt2701_data = {
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
};

static const struct mtk_soc_data mt7621_data = {
	.caps = MT7621_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7621_CLKS_BITMAP,
	.required_pctl = false,
};

static const struct mtk_soc_data mt7622_data = {
	.ana_rgc3 = 0x2028,
	.caps = MT7622_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7622_CLKS_BITMAP,
	.required_pctl = false,
};

static const struct mtk_soc_data mt7623_data = {
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
};

static const struct mtk_soc_data mt7629_data = {
	.ana_rgc3 = 0x128,
	.caps = MT7629_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7629_CLKS_BITMAP,
	.required_pctl = false,
};

static const struct mtk_soc_data rt5350_data = {
	.caps = MT7628_CAPS,
	.hw_features = MTK_HW_FEATURES_MT7628,
	.required_clks = MT7628_CLKS_BITMAP,
	.required_pctl = false,
};

const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
	{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");