xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

/* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
 * (clock jumps) when changing timestamping settings at runtime.
 */
#define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
				 PTP_TCR_TSCTRLSSR)

#define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

/* By default the driver uses ring mode to manage the TX and RX descriptors,
 * but the user can force use of chain mode instead of ring mode.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

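/**
 * stmmac_bus_clks_config - enable or disable the bus clocks
 * @priv: driver private structure
 * @enabled: true to prepare/enable stmmac_clk and pclk, false to disable them
 * Description: exported helper that prepares/enables or disables the two bus
 * clocks; on enable, the first clock is rolled back if preparing the second
 * one fails.
 */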
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = clk_prepare_enable(priv->plat->stmmac_clk);
		if (ret)
			return ret;
		ret = clk_prepare_enable(priv->plat->pclk);
		if (ret) {
			clk_disable_unprepare(priv->plat->stmmac_clk);
			return ret;
		}
	} else {
		clk_disable_unprepare(priv->plat->stmmac_clk);
		clk_disable_unprepare(priv->plat->pclk);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

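/* Schedule the service task (error/reset handling) unless the interface is
 * going down or a run is already pending.
 */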
static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

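/* Take the carrier down and request a full reset through the service task
 * when a fatal (global) error is detected.
 */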
static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Vice versa, the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->pclk);

	/* The platform-provided default clk_csr is assumed valid in all cases
	 * except the ones handled below. For values higher than the IEEE 802.3
	 * specified frequency we cannot estimate the proper divider, since the
	 * frequency of clk_csr_i is not known, so the default divider is kept.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

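/**
 * stmmac_tx_avail - Get number of free TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 */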
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function checks whether all TX queues are idle and, if
 * so, enters LPI mode in case of EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function disables EEE and exits LPI mode if the LPI
 * state is active. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t:  timer_list struct containing private info
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC Transmitter can be moved to the LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot access the PHY registers at this stage, so we
	 * do not support extra features like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
	}

	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet timestamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate negative error code on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1, any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	priv->systime_flags = STMMAC_HWTS_ACTIVE;

	if (priv->hwts_tx_en || priv->hwts_rx_en) {
		priv->systime_flags |= tstamp_all | ptp_v2 |
				       ptp_over_ethernet | ptp_over_ipv6_udp |
				       ptp_over_ipv4_udp | ts_event_en |
				       ts_master_en | snap_type_sel;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

/**
 *  stmmac_hwtstamp_get - read hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_tstamp_counter - init hardware timestamping counter
 * @priv: driver private structure
 * @systime_flags: timestamping flags
 * Description:
 * Initialize the hardware counter for packet timestamping.
 * This is valid as long as the interface is open and not suspended.
 * It is rerun after resuming from suspend, in which case the timestamping
 * flags updated by stmmac_hwtstamp_set() also need to be restored.
 */
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	struct timespec64 now;
	u32 sec_inc = 0;
	u64 temp = 0;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
	priv->systime_flags = systime_flags;

	/* program Sub Second Increment reg */
	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
					   priv->plat->clk_ptp_rate,
					   xmac, &sec_inc);
	temp = div_u64(1000000000ULL, sec_inc);

	/* Store sub second increment for later use */
	priv->sub_second_inc = sec_inc;

	/* Calculate the default addend so that the counter, clocked at
	 * clk_ptp_rate, advances at the rate implied by sec_inc:
	 * addend = ((1e9 / sec_inc) << 32) / clk_ptp_rate
	 */
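	/* For illustration (values assumed, not taken from any platform):
	 * with clk_ptp_rate = 100 MHz and sec_inc = 20 ns, 1e9 / sec_inc is
	 * 50,000,000, so addend = (50e6 << 32) / 100e6 = 2^31 = 0x80000000,
	 * i.e. the accumulator overflows on every second PTP clock cycle.
	 */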
	temp = (u64)(temp << 32);
	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

	/* initialize system time */
	ktime_get_real_ts64(&now);

	/* lower 32 bits of tv_sec are safe until y2106 */
	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	int ret;

	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
	if (ret)
		return ret;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	return 0;
}

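/* Release PTP resources: disable the PTP reference clock and unregister the
 * PTP clock driver.
 */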
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex passed to the next function
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl & priv->plat->flow_ctrl,
			 priv->pause, tx_cnt);
}

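/* phylink .validate callback: build the set of link modes the MAC can
 * support for the given configuration and mask out anything the platform
 * (max_speed, number of TX queues) or the XPCS cannot handle.
 */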
static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

	phylink_set(mac_supported, 10baseT_Half);
	phylink_set(mac_supported, 10baseT_Full);
	phylink_set(mac_supported, 100baseT_Half);
	phylink_set(mac_supported, 100baseT_Full);
	phylink_set(mac_supported, 1000baseT_Half);
	phylink_set(mac_supported, 1000baseT_Full);
	phylink_set(mac_supported, 1000baseKX_Full);
	phylink_set(mac_supported, 100baseT1_Full);
	phylink_set(mac_supported, 1000baseT1_Full);

	phylink_set(mac_supported, Autoneg);
	phylink_set(mac_supported, Pause);
	phylink_set(mac_supported, Asym_Pause);
	phylink_set_port_modes(mac_supported);

	/* Cut down 1G if asked to */
	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	} else if (priv->plat->has_xgmac) {
		if (!max_speed || (max_speed >= 2500)) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
		if (!max_speed || (max_speed >= 5000)) {
			phylink_set(mac_supported, 5000baseT_Full);
		}
		if (!max_speed || (max_speed >= 10000)) {
			phylink_set(mac_supported, 10000baseSR_Full);
			phylink_set(mac_supported, 10000baseLR_Full);
			phylink_set(mac_supported, 10000baseER_Full);
			phylink_set(mac_supported, 10000baseLRM_Full);
			phylink_set(mac_supported, 10000baseT_Full);
			phylink_set(mac_supported, 10000baseKX4_Full);
			phylink_set(mac_supported, 10000baseKR_Full);
		}
		if (!max_speed || (max_speed >= 25000)) {
			phylink_set(mac_supported, 25000baseCR_Full);
			phylink_set(mac_supported, 25000baseKR_Full);
			phylink_set(mac_supported, 25000baseSR_Full);
		}
		if (!max_speed || (max_speed >= 40000)) {
			phylink_set(mac_supported, 40000baseKR4_Full);
			phylink_set(mac_supported, 40000baseCR4_Full);
			phylink_set(mac_supported, 40000baseSR4_Full);
			phylink_set(mac_supported, 40000baseLR4_Full);
		}
		if (!max_speed || (max_speed >= 50000)) {
			phylink_set(mac_supported, 50000baseCR2_Full);
			phylink_set(mac_supported, 50000baseKR2_Full);
			phylink_set(mac_supported, 50000baseSR2_Full);
			phylink_set(mac_supported, 50000baseKR_Full);
			phylink_set(mac_supported, 50000baseSR_Full);
			phylink_set(mac_supported, 50000baseCR_Full);
			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
			phylink_set(mac_supported, 50000baseDR_Full);
		}
		if (!max_speed || (max_speed >= 100000)) {
			phylink_set(mac_supported, 100000baseKR4_Full);
			phylink_set(mac_supported, 100000baseSR4_Full);
			phylink_set(mac_supported, 100000baseCR4_Full);
			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
			phylink_set(mac_supported, 100000baseKR2_Full);
			phylink_set(mac_supported, 100000baseSR2_Full);
			phylink_set(mac_supported, 100000baseCR2_Full);
			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
			phylink_set(mac_supported, 100000baseDR2_Full);
		}
	}

	/* Half-Duplex can only work with a single queue */
	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

	linkmode_and(supported, supported, mac_supported);
	linkmode_andnot(supported, supported, mask);

	linkmode_and(state->advertising, state->advertising, mac_supported);
	linkmode_andnot(state->advertising, state->advertising, mask);

	/* If PCS is supported, check which modes it supports. */
	stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
}

static void stmmac_mac_pcs_get_state(struct phylink_config *config,
				     struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	state->link = 0;
	stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
}

static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
}

static void stmmac_mac_an_restart(struct phylink_config *config)
{
	/* Not Supported */
}

static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);
}
963*4882a593Smuzhiyun 
stmmac_mac_link_up(struct phylink_config * config,struct phy_device * phy,unsigned int mode,phy_interface_t interface,int speed,int duplex,bool tx_pause,bool rx_pause)964*4882a593Smuzhiyun static void stmmac_mac_link_up(struct phylink_config *config,
965*4882a593Smuzhiyun 			       struct phy_device *phy,
966*4882a593Smuzhiyun 			       unsigned int mode, phy_interface_t interface,
967*4882a593Smuzhiyun 			       int speed, int duplex,
968*4882a593Smuzhiyun 			       bool tx_pause, bool rx_pause)
969*4882a593Smuzhiyun {
970*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
971*4882a593Smuzhiyun 	u32 ctrl;
972*4882a593Smuzhiyun 
973*4882a593Smuzhiyun 	stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);
974*4882a593Smuzhiyun 
975*4882a593Smuzhiyun 	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
976*4882a593Smuzhiyun 	ctrl &= ~priv->hw->link.speed_mask;
977*4882a593Smuzhiyun 
978*4882a593Smuzhiyun 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
979*4882a593Smuzhiyun 		switch (speed) {
980*4882a593Smuzhiyun 		case SPEED_10000:
981*4882a593Smuzhiyun 			ctrl |= priv->hw->link.xgmii.speed10000;
982*4882a593Smuzhiyun 			break;
983*4882a593Smuzhiyun 		case SPEED_5000:
984*4882a593Smuzhiyun 			ctrl |= priv->hw->link.xgmii.speed5000;
985*4882a593Smuzhiyun 			break;
986*4882a593Smuzhiyun 		case SPEED_2500:
987*4882a593Smuzhiyun 			ctrl |= priv->hw->link.xgmii.speed2500;
988*4882a593Smuzhiyun 			break;
989*4882a593Smuzhiyun 		default:
990*4882a593Smuzhiyun 			return;
991*4882a593Smuzhiyun 		}
992*4882a593Smuzhiyun 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
993*4882a593Smuzhiyun 		switch (speed) {
994*4882a593Smuzhiyun 		case SPEED_100000:
995*4882a593Smuzhiyun 			ctrl |= priv->hw->link.xlgmii.speed100000;
996*4882a593Smuzhiyun 			break;
997*4882a593Smuzhiyun 		case SPEED_50000:
998*4882a593Smuzhiyun 			ctrl |= priv->hw->link.xlgmii.speed50000;
999*4882a593Smuzhiyun 			break;
1000*4882a593Smuzhiyun 		case SPEED_40000:
1001*4882a593Smuzhiyun 			ctrl |= priv->hw->link.xlgmii.speed40000;
1002*4882a593Smuzhiyun 			break;
1003*4882a593Smuzhiyun 		case SPEED_25000:
1004*4882a593Smuzhiyun 			ctrl |= priv->hw->link.xlgmii.speed25000;
1005*4882a593Smuzhiyun 			break;
1006*4882a593Smuzhiyun 		case SPEED_10000:
1007*4882a593Smuzhiyun 			ctrl |= priv->hw->link.xgmii.speed10000;
1008*4882a593Smuzhiyun 			break;
1009*4882a593Smuzhiyun 		case SPEED_2500:
1010*4882a593Smuzhiyun 			ctrl |= priv->hw->link.speed2500;
1011*4882a593Smuzhiyun 			break;
1012*4882a593Smuzhiyun 		case SPEED_1000:
1013*4882a593Smuzhiyun 			ctrl |= priv->hw->link.speed1000;
1014*4882a593Smuzhiyun 			break;
1015*4882a593Smuzhiyun 		default:
1016*4882a593Smuzhiyun 			return;
1017*4882a593Smuzhiyun 		}
1018*4882a593Smuzhiyun 	} else {
1019*4882a593Smuzhiyun 		switch (speed) {
1020*4882a593Smuzhiyun 		case SPEED_2500:
1021*4882a593Smuzhiyun 			ctrl |= priv->hw->link.speed2500;
1022*4882a593Smuzhiyun 			break;
1023*4882a593Smuzhiyun 		case SPEED_1000:
1024*4882a593Smuzhiyun 			ctrl |= priv->hw->link.speed1000;
1025*4882a593Smuzhiyun 			break;
1026*4882a593Smuzhiyun 		case SPEED_100:
1027*4882a593Smuzhiyun 			ctrl |= priv->hw->link.speed100;
1028*4882a593Smuzhiyun 			break;
1029*4882a593Smuzhiyun 		case SPEED_10:
1030*4882a593Smuzhiyun 			ctrl |= priv->hw->link.speed10;
1031*4882a593Smuzhiyun 			break;
1032*4882a593Smuzhiyun 		default:
1033*4882a593Smuzhiyun 			return;
1034*4882a593Smuzhiyun 		}
1035*4882a593Smuzhiyun 	}
1036*4882a593Smuzhiyun 
1037*4882a593Smuzhiyun 	priv->speed = speed;
1038*4882a593Smuzhiyun 
1039*4882a593Smuzhiyun 	if (priv->plat->fix_mac_speed)
1040*4882a593Smuzhiyun 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1041*4882a593Smuzhiyun 
1042*4882a593Smuzhiyun 	if (!duplex)
1043*4882a593Smuzhiyun 		ctrl &= ~priv->hw->link.duplex;
1044*4882a593Smuzhiyun 	else
1045*4882a593Smuzhiyun 		ctrl |= priv->hw->link.duplex;
1046*4882a593Smuzhiyun 
1047*4882a593Smuzhiyun 	/* Flow Control operation */
1048*4882a593Smuzhiyun 	if (rx_pause && tx_pause)
1049*4882a593Smuzhiyun 		priv->flow_ctrl = FLOW_AUTO;
1050*4882a593Smuzhiyun 	else if (rx_pause && !tx_pause)
1051*4882a593Smuzhiyun 		priv->flow_ctrl = FLOW_RX;
1052*4882a593Smuzhiyun 	else if (!rx_pause && tx_pause)
1053*4882a593Smuzhiyun 		priv->flow_ctrl = FLOW_TX;
1054*4882a593Smuzhiyun 	else
1055*4882a593Smuzhiyun 		priv->flow_ctrl = FLOW_OFF;
1056*4882a593Smuzhiyun 
1057*4882a593Smuzhiyun 	stmmac_mac_flow_ctrl(priv, duplex);
1058*4882a593Smuzhiyun 
1059*4882a593Smuzhiyun 	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1060*4882a593Smuzhiyun 
1061*4882a593Smuzhiyun 	stmmac_mac_set(priv, priv->ioaddr, true);
1062*4882a593Smuzhiyun 	if (phy && priv->dma_cap.eee) {
1063*4882a593Smuzhiyun 		priv->eee_active = phy_init_eee(phy, 1) >= 0;
1064*4882a593Smuzhiyun 		priv->eee_enabled = stmmac_eee_init(priv);
1065*4882a593Smuzhiyun 		priv->tx_lpi_enabled = priv->eee_enabled;
1066*4882a593Smuzhiyun 		stmmac_set_eee_pls(priv, priv->hw, true);
1067*4882a593Smuzhiyun 	}
1068*4882a593Smuzhiyun }
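/*
 * Editor's illustrative sketch (not part of the driver, compiled out below):
 * the pause handling above reduces the two booleans reported by phylink to a
 * single FLOW_* value. The same mapping rewritten as a stand-alone helper,
 * purely for clarity; the helper name is hypothetical.
 */
#if 0
static unsigned int stmmac_example_flow_mode(bool rx_pause, bool tx_pause)
{
	if (rx_pause && tx_pause)
		return FLOW_AUTO;	/* honour and generate pause frames */
	if (rx_pause)
		return FLOW_RX;		/* honour received pause frames only */
	if (tx_pause)
		return FLOW_TX;		/* generate pause frames only */
	return FLOW_OFF;		/* flow control disabled */
}
#endif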
1069*4882a593Smuzhiyun 
1070*4882a593Smuzhiyun static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1071*4882a593Smuzhiyun 	.validate = stmmac_validate,
1072*4882a593Smuzhiyun 	.mac_pcs_get_state = stmmac_mac_pcs_get_state,
1073*4882a593Smuzhiyun 	.mac_config = stmmac_mac_config,
1074*4882a593Smuzhiyun 	.mac_an_restart = stmmac_mac_an_restart,
1075*4882a593Smuzhiyun 	.mac_link_down = stmmac_mac_link_down,
1076*4882a593Smuzhiyun 	.mac_link_up = stmmac_mac_link_up,
1077*4882a593Smuzhiyun };
1078*4882a593Smuzhiyun 
1079*4882a593Smuzhiyun /**
1080*4882a593Smuzhiyun  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1081*4882a593Smuzhiyun  * @priv: driver private structure
1082*4882a593Smuzhiyun  * Description: this is to verify whether the HW supports the Physical
1083*4882a593Smuzhiyun  * Coding Sublayer (PCS), which can be used when the MAC is configured
1084*4882a593Smuzhiyun  * for the TBI, RTBI, or SGMII PHY interface.
1085*4882a593Smuzhiyun  */
1086*4882a593Smuzhiyun static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1087*4882a593Smuzhiyun {
1088*4882a593Smuzhiyun 	int interface = priv->plat->interface;
1089*4882a593Smuzhiyun 
1090*4882a593Smuzhiyun 	if (priv->dma_cap.pcs) {
1091*4882a593Smuzhiyun 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1092*4882a593Smuzhiyun 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1093*4882a593Smuzhiyun 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1094*4882a593Smuzhiyun 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1095*4882a593Smuzhiyun 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1096*4882a593Smuzhiyun 			priv->hw->pcs = STMMAC_PCS_RGMII;
1097*4882a593Smuzhiyun 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1098*4882a593Smuzhiyun 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1099*4882a593Smuzhiyun 			priv->hw->pcs = STMMAC_PCS_SGMII;
1100*4882a593Smuzhiyun 		}
1101*4882a593Smuzhiyun 	}
1102*4882a593Smuzhiyun }
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun /**
1105*4882a593Smuzhiyun  * stmmac_init_phy - PHY initialization
1106*4882a593Smuzhiyun  * @dev: net device structure
1107*4882a593Smuzhiyun  * Description: it initializes the driver's PHY state, and attaches the PHY
1108*4882a593Smuzhiyun  * to the mac driver.
1109*4882a593Smuzhiyun  *  Return value:
1110*4882a593Smuzhiyun  *  0 on success
1111*4882a593Smuzhiyun  */
1112*4882a593Smuzhiyun static int stmmac_init_phy(struct net_device *dev)
1113*4882a593Smuzhiyun {
1114*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
1115*4882a593Smuzhiyun 	struct device_node *node;
1116*4882a593Smuzhiyun 	int ret;
1117*4882a593Smuzhiyun 
1118*4882a593Smuzhiyun 	if (priv->plat->integrated_phy_power)
1119*4882a593Smuzhiyun 		ret = priv->plat->integrated_phy_power(priv->plat->bsp_priv, true);
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	node = priv->plat->phylink_node;
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun 	if (node)
1124*4882a593Smuzhiyun 		ret = phylink_of_phy_connect(priv->phylink, node, 0);
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun 	/* Some DT bindings do not set up the PHY handle. Let's try to
1127*4882a593Smuzhiyun 	 * manually parse it
1128*4882a593Smuzhiyun 	 */
1129*4882a593Smuzhiyun 	if (!node || ret) {
1130*4882a593Smuzhiyun 		int addr = priv->plat->phy_addr;
1131*4882a593Smuzhiyun 		struct phy_device *phydev;
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 		phydev = mdiobus_get_phy(priv->mii, addr);
1134*4882a593Smuzhiyun 		if (!phydev) {
1135*4882a593Smuzhiyun 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1136*4882a593Smuzhiyun 			return -ENODEV;
1137*4882a593Smuzhiyun 		}
1138*4882a593Smuzhiyun 
1139*4882a593Smuzhiyun 		ret = phylink_connect_phy(priv->phylink, phydev);
1140*4882a593Smuzhiyun 	}
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 	if (!priv->plat->pmt) {
1143*4882a593Smuzhiyun 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 		phylink_ethtool_get_wol(priv->phylink, &wol);
1146*4882a593Smuzhiyun 		device_set_wakeup_capable(priv->device, !!wol.supported);
1147*4882a593Smuzhiyun 	}
1148*4882a593Smuzhiyun 
1149*4882a593Smuzhiyun 	return ret;
1150*4882a593Smuzhiyun }
1151*4882a593Smuzhiyun 
1152*4882a593Smuzhiyun static int stmmac_phy_setup(struct stmmac_priv *priv)
1153*4882a593Smuzhiyun {
1154*4882a593Smuzhiyun 	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1155*4882a593Smuzhiyun 	int mode = priv->plat->phy_interface;
1156*4882a593Smuzhiyun 	struct phylink *phylink;
1157*4882a593Smuzhiyun 
1158*4882a593Smuzhiyun 	priv->phylink_config.dev = &priv->dev->dev;
1159*4882a593Smuzhiyun 	priv->phylink_config.type = PHYLINK_NETDEV;
1160*4882a593Smuzhiyun 	priv->phylink_config.pcs_poll = true;
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 	if (!fwnode)
1163*4882a593Smuzhiyun 		fwnode = dev_fwnode(priv->device);
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun 	phylink = phylink_create(&priv->phylink_config, fwnode,
1166*4882a593Smuzhiyun 				 mode, &stmmac_phylink_mac_ops);
1167*4882a593Smuzhiyun 	if (IS_ERR(phylink))
1168*4882a593Smuzhiyun 		return PTR_ERR(phylink);
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun 	priv->phylink = phylink;
1171*4882a593Smuzhiyun 	return 0;
1172*4882a593Smuzhiyun }
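/*
 * Editor's illustrative sketch (not part of the driver, compiled out below):
 * stmmac_phy_setup() above only creates the phylink instance at probe time;
 * the PHY itself is attached later from the open path via stmmac_init_phy(),
 * after which phylink_start() lets phylink drive the link state machine.
 * The function name below is hypothetical and simply outlines that ordering.
 */
#if 0
static int stmmac_example_bring_up_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = stmmac_init_phy(dev);	/* attach the PHY described by firmware */
	if (ret)
		return ret;

	phylink_start(priv->phylink);	/* start link management */
	return 0;
}
#endif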
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1175*4882a593Smuzhiyun {
1176*4882a593Smuzhiyun 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1177*4882a593Smuzhiyun 	unsigned int desc_size;
1178*4882a593Smuzhiyun 	void *head_rx;
1179*4882a593Smuzhiyun 	u32 queue;
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun 	/* Display RX rings */
1182*4882a593Smuzhiyun 	for (queue = 0; queue < rx_cnt; queue++) {
1183*4882a593Smuzhiyun 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 		pr_info("\tRX Queue %u rings\n", queue);
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun 		if (priv->extend_desc) {
1188*4882a593Smuzhiyun 			head_rx = (void *)rx_q->dma_erx;
1189*4882a593Smuzhiyun 			desc_size = sizeof(struct dma_extended_desc);
1190*4882a593Smuzhiyun 		} else {
1191*4882a593Smuzhiyun 			head_rx = (void *)rx_q->dma_rx;
1192*4882a593Smuzhiyun 			desc_size = sizeof(struct dma_desc);
1193*4882a593Smuzhiyun 		}
1194*4882a593Smuzhiyun 
1195*4882a593Smuzhiyun 		/* Display RX ring */
1196*4882a593Smuzhiyun 		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
1197*4882a593Smuzhiyun 				    rx_q->dma_rx_phy, desc_size);
1198*4882a593Smuzhiyun 	}
1199*4882a593Smuzhiyun }
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1202*4882a593Smuzhiyun {
1203*4882a593Smuzhiyun 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1204*4882a593Smuzhiyun 	unsigned int desc_size;
1205*4882a593Smuzhiyun 	void *head_tx;
1206*4882a593Smuzhiyun 	u32 queue;
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun 	/* Display TX rings */
1209*4882a593Smuzhiyun 	for (queue = 0; queue < tx_cnt; queue++) {
1210*4882a593Smuzhiyun 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1211*4882a593Smuzhiyun 
1212*4882a593Smuzhiyun 		pr_info("\tTX Queue %d rings\n", queue);
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 		if (priv->extend_desc) {
1215*4882a593Smuzhiyun 			head_tx = (void *)tx_q->dma_etx;
1216*4882a593Smuzhiyun 			desc_size = sizeof(struct dma_extended_desc);
1217*4882a593Smuzhiyun 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1218*4882a593Smuzhiyun 			head_tx = (void *)tx_q->dma_entx;
1219*4882a593Smuzhiyun 			desc_size = sizeof(struct dma_edesc);
1220*4882a593Smuzhiyun 		} else {
1221*4882a593Smuzhiyun 			head_tx = (void *)tx_q->dma_tx;
1222*4882a593Smuzhiyun 			desc_size = sizeof(struct dma_desc);
1223*4882a593Smuzhiyun 		}
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
1226*4882a593Smuzhiyun 				    tx_q->dma_tx_phy, desc_size);
1227*4882a593Smuzhiyun 	}
1228*4882a593Smuzhiyun }
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun static void stmmac_display_rings(struct stmmac_priv *priv)
1231*4882a593Smuzhiyun {
1232*4882a593Smuzhiyun 	/* Display RX ring */
1233*4882a593Smuzhiyun 	stmmac_display_rx_rings(priv);
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun 	/* Display TX ring */
1236*4882a593Smuzhiyun 	stmmac_display_tx_rings(priv);
1237*4882a593Smuzhiyun }
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun static int stmmac_set_bfsize(int mtu, int bufsize)
1240*4882a593Smuzhiyun {
1241*4882a593Smuzhiyun 	int ret = bufsize;
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun 	if (mtu >= BUF_SIZE_8KiB)
1244*4882a593Smuzhiyun 		ret = BUF_SIZE_16KiB;
1245*4882a593Smuzhiyun 	else if (mtu >= BUF_SIZE_4KiB)
1246*4882a593Smuzhiyun 		ret = BUF_SIZE_8KiB;
1247*4882a593Smuzhiyun 	else if (mtu >= BUF_SIZE_2KiB)
1248*4882a593Smuzhiyun 		ret = BUF_SIZE_4KiB;
1249*4882a593Smuzhiyun 	else if (mtu > DEFAULT_BUFSIZE)
1250*4882a593Smuzhiyun 		ret = BUF_SIZE_2KiB;
1251*4882a593Smuzhiyun 	else
1252*4882a593Smuzhiyun 		ret = DEFAULT_BUFSIZE;
1253*4882a593Smuzhiyun 
1254*4882a593Smuzhiyun 	return ret;
1255*4882a593Smuzhiyun }
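/*
 * Editor's illustrative sketch (not part of the driver, compiled out below):
 * a few worked examples of the MTU -> buffer size mapping above, assuming the
 * usual constants (DEFAULT_BUFSIZE of 1536 bytes and BUF_SIZE_* macros that
 * match their names). The function name is hypothetical.
 */
#if 0
static void stmmac_example_bfsize(void)
{
	int bfsize;

	bfsize = stmmac_set_bfsize(1500, 0);	/* -> DEFAULT_BUFSIZE (standard MTU) */
	bfsize = stmmac_set_bfsize(2000, 0);	/* -> BUF_SIZE_2KiB */
	bfsize = stmmac_set_bfsize(4000, 0);	/* -> BUF_SIZE_4KiB */
	bfsize = stmmac_set_bfsize(9000, 0);	/* -> BUF_SIZE_16KiB (jumbo) */
	(void)bfsize;
}
#endif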
1256*4882a593Smuzhiyun 
1257*4882a593Smuzhiyun /**
1258*4882a593Smuzhiyun  * stmmac_clear_rx_descriptors - clear RX descriptors
1259*4882a593Smuzhiyun  * @priv: driver private structure
1260*4882a593Smuzhiyun  * @queue: RX queue index
1261*4882a593Smuzhiyun  * Description: this function is called to clear the RX descriptors
1262*4882a593Smuzhiyun  * whether basic or extended descriptors are used.
1263*4882a593Smuzhiyun  */
1264*4882a593Smuzhiyun static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1265*4882a593Smuzhiyun {
1266*4882a593Smuzhiyun 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1267*4882a593Smuzhiyun 	int i;
1268*4882a593Smuzhiyun 
1269*4882a593Smuzhiyun 	/* Clear the RX descriptors */
1270*4882a593Smuzhiyun 	for (i = 0; i < priv->dma_rx_size; i++)
1271*4882a593Smuzhiyun 		if (priv->extend_desc)
1272*4882a593Smuzhiyun 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1273*4882a593Smuzhiyun 					priv->use_riwt, priv->mode,
1274*4882a593Smuzhiyun 					(i == priv->dma_rx_size - 1),
1275*4882a593Smuzhiyun 					priv->dma_buf_sz);
1276*4882a593Smuzhiyun 		else
1277*4882a593Smuzhiyun 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1278*4882a593Smuzhiyun 					priv->use_riwt, priv->mode,
1279*4882a593Smuzhiyun 					(i == priv->dma_rx_size - 1),
1280*4882a593Smuzhiyun 					priv->dma_buf_sz);
1281*4882a593Smuzhiyun }
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun /**
1284*4882a593Smuzhiyun  * stmmac_clear_tx_descriptors - clear tx descriptors
1285*4882a593Smuzhiyun  * @priv: driver private structure
1286*4882a593Smuzhiyun  * @queue: TX queue index.
1287*4882a593Smuzhiyun  * Description: this function is called to clear the TX descriptors
1288*4882a593Smuzhiyun  * whether basic or extended descriptors are used.
1289*4882a593Smuzhiyun  */
1290*4882a593Smuzhiyun static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1291*4882a593Smuzhiyun {
1292*4882a593Smuzhiyun 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1293*4882a593Smuzhiyun 	int i;
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun 	/* Clear the TX descriptors */
1296*4882a593Smuzhiyun 	for (i = 0; i < priv->dma_tx_size; i++) {
1297*4882a593Smuzhiyun 		int last = (i == (priv->dma_tx_size - 1));
1298*4882a593Smuzhiyun 		struct dma_desc *p;
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun 		if (priv->extend_desc)
1301*4882a593Smuzhiyun 			p = &tx_q->dma_etx[i].basic;
1302*4882a593Smuzhiyun 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1303*4882a593Smuzhiyun 			p = &tx_q->dma_entx[i].basic;
1304*4882a593Smuzhiyun 		else
1305*4882a593Smuzhiyun 			p = &tx_q->dma_tx[i];
1306*4882a593Smuzhiyun 
1307*4882a593Smuzhiyun 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1308*4882a593Smuzhiyun 	}
1309*4882a593Smuzhiyun }
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun /**
1312*4882a593Smuzhiyun  * stmmac_clear_descriptors - clear descriptors
1313*4882a593Smuzhiyun  * @priv: driver private structure
1314*4882a593Smuzhiyun  * Description: this function is called to clear the TX and RX descriptors
1315*4882a593Smuzhiyun  * whether basic or extended descriptors are used.
1316*4882a593Smuzhiyun  */
1317*4882a593Smuzhiyun static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1318*4882a593Smuzhiyun {
1319*4882a593Smuzhiyun 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1320*4882a593Smuzhiyun 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1321*4882a593Smuzhiyun 	u32 queue;
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun 	/* Clear the RX descriptors */
1324*4882a593Smuzhiyun 	for (queue = 0; queue < rx_queue_cnt; queue++)
1325*4882a593Smuzhiyun 		stmmac_clear_rx_descriptors(priv, queue);
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun 	/* Clear the TX descriptors */
1328*4882a593Smuzhiyun 	for (queue = 0; queue < tx_queue_cnt; queue++)
1329*4882a593Smuzhiyun 		stmmac_clear_tx_descriptors(priv, queue);
1330*4882a593Smuzhiyun }
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun /**
1333*4882a593Smuzhiyun  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1334*4882a593Smuzhiyun  * @priv: driver private structure
1335*4882a593Smuzhiyun  * @p: descriptor pointer
1336*4882a593Smuzhiyun  * @i: descriptor index
1337*4882a593Smuzhiyun  * @flags: gfp flag
1338*4882a593Smuzhiyun  * @queue: RX queue index
1339*4882a593Smuzhiyun  * Description: this function is called to allocate a receive buffer, perform
1340*4882a593Smuzhiyun  * the DMA mapping and init the descriptor.
1341*4882a593Smuzhiyun  */
1342*4882a593Smuzhiyun static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1343*4882a593Smuzhiyun 				  int i, gfp_t flags, u32 queue)
1344*4882a593Smuzhiyun {
1345*4882a593Smuzhiyun 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1346*4882a593Smuzhiyun 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1347*4882a593Smuzhiyun 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun 	if (priv->dma_cap.addr64 <= 32)
1350*4882a593Smuzhiyun 		gfp |= GFP_DMA32;
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun 	buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1353*4882a593Smuzhiyun 	if (!buf->page)
1354*4882a593Smuzhiyun 		return -ENOMEM;
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 	if (priv->sph) {
1357*4882a593Smuzhiyun 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1358*4882a593Smuzhiyun 		if (!buf->sec_page)
1359*4882a593Smuzhiyun 			return -ENOMEM;
1360*4882a593Smuzhiyun 
1361*4882a593Smuzhiyun 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1362*4882a593Smuzhiyun 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1363*4882a593Smuzhiyun 	} else {
1364*4882a593Smuzhiyun 		buf->sec_page = NULL;
1365*4882a593Smuzhiyun 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1366*4882a593Smuzhiyun 	}
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 	buf->addr = page_pool_get_dma_addr(buf->page);
1369*4882a593Smuzhiyun 	stmmac_set_desc_addr(priv, p, buf->addr);
1370*4882a593Smuzhiyun 	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1371*4882a593Smuzhiyun 		stmmac_init_desc3(priv, p);
1372*4882a593Smuzhiyun 
1373*4882a593Smuzhiyun 	return 0;
1374*4882a593Smuzhiyun }
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun /**
1377*4882a593Smuzhiyun  * stmmac_free_rx_buffer - free RX dma buffers
1378*4882a593Smuzhiyun  * @priv: private structure
1379*4882a593Smuzhiyun  * @queue: RX queue index
1380*4882a593Smuzhiyun  * @i: buffer index.
1381*4882a593Smuzhiyun  */
1382*4882a593Smuzhiyun static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1383*4882a593Smuzhiyun {
1384*4882a593Smuzhiyun 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1385*4882a593Smuzhiyun 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun 	if (buf->page)
1388*4882a593Smuzhiyun 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1389*4882a593Smuzhiyun 	buf->page = NULL;
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun 	if (buf->sec_page)
1392*4882a593Smuzhiyun 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1393*4882a593Smuzhiyun 	buf->sec_page = NULL;
1394*4882a593Smuzhiyun }
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun /**
1397*4882a593Smuzhiyun  * stmmac_free_tx_buffer - free TX dma buffers
1398*4882a593Smuzhiyun  * @priv: private structure
1399*4882a593Smuzhiyun  * @queue: TX queue index
1400*4882a593Smuzhiyun  * @i: buffer index.
1401*4882a593Smuzhiyun  */
1402*4882a593Smuzhiyun static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1403*4882a593Smuzhiyun {
1404*4882a593Smuzhiyun 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	if (tx_q->tx_skbuff_dma[i].buf) {
1407*4882a593Smuzhiyun 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1408*4882a593Smuzhiyun 			dma_unmap_page(priv->device,
1409*4882a593Smuzhiyun 				       tx_q->tx_skbuff_dma[i].buf,
1410*4882a593Smuzhiyun 				       tx_q->tx_skbuff_dma[i].len,
1411*4882a593Smuzhiyun 				       DMA_TO_DEVICE);
1412*4882a593Smuzhiyun 		else
1413*4882a593Smuzhiyun 			dma_unmap_single(priv->device,
1414*4882a593Smuzhiyun 					 tx_q->tx_skbuff_dma[i].buf,
1415*4882a593Smuzhiyun 					 tx_q->tx_skbuff_dma[i].len,
1416*4882a593Smuzhiyun 					 DMA_TO_DEVICE);
1417*4882a593Smuzhiyun 	}
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun 	if (tx_q->tx_skbuff[i]) {
1420*4882a593Smuzhiyun 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1421*4882a593Smuzhiyun 		tx_q->tx_skbuff[i] = NULL;
1422*4882a593Smuzhiyun 		tx_q->tx_skbuff_dma[i].buf = 0;
1423*4882a593Smuzhiyun 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1424*4882a593Smuzhiyun 	}
1425*4882a593Smuzhiyun }
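/*
 * Editor's illustrative sketch (not part of the driver, compiled out below):
 * map_as_page records how the transmit path mapped each buffer, so the
 * matching unmap call can be chosen above. Roughly, the linear part of an
 * skb is mapped with dma_map_single() while paged fragments are mapped with
 * skb_frag_dma_map(); 'entry', 'len' and 'frag' below are placeholders.
 */
#if 0
	tx_q->tx_skbuff_dma[entry].buf = dma_map_single(priv->device, skb->data,
							len, DMA_TO_DEVICE);
	tx_q->tx_skbuff_dma[entry].map_as_page = false;	/* linear data */

	tx_q->tx_skbuff_dma[entry].buf = skb_frag_dma_map(priv->device, frag, 0,
							  len, DMA_TO_DEVICE);
	tx_q->tx_skbuff_dma[entry].map_as_page = true;	/* page fragment */
#endif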
1426*4882a593Smuzhiyun 
1427*4882a593Smuzhiyun /**
1428*4882a593Smuzhiyun  * init_dma_rx_desc_rings - init the RX descriptor rings
1429*4882a593Smuzhiyun  * @dev: net device structure
1430*4882a593Smuzhiyun  * @flags: gfp flag.
1431*4882a593Smuzhiyun  * Description: this function initializes the DMA RX descriptors
1432*4882a593Smuzhiyun  * and allocates the socket buffers. It supports the chained and ring
1433*4882a593Smuzhiyun  * modes.
1434*4882a593Smuzhiyun  */
1435*4882a593Smuzhiyun static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1436*4882a593Smuzhiyun {
1437*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
1438*4882a593Smuzhiyun 	u32 rx_count = priv->plat->rx_queues_to_use;
1439*4882a593Smuzhiyun 	int ret = -ENOMEM;
1440*4882a593Smuzhiyun 	int queue;
1441*4882a593Smuzhiyun 	int i;
1442*4882a593Smuzhiyun 
1443*4882a593Smuzhiyun 	/* RX INITIALIZATION */
1444*4882a593Smuzhiyun 	netif_dbg(priv, probe, priv->dev,
1445*4882a593Smuzhiyun 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun 	for (queue = 0; queue < rx_count; queue++) {
1448*4882a593Smuzhiyun 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun 		netif_dbg(priv, probe, priv->dev,
1451*4882a593Smuzhiyun 			  "(%s) dma_rx_phy=0x%08x\n", __func__,
1452*4882a593Smuzhiyun 			  (u32)rx_q->dma_rx_phy);
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun 		stmmac_clear_rx_descriptors(priv, queue);
1455*4882a593Smuzhiyun 
1456*4882a593Smuzhiyun 		for (i = 0; i < priv->dma_rx_size; i++) {
1457*4882a593Smuzhiyun 			struct dma_desc *p;
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 			if (priv->extend_desc)
1460*4882a593Smuzhiyun 				p = &((rx_q->dma_erx + i)->basic);
1461*4882a593Smuzhiyun 			else
1462*4882a593Smuzhiyun 				p = rx_q->dma_rx + i;
1463*4882a593Smuzhiyun 
1464*4882a593Smuzhiyun 			ret = stmmac_init_rx_buffers(priv, p, i, flags,
1465*4882a593Smuzhiyun 						     queue);
1466*4882a593Smuzhiyun 			if (ret)
1467*4882a593Smuzhiyun 				goto err_init_rx_buffers;
1468*4882a593Smuzhiyun 		}
1469*4882a593Smuzhiyun 
1470*4882a593Smuzhiyun 		rx_q->cur_rx = 0;
1471*4882a593Smuzhiyun 		rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);
1472*4882a593Smuzhiyun 
1473*4882a593Smuzhiyun 		/* Setup the chained descriptor addresses */
1474*4882a593Smuzhiyun 		if (priv->mode == STMMAC_CHAIN_MODE) {
1475*4882a593Smuzhiyun 			if (priv->extend_desc)
1476*4882a593Smuzhiyun 				stmmac_mode_init(priv, rx_q->dma_erx,
1477*4882a593Smuzhiyun 						 rx_q->dma_rx_phy,
1478*4882a593Smuzhiyun 						 priv->dma_rx_size, 1);
1479*4882a593Smuzhiyun 			else
1480*4882a593Smuzhiyun 				stmmac_mode_init(priv, rx_q->dma_rx,
1481*4882a593Smuzhiyun 						 rx_q->dma_rx_phy,
1482*4882a593Smuzhiyun 						 priv->dma_rx_size, 0);
1483*4882a593Smuzhiyun 		}
1484*4882a593Smuzhiyun 	}
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun 	return 0;
1487*4882a593Smuzhiyun 
1488*4882a593Smuzhiyun err_init_rx_buffers:
1489*4882a593Smuzhiyun 	while (queue >= 0) {
1490*4882a593Smuzhiyun 		while (--i >= 0)
1491*4882a593Smuzhiyun 			stmmac_free_rx_buffer(priv, queue, i);
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 		if (queue == 0)
1494*4882a593Smuzhiyun 			break;
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun 		i = priv->dma_rx_size;
1497*4882a593Smuzhiyun 		queue--;
1498*4882a593Smuzhiyun 	}
1499*4882a593Smuzhiyun 
1500*4882a593Smuzhiyun 	return ret;
1501*4882a593Smuzhiyun }
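/*
 * Editor's illustrative sketch (not part of the driver, compiled out below):
 * the err_init_rx_buffers path above only releases what was actually set up:
 * the buffers already filled in the failing queue, then every buffer of the
 * queues completed before it. The compiled-out trace below walks the same
 * idiom for a hypothetical failure at queue 2, buffer 7, with 512-entry rings.
 */
#if 0
	int queue = 2, i = 7, ring_size = 512;	/* hypothetical failure point */

	while (queue >= 0) {
		while (--i >= 0)	/* queue 2: frees 6..0; queues 1 and 0: 511..0 */
			pr_info("free rx buffer q=%d i=%d\n", queue, i);
		if (queue == 0)
			break;
		i = ring_size;		/* earlier queues were completely filled */
		queue--;
	}
#endif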
1502*4882a593Smuzhiyun 
1503*4882a593Smuzhiyun /**
1504*4882a593Smuzhiyun  * init_dma_tx_desc_rings - init the TX descriptor rings
1505*4882a593Smuzhiyun  * @dev: net device structure.
1506*4882a593Smuzhiyun  * Description: this function initializes the DMA TX descriptors
1507*4882a593Smuzhiyun  * and allocates the socket buffers. It supports the chained and ring
1508*4882a593Smuzhiyun  * modes.
1509*4882a593Smuzhiyun  */
1510*4882a593Smuzhiyun static int init_dma_tx_desc_rings(struct net_device *dev)
1511*4882a593Smuzhiyun {
1512*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
1513*4882a593Smuzhiyun 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1514*4882a593Smuzhiyun 	u32 queue;
1515*4882a593Smuzhiyun 	int i;
1516*4882a593Smuzhiyun 
1517*4882a593Smuzhiyun 	for (queue = 0; queue < tx_queue_cnt; queue++) {
1518*4882a593Smuzhiyun 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun 		netif_dbg(priv, probe, priv->dev,
1521*4882a593Smuzhiyun 			  "(%s) dma_tx_phy=0x%08x\n", __func__,
1522*4882a593Smuzhiyun 			 (u32)tx_q->dma_tx_phy);
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun 		/* Setup the chained descriptor addresses */
1525*4882a593Smuzhiyun 		if (priv->mode == STMMAC_CHAIN_MODE) {
1526*4882a593Smuzhiyun 			if (priv->extend_desc)
1527*4882a593Smuzhiyun 				stmmac_mode_init(priv, tx_q->dma_etx,
1528*4882a593Smuzhiyun 						 tx_q->dma_tx_phy,
1529*4882a593Smuzhiyun 						 priv->dma_tx_size, 1);
1530*4882a593Smuzhiyun 			else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1531*4882a593Smuzhiyun 				stmmac_mode_init(priv, tx_q->dma_tx,
1532*4882a593Smuzhiyun 						 tx_q->dma_tx_phy,
1533*4882a593Smuzhiyun 						 priv->dma_tx_size, 0);
1534*4882a593Smuzhiyun 		}
1535*4882a593Smuzhiyun 
1536*4882a593Smuzhiyun 		for (i = 0; i < priv->dma_tx_size; i++) {
1537*4882a593Smuzhiyun 			struct dma_desc *p;
1538*4882a593Smuzhiyun 			if (priv->extend_desc)
1539*4882a593Smuzhiyun 				p = &((tx_q->dma_etx + i)->basic);
1540*4882a593Smuzhiyun 			else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1541*4882a593Smuzhiyun 				p = &((tx_q->dma_entx + i)->basic);
1542*4882a593Smuzhiyun 			else
1543*4882a593Smuzhiyun 				p = tx_q->dma_tx + i;
1544*4882a593Smuzhiyun 
1545*4882a593Smuzhiyun 			stmmac_clear_desc(priv, p);
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun 			tx_q->tx_skbuff_dma[i].buf = 0;
1548*4882a593Smuzhiyun 			tx_q->tx_skbuff_dma[i].map_as_page = false;
1549*4882a593Smuzhiyun 			tx_q->tx_skbuff_dma[i].len = 0;
1550*4882a593Smuzhiyun 			tx_q->tx_skbuff_dma[i].last_segment = false;
1551*4882a593Smuzhiyun 			tx_q->tx_skbuff[i] = NULL;
1552*4882a593Smuzhiyun 		}
1553*4882a593Smuzhiyun 
1554*4882a593Smuzhiyun 		tx_q->dirty_tx = 0;
1555*4882a593Smuzhiyun 		tx_q->cur_tx = 0;
1556*4882a593Smuzhiyun 		tx_q->mss = 0;
1557*4882a593Smuzhiyun 
1558*4882a593Smuzhiyun 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1559*4882a593Smuzhiyun 	}
1560*4882a593Smuzhiyun 
1561*4882a593Smuzhiyun 	return 0;
1562*4882a593Smuzhiyun }
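/*
 * Editor's illustrative sketch (not part of the driver, compiled out below):
 * netdev_tx_reset_queue() above clears the Byte Queue Limits (BQL) state of
 * each TX queue because the ring indices are being reset. For reference, the
 * three BQL calls are expected to pair up roughly as follows in this driver
 * (the xmit and completion paths appear later in this file; 'dev', 'queue',
 * 'skb', 'pkts_compl' and 'bytes_compl' are placeholders here).
 */
#if 0
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);	/* xmit path */
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, queue),
				  pkts_compl, bytes_compl);		/* stmmac_tx_clean() */
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, queue));	/* ring (re)initialisation */
#endif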
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun /**
1565*4882a593Smuzhiyun  * init_dma_desc_rings - init the RX/TX descriptor rings
1566*4882a593Smuzhiyun  * @dev: net device structure
1567*4882a593Smuzhiyun  * @flags: gfp flag.
1568*4882a593Smuzhiyun  * Description: this function initializes the DMA RX/TX descriptors
1569*4882a593Smuzhiyun  * and allocates the socket buffers. It supports the chained and ring
1570*4882a593Smuzhiyun  * modes.
1571*4882a593Smuzhiyun  */
1572*4882a593Smuzhiyun static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1573*4882a593Smuzhiyun {
1574*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
1575*4882a593Smuzhiyun 	int ret;
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun 	ret = init_dma_rx_desc_rings(dev, flags);
1578*4882a593Smuzhiyun 	if (ret)
1579*4882a593Smuzhiyun 		return ret;
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 	ret = init_dma_tx_desc_rings(dev);
1582*4882a593Smuzhiyun 
1583*4882a593Smuzhiyun 	stmmac_clear_descriptors(priv);
1584*4882a593Smuzhiyun 
1585*4882a593Smuzhiyun 	if (netif_msg_hw(priv))
1586*4882a593Smuzhiyun 		stmmac_display_rings(priv);
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun 	return ret;
1589*4882a593Smuzhiyun }
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun /**
1592*4882a593Smuzhiyun  * dma_free_rx_skbufs - free RX dma buffers
1593*4882a593Smuzhiyun  * @priv: private structure
1594*4882a593Smuzhiyun  * @queue: RX queue index
1595*4882a593Smuzhiyun  */
1596*4882a593Smuzhiyun static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1597*4882a593Smuzhiyun {
1598*4882a593Smuzhiyun 	int i;
1599*4882a593Smuzhiyun 
1600*4882a593Smuzhiyun 	for (i = 0; i < priv->dma_rx_size; i++)
1601*4882a593Smuzhiyun 		stmmac_free_rx_buffer(priv, queue, i);
1602*4882a593Smuzhiyun }
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun /**
1605*4882a593Smuzhiyun  * dma_free_tx_skbufs - free TX dma buffers
1606*4882a593Smuzhiyun  * @priv: private structure
1607*4882a593Smuzhiyun  * @queue: TX queue index
1608*4882a593Smuzhiyun  */
1609*4882a593Smuzhiyun static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1610*4882a593Smuzhiyun {
1611*4882a593Smuzhiyun 	int i;
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun 	for (i = 0; i < priv->dma_tx_size; i++)
1614*4882a593Smuzhiyun 		stmmac_free_tx_buffer(priv, queue, i);
1615*4882a593Smuzhiyun }
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun /**
1618*4882a593Smuzhiyun  * stmmac_free_tx_skbufs - free TX skb buffers
1619*4882a593Smuzhiyun  * @priv: private structure
1620*4882a593Smuzhiyun  */
1621*4882a593Smuzhiyun static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1622*4882a593Smuzhiyun {
1623*4882a593Smuzhiyun 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1624*4882a593Smuzhiyun 	u32 queue;
1625*4882a593Smuzhiyun 
1626*4882a593Smuzhiyun 	for (queue = 0; queue < tx_queue_cnt; queue++)
1627*4882a593Smuzhiyun 		dma_free_tx_skbufs(priv, queue);
1628*4882a593Smuzhiyun }
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun /**
1631*4882a593Smuzhiyun  * free_dma_rx_desc_resources - free RX dma desc resources
1632*4882a593Smuzhiyun  * @priv: private structure
1633*4882a593Smuzhiyun  */
1634*4882a593Smuzhiyun static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1635*4882a593Smuzhiyun {
1636*4882a593Smuzhiyun 	u32 rx_count = priv->plat->rx_queues_to_use;
1637*4882a593Smuzhiyun 	u32 queue;
1638*4882a593Smuzhiyun 
1639*4882a593Smuzhiyun 	/* Free RX queue resources */
1640*4882a593Smuzhiyun 	for (queue = 0; queue < rx_count; queue++) {
1641*4882a593Smuzhiyun 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 		/* Release the DMA RX socket buffers */
1644*4882a593Smuzhiyun 		dma_free_rx_skbufs(priv, queue);
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun 		/* Free DMA regions of consistent memory previously allocated */
1647*4882a593Smuzhiyun 		if (!priv->extend_desc)
1648*4882a593Smuzhiyun 			dma_free_coherent(priv->device, priv->dma_rx_size *
1649*4882a593Smuzhiyun 					  sizeof(struct dma_desc),
1650*4882a593Smuzhiyun 					  rx_q->dma_rx, rx_q->dma_rx_phy);
1651*4882a593Smuzhiyun 		else
1652*4882a593Smuzhiyun 			dma_free_coherent(priv->device, priv->dma_rx_size *
1653*4882a593Smuzhiyun 					  sizeof(struct dma_extended_desc),
1654*4882a593Smuzhiyun 					  rx_q->dma_erx, rx_q->dma_rx_phy);
1655*4882a593Smuzhiyun 
1656*4882a593Smuzhiyun 		kfree(rx_q->buf_pool);
1657*4882a593Smuzhiyun 		if (rx_q->page_pool)
1658*4882a593Smuzhiyun 			page_pool_destroy(rx_q->page_pool);
1659*4882a593Smuzhiyun 	}
1660*4882a593Smuzhiyun }
1661*4882a593Smuzhiyun 
1662*4882a593Smuzhiyun /**
1663*4882a593Smuzhiyun  * free_dma_tx_desc_resources - free TX dma desc resources
1664*4882a593Smuzhiyun  * @priv: private structure
1665*4882a593Smuzhiyun  */
1666*4882a593Smuzhiyun static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1667*4882a593Smuzhiyun {
1668*4882a593Smuzhiyun 	u32 tx_count = priv->plat->tx_queues_to_use;
1669*4882a593Smuzhiyun 	u32 queue;
1670*4882a593Smuzhiyun 
1671*4882a593Smuzhiyun 	/* Free TX queue resources */
1672*4882a593Smuzhiyun 	for (queue = 0; queue < tx_count; queue++) {
1673*4882a593Smuzhiyun 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1674*4882a593Smuzhiyun 		size_t size;
1675*4882a593Smuzhiyun 		void *addr;
1676*4882a593Smuzhiyun 
1677*4882a593Smuzhiyun 		/* Release the DMA TX socket buffers */
1678*4882a593Smuzhiyun 		dma_free_tx_skbufs(priv, queue);
1679*4882a593Smuzhiyun 
1680*4882a593Smuzhiyun 		if (priv->extend_desc) {
1681*4882a593Smuzhiyun 			size = sizeof(struct dma_extended_desc);
1682*4882a593Smuzhiyun 			addr = tx_q->dma_etx;
1683*4882a593Smuzhiyun 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1684*4882a593Smuzhiyun 			size = sizeof(struct dma_edesc);
1685*4882a593Smuzhiyun 			addr = tx_q->dma_entx;
1686*4882a593Smuzhiyun 		} else {
1687*4882a593Smuzhiyun 			size = sizeof(struct dma_desc);
1688*4882a593Smuzhiyun 			addr = tx_q->dma_tx;
1689*4882a593Smuzhiyun 		}
1690*4882a593Smuzhiyun 
1691*4882a593Smuzhiyun 		size *= priv->dma_tx_size;
1692*4882a593Smuzhiyun 
1693*4882a593Smuzhiyun 		dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1694*4882a593Smuzhiyun 
1695*4882a593Smuzhiyun 		kfree(tx_q->tx_skbuff_dma);
1696*4882a593Smuzhiyun 		kfree(tx_q->tx_skbuff);
1697*4882a593Smuzhiyun 	}
1698*4882a593Smuzhiyun }
1699*4882a593Smuzhiyun 
1700*4882a593Smuzhiyun /**
1701*4882a593Smuzhiyun  * alloc_dma_rx_desc_resources - alloc RX resources.
1702*4882a593Smuzhiyun  * @priv: private structure
1703*4882a593Smuzhiyun  * Description: according to which descriptor can be used (extended or basic)
1704*4882a593Smuzhiyun  * this function allocates the resources for the RX path: the descriptor
1705*4882a593Smuzhiyun  * rings and the per-queue page pool that supplies the receive buffers,
1706*4882a593Smuzhiyun  * allowing a zero-copy receive path.
1707*4882a593Smuzhiyun  */
1708*4882a593Smuzhiyun static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1709*4882a593Smuzhiyun {
1710*4882a593Smuzhiyun 	u32 rx_count = priv->plat->rx_queues_to_use;
1711*4882a593Smuzhiyun 	int ret = -ENOMEM;
1712*4882a593Smuzhiyun 	u32 queue;
1713*4882a593Smuzhiyun 
1714*4882a593Smuzhiyun 	/* RX queues buffers and DMA */
1715*4882a593Smuzhiyun 	for (queue = 0; queue < rx_count; queue++) {
1716*4882a593Smuzhiyun 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1717*4882a593Smuzhiyun 		struct page_pool_params pp_params = { 0 };
1718*4882a593Smuzhiyun 		unsigned int num_pages;
1719*4882a593Smuzhiyun 
1720*4882a593Smuzhiyun 		rx_q->queue_index = queue;
1721*4882a593Smuzhiyun 		rx_q->priv_data = priv;
1722*4882a593Smuzhiyun 
1723*4882a593Smuzhiyun 		pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
1724*4882a593Smuzhiyun 		pp_params.pool_size = priv->dma_rx_size;
1725*4882a593Smuzhiyun 		num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
1726*4882a593Smuzhiyun 		pp_params.order = ilog2(num_pages);
1727*4882a593Smuzhiyun 		pp_params.nid = dev_to_node(priv->device);
1728*4882a593Smuzhiyun 		pp_params.dev = priv->device;
1729*4882a593Smuzhiyun 		pp_params.dma_dir = DMA_FROM_DEVICE;
1730*4882a593Smuzhiyun 		pp_params.max_len = num_pages * PAGE_SIZE;
1731*4882a593Smuzhiyun 
1732*4882a593Smuzhiyun 		rx_q->page_pool = page_pool_create(&pp_params);
1733*4882a593Smuzhiyun 		if (IS_ERR(rx_q->page_pool)) {
1734*4882a593Smuzhiyun 			ret = PTR_ERR(rx_q->page_pool);
1735*4882a593Smuzhiyun 			rx_q->page_pool = NULL;
1736*4882a593Smuzhiyun 			goto err_dma;
1737*4882a593Smuzhiyun 		}
1738*4882a593Smuzhiyun 
1739*4882a593Smuzhiyun 		rx_q->buf_pool = kcalloc(priv->dma_rx_size,
1740*4882a593Smuzhiyun 					 sizeof(*rx_q->buf_pool),
1741*4882a593Smuzhiyun 					 GFP_KERNEL);
1742*4882a593Smuzhiyun 		if (!rx_q->buf_pool)
1743*4882a593Smuzhiyun 			goto err_dma;
1744*4882a593Smuzhiyun 
1745*4882a593Smuzhiyun 		if (priv->extend_desc) {
1746*4882a593Smuzhiyun 			rx_q->dma_erx = dma_alloc_coherent(priv->device,
1747*4882a593Smuzhiyun 							   priv->dma_rx_size *
1748*4882a593Smuzhiyun 							   sizeof(struct dma_extended_desc),
1749*4882a593Smuzhiyun 							   &rx_q->dma_rx_phy,
1750*4882a593Smuzhiyun 							   GFP_KERNEL);
1751*4882a593Smuzhiyun 			if (!rx_q->dma_erx)
1752*4882a593Smuzhiyun 				goto err_dma;
1753*4882a593Smuzhiyun 
1754*4882a593Smuzhiyun 		} else {
1755*4882a593Smuzhiyun 			rx_q->dma_rx = dma_alloc_coherent(priv->device,
1756*4882a593Smuzhiyun 							  priv->dma_rx_size *
1757*4882a593Smuzhiyun 							  sizeof(struct dma_desc),
1758*4882a593Smuzhiyun 							  &rx_q->dma_rx_phy,
1759*4882a593Smuzhiyun 							  GFP_KERNEL);
1760*4882a593Smuzhiyun 			if (!rx_q->dma_rx)
1761*4882a593Smuzhiyun 				goto err_dma;
1762*4882a593Smuzhiyun 		}
1763*4882a593Smuzhiyun 	}
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun 	return 0;
1766*4882a593Smuzhiyun 
1767*4882a593Smuzhiyun err_dma:
1768*4882a593Smuzhiyun 	free_dma_rx_desc_resources(priv);
1769*4882a593Smuzhiyun 
1770*4882a593Smuzhiyun 	return ret;
1771*4882a593Smuzhiyun }
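/*
 * Editor's illustrative sketch (not part of the driver, compiled out below):
 * each RX queue owns a page_pool created with PP_FLAG_DMA_MAP, so the pool
 * hands out pages that are already DMA-mapped and the refill path only needs
 * page_pool_alloc_pages()/page_pool_get_dma_addr(). A minimal stand-alone use
 * of the same API; the sizes are arbitrary example values.
 */
#if 0
	struct page_pool_params pp = {
		.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = 512,			/* one entry per RX descriptor */
		.nid       = dev_to_node(priv->device),
		.dev       = priv->device,		/* device performing the DMA */
		.dma_dir   = DMA_FROM_DEVICE,
		.max_len   = PAGE_SIZE,			/* bytes the HW may write per page */
	};
	struct page_pool *pool = page_pool_create(&pp);
	struct page *page;
	dma_addr_t dma;

	if (IS_ERR(pool))
		return PTR_ERR(pool);

	page = page_pool_alloc_pages(pool, GFP_KERNEL);
	dma = page_pool_get_dma_addr(page);	/* mapping already done by the pool */
	/* ... program 'dma' into an RX descriptor; on teardown: */
	page_pool_put_full_page(pool, page, false);
	page_pool_destroy(pool);
#endif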
1772*4882a593Smuzhiyun 
1773*4882a593Smuzhiyun /**
1774*4882a593Smuzhiyun  * alloc_dma_tx_desc_resources - alloc TX resources.
1775*4882a593Smuzhiyun  * @priv: private structure
1776*4882a593Smuzhiyun  * Description: according to which descriptor can be used (extended or basic)
1777*4882a593Smuzhiyun  * this function allocates the resources for the TX path: the descriptor
1778*4882a593Smuzhiyun  * rings and the tx_skbuff/tx_skbuff_dma bookkeeping arrays used when
1779*4882a593Smuzhiyun  * reclaiming completed transmissions.
1780*4882a593Smuzhiyun  */
1781*4882a593Smuzhiyun static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1782*4882a593Smuzhiyun {
1783*4882a593Smuzhiyun 	u32 tx_count = priv->plat->tx_queues_to_use;
1784*4882a593Smuzhiyun 	int ret = -ENOMEM;
1785*4882a593Smuzhiyun 	u32 queue;
1786*4882a593Smuzhiyun 
1787*4882a593Smuzhiyun 	/* TX queues buffers and DMA */
1788*4882a593Smuzhiyun 	for (queue = 0; queue < tx_count; queue++) {
1789*4882a593Smuzhiyun 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1790*4882a593Smuzhiyun 		size_t size;
1791*4882a593Smuzhiyun 		void *addr;
1792*4882a593Smuzhiyun 
1793*4882a593Smuzhiyun 		tx_q->queue_index = queue;
1794*4882a593Smuzhiyun 		tx_q->priv_data = priv;
1795*4882a593Smuzhiyun 
1796*4882a593Smuzhiyun 		tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
1797*4882a593Smuzhiyun 					      sizeof(*tx_q->tx_skbuff_dma),
1798*4882a593Smuzhiyun 					      GFP_KERNEL);
1799*4882a593Smuzhiyun 		if (!tx_q->tx_skbuff_dma)
1800*4882a593Smuzhiyun 			goto err_dma;
1801*4882a593Smuzhiyun 
1802*4882a593Smuzhiyun 		tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
1803*4882a593Smuzhiyun 					  sizeof(struct sk_buff *),
1804*4882a593Smuzhiyun 					  GFP_KERNEL);
1805*4882a593Smuzhiyun 		if (!tx_q->tx_skbuff)
1806*4882a593Smuzhiyun 			goto err_dma;
1807*4882a593Smuzhiyun 
1808*4882a593Smuzhiyun 		if (priv->extend_desc)
1809*4882a593Smuzhiyun 			size = sizeof(struct dma_extended_desc);
1810*4882a593Smuzhiyun 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1811*4882a593Smuzhiyun 			size = sizeof(struct dma_edesc);
1812*4882a593Smuzhiyun 		else
1813*4882a593Smuzhiyun 			size = sizeof(struct dma_desc);
1814*4882a593Smuzhiyun 
1815*4882a593Smuzhiyun 		size *= priv->dma_tx_size;
1816*4882a593Smuzhiyun 
1817*4882a593Smuzhiyun 		addr = dma_alloc_coherent(priv->device, size,
1818*4882a593Smuzhiyun 					  &tx_q->dma_tx_phy, GFP_KERNEL);
1819*4882a593Smuzhiyun 		if (!addr)
1820*4882a593Smuzhiyun 			goto err_dma;
1821*4882a593Smuzhiyun 
1822*4882a593Smuzhiyun 		if (priv->extend_desc)
1823*4882a593Smuzhiyun 			tx_q->dma_etx = addr;
1824*4882a593Smuzhiyun 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1825*4882a593Smuzhiyun 			tx_q->dma_entx = addr;
1826*4882a593Smuzhiyun 		else
1827*4882a593Smuzhiyun 			tx_q->dma_tx = addr;
1828*4882a593Smuzhiyun 	}
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun 	return 0;
1831*4882a593Smuzhiyun 
1832*4882a593Smuzhiyun err_dma:
1833*4882a593Smuzhiyun 	free_dma_tx_desc_resources(priv);
1834*4882a593Smuzhiyun 	return ret;
1835*4882a593Smuzhiyun }
1836*4882a593Smuzhiyun 
1837*4882a593Smuzhiyun /**
1838*4882a593Smuzhiyun  * alloc_dma_desc_resources - alloc TX/RX resources.
1839*4882a593Smuzhiyun  * @priv: private structure
1840*4882a593Smuzhiyun  * Description: according to which descriptor can be used (extended or basic)
1841*4882a593Smuzhiyun  * this function allocates the resources for the TX and RX paths. In case of
1842*4882a593Smuzhiyun  * reception, for example, it pre-allocates the RX buffers in order to
1843*4882a593Smuzhiyun  * allow the zero-copy mechanism.
1844*4882a593Smuzhiyun  */
1845*4882a593Smuzhiyun static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1846*4882a593Smuzhiyun {
1847*4882a593Smuzhiyun 	/* RX Allocation */
1848*4882a593Smuzhiyun 	int ret = alloc_dma_rx_desc_resources(priv);
1849*4882a593Smuzhiyun 
1850*4882a593Smuzhiyun 	if (ret)
1851*4882a593Smuzhiyun 		return ret;
1852*4882a593Smuzhiyun 
1853*4882a593Smuzhiyun 	ret = alloc_dma_tx_desc_resources(priv);
1854*4882a593Smuzhiyun 
1855*4882a593Smuzhiyun 	return ret;
1856*4882a593Smuzhiyun }
1857*4882a593Smuzhiyun 
1858*4882a593Smuzhiyun /**
1859*4882a593Smuzhiyun  * free_dma_desc_resources - free dma desc resources
1860*4882a593Smuzhiyun  * @priv: private structure
1861*4882a593Smuzhiyun  */
1862*4882a593Smuzhiyun static void free_dma_desc_resources(struct stmmac_priv *priv)
1863*4882a593Smuzhiyun {
1864*4882a593Smuzhiyun 	/* Release the DMA RX socket buffers */
1865*4882a593Smuzhiyun 	free_dma_rx_desc_resources(priv);
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun 	/* Release the DMA TX socket buffers */
1868*4882a593Smuzhiyun 	free_dma_tx_desc_resources(priv);
1869*4882a593Smuzhiyun }
1870*4882a593Smuzhiyun 
1871*4882a593Smuzhiyun /**
1872*4882a593Smuzhiyun  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1873*4882a593Smuzhiyun  *  @priv: driver private structure
1874*4882a593Smuzhiyun  *  Description: It is used for enabling the rx queues in the MAC
1875*4882a593Smuzhiyun  */
1876*4882a593Smuzhiyun static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1877*4882a593Smuzhiyun {
1878*4882a593Smuzhiyun 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
1879*4882a593Smuzhiyun 	int queue;
1880*4882a593Smuzhiyun 	u8 mode;
1881*4882a593Smuzhiyun 
1882*4882a593Smuzhiyun 	for (queue = 0; queue < rx_queues_count; queue++) {
1883*4882a593Smuzhiyun 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1884*4882a593Smuzhiyun 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1885*4882a593Smuzhiyun 	}
1886*4882a593Smuzhiyun }
1887*4882a593Smuzhiyun 
1888*4882a593Smuzhiyun /**
1889*4882a593Smuzhiyun  * stmmac_start_rx_dma - start RX DMA channel
1890*4882a593Smuzhiyun  * @priv: driver private structure
1891*4882a593Smuzhiyun  * @chan: RX channel index
1892*4882a593Smuzhiyun  * Description:
1893*4882a593Smuzhiyun  * This starts an RX DMA channel
1894*4882a593Smuzhiyun  */
1895*4882a593Smuzhiyun static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1896*4882a593Smuzhiyun {
1897*4882a593Smuzhiyun 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1898*4882a593Smuzhiyun 	stmmac_start_rx(priv, priv->ioaddr, chan);
1899*4882a593Smuzhiyun }
1900*4882a593Smuzhiyun 
1901*4882a593Smuzhiyun /**
1902*4882a593Smuzhiyun  * stmmac_start_tx_dma - start TX DMA channel
1903*4882a593Smuzhiyun  * @priv: driver private structure
1904*4882a593Smuzhiyun  * @chan: TX channel index
1905*4882a593Smuzhiyun  * Description:
1906*4882a593Smuzhiyun  * This starts a TX DMA channel
1907*4882a593Smuzhiyun  */
1908*4882a593Smuzhiyun static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1909*4882a593Smuzhiyun {
1910*4882a593Smuzhiyun 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1911*4882a593Smuzhiyun 	stmmac_start_tx(priv, priv->ioaddr, chan);
1912*4882a593Smuzhiyun }
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun /**
1915*4882a593Smuzhiyun  * stmmac_stop_rx_dma - stop RX DMA channel
1916*4882a593Smuzhiyun  * @priv: driver private structure
1917*4882a593Smuzhiyun  * @chan: RX channel index
1918*4882a593Smuzhiyun  * Description:
1919*4882a593Smuzhiyun  * This stops an RX DMA channel
1920*4882a593Smuzhiyun  */
1921*4882a593Smuzhiyun static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1922*4882a593Smuzhiyun {
1923*4882a593Smuzhiyun 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1924*4882a593Smuzhiyun 	stmmac_stop_rx(priv, priv->ioaddr, chan);
1925*4882a593Smuzhiyun }
1926*4882a593Smuzhiyun 
1927*4882a593Smuzhiyun /**
1928*4882a593Smuzhiyun  * stmmac_stop_tx_dma - stop TX DMA channel
1929*4882a593Smuzhiyun  * @priv: driver private structure
1930*4882a593Smuzhiyun  * @chan: TX channel index
1931*4882a593Smuzhiyun  * Description:
1932*4882a593Smuzhiyun  * This stops a TX DMA channel
1933*4882a593Smuzhiyun  */
1934*4882a593Smuzhiyun static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1935*4882a593Smuzhiyun {
1936*4882a593Smuzhiyun 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1937*4882a593Smuzhiyun 	stmmac_stop_tx(priv, priv->ioaddr, chan);
1938*4882a593Smuzhiyun }
1939*4882a593Smuzhiyun 
1940*4882a593Smuzhiyun /**
1941*4882a593Smuzhiyun  * stmmac_start_all_dma - start all RX and TX DMA channels
1942*4882a593Smuzhiyun  * @priv: driver private structure
1943*4882a593Smuzhiyun  * Description:
1944*4882a593Smuzhiyun  * This starts all the RX and TX DMA channels
1945*4882a593Smuzhiyun  */
1946*4882a593Smuzhiyun static void stmmac_start_all_dma(struct stmmac_priv *priv)
1947*4882a593Smuzhiyun {
1948*4882a593Smuzhiyun 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1949*4882a593Smuzhiyun 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1950*4882a593Smuzhiyun 	u32 chan = 0;
1951*4882a593Smuzhiyun 
1952*4882a593Smuzhiyun 	for (chan = 0; chan < rx_channels_count; chan++)
1953*4882a593Smuzhiyun 		stmmac_start_rx_dma(priv, chan);
1954*4882a593Smuzhiyun 
1955*4882a593Smuzhiyun 	for (chan = 0; chan < tx_channels_count; chan++)
1956*4882a593Smuzhiyun 		stmmac_start_tx_dma(priv, chan);
1957*4882a593Smuzhiyun }
1958*4882a593Smuzhiyun 
1959*4882a593Smuzhiyun /**
1960*4882a593Smuzhiyun  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1961*4882a593Smuzhiyun  * @priv: driver private structure
1962*4882a593Smuzhiyun  * Description:
1963*4882a593Smuzhiyun  * This stops the RX and TX DMA channels
1964*4882a593Smuzhiyun  */
1965*4882a593Smuzhiyun static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1966*4882a593Smuzhiyun {
1967*4882a593Smuzhiyun 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1968*4882a593Smuzhiyun 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1969*4882a593Smuzhiyun 	u32 chan = 0;
1970*4882a593Smuzhiyun 
1971*4882a593Smuzhiyun 	for (chan = 0; chan < rx_channels_count; chan++)
1972*4882a593Smuzhiyun 		stmmac_stop_rx_dma(priv, chan);
1973*4882a593Smuzhiyun 
1974*4882a593Smuzhiyun 	for (chan = 0; chan < tx_channels_count; chan++)
1975*4882a593Smuzhiyun 		stmmac_stop_tx_dma(priv, chan);
1976*4882a593Smuzhiyun }
1977*4882a593Smuzhiyun 
1978*4882a593Smuzhiyun /**
1979*4882a593Smuzhiyun  *  stmmac_dma_operation_mode - HW DMA operation mode
1980*4882a593Smuzhiyun  *  @priv: driver private structure
1981*4882a593Smuzhiyun  *  Description: it is used for configuring the DMA operation mode register in
1982*4882a593Smuzhiyun  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1983*4882a593Smuzhiyun  */
1984*4882a593Smuzhiyun static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1985*4882a593Smuzhiyun {
1986*4882a593Smuzhiyun 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
1987*4882a593Smuzhiyun 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
1988*4882a593Smuzhiyun 	int rxfifosz = priv->plat->rx_fifo_size;
1989*4882a593Smuzhiyun 	int txfifosz = priv->plat->tx_fifo_size;
1990*4882a593Smuzhiyun 	u32 txmode = 0;
1991*4882a593Smuzhiyun 	u32 rxmode = 0;
1992*4882a593Smuzhiyun 	u32 chan = 0;
1993*4882a593Smuzhiyun 	u8 qmode = 0;
1994*4882a593Smuzhiyun 
1995*4882a593Smuzhiyun 	if (rxfifosz == 0)
1996*4882a593Smuzhiyun 		rxfifosz = priv->dma_cap.rx_fifo_size;
1997*4882a593Smuzhiyun 	if (txfifosz == 0)
1998*4882a593Smuzhiyun 		txfifosz = priv->dma_cap.tx_fifo_size;
1999*4882a593Smuzhiyun 
2000*4882a593Smuzhiyun 	/* Adjust for real per queue fifo size */
2001*4882a593Smuzhiyun 	rxfifosz /= rx_channels_count;
2002*4882a593Smuzhiyun 	txfifosz /= tx_channels_count;
2003*4882a593Smuzhiyun 
2004*4882a593Smuzhiyun 	if (priv->plat->force_thresh_dma_mode) {
2005*4882a593Smuzhiyun 		txmode = tc;
2006*4882a593Smuzhiyun 		rxmode = tc;
2007*4882a593Smuzhiyun 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2008*4882a593Smuzhiyun 		/*
2009*4882a593Smuzhiyun 		 * In case of GMAC, SF mode can be enabled
2010*4882a593Smuzhiyun 		 * to perform the TX COE in HW. This depends on:
2011*4882a593Smuzhiyun 		 * 1) TX COE if actually supported
2012*4882a593Smuzhiyun 		 * 1) TX COE actually being supported;
2013*4882a593Smuzhiyun 		 * 2) there being no buggy Jumbo frame support
2014*4882a593Smuzhiyun 		 *    that requires not inserting the csum in the TDES.
2015*4882a593Smuzhiyun 		txmode = SF_DMA_MODE;
2016*4882a593Smuzhiyun 		rxmode = SF_DMA_MODE;
2017*4882a593Smuzhiyun 		priv->xstats.threshold = SF_DMA_MODE;
2018*4882a593Smuzhiyun 	} else {
2019*4882a593Smuzhiyun 		txmode = tc;
2020*4882a593Smuzhiyun 		rxmode = SF_DMA_MODE;
2021*4882a593Smuzhiyun 	}
2022*4882a593Smuzhiyun 
2023*4882a593Smuzhiyun 	/* configure all channels */
2024*4882a593Smuzhiyun 	for (chan = 0; chan < rx_channels_count; chan++) {
2025*4882a593Smuzhiyun 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2026*4882a593Smuzhiyun 
2027*4882a593Smuzhiyun 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2028*4882a593Smuzhiyun 				rxfifosz, qmode);
2029*4882a593Smuzhiyun 		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
2030*4882a593Smuzhiyun 				chan);
2031*4882a593Smuzhiyun 	}
2032*4882a593Smuzhiyun 
2033*4882a593Smuzhiyun 	for (chan = 0; chan < tx_channels_count; chan++) {
2034*4882a593Smuzhiyun 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2035*4882a593Smuzhiyun 
2036*4882a593Smuzhiyun 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2037*4882a593Smuzhiyun 				txfifosz, qmode);
2038*4882a593Smuzhiyun 	}
2039*4882a593Smuzhiyun }
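/*
 * Editor's illustrative sketch (not part of the driver, compiled out below):
 * the FIFO is split evenly across the queues in use before the per-channel
 * mode is programmed. Worked example with assumed sizes: a 4096-byte RX FIFO
 * shared by two RX queues gives 2048 bytes per queue, and unless thresholds
 * are forced both directions run in store-and-forward (SF_DMA_MODE).
 */
#if 0
	int rxfifosz = 4096 / 2;	/* 2048 bytes for each of the two RX queues */
	u32 chan = 0;

	stmmac_dma_rx_mode(priv, priv->ioaddr, SF_DMA_MODE, chan, rxfifosz,
			   priv->plat->rx_queues_cfg[chan].mode_to_use);
#endif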
2040*4882a593Smuzhiyun 
2041*4882a593Smuzhiyun /**
2042*4882a593Smuzhiyun  * stmmac_tx_clean - to manage the transmission completion
2043*4882a593Smuzhiyun  * @priv: driver private structure
2044*4882a593Smuzhiyun  * @budget: napi budget limiting this function's packet handling
2045*4882a593Smuzhiyun  * @queue: TX queue index
2046*4882a593Smuzhiyun  * Description: it reclaims the transmit resources after transmission completes.
2047*4882a593Smuzhiyun  */
2048*4882a593Smuzhiyun static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2049*4882a593Smuzhiyun {
2050*4882a593Smuzhiyun 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2051*4882a593Smuzhiyun 	unsigned int bytes_compl = 0, pkts_compl = 0;
2052*4882a593Smuzhiyun 	unsigned int entry, count = 0;
2053*4882a593Smuzhiyun 
2054*4882a593Smuzhiyun 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2055*4882a593Smuzhiyun 
2056*4882a593Smuzhiyun 	priv->xstats.tx_clean++;
2057*4882a593Smuzhiyun 
2058*4882a593Smuzhiyun 	entry = tx_q->dirty_tx;
2059*4882a593Smuzhiyun 	while ((entry != tx_q->cur_tx) && (count < budget)) {
2060*4882a593Smuzhiyun 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
2061*4882a593Smuzhiyun 		struct dma_desc *p;
2062*4882a593Smuzhiyun 		int status;
2063*4882a593Smuzhiyun 
2064*4882a593Smuzhiyun 		if (priv->extend_desc)
2065*4882a593Smuzhiyun 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2066*4882a593Smuzhiyun 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2067*4882a593Smuzhiyun 			p = &tx_q->dma_entx[entry].basic;
2068*4882a593Smuzhiyun 		else
2069*4882a593Smuzhiyun 			p = tx_q->dma_tx + entry;
2070*4882a593Smuzhiyun 
2071*4882a593Smuzhiyun 		status = stmmac_tx_status(priv, &priv->dev->stats,
2072*4882a593Smuzhiyun 				&priv->xstats, p, priv->ioaddr);
2073*4882a593Smuzhiyun 		/* Check if the descriptor is owned by the DMA */
2074*4882a593Smuzhiyun 		if (unlikely(status & tx_dma_own))
2075*4882a593Smuzhiyun 			break;
2076*4882a593Smuzhiyun 
2077*4882a593Smuzhiyun 		count++;
2078*4882a593Smuzhiyun 
2079*4882a593Smuzhiyun 		/* Make sure descriptor fields are read after reading
2080*4882a593Smuzhiyun 		 * the own bit.
2081*4882a593Smuzhiyun 		 */
2082*4882a593Smuzhiyun 		dma_rmb();
2083*4882a593Smuzhiyun 
2084*4882a593Smuzhiyun 		/* Just consider the last segment and ...*/
2085*4882a593Smuzhiyun 		if (likely(!(status & tx_not_ls))) {
2086*4882a593Smuzhiyun 			/* ... verify the status error condition */
2087*4882a593Smuzhiyun 			if (unlikely(status & tx_err)) {
2088*4882a593Smuzhiyun 				priv->dev->stats.tx_errors++;
2089*4882a593Smuzhiyun 			} else {
2090*4882a593Smuzhiyun 				priv->dev->stats.tx_packets++;
2091*4882a593Smuzhiyun 				priv->xstats.tx_pkt_n++;
2092*4882a593Smuzhiyun 			}
2093*4882a593Smuzhiyun 			stmmac_get_tx_hwtstamp(priv, p, skb);
2094*4882a593Smuzhiyun 		}
2095*4882a593Smuzhiyun 
2096*4882a593Smuzhiyun 		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
2097*4882a593Smuzhiyun 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2098*4882a593Smuzhiyun 				dma_unmap_page(priv->device,
2099*4882a593Smuzhiyun 					       tx_q->tx_skbuff_dma[entry].buf,
2100*4882a593Smuzhiyun 					       tx_q->tx_skbuff_dma[entry].len,
2101*4882a593Smuzhiyun 					       DMA_TO_DEVICE);
2102*4882a593Smuzhiyun 			else
2103*4882a593Smuzhiyun 				dma_unmap_single(priv->device,
2104*4882a593Smuzhiyun 						 tx_q->tx_skbuff_dma[entry].buf,
2105*4882a593Smuzhiyun 						 tx_q->tx_skbuff_dma[entry].len,
2106*4882a593Smuzhiyun 						 DMA_TO_DEVICE);
2107*4882a593Smuzhiyun 			tx_q->tx_skbuff_dma[entry].buf = 0;
2108*4882a593Smuzhiyun 			tx_q->tx_skbuff_dma[entry].len = 0;
2109*4882a593Smuzhiyun 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2110*4882a593Smuzhiyun 		}
2111*4882a593Smuzhiyun 
2112*4882a593Smuzhiyun 		stmmac_clean_desc3(priv, tx_q, p);
2113*4882a593Smuzhiyun 
2114*4882a593Smuzhiyun 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2115*4882a593Smuzhiyun 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2116*4882a593Smuzhiyun 
2117*4882a593Smuzhiyun 		if (likely(skb != NULL)) {
2118*4882a593Smuzhiyun 			pkts_compl++;
2119*4882a593Smuzhiyun 			bytes_compl += skb->len;
2120*4882a593Smuzhiyun 			dev_consume_skb_any(skb);
2121*4882a593Smuzhiyun 			tx_q->tx_skbuff[entry] = NULL;
2122*4882a593Smuzhiyun 		}
2123*4882a593Smuzhiyun 
2124*4882a593Smuzhiyun 		stmmac_release_tx_desc(priv, p, priv->mode);
2125*4882a593Smuzhiyun 
2126*4882a593Smuzhiyun 		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
2127*4882a593Smuzhiyun 	}
2128*4882a593Smuzhiyun 	tx_q->dirty_tx = entry;
2129*4882a593Smuzhiyun 
2130*4882a593Smuzhiyun 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2131*4882a593Smuzhiyun 				  pkts_compl, bytes_compl);
2132*4882a593Smuzhiyun 
2133*4882a593Smuzhiyun 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2134*4882a593Smuzhiyun 								queue))) &&
2135*4882a593Smuzhiyun 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2136*4882a593Smuzhiyun 
2137*4882a593Smuzhiyun 		netif_dbg(priv, tx_done, priv->dev,
2138*4882a593Smuzhiyun 			  "%s: restart transmit\n", __func__);
2139*4882a593Smuzhiyun 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2140*4882a593Smuzhiyun 	}
2141*4882a593Smuzhiyun 
2142*4882a593Smuzhiyun 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
2143*4882a593Smuzhiyun 		stmmac_enable_eee_mode(priv);
2144*4882a593Smuzhiyun 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2145*4882a593Smuzhiyun 	}
2146*4882a593Smuzhiyun 
2147*4882a593Smuzhiyun 	/* We still have pending packets, let's call for a new scheduling */
2148*4882a593Smuzhiyun 	if (tx_q->dirty_tx != tx_q->cur_tx)
2149*4882a593Smuzhiyun 		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2150*4882a593Smuzhiyun 
2151*4882a593Smuzhiyun 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2152*4882a593Smuzhiyun 
2153*4882a593Smuzhiyun 	return count;
2154*4882a593Smuzhiyun }
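/*
 * Editor's illustrative sketch (not part of the driver, compiled out below):
 * stmmac_tx_clean() returns how many descriptors it reclaimed so the caller
 * can weigh that against the NAPI budget. A simplified version of how the TX
 * NAPI poll routine later in this file is expected to use it; the real one
 * also re-enables the DMA interrupt under the channel lock.
 */
#if 0
static int stmmac_example_napi_poll_tx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, tx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	int work_done;

	work_done = stmmac_tx_clean(priv, budget, ch->index);
	work_done = min(work_done, budget);

	if (work_done < budget)		/* ring drained: stop polling */
		napi_complete_done(napi, work_done);

	return work_done;
}
#endif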
2155*4882a593Smuzhiyun 
2156*4882a593Smuzhiyun /**
2157*4882a593Smuzhiyun  * stmmac_tx_err - to manage the tx error
2158*4882a593Smuzhiyun  * @priv: driver private structure
2159*4882a593Smuzhiyun  * @chan: channel index
2160*4882a593Smuzhiyun  * Description: it cleans the descriptors and restarts the transmission
2161*4882a593Smuzhiyun  * in case of transmission errors.
2162*4882a593Smuzhiyun  */
2163*4882a593Smuzhiyun static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2164*4882a593Smuzhiyun {
2165*4882a593Smuzhiyun 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2166*4882a593Smuzhiyun 
2167*4882a593Smuzhiyun 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2168*4882a593Smuzhiyun 
2169*4882a593Smuzhiyun 	stmmac_stop_tx_dma(priv, chan);
2170*4882a593Smuzhiyun 	dma_free_tx_skbufs(priv, chan);
2171*4882a593Smuzhiyun 	stmmac_clear_tx_descriptors(priv, chan);
2172*4882a593Smuzhiyun 	tx_q->dirty_tx = 0;
2173*4882a593Smuzhiyun 	tx_q->cur_tx = 0;
2174*4882a593Smuzhiyun 	tx_q->mss = 0;
2175*4882a593Smuzhiyun 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2176*4882a593Smuzhiyun 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2177*4882a593Smuzhiyun 			    tx_q->dma_tx_phy, chan);
2178*4882a593Smuzhiyun 	stmmac_start_tx_dma(priv, chan);
2179*4882a593Smuzhiyun 
2180*4882a593Smuzhiyun 	priv->dev->stats.tx_errors++;
2181*4882a593Smuzhiyun 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2182*4882a593Smuzhiyun }
2183*4882a593Smuzhiyun 
2184*4882a593Smuzhiyun /**
2185*4882a593Smuzhiyun  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2186*4882a593Smuzhiyun  *  @priv: driver private structure
2187*4882a593Smuzhiyun  *  @txmode: TX operating mode
2188*4882a593Smuzhiyun  *  @rxmode: RX operating mode
2189*4882a593Smuzhiyun  *  @chan: channel index
2190*4882a593Smuzhiyun  *  Description: it is used for configuring the DMA operation mode at
2191*4882a593Smuzhiyun  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2192*4882a593Smuzhiyun  *  mode.
2193*4882a593Smuzhiyun  */
stmmac_set_dma_operation_mode(struct stmmac_priv * priv,u32 txmode,u32 rxmode,u32 chan)2194*4882a593Smuzhiyun static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2195*4882a593Smuzhiyun 					  u32 rxmode, u32 chan)
2196*4882a593Smuzhiyun {
2197*4882a593Smuzhiyun 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2198*4882a593Smuzhiyun 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2199*4882a593Smuzhiyun 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2200*4882a593Smuzhiyun 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2201*4882a593Smuzhiyun 	int rxfifosz = priv->plat->rx_fifo_size;
2202*4882a593Smuzhiyun 	int txfifosz = priv->plat->tx_fifo_size;
2203*4882a593Smuzhiyun 
2204*4882a593Smuzhiyun 	if (rxfifosz == 0)
2205*4882a593Smuzhiyun 		rxfifosz = priv->dma_cap.rx_fifo_size;
2206*4882a593Smuzhiyun 	if (txfifosz == 0)
2207*4882a593Smuzhiyun 		txfifosz = priv->dma_cap.tx_fifo_size;
2208*4882a593Smuzhiyun 
2209*4882a593Smuzhiyun 	/* Adjust for real per queue fifo size */
2210*4882a593Smuzhiyun 	rxfifosz /= rx_channels_count;
2211*4882a593Smuzhiyun 	txfifosz /= tx_channels_count;
2212*4882a593Smuzhiyun 
2213*4882a593Smuzhiyun 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2214*4882a593Smuzhiyun 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2215*4882a593Smuzhiyun }
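
/*
 * Worked example (illustrative, not part of the driver): the per-queue FIFO
 * split above simply divides the total FIFO evenly across the channels in
 * use. Assuming an 8192-byte RX FIFO shared by 4 RX channels and a 4096-byte
 * TX FIFO shared by 2 TX channels:
 *
 *	rxfifosz = 8192 / 4;	// 2048 bytes per RX queue
 *	txfifosz = 4096 / 2;	// 2048 bytes per TX queue
 *
 * These per-queue sizes are what stmmac_dma_rx_mode()/stmmac_dma_tx_mode()
 * use when programming the threshold or Store-And-Forward watermarks.
 */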
2216*4882a593Smuzhiyun 
stmmac_safety_feat_interrupt(struct stmmac_priv * priv)2217*4882a593Smuzhiyun static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2218*4882a593Smuzhiyun {
2219*4882a593Smuzhiyun 	int ret;
2220*4882a593Smuzhiyun 
2221*4882a593Smuzhiyun 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2222*4882a593Smuzhiyun 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2223*4882a593Smuzhiyun 	if (ret && (ret != -EINVAL)) {
2224*4882a593Smuzhiyun 		stmmac_global_err(priv);
2225*4882a593Smuzhiyun 		return true;
2226*4882a593Smuzhiyun 	}
2227*4882a593Smuzhiyun 
2228*4882a593Smuzhiyun 	return false;
2229*4882a593Smuzhiyun }
2230*4882a593Smuzhiyun 
stmmac_napi_check(struct stmmac_priv * priv,u32 chan)2231*4882a593Smuzhiyun static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2232*4882a593Smuzhiyun {
2233*4882a593Smuzhiyun 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2234*4882a593Smuzhiyun 						 &priv->xstats, chan);
2235*4882a593Smuzhiyun 	struct stmmac_channel *ch = &priv->channel[chan];
2236*4882a593Smuzhiyun 	unsigned long flags;
2237*4882a593Smuzhiyun 
2238*4882a593Smuzhiyun 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2239*4882a593Smuzhiyun 		if (napi_schedule_prep(&ch->rx_napi)) {
2240*4882a593Smuzhiyun 			spin_lock_irqsave(&ch->lock, flags);
2241*4882a593Smuzhiyun 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2242*4882a593Smuzhiyun 			spin_unlock_irqrestore(&ch->lock, flags);
2243*4882a593Smuzhiyun 			__napi_schedule(&ch->rx_napi);
2244*4882a593Smuzhiyun 		}
2245*4882a593Smuzhiyun 	}
2246*4882a593Smuzhiyun 
2247*4882a593Smuzhiyun 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2248*4882a593Smuzhiyun 		if (napi_schedule_prep(&ch->tx_napi)) {
2249*4882a593Smuzhiyun 			spin_lock_irqsave(&ch->lock, flags);
2250*4882a593Smuzhiyun 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2251*4882a593Smuzhiyun 			spin_unlock_irqrestore(&ch->lock, flags);
2252*4882a593Smuzhiyun 			__napi_schedule(&ch->tx_napi);
2253*4882a593Smuzhiyun 		}
2254*4882a593Smuzhiyun 	}
2255*4882a593Smuzhiyun 
2256*4882a593Smuzhiyun 	return status;
2257*4882a593Smuzhiyun }
2258*4882a593Smuzhiyun 
2259*4882a593Smuzhiyun /**
2260*4882a593Smuzhiyun  * stmmac_dma_interrupt - DMA ISR
2261*4882a593Smuzhiyun  * @priv: driver private structure
2262*4882a593Smuzhiyun  * Description: this is the DMA ISR. It is called by the main ISR.
2263*4882a593Smuzhiyun  * It calls the dwmac DMA routine and schedules the poll method in case
2264*4882a593Smuzhiyun  * some work can be done.
2265*4882a593Smuzhiyun  */
stmmac_dma_interrupt(struct stmmac_priv * priv)2266*4882a593Smuzhiyun static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2267*4882a593Smuzhiyun {
2268*4882a593Smuzhiyun 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2269*4882a593Smuzhiyun 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2270*4882a593Smuzhiyun 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2271*4882a593Smuzhiyun 				tx_channel_count : rx_channel_count;
2272*4882a593Smuzhiyun 	u32 chan;
2273*4882a593Smuzhiyun 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2274*4882a593Smuzhiyun 
2275*4882a593Smuzhiyun 	/* Make sure we never check beyond our status buffer. */
2276*4882a593Smuzhiyun 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2277*4882a593Smuzhiyun 		channels_to_check = ARRAY_SIZE(status);
2278*4882a593Smuzhiyun 
2279*4882a593Smuzhiyun 	for (chan = 0; chan < channels_to_check; chan++)
2280*4882a593Smuzhiyun 		status[chan] = stmmac_napi_check(priv, chan);
2281*4882a593Smuzhiyun 
2282*4882a593Smuzhiyun 	for (chan = 0; chan < tx_channel_count; chan++) {
2283*4882a593Smuzhiyun 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2284*4882a593Smuzhiyun 			/* Try to bump up the dma threshold on this failure */
2285*4882a593Smuzhiyun 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2286*4882a593Smuzhiyun 			    (tc <= 256)) {
2287*4882a593Smuzhiyun 				tc += 64;
2288*4882a593Smuzhiyun 				if (priv->plat->force_thresh_dma_mode)
2289*4882a593Smuzhiyun 					stmmac_set_dma_operation_mode(priv,
2290*4882a593Smuzhiyun 								      tc,
2291*4882a593Smuzhiyun 								      tc,
2292*4882a593Smuzhiyun 								      chan);
2293*4882a593Smuzhiyun 				else
2294*4882a593Smuzhiyun 					stmmac_set_dma_operation_mode(priv,
2295*4882a593Smuzhiyun 								    tc,
2296*4882a593Smuzhiyun 								    SF_DMA_MODE,
2297*4882a593Smuzhiyun 								    chan);
2298*4882a593Smuzhiyun 				priv->xstats.threshold = tc;
2299*4882a593Smuzhiyun 			}
2300*4882a593Smuzhiyun 		} else if (unlikely(status[chan] == tx_hard_error)) {
2301*4882a593Smuzhiyun 			stmmac_tx_err(priv, chan);
2302*4882a593Smuzhiyun 		}
2303*4882a593Smuzhiyun 	}
2304*4882a593Smuzhiyun }
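
/*
 * Illustrative sketch of the threshold escalation above (not part of the
 * driver): on each tx_hard_error_bump_tc event the TX threshold "tc" grows
 * in steps of 64 until it passes 256, after which no further bump is done.
 * Assuming a starting value of tc = 64 (the driver's usual default):
 *
 *	64 -> 128 -> 192 -> 256 -> 320	(last bump, since 256 <= 256)
 *
 * Each new value is written back with stmmac_set_dma_operation_mode(), either
 * for both directions (force_thresh_dma_mode) or with the RX side kept in
 * Store-And-Forward mode (SF_DMA_MODE).
 */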
2305*4882a593Smuzhiyun 
2306*4882a593Smuzhiyun /**
2307*4882a593Smuzhiyun  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2308*4882a593Smuzhiyun  * @priv: driver private structure
2309*4882a593Smuzhiyun  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2310*4882a593Smuzhiyun  */
stmmac_mmc_setup(struct stmmac_priv * priv)2311*4882a593Smuzhiyun static void stmmac_mmc_setup(struct stmmac_priv *priv)
2312*4882a593Smuzhiyun {
2313*4882a593Smuzhiyun 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2314*4882a593Smuzhiyun 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2315*4882a593Smuzhiyun 
2316*4882a593Smuzhiyun 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2317*4882a593Smuzhiyun 
2318*4882a593Smuzhiyun 	if (priv->dma_cap.rmon) {
2319*4882a593Smuzhiyun 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2320*4882a593Smuzhiyun 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2321*4882a593Smuzhiyun 	} else
2322*4882a593Smuzhiyun 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2323*4882a593Smuzhiyun }
2324*4882a593Smuzhiyun 
2325*4882a593Smuzhiyun /**
2326*4882a593Smuzhiyun  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2327*4882a593Smuzhiyun  * @priv: driver private structure
2328*4882a593Smuzhiyun  * Description:
2329*4882a593Smuzhiyun  *  newer GMAC chip generations have a dedicated register to indicate the
2330*4882a593Smuzhiyun  *  presence of optional features/functions.
2331*4882a593Smuzhiyun  *  This can also be used to override the values passed through the
2332*4882a593Smuzhiyun  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2333*4882a593Smuzhiyun  */
stmmac_get_hw_features(struct stmmac_priv * priv)2334*4882a593Smuzhiyun static int stmmac_get_hw_features(struct stmmac_priv *priv)
2335*4882a593Smuzhiyun {
2336*4882a593Smuzhiyun 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2337*4882a593Smuzhiyun }
2338*4882a593Smuzhiyun 
2339*4882a593Smuzhiyun /**
2340*4882a593Smuzhiyun  * stmmac_check_ether_addr - check if the MAC addr is valid
2341*4882a593Smuzhiyun  * @priv: driver private structure
2342*4882a593Smuzhiyun  * Description:
2343*4882a593Smuzhiyun  * it verifies that the MAC address is valid; if it is not, it
2344*4882a593Smuzhiyun  * generates a random MAC address
2345*4882a593Smuzhiyun  */
stmmac_check_ether_addr(struct stmmac_priv * priv)2346*4882a593Smuzhiyun static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2347*4882a593Smuzhiyun {
2348*4882a593Smuzhiyun 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2349*4882a593Smuzhiyun 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2350*4882a593Smuzhiyun 		if (likely(priv->plat->get_eth_addr))
2351*4882a593Smuzhiyun 			priv->plat->get_eth_addr(priv->plat->bsp_priv,
2352*4882a593Smuzhiyun 				priv->dev->dev_addr);
2353*4882a593Smuzhiyun 		if (!is_valid_ether_addr(priv->dev->dev_addr))
2354*4882a593Smuzhiyun 			eth_hw_addr_random(priv->dev);
2355*4882a593Smuzhiyun 		dev_info(priv->device, "device MAC address %pM\n",
2356*4882a593Smuzhiyun 			 priv->dev->dev_addr);
2357*4882a593Smuzhiyun 	}
2358*4882a593Smuzhiyun }
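
/*
 * Minimal sketch of a platform-level get_eth_addr() hook, assuming a
 * hypothetical board glue layer (the callback name and the OTP storage are
 * made up for illustration; only the call signature used above is taken from
 * the driver):
 *
 *	static void my_board_get_eth_addr(void *bsp_priv, u8 *addr)
 *	{
 *		struct my_board_priv *bsp = bsp_priv;
 *
 *		// e.g. copy a MAC address read from an EEPROM or efuse
 *		ether_addr_copy(addr, bsp->mac_from_otp);
 *	}
 *
 *	plat_dat->get_eth_addr = my_board_get_eth_addr;
 *
 * If the address is still invalid afterwards, the core falls back to
 * eth_hw_addr_random() as shown above.
 */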
2359*4882a593Smuzhiyun 
2360*4882a593Smuzhiyun /**
2361*4882a593Smuzhiyun  * stmmac_init_dma_engine - DMA init.
2362*4882a593Smuzhiyun  * @priv: driver private structure
2363*4882a593Smuzhiyun  * Description:
2364*4882a593Smuzhiyun  * It initializes the DMA by invoking the specific MAC/GMAC callback.
2365*4882a593Smuzhiyun  * Some DMA parameters can be passed from the platform;
2366*4882a593Smuzhiyun  * if they are not passed, a default is used for the MAC or GMAC.
2367*4882a593Smuzhiyun  */
stmmac_init_dma_engine(struct stmmac_priv * priv)2368*4882a593Smuzhiyun static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2369*4882a593Smuzhiyun {
2370*4882a593Smuzhiyun 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2371*4882a593Smuzhiyun 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2372*4882a593Smuzhiyun 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2373*4882a593Smuzhiyun 	struct stmmac_rx_queue *rx_q;
2374*4882a593Smuzhiyun 	struct stmmac_tx_queue *tx_q;
2375*4882a593Smuzhiyun 	u32 chan = 0;
2376*4882a593Smuzhiyun 	int atds = 0;
2377*4882a593Smuzhiyun 	int ret = 0;
2378*4882a593Smuzhiyun 
2379*4882a593Smuzhiyun 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2380*4882a593Smuzhiyun 		dev_err(priv->device, "Invalid DMA configuration\n");
2381*4882a593Smuzhiyun 		return -EINVAL;
2382*4882a593Smuzhiyun 	}
2383*4882a593Smuzhiyun 
2384*4882a593Smuzhiyun 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2385*4882a593Smuzhiyun 		atds = 1;
2386*4882a593Smuzhiyun 
2387*4882a593Smuzhiyun 	ret = stmmac_reset(priv, priv->ioaddr);
2388*4882a593Smuzhiyun 	if (ret) {
2389*4882a593Smuzhiyun 		dev_err(priv->device, "Failed to reset the dma\n");
2390*4882a593Smuzhiyun 		return ret;
2391*4882a593Smuzhiyun 	}
2392*4882a593Smuzhiyun 
2393*4882a593Smuzhiyun 	/* DMA Configuration */
2394*4882a593Smuzhiyun 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2395*4882a593Smuzhiyun 
2396*4882a593Smuzhiyun 	if (priv->plat->axi)
2397*4882a593Smuzhiyun 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2398*4882a593Smuzhiyun 
2399*4882a593Smuzhiyun 	/* DMA CSR Channel configuration */
2400*4882a593Smuzhiyun 	for (chan = 0; chan < dma_csr_ch; chan++)
2401*4882a593Smuzhiyun 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2402*4882a593Smuzhiyun 
2403*4882a593Smuzhiyun 	/* DMA RX Channel Configuration */
2404*4882a593Smuzhiyun 	for (chan = 0; chan < rx_channels_count; chan++) {
2405*4882a593Smuzhiyun 		rx_q = &priv->rx_queue[chan];
2406*4882a593Smuzhiyun 
2407*4882a593Smuzhiyun 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2408*4882a593Smuzhiyun 				    rx_q->dma_rx_phy, chan);
2409*4882a593Smuzhiyun 
2410*4882a593Smuzhiyun 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2411*4882a593Smuzhiyun 				     (priv->dma_rx_size *
2412*4882a593Smuzhiyun 				      sizeof(struct dma_desc));
2413*4882a593Smuzhiyun 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2414*4882a593Smuzhiyun 				       rx_q->rx_tail_addr, chan);
2415*4882a593Smuzhiyun 	}
2416*4882a593Smuzhiyun 
2417*4882a593Smuzhiyun 	/* DMA TX Channel Configuration */
2418*4882a593Smuzhiyun 	for (chan = 0; chan < tx_channels_count; chan++) {
2419*4882a593Smuzhiyun 		tx_q = &priv->tx_queue[chan];
2420*4882a593Smuzhiyun 
2421*4882a593Smuzhiyun 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2422*4882a593Smuzhiyun 				    tx_q->dma_tx_phy, chan);
2423*4882a593Smuzhiyun 
2424*4882a593Smuzhiyun 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2425*4882a593Smuzhiyun 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2426*4882a593Smuzhiyun 				       tx_q->tx_tail_addr, chan);
2427*4882a593Smuzhiyun 	}
2428*4882a593Smuzhiyun 
2429*4882a593Smuzhiyun 	return ret;
2430*4882a593Smuzhiyun }
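
/*
 * Worked example for the tail pointer setup above (illustrative only): the
 * RX tail pointer is placed one full ring past the ring base, so with
 * dma_rx_size = 512 descriptors and a 16-byte struct dma_desc (four 32-bit
 * words):
 *
 *	rx_q->rx_tail_addr = rx_q->dma_rx_phy + 512 * 16;	// base + 8192
 *
 * The TX tail pointer instead starts at the ring base (dma_tx_phy) and is
 * advanced by the xmit path as descriptors are handed to the hardware.
 */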
2431*4882a593Smuzhiyun 
stmmac_tx_timer_arm(struct stmmac_priv * priv,u32 queue)2432*4882a593Smuzhiyun static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2433*4882a593Smuzhiyun {
2434*4882a593Smuzhiyun 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2435*4882a593Smuzhiyun 
2436*4882a593Smuzhiyun 	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2437*4882a593Smuzhiyun }
2438*4882a593Smuzhiyun 
2439*4882a593Smuzhiyun /**
2440*4882a593Smuzhiyun  * stmmac_tx_timer - mitigation sw timer for tx.
2441*4882a593Smuzhiyun  * @t: pointer to the timer_list embedded in the tx queue
2442*4882a593Smuzhiyun  * Description:
2443*4882a593Smuzhiyun  * This is the timer handler to directly invoke the stmmac_tx_clean.
2444*4882a593Smuzhiyun  */
stmmac_tx_timer(struct timer_list * t)2445*4882a593Smuzhiyun static void stmmac_tx_timer(struct timer_list *t)
2446*4882a593Smuzhiyun {
2447*4882a593Smuzhiyun 	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2448*4882a593Smuzhiyun 	struct stmmac_priv *priv = tx_q->priv_data;
2449*4882a593Smuzhiyun 	struct stmmac_channel *ch;
2450*4882a593Smuzhiyun 
2451*4882a593Smuzhiyun 	ch = &priv->channel[tx_q->queue_index];
2452*4882a593Smuzhiyun 
2453*4882a593Smuzhiyun 	if (likely(napi_schedule_prep(&ch->tx_napi))) {
2454*4882a593Smuzhiyun 		unsigned long flags;
2455*4882a593Smuzhiyun 
2456*4882a593Smuzhiyun 		spin_lock_irqsave(&ch->lock, flags);
2457*4882a593Smuzhiyun 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2458*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ch->lock, flags);
2459*4882a593Smuzhiyun 		__napi_schedule(&ch->tx_napi);
2460*4882a593Smuzhiyun 	}
2461*4882a593Smuzhiyun }
2462*4882a593Smuzhiyun 
2463*4882a593Smuzhiyun /**
2464*4882a593Smuzhiyun  * stmmac_init_coalesce - init mitigation options.
2465*4882a593Smuzhiyun  * @priv: driver private structure
2466*4882a593Smuzhiyun  * Description:
2467*4882a593Smuzhiyun  * This initializes the coalescing parameters: i.e. timer rate,
2468*4882a593Smuzhiyun  * timer handler and default threshold used for enabling the
2469*4882a593Smuzhiyun  * interrupt on completion bit.
2470*4882a593Smuzhiyun  */
stmmac_init_coalesce(struct stmmac_priv * priv)2471*4882a593Smuzhiyun static void stmmac_init_coalesce(struct stmmac_priv *priv)
2472*4882a593Smuzhiyun {
2473*4882a593Smuzhiyun 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2474*4882a593Smuzhiyun 	u32 chan;
2475*4882a593Smuzhiyun 
2476*4882a593Smuzhiyun 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
2477*4882a593Smuzhiyun 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2478*4882a593Smuzhiyun 	priv->rx_coal_frames = STMMAC_RX_FRAMES;
2479*4882a593Smuzhiyun 
2480*4882a593Smuzhiyun 	for (chan = 0; chan < tx_channel_count; chan++) {
2481*4882a593Smuzhiyun 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2482*4882a593Smuzhiyun 
2483*4882a593Smuzhiyun 		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2484*4882a593Smuzhiyun 	}
2485*4882a593Smuzhiyun }
2486*4882a593Smuzhiyun 
stmmac_set_rings_length(struct stmmac_priv * priv)2487*4882a593Smuzhiyun static void stmmac_set_rings_length(struct stmmac_priv *priv)
2488*4882a593Smuzhiyun {
2489*4882a593Smuzhiyun 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2490*4882a593Smuzhiyun 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2491*4882a593Smuzhiyun 	u32 chan;
2492*4882a593Smuzhiyun 
2493*4882a593Smuzhiyun 	/* set TX ring length */
2494*4882a593Smuzhiyun 	for (chan = 0; chan < tx_channels_count; chan++)
2495*4882a593Smuzhiyun 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
2496*4882a593Smuzhiyun 				       (priv->dma_tx_size - 1), chan);
2497*4882a593Smuzhiyun 
2498*4882a593Smuzhiyun 	/* set RX ring length */
2499*4882a593Smuzhiyun 	for (chan = 0; chan < rx_channels_count; chan++)
2500*4882a593Smuzhiyun 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
2501*4882a593Smuzhiyun 				       (priv->dma_rx_size - 1), chan);
2502*4882a593Smuzhiyun }
2503*4882a593Smuzhiyun 
2504*4882a593Smuzhiyun /**
2505*4882a593Smuzhiyun  *  stmmac_set_tx_queue_weight - Set TX queue weight
2506*4882a593Smuzhiyun  *  @priv: driver private structure
2507*4882a593Smuzhiyun  *  Description: It is used for setting the TX queue weights
2508*4882a593Smuzhiyun  */
stmmac_set_tx_queue_weight(struct stmmac_priv * priv)2509*4882a593Smuzhiyun static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2510*4882a593Smuzhiyun {
2511*4882a593Smuzhiyun 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2512*4882a593Smuzhiyun 	u32 weight;
2513*4882a593Smuzhiyun 	u32 queue;
2514*4882a593Smuzhiyun 
2515*4882a593Smuzhiyun 	for (queue = 0; queue < tx_queues_count; queue++) {
2516*4882a593Smuzhiyun 		weight = priv->plat->tx_queues_cfg[queue].weight;
2517*4882a593Smuzhiyun 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2518*4882a593Smuzhiyun 	}
2519*4882a593Smuzhiyun }
2520*4882a593Smuzhiyun 
2521*4882a593Smuzhiyun /**
2522*4882a593Smuzhiyun  *  stmmac_configure_cbs - Configure CBS in TX queue
2523*4882a593Smuzhiyun  *  @priv: driver private structure
2524*4882a593Smuzhiyun  *  Description: It is used for configuring CBS in AVB TX queues
2525*4882a593Smuzhiyun  */
stmmac_configure_cbs(struct stmmac_priv * priv)2526*4882a593Smuzhiyun static void stmmac_configure_cbs(struct stmmac_priv *priv)
2527*4882a593Smuzhiyun {
2528*4882a593Smuzhiyun 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2529*4882a593Smuzhiyun 	u32 mode_to_use;
2530*4882a593Smuzhiyun 	u32 queue;
2531*4882a593Smuzhiyun 
2532*4882a593Smuzhiyun 	/* queue 0 is reserved for legacy traffic */
2533*4882a593Smuzhiyun 	for (queue = 1; queue < tx_queues_count; queue++) {
2534*4882a593Smuzhiyun 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2535*4882a593Smuzhiyun 		if (mode_to_use == MTL_QUEUE_DCB)
2536*4882a593Smuzhiyun 			continue;
2537*4882a593Smuzhiyun 
2538*4882a593Smuzhiyun 		stmmac_config_cbs(priv, priv->hw,
2539*4882a593Smuzhiyun 				priv->plat->tx_queues_cfg[queue].send_slope,
2540*4882a593Smuzhiyun 				priv->plat->tx_queues_cfg[queue].idle_slope,
2541*4882a593Smuzhiyun 				priv->plat->tx_queues_cfg[queue].high_credit,
2542*4882a593Smuzhiyun 				priv->plat->tx_queues_cfg[queue].low_credit,
2543*4882a593Smuzhiyun 				queue);
2544*4882a593Smuzhiyun 	}
2545*4882a593Smuzhiyun }
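
/*
 * Illustrative platform configuration consumed by the loop above (field
 * values are placeholders that depend on the reserved bandwidth, not
 * recommendations): an AVB queue is selected by leaving mode_to_use set to
 * something other than MTL_QUEUE_DCB and providing the CBS parameters, e.g.
 * in a platform/devicetree glue layer:
 *
 *	plat->tx_queues_cfg[1].mode_to_use = MTL_QUEUE_AVB;
 *	plat->tx_queues_cfg[1].send_slope  = send_slope;	// bandwidth dependent
 *	plat->tx_queues_cfg[1].idle_slope  = idle_slope;
 *	plat->tx_queues_cfg[1].high_credit = high_credit;
 *	plat->tx_queues_cfg[1].low_credit  = low_credit;
 *
 * Queues configured as MTL_QUEUE_DCB are skipped by stmmac_configure_cbs().
 */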
2546*4882a593Smuzhiyun 
2547*4882a593Smuzhiyun /**
2548*4882a593Smuzhiyun  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2549*4882a593Smuzhiyun  *  @priv: driver private structure
2550*4882a593Smuzhiyun  *  Description: It is used for mapping RX queues to RX dma channels
2551*4882a593Smuzhiyun  */
stmmac_rx_queue_dma_chan_map(struct stmmac_priv * priv)2552*4882a593Smuzhiyun static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2553*4882a593Smuzhiyun {
2554*4882a593Smuzhiyun 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2555*4882a593Smuzhiyun 	u32 queue;
2556*4882a593Smuzhiyun 	u32 chan;
2557*4882a593Smuzhiyun 
2558*4882a593Smuzhiyun 	for (queue = 0; queue < rx_queues_count; queue++) {
2559*4882a593Smuzhiyun 		chan = priv->plat->rx_queues_cfg[queue].chan;
2560*4882a593Smuzhiyun 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2561*4882a593Smuzhiyun 	}
2562*4882a593Smuzhiyun }
2563*4882a593Smuzhiyun 
2564*4882a593Smuzhiyun /**
2565*4882a593Smuzhiyun  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2566*4882a593Smuzhiyun  *  @priv: driver private structure
2567*4882a593Smuzhiyun  *  Description: It is used for configuring the RX Queue Priority
2568*4882a593Smuzhiyun  */
stmmac_mac_config_rx_queues_prio(struct stmmac_priv * priv)2569*4882a593Smuzhiyun static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2570*4882a593Smuzhiyun {
2571*4882a593Smuzhiyun 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2572*4882a593Smuzhiyun 	u32 queue;
2573*4882a593Smuzhiyun 	u32 prio;
2574*4882a593Smuzhiyun 
2575*4882a593Smuzhiyun 	for (queue = 0; queue < rx_queues_count; queue++) {
2576*4882a593Smuzhiyun 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
2577*4882a593Smuzhiyun 			continue;
2578*4882a593Smuzhiyun 
2579*4882a593Smuzhiyun 		prio = priv->plat->rx_queues_cfg[queue].prio;
2580*4882a593Smuzhiyun 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2581*4882a593Smuzhiyun 	}
2582*4882a593Smuzhiyun }
2583*4882a593Smuzhiyun 
2584*4882a593Smuzhiyun /**
2585*4882a593Smuzhiyun  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2586*4882a593Smuzhiyun  *  @priv: driver private structure
2587*4882a593Smuzhiyun  *  Description: It is used for configuring the TX Queue Priority
2588*4882a593Smuzhiyun  */
stmmac_mac_config_tx_queues_prio(struct stmmac_priv * priv)2589*4882a593Smuzhiyun static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2590*4882a593Smuzhiyun {
2591*4882a593Smuzhiyun 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2592*4882a593Smuzhiyun 	u32 queue;
2593*4882a593Smuzhiyun 	u32 prio;
2594*4882a593Smuzhiyun 
2595*4882a593Smuzhiyun 	for (queue = 0; queue < tx_queues_count; queue++) {
2596*4882a593Smuzhiyun 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
2597*4882a593Smuzhiyun 			continue;
2598*4882a593Smuzhiyun 
2599*4882a593Smuzhiyun 		prio = priv->plat->tx_queues_cfg[queue].prio;
2600*4882a593Smuzhiyun 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2601*4882a593Smuzhiyun 	}
2602*4882a593Smuzhiyun }
2603*4882a593Smuzhiyun 
2604*4882a593Smuzhiyun /**
2605*4882a593Smuzhiyun  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2606*4882a593Smuzhiyun  *  @priv: driver private structure
2607*4882a593Smuzhiyun  *  Description: It is used for configuring the RX queue routing
2608*4882a593Smuzhiyun  */
stmmac_mac_config_rx_queues_routing(struct stmmac_priv * priv)2609*4882a593Smuzhiyun static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2610*4882a593Smuzhiyun {
2611*4882a593Smuzhiyun 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2612*4882a593Smuzhiyun 	u32 queue;
2613*4882a593Smuzhiyun 	u8 packet;
2614*4882a593Smuzhiyun 
2615*4882a593Smuzhiyun 	for (queue = 0; queue < rx_queues_count; queue++) {
2616*4882a593Smuzhiyun 		/* no specific packet type routing specified for the queue */
2617*4882a593Smuzhiyun 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2618*4882a593Smuzhiyun 			continue;
2619*4882a593Smuzhiyun 
2620*4882a593Smuzhiyun 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2621*4882a593Smuzhiyun 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2622*4882a593Smuzhiyun 	}
2623*4882a593Smuzhiyun }
2624*4882a593Smuzhiyun 
stmmac_mac_config_rss(struct stmmac_priv * priv)2625*4882a593Smuzhiyun static void stmmac_mac_config_rss(struct stmmac_priv *priv)
2626*4882a593Smuzhiyun {
2627*4882a593Smuzhiyun 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
2628*4882a593Smuzhiyun 		priv->rss.enable = false;
2629*4882a593Smuzhiyun 		return;
2630*4882a593Smuzhiyun 	}
2631*4882a593Smuzhiyun 
2632*4882a593Smuzhiyun 	if (priv->dev->features & NETIF_F_RXHASH)
2633*4882a593Smuzhiyun 		priv->rss.enable = true;
2634*4882a593Smuzhiyun 	else
2635*4882a593Smuzhiyun 		priv->rss.enable = false;
2636*4882a593Smuzhiyun 
2637*4882a593Smuzhiyun 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
2638*4882a593Smuzhiyun 			     priv->plat->rx_queues_to_use);
2639*4882a593Smuzhiyun }
2640*4882a593Smuzhiyun 
2641*4882a593Smuzhiyun /**
2642*4882a593Smuzhiyun  *  stmmac_mtl_configuration - Configure MTL
2643*4882a593Smuzhiyun  *  @priv: driver private structure
2644*4882a593Smuzhiyun  *  Description: It is used for configuring MTL
2645*4882a593Smuzhiyun  */
stmmac_mtl_configuration(struct stmmac_priv * priv)2646*4882a593Smuzhiyun static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2647*4882a593Smuzhiyun {
2648*4882a593Smuzhiyun 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2649*4882a593Smuzhiyun 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
2650*4882a593Smuzhiyun 
2651*4882a593Smuzhiyun 	if (tx_queues_count > 1)
2652*4882a593Smuzhiyun 		stmmac_set_tx_queue_weight(priv);
2653*4882a593Smuzhiyun 
2654*4882a593Smuzhiyun 	/* Configure MTL RX algorithms */
2655*4882a593Smuzhiyun 	if (rx_queues_count > 1)
2656*4882a593Smuzhiyun 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2657*4882a593Smuzhiyun 				priv->plat->rx_sched_algorithm);
2658*4882a593Smuzhiyun 
2659*4882a593Smuzhiyun 	/* Configure MTL TX algorithms */
2660*4882a593Smuzhiyun 	if (tx_queues_count > 1)
2661*4882a593Smuzhiyun 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2662*4882a593Smuzhiyun 				priv->plat->tx_sched_algorithm);
2663*4882a593Smuzhiyun 
2664*4882a593Smuzhiyun 	/* Configure CBS in AVB TX queues */
2665*4882a593Smuzhiyun 	if (tx_queues_count > 1)
2666*4882a593Smuzhiyun 		stmmac_configure_cbs(priv);
2667*4882a593Smuzhiyun 
2668*4882a593Smuzhiyun 	/* Map RX MTL to DMA channels */
2669*4882a593Smuzhiyun 	stmmac_rx_queue_dma_chan_map(priv);
2670*4882a593Smuzhiyun 
2671*4882a593Smuzhiyun 	/* Enable MAC RX Queues */
2672*4882a593Smuzhiyun 	stmmac_mac_enable_rx_queues(priv);
2673*4882a593Smuzhiyun 
2674*4882a593Smuzhiyun 	/* Set RX priorities */
2675*4882a593Smuzhiyun 	if (rx_queues_count > 1)
2676*4882a593Smuzhiyun 		stmmac_mac_config_rx_queues_prio(priv);
2677*4882a593Smuzhiyun 
2678*4882a593Smuzhiyun 	/* Set TX priorities */
2679*4882a593Smuzhiyun 	if (tx_queues_count > 1)
2680*4882a593Smuzhiyun 		stmmac_mac_config_tx_queues_prio(priv);
2681*4882a593Smuzhiyun 
2682*4882a593Smuzhiyun 	/* Set RX routing */
2683*4882a593Smuzhiyun 	if (rx_queues_count > 1)
2684*4882a593Smuzhiyun 		stmmac_mac_config_rx_queues_routing(priv);
2685*4882a593Smuzhiyun 
2686*4882a593Smuzhiyun 	/* Receive Side Scaling */
2687*4882a593Smuzhiyun 	if (rx_queues_count > 1)
2688*4882a593Smuzhiyun 		stmmac_mac_config_rss(priv);
2689*4882a593Smuzhiyun }
2690*4882a593Smuzhiyun 
stmmac_safety_feat_configuration(struct stmmac_priv * priv)2691*4882a593Smuzhiyun static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2692*4882a593Smuzhiyun {
2693*4882a593Smuzhiyun 	if (priv->dma_cap.asp) {
2694*4882a593Smuzhiyun 		netdev_info(priv->dev, "Enabling Safety Features\n");
2695*4882a593Smuzhiyun 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2696*4882a593Smuzhiyun 	} else {
2697*4882a593Smuzhiyun 		netdev_info(priv->dev, "No Safety Features support found\n");
2698*4882a593Smuzhiyun 	}
2699*4882a593Smuzhiyun }
2700*4882a593Smuzhiyun 
2701*4882a593Smuzhiyun /**
2702*4882a593Smuzhiyun  * stmmac_hw_setup - setup mac in a usable state.
2703*4882a593Smuzhiyun  *  @dev : pointer to the device structure.
2704*4882a593Smuzhiyun  *  @ptp_register: register PTP if set
2705*4882a593Smuzhiyun  *  Description:
2706*4882a593Smuzhiyun  *  this is the main function to set up the HW in a usable state: the
2707*4882a593Smuzhiyun  *  DMA engine is reset, the core registers are configured (e.g. AXI,
2708*4882a593Smuzhiyun  *  checksum features, timers) and the DMA is ready to start receiving
2709*4882a593Smuzhiyun  *  and transmitting.
2710*4882a593Smuzhiyun  *  Return value:
2711*4882a593Smuzhiyun  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2712*4882a593Smuzhiyun  *  file on failure.
2713*4882a593Smuzhiyun  */
stmmac_hw_setup(struct net_device * dev,bool ptp_register)2714*4882a593Smuzhiyun static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
2715*4882a593Smuzhiyun {
2716*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
2717*4882a593Smuzhiyun 	u32 rx_cnt = priv->plat->rx_queues_to_use;
2718*4882a593Smuzhiyun 	u32 tx_cnt = priv->plat->tx_queues_to_use;
2719*4882a593Smuzhiyun 	u32 chan;
2720*4882a593Smuzhiyun 	int ret;
2721*4882a593Smuzhiyun 
2722*4882a593Smuzhiyun 	/* DMA initialization and SW reset */
2723*4882a593Smuzhiyun 	ret = stmmac_init_dma_engine(priv);
2724*4882a593Smuzhiyun 	if (ret < 0) {
2725*4882a593Smuzhiyun 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2726*4882a593Smuzhiyun 			   __func__);
2727*4882a593Smuzhiyun 		return ret;
2728*4882a593Smuzhiyun 	}
2729*4882a593Smuzhiyun 
2730*4882a593Smuzhiyun 	/* Copy the MAC addr into the HW  */
2731*4882a593Smuzhiyun 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2732*4882a593Smuzhiyun 
2733*4882a593Smuzhiyun 	/* PS and related bits will be programmed according to the speed */
2734*4882a593Smuzhiyun 	if (priv->hw->pcs) {
2735*4882a593Smuzhiyun 		int speed = priv->plat->mac_port_sel_speed;
2736*4882a593Smuzhiyun 
2737*4882a593Smuzhiyun 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
2738*4882a593Smuzhiyun 		    (speed == SPEED_1000)) {
2739*4882a593Smuzhiyun 			priv->hw->ps = speed;
2740*4882a593Smuzhiyun 		} else {
2741*4882a593Smuzhiyun 			dev_warn(priv->device, "invalid port speed\n");
2742*4882a593Smuzhiyun 			priv->hw->ps = 0;
2743*4882a593Smuzhiyun 		}
2744*4882a593Smuzhiyun 	}
2745*4882a593Smuzhiyun 
2746*4882a593Smuzhiyun 	/* Initialize the MAC Core */
2747*4882a593Smuzhiyun 	stmmac_core_init(priv, priv->hw, dev);
2748*4882a593Smuzhiyun 
2749*4882a593Smuzhiyun 	/* Initialize MTL */
2750*4882a593Smuzhiyun 	stmmac_mtl_configuration(priv);
2751*4882a593Smuzhiyun 
2752*4882a593Smuzhiyun 	/* Initialize Safety Features */
2753*4882a593Smuzhiyun 	stmmac_safety_feat_configuration(priv);
2754*4882a593Smuzhiyun 
2755*4882a593Smuzhiyun 	ret = stmmac_rx_ipc(priv, priv->hw);
2756*4882a593Smuzhiyun 	if (!ret) {
2757*4882a593Smuzhiyun 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2758*4882a593Smuzhiyun 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2759*4882a593Smuzhiyun 		priv->hw->rx_csum = 0;
2760*4882a593Smuzhiyun 	}
2761*4882a593Smuzhiyun 
2762*4882a593Smuzhiyun 	/* Enable the MAC Rx/Tx */
2763*4882a593Smuzhiyun 	stmmac_mac_set(priv, priv->ioaddr, true);
2764*4882a593Smuzhiyun 
2765*4882a593Smuzhiyun 	/* Set the HW DMA mode and the COE */
2766*4882a593Smuzhiyun 	stmmac_dma_operation_mode(priv);
2767*4882a593Smuzhiyun 
2768*4882a593Smuzhiyun 	stmmac_mmc_setup(priv);
2769*4882a593Smuzhiyun 
2770*4882a593Smuzhiyun 	if (ptp_register) {
2771*4882a593Smuzhiyun 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2772*4882a593Smuzhiyun 		if (ret < 0)
2773*4882a593Smuzhiyun 			netdev_warn(priv->dev,
2774*4882a593Smuzhiyun 				    "failed to enable PTP reference clock: %pe\n",
2775*4882a593Smuzhiyun 				    ERR_PTR(ret));
2776*4882a593Smuzhiyun 	}
2777*4882a593Smuzhiyun 
2778*4882a593Smuzhiyun 	ret = stmmac_init_ptp(priv);
2779*4882a593Smuzhiyun 	if (ret == -EOPNOTSUPP)
2780*4882a593Smuzhiyun 		netdev_warn(priv->dev, "PTP not supported by HW\n");
2781*4882a593Smuzhiyun 	else if (ret)
2782*4882a593Smuzhiyun 		netdev_warn(priv->dev, "PTP init failed\n");
2783*4882a593Smuzhiyun 	else if (ptp_register)
2784*4882a593Smuzhiyun 		stmmac_ptp_register(priv);
2785*4882a593Smuzhiyun 
2786*4882a593Smuzhiyun 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
2787*4882a593Smuzhiyun 
2788*4882a593Smuzhiyun 	/* Convert the timer from msec to usec */
2789*4882a593Smuzhiyun 	if (!priv->tx_lpi_timer)
2790*4882a593Smuzhiyun 		priv->tx_lpi_timer = eee_timer * 1000;
2791*4882a593Smuzhiyun 
2792*4882a593Smuzhiyun 	if (priv->use_riwt) {
2793*4882a593Smuzhiyun 		if (!priv->rx_riwt)
2794*4882a593Smuzhiyun 			priv->rx_riwt = DEF_DMA_RIWT;
2795*4882a593Smuzhiyun 
2796*4882a593Smuzhiyun 		ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
2797*4882a593Smuzhiyun 	}
2798*4882a593Smuzhiyun 
2799*4882a593Smuzhiyun 	if (priv->hw->pcs)
2800*4882a593Smuzhiyun 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
2801*4882a593Smuzhiyun 
2802*4882a593Smuzhiyun 	/* set TX and RX rings length */
2803*4882a593Smuzhiyun 	stmmac_set_rings_length(priv);
2804*4882a593Smuzhiyun 
2805*4882a593Smuzhiyun 	/* Enable TSO */
2806*4882a593Smuzhiyun 	if (priv->tso) {
2807*4882a593Smuzhiyun 		for (chan = 0; chan < tx_cnt; chan++) {
2808*4882a593Smuzhiyun 			struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2809*4882a593Smuzhiyun 
2810*4882a593Smuzhiyun 			/* TSO and TBS cannot co-exist */
2811*4882a593Smuzhiyun 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
2812*4882a593Smuzhiyun 				continue;
2813*4882a593Smuzhiyun 
2814*4882a593Smuzhiyun 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2815*4882a593Smuzhiyun 		}
2816*4882a593Smuzhiyun 	}
2817*4882a593Smuzhiyun 
2818*4882a593Smuzhiyun 	/* Enable Split Header */
2819*4882a593Smuzhiyun 	if (priv->sph && priv->hw->rx_csum) {
2820*4882a593Smuzhiyun 		for (chan = 0; chan < rx_cnt; chan++)
2821*4882a593Smuzhiyun 			stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
2822*4882a593Smuzhiyun 	}
2823*4882a593Smuzhiyun 
2824*4882a593Smuzhiyun 	/* VLAN Tag Insertion */
2825*4882a593Smuzhiyun 	if (priv->dma_cap.vlins)
2826*4882a593Smuzhiyun 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
2827*4882a593Smuzhiyun 
2828*4882a593Smuzhiyun 	/* TBS */
2829*4882a593Smuzhiyun 	for (chan = 0; chan < tx_cnt; chan++) {
2830*4882a593Smuzhiyun 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2831*4882a593Smuzhiyun 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
2832*4882a593Smuzhiyun 
2833*4882a593Smuzhiyun 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
2834*4882a593Smuzhiyun 	}
2835*4882a593Smuzhiyun 
2836*4882a593Smuzhiyun 	/* Configure real RX and TX queues */
2837*4882a593Smuzhiyun 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
2838*4882a593Smuzhiyun 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
2839*4882a593Smuzhiyun 
2840*4882a593Smuzhiyun 	/* Start the ball rolling... */
2841*4882a593Smuzhiyun 	stmmac_start_all_dma(priv);
2842*4882a593Smuzhiyun 
2843*4882a593Smuzhiyun 	return 0;
2844*4882a593Smuzhiyun }
2845*4882a593Smuzhiyun 
stmmac_hw_teardown(struct net_device * dev)2846*4882a593Smuzhiyun static void stmmac_hw_teardown(struct net_device *dev)
2847*4882a593Smuzhiyun {
2848*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
2849*4882a593Smuzhiyun 
2850*4882a593Smuzhiyun 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
2851*4882a593Smuzhiyun }
2852*4882a593Smuzhiyun 
2853*4882a593Smuzhiyun /**
2854*4882a593Smuzhiyun  *  stmmac_open - open entry point of the driver
2855*4882a593Smuzhiyun  *  @dev : pointer to the device structure.
2856*4882a593Smuzhiyun  *  Description:
2857*4882a593Smuzhiyun  *  This function is the open entry point of the driver.
2858*4882a593Smuzhiyun  *  Return value:
2859*4882a593Smuzhiyun  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2860*4882a593Smuzhiyun  *  file on failure.
2861*4882a593Smuzhiyun  */
stmmac_open(struct net_device * dev)2862*4882a593Smuzhiyun static int stmmac_open(struct net_device *dev)
2863*4882a593Smuzhiyun {
2864*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
2865*4882a593Smuzhiyun 	int bfsize = 0;
2866*4882a593Smuzhiyun 	u32 chan;
2867*4882a593Smuzhiyun 	int ret;
2868*4882a593Smuzhiyun 
2869*4882a593Smuzhiyun 	ret = pm_runtime_get_sync(priv->device);
2870*4882a593Smuzhiyun 	if (ret < 0) {
2871*4882a593Smuzhiyun 		pm_runtime_put_noidle(priv->device);
2872*4882a593Smuzhiyun 		return ret;
2873*4882a593Smuzhiyun 	}
2874*4882a593Smuzhiyun 
2875*4882a593Smuzhiyun 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
2876*4882a593Smuzhiyun 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
2877*4882a593Smuzhiyun 	    priv->hw->xpcs == NULL) {
2878*4882a593Smuzhiyun 		ret = stmmac_init_phy(dev);
2879*4882a593Smuzhiyun 		if (ret) {
2880*4882a593Smuzhiyun 			netdev_err(priv->dev,
2881*4882a593Smuzhiyun 				   "%s: Cannot attach to PHY (error: %d)\n",
2882*4882a593Smuzhiyun 				   __func__, ret);
2883*4882a593Smuzhiyun 			goto init_phy_error;
2884*4882a593Smuzhiyun 		}
2885*4882a593Smuzhiyun 	}
2886*4882a593Smuzhiyun 
2887*4882a593Smuzhiyun 	/* Extra statistics */
2888*4882a593Smuzhiyun 	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2889*4882a593Smuzhiyun 	priv->xstats.threshold = tc;
2890*4882a593Smuzhiyun 
2891*4882a593Smuzhiyun 	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
2892*4882a593Smuzhiyun 	if (bfsize < 0)
2893*4882a593Smuzhiyun 		bfsize = 0;
2894*4882a593Smuzhiyun 
2895*4882a593Smuzhiyun 	if (bfsize < BUF_SIZE_16KiB)
2896*4882a593Smuzhiyun 		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
2897*4882a593Smuzhiyun 
2898*4882a593Smuzhiyun 	priv->dma_buf_sz = bfsize;
2899*4882a593Smuzhiyun 	buf_sz = bfsize;
2900*4882a593Smuzhiyun 
2901*4882a593Smuzhiyun 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2902*4882a593Smuzhiyun 
2903*4882a593Smuzhiyun 	if (!priv->dma_tx_size)
2904*4882a593Smuzhiyun 		priv->dma_tx_size = priv->plat->dma_tx_size ? priv->plat->dma_tx_size :
2905*4882a593Smuzhiyun 				    DMA_DEFAULT_TX_SIZE;
2906*4882a593Smuzhiyun 
2907*4882a593Smuzhiyun 	if (!priv->dma_rx_size)
2908*4882a593Smuzhiyun 		priv->dma_rx_size = priv->plat->dma_rx_size ? priv->plat->dma_rx_size :
2909*4882a593Smuzhiyun 				    DMA_DEFAULT_RX_SIZE;
2910*4882a593Smuzhiyun 
2911*4882a593Smuzhiyun 	/* Earlier check for TBS */
2912*4882a593Smuzhiyun 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
2913*4882a593Smuzhiyun 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2914*4882a593Smuzhiyun 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
2915*4882a593Smuzhiyun 
2916*4882a593Smuzhiyun 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
2917*4882a593Smuzhiyun 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
2918*4882a593Smuzhiyun 	}
2919*4882a593Smuzhiyun 
2920*4882a593Smuzhiyun 	ret = alloc_dma_desc_resources(priv);
2921*4882a593Smuzhiyun 	if (ret < 0) {
2922*4882a593Smuzhiyun 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2923*4882a593Smuzhiyun 			   __func__);
2924*4882a593Smuzhiyun 		goto dma_desc_error;
2925*4882a593Smuzhiyun 	}
2926*4882a593Smuzhiyun 
2927*4882a593Smuzhiyun 	ret = init_dma_desc_rings(dev, GFP_KERNEL);
2928*4882a593Smuzhiyun 	if (ret < 0) {
2929*4882a593Smuzhiyun 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2930*4882a593Smuzhiyun 			   __func__);
2931*4882a593Smuzhiyun 		goto init_error;
2932*4882a593Smuzhiyun 	}
2933*4882a593Smuzhiyun 
2934*4882a593Smuzhiyun 	if (priv->plat->serdes_powerup) {
2935*4882a593Smuzhiyun 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
2936*4882a593Smuzhiyun 		if (ret < 0) {
2937*4882a593Smuzhiyun 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
2938*4882a593Smuzhiyun 				   __func__);
2939*4882a593Smuzhiyun 			goto init_error;
2940*4882a593Smuzhiyun 		}
2941*4882a593Smuzhiyun 	}
2942*4882a593Smuzhiyun 
2943*4882a593Smuzhiyun 	ret = stmmac_hw_setup(dev, true);
2944*4882a593Smuzhiyun 	if (ret < 0) {
2945*4882a593Smuzhiyun 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2946*4882a593Smuzhiyun 		goto init_error;
2947*4882a593Smuzhiyun 	}
2948*4882a593Smuzhiyun 
2949*4882a593Smuzhiyun 	stmmac_init_coalesce(priv);
2950*4882a593Smuzhiyun 
2951*4882a593Smuzhiyun 	phylink_start(priv->phylink);
2952*4882a593Smuzhiyun 	/* We may have called phylink_speed_down before */
2953*4882a593Smuzhiyun 	phylink_speed_up(priv->phylink);
2954*4882a593Smuzhiyun 
2955*4882a593Smuzhiyun 	/* Request the IRQ lines */
2956*4882a593Smuzhiyun 	ret = request_irq(dev->irq, stmmac_interrupt,
2957*4882a593Smuzhiyun 			  IRQF_SHARED, dev->name, dev);
2958*4882a593Smuzhiyun 	if (unlikely(ret < 0)) {
2959*4882a593Smuzhiyun 		netdev_err(priv->dev,
2960*4882a593Smuzhiyun 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2961*4882a593Smuzhiyun 			   __func__, dev->irq, ret);
2962*4882a593Smuzhiyun 		goto irq_error;
2963*4882a593Smuzhiyun 	}
2964*4882a593Smuzhiyun 
2965*4882a593Smuzhiyun 	/* Request the Wake IRQ in case another line is used for WoL */
2966*4882a593Smuzhiyun 	if (priv->wol_irq != dev->irq) {
2967*4882a593Smuzhiyun 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
2968*4882a593Smuzhiyun 				  IRQF_SHARED, dev->name, dev);
2969*4882a593Smuzhiyun 		if (unlikely(ret < 0)) {
2970*4882a593Smuzhiyun 			netdev_err(priv->dev,
2971*4882a593Smuzhiyun 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2972*4882a593Smuzhiyun 				   __func__, priv->wol_irq, ret);
2973*4882a593Smuzhiyun 			goto wolirq_error;
2974*4882a593Smuzhiyun 		}
2975*4882a593Smuzhiyun 	}
2976*4882a593Smuzhiyun 
2977*4882a593Smuzhiyun 	/* Request the LPI IRQ in case a dedicated line is used */
2978*4882a593Smuzhiyun 	if (priv->lpi_irq > 0) {
2979*4882a593Smuzhiyun 		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2980*4882a593Smuzhiyun 				  dev->name, dev);
2981*4882a593Smuzhiyun 		if (unlikely(ret < 0)) {
2982*4882a593Smuzhiyun 			netdev_err(priv->dev,
2983*4882a593Smuzhiyun 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2984*4882a593Smuzhiyun 				   __func__, priv->lpi_irq, ret);
2985*4882a593Smuzhiyun 			goto lpiirq_error;
2986*4882a593Smuzhiyun 		}
2987*4882a593Smuzhiyun 	}
2988*4882a593Smuzhiyun 
2989*4882a593Smuzhiyun 	stmmac_enable_all_queues(priv);
2990*4882a593Smuzhiyun 	netif_tx_start_all_queues(priv->dev);
2991*4882a593Smuzhiyun 
2992*4882a593Smuzhiyun 	return 0;
2993*4882a593Smuzhiyun 
2994*4882a593Smuzhiyun lpiirq_error:
2995*4882a593Smuzhiyun 	if (priv->wol_irq != dev->irq)
2996*4882a593Smuzhiyun 		free_irq(priv->wol_irq, dev);
2997*4882a593Smuzhiyun wolirq_error:
2998*4882a593Smuzhiyun 	free_irq(dev->irq, dev);
2999*4882a593Smuzhiyun irq_error:
3000*4882a593Smuzhiyun 	phylink_stop(priv->phylink);
3001*4882a593Smuzhiyun 
3002*4882a593Smuzhiyun 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3003*4882a593Smuzhiyun 		del_timer_sync(&priv->tx_queue[chan].txtimer);
3004*4882a593Smuzhiyun 
3005*4882a593Smuzhiyun 	stmmac_hw_teardown(dev);
3006*4882a593Smuzhiyun init_error:
3007*4882a593Smuzhiyun 	free_dma_desc_resources(priv);
3008*4882a593Smuzhiyun dma_desc_error:
3009*4882a593Smuzhiyun 	phylink_disconnect_phy(priv->phylink);
3010*4882a593Smuzhiyun init_phy_error:
3011*4882a593Smuzhiyun 	pm_runtime_put(priv->device);
3012*4882a593Smuzhiyun 	return ret;
3013*4882a593Smuzhiyun }
3014*4882a593Smuzhiyun 
3015*4882a593Smuzhiyun /**
3016*4882a593Smuzhiyun  *  stmmac_release - close entry point of the driver
3017*4882a593Smuzhiyun  *  @dev : device pointer.
3018*4882a593Smuzhiyun  *  Description:
3019*4882a593Smuzhiyun  *  This is the stop entry point of the driver.
3020*4882a593Smuzhiyun  */
stmmac_release(struct net_device * dev)3021*4882a593Smuzhiyun static int stmmac_release(struct net_device *dev)
3022*4882a593Smuzhiyun {
3023*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
3024*4882a593Smuzhiyun 	u32 chan;
3025*4882a593Smuzhiyun 
3026*4882a593Smuzhiyun 	if (device_may_wakeup(priv->device))
3027*4882a593Smuzhiyun 		phylink_speed_down(priv->phylink, false);
3028*4882a593Smuzhiyun 	/* Stop and disconnect the PHY */
3029*4882a593Smuzhiyun 	phylink_stop(priv->phylink);
3030*4882a593Smuzhiyun 	phylink_disconnect_phy(priv->phylink);
3031*4882a593Smuzhiyun 
3032*4882a593Smuzhiyun 	if (priv->plat->integrated_phy_power)
3033*4882a593Smuzhiyun 		priv->plat->integrated_phy_power(priv->plat->bsp_priv, false);
3034*4882a593Smuzhiyun 
3035*4882a593Smuzhiyun 	stmmac_disable_all_queues(priv);
3036*4882a593Smuzhiyun 
3037*4882a593Smuzhiyun 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3038*4882a593Smuzhiyun 		del_timer_sync(&priv->tx_queue[chan].txtimer);
3039*4882a593Smuzhiyun 
3040*4882a593Smuzhiyun 	/* Free the IRQ lines */
3041*4882a593Smuzhiyun 	free_irq(dev->irq, dev);
3042*4882a593Smuzhiyun 	if (priv->wol_irq != dev->irq)
3043*4882a593Smuzhiyun 		free_irq(priv->wol_irq, dev);
3044*4882a593Smuzhiyun 	if (priv->lpi_irq > 0)
3045*4882a593Smuzhiyun 		free_irq(priv->lpi_irq, dev);
3046*4882a593Smuzhiyun 
3047*4882a593Smuzhiyun 	if (priv->eee_enabled) {
3048*4882a593Smuzhiyun 		priv->tx_path_in_lpi_mode = false;
3049*4882a593Smuzhiyun 		del_timer_sync(&priv->eee_ctrl_timer);
3050*4882a593Smuzhiyun 	}
3051*4882a593Smuzhiyun 
3052*4882a593Smuzhiyun 	/* Stop TX/RX DMA and clear the descriptors */
3053*4882a593Smuzhiyun 	stmmac_stop_all_dma(priv);
3054*4882a593Smuzhiyun 
3055*4882a593Smuzhiyun 	/* Release and free the Rx/Tx resources */
3056*4882a593Smuzhiyun 	free_dma_desc_resources(priv);
3057*4882a593Smuzhiyun 
3058*4882a593Smuzhiyun 	/* Disable the MAC Rx/Tx */
3059*4882a593Smuzhiyun 	stmmac_mac_set(priv, priv->ioaddr, false);
3060*4882a593Smuzhiyun 
3061*4882a593Smuzhiyun 	/* Power down the SerDes if present */
3062*4882a593Smuzhiyun 	if (priv->plat->serdes_powerdown)
3063*4882a593Smuzhiyun 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3064*4882a593Smuzhiyun 
3065*4882a593Smuzhiyun 	netif_carrier_off(dev);
3066*4882a593Smuzhiyun 
3067*4882a593Smuzhiyun 	stmmac_release_ptp(priv);
3068*4882a593Smuzhiyun 
3069*4882a593Smuzhiyun 	pm_runtime_put(priv->device);
3070*4882a593Smuzhiyun 
3071*4882a593Smuzhiyun 	return 0;
3072*4882a593Smuzhiyun }
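
/*
 * For context (sketch only, based on how network drivers are normally wired
 * up): stmmac_open() and stmmac_release() serve as the ndo_open/ndo_stop
 * callbacks of the netdev, so bringing the interface up or down from
 * userspace ends up here, roughly:
 *
 *	static const struct net_device_ops stmmac_netdev_ops = {
 *		.ndo_open = stmmac_open,
 *		.ndo_stop = stmmac_release,
 *		...
 *	};
 *
 * e.g. "ip link set eth0 up" reaches stmmac_open() via dev_open().
 */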
3073*4882a593Smuzhiyun 
stmmac_vlan_insert(struct stmmac_priv * priv,struct sk_buff * skb,struct stmmac_tx_queue * tx_q)3074*4882a593Smuzhiyun static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3075*4882a593Smuzhiyun 			       struct stmmac_tx_queue *tx_q)
3076*4882a593Smuzhiyun {
3077*4882a593Smuzhiyun 	u16 tag = 0x0, inner_tag = 0x0;
3078*4882a593Smuzhiyun 	u32 inner_type = 0x0;
3079*4882a593Smuzhiyun 	struct dma_desc *p;
3080*4882a593Smuzhiyun 
3081*4882a593Smuzhiyun 	if (!priv->dma_cap.vlins)
3082*4882a593Smuzhiyun 		return false;
3083*4882a593Smuzhiyun 	if (!skb_vlan_tag_present(skb))
3084*4882a593Smuzhiyun 		return false;
3085*4882a593Smuzhiyun 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3086*4882a593Smuzhiyun 		inner_tag = skb_vlan_tag_get(skb);
3087*4882a593Smuzhiyun 		inner_type = STMMAC_VLAN_INSERT;
3088*4882a593Smuzhiyun 	}
3089*4882a593Smuzhiyun 
3090*4882a593Smuzhiyun 	tag = skb_vlan_tag_get(skb);
3091*4882a593Smuzhiyun 
3092*4882a593Smuzhiyun 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3093*4882a593Smuzhiyun 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3094*4882a593Smuzhiyun 	else
3095*4882a593Smuzhiyun 		p = &tx_q->dma_tx[tx_q->cur_tx];
3096*4882a593Smuzhiyun 
3097*4882a593Smuzhiyun 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3098*4882a593Smuzhiyun 		return false;
3099*4882a593Smuzhiyun 
3100*4882a593Smuzhiyun 	stmmac_set_tx_owner(priv, p);
3101*4882a593Smuzhiyun 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3102*4882a593Smuzhiyun 	return true;
3103*4882a593Smuzhiyun }
3104*4882a593Smuzhiyun 
3105*4882a593Smuzhiyun /**
3106*4882a593Smuzhiyun  *  stmmac_tso_allocator - allocate and fill TSO descriptors
3107*4882a593Smuzhiyun  *  @priv: driver private structure
3108*4882a593Smuzhiyun  *  @des: buffer start address
3109*4882a593Smuzhiyun  *  @total_len: total length to fill in descriptors
3110*4882a593Smuzhiyun  *  @last_segment: condition for the last descriptor
3111*4882a593Smuzhiyun  *  @queue: TX queue index
3112*4882a593Smuzhiyun  *  Description:
3113*4882a593Smuzhiyun  *  This function fills descriptors and requests new descriptors according to
3114*4882a593Smuzhiyun  *  the buffer length to fill
3115*4882a593Smuzhiyun  */
stmmac_tso_allocator(struct stmmac_priv * priv,dma_addr_t des,int total_len,bool last_segment,u32 queue)3116*4882a593Smuzhiyun static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3117*4882a593Smuzhiyun 				 int total_len, bool last_segment, u32 queue)
3118*4882a593Smuzhiyun {
3119*4882a593Smuzhiyun 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3120*4882a593Smuzhiyun 	struct dma_desc *desc;
3121*4882a593Smuzhiyun 	u32 buff_size;
3122*4882a593Smuzhiyun 	int tmp_len;
3123*4882a593Smuzhiyun 
3124*4882a593Smuzhiyun 	tmp_len = total_len;
3125*4882a593Smuzhiyun 
3126*4882a593Smuzhiyun 	while (tmp_len > 0) {
3127*4882a593Smuzhiyun 		dma_addr_t curr_addr;
3128*4882a593Smuzhiyun 
3129*4882a593Smuzhiyun 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3130*4882a593Smuzhiyun 						priv->dma_tx_size);
3131*4882a593Smuzhiyun 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3132*4882a593Smuzhiyun 
3133*4882a593Smuzhiyun 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3134*4882a593Smuzhiyun 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3135*4882a593Smuzhiyun 		else
3136*4882a593Smuzhiyun 			desc = &tx_q->dma_tx[tx_q->cur_tx];
3137*4882a593Smuzhiyun 
3138*4882a593Smuzhiyun 		curr_addr = des + (total_len - tmp_len);
3139*4882a593Smuzhiyun 		if (priv->dma_cap.addr64 <= 32)
3140*4882a593Smuzhiyun 			desc->des0 = cpu_to_le32(curr_addr);
3141*4882a593Smuzhiyun 		else
3142*4882a593Smuzhiyun 			stmmac_set_desc_addr(priv, desc, curr_addr);
3143*4882a593Smuzhiyun 
3144*4882a593Smuzhiyun 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3145*4882a593Smuzhiyun 			    TSO_MAX_BUFF_SIZE : tmp_len;
3146*4882a593Smuzhiyun 
3147*4882a593Smuzhiyun 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3148*4882a593Smuzhiyun 				0, 1,
3149*4882a593Smuzhiyun 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3150*4882a593Smuzhiyun 				0, 0);
3151*4882a593Smuzhiyun 
3152*4882a593Smuzhiyun 		tmp_len -= TSO_MAX_BUFF_SIZE;
3153*4882a593Smuzhiyun 	}
3154*4882a593Smuzhiyun }
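
/*
 * Worked example for the loop above (illustrative only): the payload is
 * split into chunks of at most TSO_MAX_BUFF_SIZE (16K - 1 = 16383) bytes,
 * one descriptor per chunk. For a 40000-byte remaining payload:
 *
 *	chunk 0: 16383 bytes
 *	chunk 1: 16383 bytes
 *	chunk 2:  7234 bytes
 *
 * i.e. three descriptors, with the Last Descriptor bit only set on the final
 * chunk and only when last_segment is true for this call.
 */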
3155*4882a593Smuzhiyun 
3156*4882a593Smuzhiyun /**
3157*4882a593Smuzhiyun  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3158*4882a593Smuzhiyun  *  @skb : the socket buffer
3159*4882a593Smuzhiyun  *  @dev : device pointer
3160*4882a593Smuzhiyun  *  Description: this is the transmit function that is called on TSO frames
3161*4882a593Smuzhiyun  *  (support available on GMAC4 and newer chips).
3162*4882a593Smuzhiyun  *  The diagram below shows the ring programming in case of TSO frames:
3163*4882a593Smuzhiyun  *
3164*4882a593Smuzhiyun  *  First Descriptor
3165*4882a593Smuzhiyun  *   --------
3166*4882a593Smuzhiyun  *   | DES0 |---> buffer1 = L2/L3/L4 header
3167*4882a593Smuzhiyun  *   | DES1 |---> TCP Payload (can continue on next descr...)
3168*4882a593Smuzhiyun  *   | DES2 |---> buffer 1 and 2 len
3169*4882a593Smuzhiyun  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3170*4882a593Smuzhiyun  *   --------
3171*4882a593Smuzhiyun  *	|
3172*4882a593Smuzhiyun  *     ...
3173*4882a593Smuzhiyun  *	|
3174*4882a593Smuzhiyun  *   --------
3175*4882a593Smuzhiyun  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
3176*4882a593Smuzhiyun  *   | DES1 | --|
3177*4882a593Smuzhiyun  *   | DES2 | --> buffer 1 and 2 len
3178*4882a593Smuzhiyun  *   | DES3 |
3179*4882a593Smuzhiyun  *   --------
3180*4882a593Smuzhiyun  *
3181*4882a593Smuzhiyun  * The MSS is fixed when TSO is enabled, so there is no need to reprogram the TDES3 ctx field for every frame.
3182*4882a593Smuzhiyun  */
stmmac_tso_xmit(struct sk_buff * skb,struct net_device * dev)3183*4882a593Smuzhiyun static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3184*4882a593Smuzhiyun {
3185*4882a593Smuzhiyun 	struct dma_desc *desc, *first, *mss_desc = NULL;
3186*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
3187*4882a593Smuzhiyun 	int desc_size, tmp_pay_len = 0, first_tx;
3188*4882a593Smuzhiyun 	int nfrags = skb_shinfo(skb)->nr_frags;
3189*4882a593Smuzhiyun 	u32 queue = skb_get_queue_mapping(skb);
3190*4882a593Smuzhiyun 	unsigned int first_entry, tx_packets;
3191*4882a593Smuzhiyun 	struct stmmac_tx_queue *tx_q;
3192*4882a593Smuzhiyun 	bool has_vlan, set_ic;
3193*4882a593Smuzhiyun 	u8 proto_hdr_len, hdr;
3194*4882a593Smuzhiyun 	u32 pay_len, mss;
3195*4882a593Smuzhiyun 	dma_addr_t des;
3196*4882a593Smuzhiyun 	int i;
3197*4882a593Smuzhiyun 
3198*4882a593Smuzhiyun 	tx_q = &priv->tx_queue[queue];
3199*4882a593Smuzhiyun 	first_tx = tx_q->cur_tx;
3200*4882a593Smuzhiyun 
3201*4882a593Smuzhiyun 	/* Compute header lengths */
3202*4882a593Smuzhiyun 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3203*4882a593Smuzhiyun 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3204*4882a593Smuzhiyun 		hdr = sizeof(struct udphdr);
3205*4882a593Smuzhiyun 	} else {
3206*4882a593Smuzhiyun 		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3207*4882a593Smuzhiyun 		hdr = tcp_hdrlen(skb);
3208*4882a593Smuzhiyun 	}
3209*4882a593Smuzhiyun 
3210*4882a593Smuzhiyun 	/* Desc availability based on threshold should be safe enough */
3211*4882a593Smuzhiyun 	if (unlikely(stmmac_tx_avail(priv, queue) <
3212*4882a593Smuzhiyun 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
3213*4882a593Smuzhiyun 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3214*4882a593Smuzhiyun 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3215*4882a593Smuzhiyun 								queue));
3216*4882a593Smuzhiyun 			/* This is a hard error, log it. */
3217*4882a593Smuzhiyun 			netdev_err(priv->dev,
3218*4882a593Smuzhiyun 				   "%s: Tx Ring full when queue awake\n",
3219*4882a593Smuzhiyun 				   __func__);
3220*4882a593Smuzhiyun 		}
3221*4882a593Smuzhiyun 		return NETDEV_TX_BUSY;
3222*4882a593Smuzhiyun 	}
3223*4882a593Smuzhiyun 
3224*4882a593Smuzhiyun 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
3225*4882a593Smuzhiyun 
3226*4882a593Smuzhiyun 	mss = skb_shinfo(skb)->gso_size;
3227*4882a593Smuzhiyun 
3228*4882a593Smuzhiyun 	/* set new MSS value if needed */
3229*4882a593Smuzhiyun 	if (mss != tx_q->mss) {
3230*4882a593Smuzhiyun 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3231*4882a593Smuzhiyun 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3232*4882a593Smuzhiyun 		else
3233*4882a593Smuzhiyun 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3234*4882a593Smuzhiyun 
3235*4882a593Smuzhiyun 		stmmac_set_mss(priv, mss_desc, mss);
3236*4882a593Smuzhiyun 		tx_q->mss = mss;
3237*4882a593Smuzhiyun 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3238*4882a593Smuzhiyun 						priv->dma_tx_size);
3239*4882a593Smuzhiyun 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3240*4882a593Smuzhiyun 	}
3241*4882a593Smuzhiyun 
3242*4882a593Smuzhiyun 	if (netif_msg_tx_queued(priv)) {
3243*4882a593Smuzhiyun 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
3244*4882a593Smuzhiyun 			__func__, hdr, proto_hdr_len, pay_len, mss);
3245*4882a593Smuzhiyun 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
3246*4882a593Smuzhiyun 			skb->data_len);
3247*4882a593Smuzhiyun 	}
3248*4882a593Smuzhiyun 
3249*4882a593Smuzhiyun 	/* Check if VLAN can be inserted by HW */
3250*4882a593Smuzhiyun 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3251*4882a593Smuzhiyun 
3252*4882a593Smuzhiyun 	first_entry = tx_q->cur_tx;
3253*4882a593Smuzhiyun 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3254*4882a593Smuzhiyun 
3255*4882a593Smuzhiyun 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3256*4882a593Smuzhiyun 		desc = &tx_q->dma_entx[first_entry].basic;
3257*4882a593Smuzhiyun 	else
3258*4882a593Smuzhiyun 		desc = &tx_q->dma_tx[first_entry];
3259*4882a593Smuzhiyun 	first = desc;
3260*4882a593Smuzhiyun 
3261*4882a593Smuzhiyun 	if (has_vlan)
3262*4882a593Smuzhiyun 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3263*4882a593Smuzhiyun 
3264*4882a593Smuzhiyun 	/* first descriptor: fill Headers on Buf1 */
3265*4882a593Smuzhiyun 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
3266*4882a593Smuzhiyun 			     DMA_TO_DEVICE);
3267*4882a593Smuzhiyun 	if (dma_mapping_error(priv->device, des))
3268*4882a593Smuzhiyun 		goto dma_map_err;
3269*4882a593Smuzhiyun 
3270*4882a593Smuzhiyun 	tx_q->tx_skbuff_dma[first_entry].buf = des;
3271*4882a593Smuzhiyun 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
3272*4882a593Smuzhiyun 
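	/* With a DMA address width of 32 bits or less the header and the start
	 * of the payload are programmed directly into des0/des1 of the first
	 * descriptor; with wider addressing stmmac_set_desc_addr() is used and
	 * the whole payload goes through the TSO allocator below.
	 */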
3273*4882a593Smuzhiyun 	if (priv->dma_cap.addr64 <= 32) {
3274*4882a593Smuzhiyun 		first->des0 = cpu_to_le32(des);
3275*4882a593Smuzhiyun 
3276*4882a593Smuzhiyun 		/* Fill start of payload in buff2 of first descriptor */
3277*4882a593Smuzhiyun 		if (pay_len)
3278*4882a593Smuzhiyun 			first->des1 = cpu_to_le32(des + proto_hdr_len);
3279*4882a593Smuzhiyun 
3280*4882a593Smuzhiyun 		/* If needed take extra descriptors to fill the remaining payload */
3281*4882a593Smuzhiyun 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3282*4882a593Smuzhiyun 	} else {
3283*4882a593Smuzhiyun 		stmmac_set_desc_addr(priv, first, des);
3284*4882a593Smuzhiyun 		tmp_pay_len = pay_len;
3285*4882a593Smuzhiyun 		des += proto_hdr_len;
3286*4882a593Smuzhiyun 		pay_len = 0;
3287*4882a593Smuzhiyun 	}
3288*4882a593Smuzhiyun 
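	/* Chain additional descriptors for the part of the payload that is not
	 * carried by the first descriptor.
	 */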
3289*4882a593Smuzhiyun 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
3290*4882a593Smuzhiyun 
3291*4882a593Smuzhiyun 	/* Prepare fragments */
3292*4882a593Smuzhiyun 	for (i = 0; i < nfrags; i++) {
3293*4882a593Smuzhiyun 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3294*4882a593Smuzhiyun 
3295*4882a593Smuzhiyun 		des = skb_frag_dma_map(priv->device, frag, 0,
3296*4882a593Smuzhiyun 				       skb_frag_size(frag),
3297*4882a593Smuzhiyun 				       DMA_TO_DEVICE);
3298*4882a593Smuzhiyun 		if (dma_mapping_error(priv->device, des))
3299*4882a593Smuzhiyun 			goto dma_map_err;
3300*4882a593Smuzhiyun 
3301*4882a593Smuzhiyun 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
3302*4882a593Smuzhiyun 				     (i == nfrags - 1), queue);
3303*4882a593Smuzhiyun 
3304*4882a593Smuzhiyun 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
3305*4882a593Smuzhiyun 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
3306*4882a593Smuzhiyun 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
3307*4882a593Smuzhiyun 	}
3308*4882a593Smuzhiyun 
3309*4882a593Smuzhiyun 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
3310*4882a593Smuzhiyun 
3311*4882a593Smuzhiyun 	/* Only the last descriptor gets to point to the skb. */
3312*4882a593Smuzhiyun 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
3313*4882a593Smuzhiyun 
3314*4882a593Smuzhiyun 	/* Manage tx mitigation */
3315*4882a593Smuzhiyun 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
3316*4882a593Smuzhiyun 	tx_q->tx_count_frames += tx_packets;
3317*4882a593Smuzhiyun 
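	/* Decide whether this frame should raise a Tx completion interrupt:
	 * always for HW-timestamped frames, never when frame coalescing is
	 * disabled (the cleanup timer is used instead), otherwise roughly
	 * once every tx_coal_frames packets.
	 */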
3318*4882a593Smuzhiyun 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3319*4882a593Smuzhiyun 		set_ic = true;
3320*4882a593Smuzhiyun 	else if (!priv->tx_coal_frames)
3321*4882a593Smuzhiyun 		set_ic = false;
3322*4882a593Smuzhiyun 	else if (tx_packets > priv->tx_coal_frames)
3323*4882a593Smuzhiyun 		set_ic = true;
3324*4882a593Smuzhiyun 	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3325*4882a593Smuzhiyun 		set_ic = true;
3326*4882a593Smuzhiyun 	else
3327*4882a593Smuzhiyun 		set_ic = false;
3328*4882a593Smuzhiyun 
3329*4882a593Smuzhiyun 	if (set_ic) {
3330*4882a593Smuzhiyun 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
3331*4882a593Smuzhiyun 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3332*4882a593Smuzhiyun 		else
3333*4882a593Smuzhiyun 			desc = &tx_q->dma_tx[tx_q->cur_tx];
3334*4882a593Smuzhiyun 
3335*4882a593Smuzhiyun 		tx_q->tx_count_frames = 0;
3336*4882a593Smuzhiyun 		stmmac_set_tx_ic(priv, desc);
3337*4882a593Smuzhiyun 		priv->xstats.tx_set_ic_bit++;
3338*4882a593Smuzhiyun 	}
3339*4882a593Smuzhiyun 
3340*4882a593Smuzhiyun 	/* We've used all descriptors we need for this skb, however,
3341*4882a593Smuzhiyun 	 * advance cur_tx so that it references a fresh descriptor.
3342*4882a593Smuzhiyun 	 * ndo_start_xmit will fill this descriptor the next time it's
3343*4882a593Smuzhiyun 	 * called and stmmac_tx_clean may clean up to this descriptor.
3344*4882a593Smuzhiyun 	 */
3345*4882a593Smuzhiyun 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3346*4882a593Smuzhiyun 
3347*4882a593Smuzhiyun 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3348*4882a593Smuzhiyun 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3349*4882a593Smuzhiyun 			  __func__);
3350*4882a593Smuzhiyun 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3351*4882a593Smuzhiyun 	}
3352*4882a593Smuzhiyun 
3353*4882a593Smuzhiyun 	dev->stats.tx_bytes += skb->len;
3354*4882a593Smuzhiyun 	priv->xstats.tx_tso_frames++;
3355*4882a593Smuzhiyun 	priv->xstats.tx_tso_nfrags += nfrags;
3356*4882a593Smuzhiyun 
3357*4882a593Smuzhiyun 	if (priv->sarc_type)
3358*4882a593Smuzhiyun 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3359*4882a593Smuzhiyun 
3360*4882a593Smuzhiyun 	skb_tx_timestamp(skb);
3361*4882a593Smuzhiyun 
3362*4882a593Smuzhiyun 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3363*4882a593Smuzhiyun 		     priv->hwts_tx_en)) {
3364*4882a593Smuzhiyun 		/* declare that device is doing timestamping */
3365*4882a593Smuzhiyun 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3366*4882a593Smuzhiyun 		stmmac_enable_tx_timestamp(priv, first);
3367*4882a593Smuzhiyun 	}
3368*4882a593Smuzhiyun 
3369*4882a593Smuzhiyun 	/* Complete the first descriptor before granting the DMA */
3370*4882a593Smuzhiyun 	stmmac_prepare_tso_tx_desc(priv, first, 1,
3371*4882a593Smuzhiyun 			proto_hdr_len,
3372*4882a593Smuzhiyun 			pay_len,
3373*4882a593Smuzhiyun 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
3374*4882a593Smuzhiyun 			hdr / 4, (skb->len - proto_hdr_len));
3375*4882a593Smuzhiyun 
3376*4882a593Smuzhiyun 	/* If context desc is used to change MSS */
3377*4882a593Smuzhiyun 	if (mss_desc) {
3378*4882a593Smuzhiyun 		/* Make sure that the first descriptor has been completely
3379*4882a593Smuzhiyun 		 * written, including its own bit. This is because the MSS
3380*4882a593Smuzhiyun 		 * context descriptor actually precedes the first descriptor,
3381*4882a593Smuzhiyun 		 * so its own bit must be the last thing written.
3382*4882a593Smuzhiyun 		 */
3383*4882a593Smuzhiyun 		dma_wmb();
3384*4882a593Smuzhiyun 		stmmac_set_tx_owner(priv, mss_desc);
3385*4882a593Smuzhiyun 	}
3386*4882a593Smuzhiyun 
3387*4882a593Smuzhiyun 	/* The own bit must be the latest setting done when preparing the
3388*4882a593Smuzhiyun 	 * descriptor, and then a barrier is needed to make sure that
3389*4882a593Smuzhiyun 	 * all is coherent before granting the DMA engine.
3390*4882a593Smuzhiyun 	 */
3391*4882a593Smuzhiyun 	wmb();
3392*4882a593Smuzhiyun 
3393*4882a593Smuzhiyun 	if (netif_msg_pktdata(priv)) {
3394*4882a593Smuzhiyun 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3395*4882a593Smuzhiyun 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3396*4882a593Smuzhiyun 			tx_q->cur_tx, first, nfrags);
3397*4882a593Smuzhiyun 		pr_info(">>> frame to be transmitted: ");
3398*4882a593Smuzhiyun 		print_pkt(skb->data, skb_headlen(skb));
3399*4882a593Smuzhiyun 	}
3400*4882a593Smuzhiyun 
3401*4882a593Smuzhiyun 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3402*4882a593Smuzhiyun 
3403*4882a593Smuzhiyun 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
3404*4882a593Smuzhiyun 		desc_size = sizeof(struct dma_edesc);
3405*4882a593Smuzhiyun 	else
3406*4882a593Smuzhiyun 		desc_size = sizeof(struct dma_desc);
3407*4882a593Smuzhiyun 
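	/* Advance the queue's tail pointer past the last prepared descriptor;
	 * on cores with a tail-pointer register this tells the DMA engine to
	 * fetch the new descriptors.
	 */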
3408*4882a593Smuzhiyun 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3409*4882a593Smuzhiyun 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3410*4882a593Smuzhiyun 	stmmac_tx_timer_arm(priv, queue);
3411*4882a593Smuzhiyun 
3412*4882a593Smuzhiyun 	return NETDEV_TX_OK;
3413*4882a593Smuzhiyun 
3414*4882a593Smuzhiyun dma_map_err:
3415*4882a593Smuzhiyun 	dev_err(priv->device, "Tx dma map failed\n");
3416*4882a593Smuzhiyun 	dev_kfree_skb(skb);
3417*4882a593Smuzhiyun 	priv->dev->stats.tx_dropped++;
3418*4882a593Smuzhiyun 	return NETDEV_TX_OK;
3419*4882a593Smuzhiyun }
3420*4882a593Smuzhiyun 
3421*4882a593Smuzhiyun /**
3422*4882a593Smuzhiyun  *  stmmac_xmit - Tx entry point of the driver
3423*4882a593Smuzhiyun  *  @skb : the socket buffer
3424*4882a593Smuzhiyun  *  @dev : device pointer
3425*4882a593Smuzhiyun  *  Description : this is the tx entry point of the driver.
3426*4882a593Smuzhiyun  *  It programs the chain or the ring and supports oversized frames
3427*4882a593Smuzhiyun  *  and SG feature.
3428*4882a593Smuzhiyun  */
3429*4882a593Smuzhiyun static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3430*4882a593Smuzhiyun {
3431*4882a593Smuzhiyun 	unsigned int first_entry, tx_packets, enh_desc;
3432*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
3433*4882a593Smuzhiyun 	unsigned int nopaged_len = skb_headlen(skb);
3434*4882a593Smuzhiyun 	int i, csum_insertion = 0, is_jumbo = 0;
3435*4882a593Smuzhiyun 	u32 queue = skb_get_queue_mapping(skb);
3436*4882a593Smuzhiyun 	int nfrags = skb_shinfo(skb)->nr_frags;
3437*4882a593Smuzhiyun 	int gso = skb_shinfo(skb)->gso_type;
3438*4882a593Smuzhiyun 	struct dma_edesc *tbs_desc = NULL;
3439*4882a593Smuzhiyun 	int entry, desc_size, first_tx;
3440*4882a593Smuzhiyun 	struct dma_desc *desc, *first;
3441*4882a593Smuzhiyun 	struct stmmac_tx_queue *tx_q;
3442*4882a593Smuzhiyun 	bool has_vlan, set_ic;
3443*4882a593Smuzhiyun 	dma_addr_t des;
3444*4882a593Smuzhiyun 
3445*4882a593Smuzhiyun 	tx_q = &priv->tx_queue[queue];
3446*4882a593Smuzhiyun 	first_tx = tx_q->cur_tx;
3447*4882a593Smuzhiyun 
3448*4882a593Smuzhiyun 	if (priv->tx_path_in_lpi_mode)
3449*4882a593Smuzhiyun 		stmmac_disable_eee_mode(priv);
3450*4882a593Smuzhiyun 
3451*4882a593Smuzhiyun 	/* Manage oversized TCP frames for GMAC4 device */
3452*4882a593Smuzhiyun 	if (skb_is_gso(skb) && priv->tso) {
3453*4882a593Smuzhiyun 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3454*4882a593Smuzhiyun 			return stmmac_tso_xmit(skb, dev);
3455*4882a593Smuzhiyun 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
3456*4882a593Smuzhiyun 			return stmmac_tso_xmit(skb, dev);
3457*4882a593Smuzhiyun 	}
3458*4882a593Smuzhiyun 
3459*4882a593Smuzhiyun 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3460*4882a593Smuzhiyun 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3461*4882a593Smuzhiyun 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3462*4882a593Smuzhiyun 								queue));
3463*4882a593Smuzhiyun 			/* This is a hard error, log it. */
3464*4882a593Smuzhiyun 			netdev_err(priv->dev,
3465*4882a593Smuzhiyun 				   "%s: Tx Ring full when queue awake\n",
3466*4882a593Smuzhiyun 				   __func__);
3467*4882a593Smuzhiyun 		}
3468*4882a593Smuzhiyun 		return NETDEV_TX_BUSY;
3469*4882a593Smuzhiyun 	}
3470*4882a593Smuzhiyun 
3471*4882a593Smuzhiyun 	/* Check if VLAN can be inserted by HW */
3472*4882a593Smuzhiyun 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3473*4882a593Smuzhiyun 
3474*4882a593Smuzhiyun 	entry = tx_q->cur_tx;
3475*4882a593Smuzhiyun 	first_entry = entry;
3476*4882a593Smuzhiyun 	WARN_ON(tx_q->tx_skbuff[first_entry]);
3477*4882a593Smuzhiyun 
3478*4882a593Smuzhiyun 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3479*4882a593Smuzhiyun 
3480*4882a593Smuzhiyun 	if (likely(priv->extend_desc))
3481*4882a593Smuzhiyun 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3482*4882a593Smuzhiyun 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3483*4882a593Smuzhiyun 		desc = &tx_q->dma_entx[entry].basic;
3484*4882a593Smuzhiyun 	else
3485*4882a593Smuzhiyun 		desc = tx_q->dma_tx + entry;
3486*4882a593Smuzhiyun 
3487*4882a593Smuzhiyun 	first = desc;
3488*4882a593Smuzhiyun 
3489*4882a593Smuzhiyun 	if (has_vlan)
3490*4882a593Smuzhiyun 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3491*4882a593Smuzhiyun 
3492*4882a593Smuzhiyun 	enh_desc = priv->plat->enh_desc;
3493*4882a593Smuzhiyun 	/* To program the descriptors according to the size of the frame */
3494*4882a593Smuzhiyun 	if (enh_desc)
3495*4882a593Smuzhiyun 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3496*4882a593Smuzhiyun 
3497*4882a593Smuzhiyun 	if (unlikely(is_jumbo)) {
3498*4882a593Smuzhiyun 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3499*4882a593Smuzhiyun 		if (unlikely(entry < 0) && (entry != -EINVAL))
3500*4882a593Smuzhiyun 			goto dma_map_err;
3501*4882a593Smuzhiyun 	}
3502*4882a593Smuzhiyun 
3503*4882a593Smuzhiyun 	for (i = 0; i < nfrags; i++) {
3504*4882a593Smuzhiyun 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3505*4882a593Smuzhiyun 		int len = skb_frag_size(frag);
3506*4882a593Smuzhiyun 		bool last_segment = (i == (nfrags - 1));
3507*4882a593Smuzhiyun 
3508*4882a593Smuzhiyun 		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
3509*4882a593Smuzhiyun 		WARN_ON(tx_q->tx_skbuff[entry]);
3510*4882a593Smuzhiyun 
3511*4882a593Smuzhiyun 		if (likely(priv->extend_desc))
3512*4882a593Smuzhiyun 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3513*4882a593Smuzhiyun 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3514*4882a593Smuzhiyun 			desc = &tx_q->dma_entx[entry].basic;
3515*4882a593Smuzhiyun 		else
3516*4882a593Smuzhiyun 			desc = tx_q->dma_tx + entry;
3517*4882a593Smuzhiyun 
3518*4882a593Smuzhiyun 		des = skb_frag_dma_map(priv->device, frag, 0, len,
3519*4882a593Smuzhiyun 				       DMA_TO_DEVICE);
3520*4882a593Smuzhiyun 		if (dma_mapping_error(priv->device, des))
3521*4882a593Smuzhiyun 			goto dma_map_err; /* should reuse desc w/o issues */
3522*4882a593Smuzhiyun 
3523*4882a593Smuzhiyun 		tx_q->tx_skbuff_dma[entry].buf = des;
3524*4882a593Smuzhiyun 
3525*4882a593Smuzhiyun 		stmmac_set_desc_addr(priv, desc, des);
3526*4882a593Smuzhiyun 
3527*4882a593Smuzhiyun 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
3528*4882a593Smuzhiyun 		tx_q->tx_skbuff_dma[entry].len = len;
3529*4882a593Smuzhiyun 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3530*4882a593Smuzhiyun 
3531*4882a593Smuzhiyun 		/* Prepare the descriptor and set the own bit too */
3532*4882a593Smuzhiyun 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3533*4882a593Smuzhiyun 				priv->mode, 1, last_segment, skb->len);
3534*4882a593Smuzhiyun 	}
3535*4882a593Smuzhiyun 
3536*4882a593Smuzhiyun 	/* Only the last descriptor gets to point to the skb. */
3537*4882a593Smuzhiyun 	tx_q->tx_skbuff[entry] = skb;
3538*4882a593Smuzhiyun 
3539*4882a593Smuzhiyun 	/* According to the coalesce parameter the IC bit for the latest
3540*4882a593Smuzhiyun 	 * segment is reset and the timer re-started to clean the tx status.
3541*4882a593Smuzhiyun 	 * This approach takes care of the fragments: desc is the first
3542*4882a593Smuzhiyun 	 * element in case of no SG.
3543*4882a593Smuzhiyun 	 */
3544*4882a593Smuzhiyun 	tx_packets = (entry + 1) - first_tx;
3545*4882a593Smuzhiyun 	tx_q->tx_count_frames += tx_packets;
3546*4882a593Smuzhiyun 
3547*4882a593Smuzhiyun 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3548*4882a593Smuzhiyun 		set_ic = true;
3549*4882a593Smuzhiyun 	else if (!priv->tx_coal_frames)
3550*4882a593Smuzhiyun 		set_ic = false;
3551*4882a593Smuzhiyun 	else if (tx_packets > priv->tx_coal_frames)
3552*4882a593Smuzhiyun 		set_ic = true;
3553*4882a593Smuzhiyun 	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3554*4882a593Smuzhiyun 		set_ic = true;
3555*4882a593Smuzhiyun 	else
3556*4882a593Smuzhiyun 		set_ic = false;
3557*4882a593Smuzhiyun 
3558*4882a593Smuzhiyun 	if (set_ic) {
3559*4882a593Smuzhiyun 		if (likely(priv->extend_desc))
3560*4882a593Smuzhiyun 			desc = &tx_q->dma_etx[entry].basic;
3561*4882a593Smuzhiyun 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3562*4882a593Smuzhiyun 			desc = &tx_q->dma_entx[entry].basic;
3563*4882a593Smuzhiyun 		else
3564*4882a593Smuzhiyun 			desc = &tx_q->dma_tx[entry];
3565*4882a593Smuzhiyun 
3566*4882a593Smuzhiyun 		tx_q->tx_count_frames = 0;
3567*4882a593Smuzhiyun 		stmmac_set_tx_ic(priv, desc);
3568*4882a593Smuzhiyun 		priv->xstats.tx_set_ic_bit++;
3569*4882a593Smuzhiyun 	}
3570*4882a593Smuzhiyun 
3571*4882a593Smuzhiyun 	/* We've used all descriptors we need for this skb, however,
3572*4882a593Smuzhiyun 	 * advance cur_tx so that it references a fresh descriptor.
3573*4882a593Smuzhiyun 	 * ndo_start_xmit will fill this descriptor the next time it's
3574*4882a593Smuzhiyun 	 * called and stmmac_tx_clean may clean up to this descriptor.
3575*4882a593Smuzhiyun 	 */
3576*4882a593Smuzhiyun 	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
3577*4882a593Smuzhiyun 	tx_q->cur_tx = entry;
3578*4882a593Smuzhiyun 
3579*4882a593Smuzhiyun 	if (netif_msg_pktdata(priv)) {
3580*4882a593Smuzhiyun 		netdev_dbg(priv->dev,
3581*4882a593Smuzhiyun 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3582*4882a593Smuzhiyun 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3583*4882a593Smuzhiyun 			   entry, first, nfrags);
3584*4882a593Smuzhiyun 
3585*4882a593Smuzhiyun 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3586*4882a593Smuzhiyun 		print_pkt(skb->data, skb->len);
3587*4882a593Smuzhiyun 	}
3588*4882a593Smuzhiyun 
3589*4882a593Smuzhiyun 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3590*4882a593Smuzhiyun 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3591*4882a593Smuzhiyun 			  __func__);
3592*4882a593Smuzhiyun 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3593*4882a593Smuzhiyun 	}
3594*4882a593Smuzhiyun 
3595*4882a593Smuzhiyun 	dev->stats.tx_bytes += skb->len;
3596*4882a593Smuzhiyun 
3597*4882a593Smuzhiyun 	if (priv->sarc_type)
3598*4882a593Smuzhiyun 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3599*4882a593Smuzhiyun 
3600*4882a593Smuzhiyun 	skb_tx_timestamp(skb);
3601*4882a593Smuzhiyun 
3602*4882a593Smuzhiyun 	/* Ready to fill the first descriptor and set the OWN bit w/o any
3603*4882a593Smuzhiyun 	 * problems because all the descriptors are actually ready to be
3604*4882a593Smuzhiyun 	 * passed to the DMA engine.
3605*4882a593Smuzhiyun 	 */
3606*4882a593Smuzhiyun 	if (likely(!is_jumbo)) {
3607*4882a593Smuzhiyun 		bool last_segment = (nfrags == 0);
3608*4882a593Smuzhiyun 
3609*4882a593Smuzhiyun 		des = dma_map_single(priv->device, skb->data,
3610*4882a593Smuzhiyun 				     nopaged_len, DMA_TO_DEVICE);
3611*4882a593Smuzhiyun 		if (dma_mapping_error(priv->device, des))
3612*4882a593Smuzhiyun 			goto dma_map_err;
3613*4882a593Smuzhiyun 
3614*4882a593Smuzhiyun 		tx_q->tx_skbuff_dma[first_entry].buf = des;
3615*4882a593Smuzhiyun 
3616*4882a593Smuzhiyun 		stmmac_set_desc_addr(priv, first, des);
3617*4882a593Smuzhiyun 
3618*4882a593Smuzhiyun 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3619*4882a593Smuzhiyun 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3620*4882a593Smuzhiyun 
3621*4882a593Smuzhiyun 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3622*4882a593Smuzhiyun 			     priv->hwts_tx_en)) {
3623*4882a593Smuzhiyun 			/* declare that device is doing timestamping */
3624*4882a593Smuzhiyun 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3625*4882a593Smuzhiyun 			stmmac_enable_tx_timestamp(priv, first);
3626*4882a593Smuzhiyun 		}
3627*4882a593Smuzhiyun 
3628*4882a593Smuzhiyun 		/* Prepare the first descriptor setting the OWN bit too */
3629*4882a593Smuzhiyun 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3630*4882a593Smuzhiyun 				csum_insertion, priv->mode, 0, last_segment,
3631*4882a593Smuzhiyun 				skb->len);
3632*4882a593Smuzhiyun 	}
3633*4882a593Smuzhiyun 
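	/* With time-based scheduling enabled, program the launch time taken
	 * from skb->tstamp into the enhanced descriptor.
	 */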
3634*4882a593Smuzhiyun 	if (tx_q->tbs & STMMAC_TBS_EN) {
3635*4882a593Smuzhiyun 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
3636*4882a593Smuzhiyun 
3637*4882a593Smuzhiyun 		tbs_desc = &tx_q->dma_entx[first_entry];
3638*4882a593Smuzhiyun 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
3639*4882a593Smuzhiyun 	}
3640*4882a593Smuzhiyun 
3641*4882a593Smuzhiyun 	stmmac_set_tx_owner(priv, first);
3642*4882a593Smuzhiyun 
3643*4882a593Smuzhiyun 	/* The own bit must be the latest setting done when preparing the
3644*4882a593Smuzhiyun 	 * descriptor, and then a barrier is needed to make sure that
3645*4882a593Smuzhiyun 	 * all is coherent before granting the DMA engine.
3646*4882a593Smuzhiyun 	 */
3647*4882a593Smuzhiyun 	wmb();
3648*4882a593Smuzhiyun 
3649*4882a593Smuzhiyun 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3650*4882a593Smuzhiyun 
3651*4882a593Smuzhiyun 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
3652*4882a593Smuzhiyun 
3653*4882a593Smuzhiyun 	if (likely(priv->extend_desc))
3654*4882a593Smuzhiyun 		desc_size = sizeof(struct dma_extended_desc);
3655*4882a593Smuzhiyun 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3656*4882a593Smuzhiyun 		desc_size = sizeof(struct dma_edesc);
3657*4882a593Smuzhiyun 	else
3658*4882a593Smuzhiyun 		desc_size = sizeof(struct dma_desc);
3659*4882a593Smuzhiyun 
3660*4882a593Smuzhiyun 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3661*4882a593Smuzhiyun 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3662*4882a593Smuzhiyun 	stmmac_tx_timer_arm(priv, queue);
3663*4882a593Smuzhiyun 
3664*4882a593Smuzhiyun 	return NETDEV_TX_OK;
3665*4882a593Smuzhiyun 
3666*4882a593Smuzhiyun dma_map_err:
3667*4882a593Smuzhiyun 	netdev_err(priv->dev, "Tx DMA map failed\n");
3668*4882a593Smuzhiyun 	dev_kfree_skb(skb);
3669*4882a593Smuzhiyun 	priv->dev->stats.tx_dropped++;
3670*4882a593Smuzhiyun 	return NETDEV_TX_OK;
3671*4882a593Smuzhiyun }
3672*4882a593Smuzhiyun 
3673*4882a593Smuzhiyun static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3674*4882a593Smuzhiyun {
3675*4882a593Smuzhiyun 	struct vlan_ethhdr *veth;
3676*4882a593Smuzhiyun 	__be16 vlan_proto;
3677*4882a593Smuzhiyun 	u16 vlanid;
3678*4882a593Smuzhiyun 
3679*4882a593Smuzhiyun 	veth = (struct vlan_ethhdr *)skb->data;
3680*4882a593Smuzhiyun 	vlan_proto = veth->h_vlan_proto;
3681*4882a593Smuzhiyun 
3682*4882a593Smuzhiyun 	if ((vlan_proto == htons(ETH_P_8021Q) &&
3683*4882a593Smuzhiyun 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3684*4882a593Smuzhiyun 	    (vlan_proto == htons(ETH_P_8021AD) &&
3685*4882a593Smuzhiyun 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3686*4882a593Smuzhiyun 		/* pop the vlan tag */
3687*4882a593Smuzhiyun 		vlanid = ntohs(veth->h_vlan_TCI);
3688*4882a593Smuzhiyun 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3689*4882a593Smuzhiyun 		skb_pull(skb, VLAN_HLEN);
3690*4882a593Smuzhiyun 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3691*4882a593Smuzhiyun 	}
3692*4882a593Smuzhiyun }
3693*4882a593Smuzhiyun 
3694*4882a593Smuzhiyun /**
3695*4882a593Smuzhiyun  * stmmac_rx_refill - refill used skb preallocated buffers
3696*4882a593Smuzhiyun  * @priv: driver private structure
3697*4882a593Smuzhiyun  * @queue: RX queue index
3698*4882a593Smuzhiyun  * Description : this is to refill the used receive buffers for the
3699*4882a593Smuzhiyun  * zero-copy reception process.
3700*4882a593Smuzhiyun  */
3701*4882a593Smuzhiyun static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3702*4882a593Smuzhiyun {
3703*4882a593Smuzhiyun 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3704*4882a593Smuzhiyun 	int len, dirty = stmmac_rx_dirty(priv, queue);
3705*4882a593Smuzhiyun 	unsigned int entry = rx_q->dirty_rx;
3706*4882a593Smuzhiyun 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
3707*4882a593Smuzhiyun 
3708*4882a593Smuzhiyun 	if (priv->dma_cap.addr64 <= 32)
3709*4882a593Smuzhiyun 		gfp |= GFP_DMA32;
3710*4882a593Smuzhiyun 
3711*4882a593Smuzhiyun 	len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
3712*4882a593Smuzhiyun 
3713*4882a593Smuzhiyun 	while (dirty-- > 0) {
3714*4882a593Smuzhiyun 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
3715*4882a593Smuzhiyun 		struct dma_desc *p;
3716*4882a593Smuzhiyun 		bool use_rx_wd;
3717*4882a593Smuzhiyun 
3718*4882a593Smuzhiyun 		if (priv->extend_desc)
3719*4882a593Smuzhiyun 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3720*4882a593Smuzhiyun 		else
3721*4882a593Smuzhiyun 			p = rx_q->dma_rx + entry;
3722*4882a593Smuzhiyun 
3723*4882a593Smuzhiyun 		if (!buf->page) {
3724*4882a593Smuzhiyun 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
3725*4882a593Smuzhiyun 			if (!buf->page)
3726*4882a593Smuzhiyun 				break;
3727*4882a593Smuzhiyun 		}
3728*4882a593Smuzhiyun 
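		/* With Split Header (SPH) enabled a second page backs the
		 * payload while buf->page receives the header.
		 */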
3729*4882a593Smuzhiyun 		if (priv->sph && !buf->sec_page) {
3730*4882a593Smuzhiyun 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
3731*4882a593Smuzhiyun 			if (!buf->sec_page)
3732*4882a593Smuzhiyun 				break;
3733*4882a593Smuzhiyun 
3734*4882a593Smuzhiyun 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
3735*4882a593Smuzhiyun 		}
3736*4882a593Smuzhiyun 
3737*4882a593Smuzhiyun 		buf->addr = page_pool_get_dma_addr(buf->page);
3738*4882a593Smuzhiyun 		stmmac_set_desc_addr(priv, p, buf->addr);
3739*4882a593Smuzhiyun 		if (priv->sph)
3740*4882a593Smuzhiyun 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
3741*4882a593Smuzhiyun 		else
3742*4882a593Smuzhiyun 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
3743*4882a593Smuzhiyun 		stmmac_refill_desc3(priv, rx_q, p);
3744*4882a593Smuzhiyun 
3745*4882a593Smuzhiyun 		rx_q->rx_count_frames++;
3746*4882a593Smuzhiyun 		rx_q->rx_count_frames += priv->rx_coal_frames;
3747*4882a593Smuzhiyun 		if (rx_q->rx_count_frames > priv->rx_coal_frames)
3748*4882a593Smuzhiyun 			rx_q->rx_count_frames = 0;
3749*4882a593Smuzhiyun 
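		/* When the RX interrupt watchdog (RIWT) is in use, suppress
		 * the per-descriptor interrupt-on-completion and let the
		 * watchdog timer signal reception instead.
		 */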
3750*4882a593Smuzhiyun 		use_rx_wd = !priv->rx_coal_frames;
3751*4882a593Smuzhiyun 		use_rx_wd |= rx_q->rx_count_frames > 0;
3752*4882a593Smuzhiyun 		if (!priv->use_riwt)
3753*4882a593Smuzhiyun 			use_rx_wd = false;
3754*4882a593Smuzhiyun 
3755*4882a593Smuzhiyun 		dma_wmb();
3756*4882a593Smuzhiyun 		stmmac_set_rx_owner(priv, p, use_rx_wd);
3757*4882a593Smuzhiyun 
3758*4882a593Smuzhiyun 		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
3759*4882a593Smuzhiyun 	}
3760*4882a593Smuzhiyun 	rx_q->dirty_rx = entry;
3761*4882a593Smuzhiyun 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3762*4882a593Smuzhiyun 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
3763*4882a593Smuzhiyun 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
3764*4882a593Smuzhiyun }
3765*4882a593Smuzhiyun 
3766*4882a593Smuzhiyun static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
3767*4882a593Smuzhiyun 				       struct dma_desc *p,
3768*4882a593Smuzhiyun 				       int status, unsigned int len)
3769*4882a593Smuzhiyun {
3770*4882a593Smuzhiyun 	unsigned int plen = 0, hlen = 0;
3771*4882a593Smuzhiyun 	int coe = priv->hw->rx_csum;
3772*4882a593Smuzhiyun 
3773*4882a593Smuzhiyun 	/* Not first descriptor, buffer is always zero */
3774*4882a593Smuzhiyun 	if (priv->sph && len)
3775*4882a593Smuzhiyun 		return 0;
3776*4882a593Smuzhiyun 
3777*4882a593Smuzhiyun 	/* First descriptor, get split header length */
3778*4882a593Smuzhiyun 	stmmac_get_rx_header_len(priv, p, &hlen);
3779*4882a593Smuzhiyun 	if (priv->sph && hlen) {
3780*4882a593Smuzhiyun 		priv->xstats.rx_split_hdr_pkt_n++;
3781*4882a593Smuzhiyun 		return hlen;
3782*4882a593Smuzhiyun 	}
3783*4882a593Smuzhiyun 
3784*4882a593Smuzhiyun 	/* First descriptor, not last descriptor and not split header */
3785*4882a593Smuzhiyun 	if (status & rx_not_ls)
3786*4882a593Smuzhiyun 		return priv->dma_buf_sz;
3787*4882a593Smuzhiyun 
3788*4882a593Smuzhiyun 	plen = stmmac_get_rx_frame_len(priv, p, coe);
3789*4882a593Smuzhiyun 
3790*4882a593Smuzhiyun 	/* First descriptor and last descriptor and not split header */
3791*4882a593Smuzhiyun 	return min_t(unsigned int, priv->dma_buf_sz, plen);
3792*4882a593Smuzhiyun }
3793*4882a593Smuzhiyun 
3794*4882a593Smuzhiyun static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
3795*4882a593Smuzhiyun 				       struct dma_desc *p,
3796*4882a593Smuzhiyun 				       int status, unsigned int len)
3797*4882a593Smuzhiyun {
3798*4882a593Smuzhiyun 	int coe = priv->hw->rx_csum;
3799*4882a593Smuzhiyun 	unsigned int plen = 0;
3800*4882a593Smuzhiyun 
3801*4882a593Smuzhiyun 	/* Not split header, buffer is not available */
3802*4882a593Smuzhiyun 	if (!priv->sph)
3803*4882a593Smuzhiyun 		return 0;
3804*4882a593Smuzhiyun 
3805*4882a593Smuzhiyun 	/* Not last descriptor */
3806*4882a593Smuzhiyun 	if (status & rx_not_ls)
3807*4882a593Smuzhiyun 		return priv->dma_buf_sz;
3808*4882a593Smuzhiyun 
3809*4882a593Smuzhiyun 	plen = stmmac_get_rx_frame_len(priv, p, coe);
3810*4882a593Smuzhiyun 
3811*4882a593Smuzhiyun 	/* Last descriptor */
3812*4882a593Smuzhiyun 	return plen - len;
3813*4882a593Smuzhiyun }
3814*4882a593Smuzhiyun 
3815*4882a593Smuzhiyun /**
3816*4882a593Smuzhiyun  * stmmac_rx - manage the receive process
3817*4882a593Smuzhiyun  * @priv: driver private structure
3818*4882a593Smuzhiyun  * @limit: napi budget
3819*4882a593Smuzhiyun  * @queue: RX queue index.
3820*4882a593Smuzhiyun  * Description : this is the function called by the napi poll method.
3821*4882a593Smuzhiyun  * It gets all the frames inside the ring.
3822*4882a593Smuzhiyun  */
3823*4882a593Smuzhiyun static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3824*4882a593Smuzhiyun {
3825*4882a593Smuzhiyun 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3826*4882a593Smuzhiyun 	struct stmmac_channel *ch = &priv->channel[queue];
3827*4882a593Smuzhiyun 	unsigned int count = 0, error = 0, len = 0;
3828*4882a593Smuzhiyun 	int status = 0, coe = priv->hw->rx_csum;
3829*4882a593Smuzhiyun 	unsigned int next_entry = rx_q->cur_rx;
3830*4882a593Smuzhiyun 	unsigned int desc_size;
3831*4882a593Smuzhiyun 	struct sk_buff *skb = NULL;
3832*4882a593Smuzhiyun 
3833*4882a593Smuzhiyun 	if (netif_msg_rx_status(priv)) {
3834*4882a593Smuzhiyun 		void *rx_head;
3835*4882a593Smuzhiyun 
3836*4882a593Smuzhiyun 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3837*4882a593Smuzhiyun 		if (priv->extend_desc) {
3838*4882a593Smuzhiyun 			rx_head = (void *)rx_q->dma_erx;
3839*4882a593Smuzhiyun 			desc_size = sizeof(struct dma_extended_desc);
3840*4882a593Smuzhiyun 		} else {
3841*4882a593Smuzhiyun 			rx_head = (void *)rx_q->dma_rx;
3842*4882a593Smuzhiyun 			desc_size = sizeof(struct dma_desc);
3843*4882a593Smuzhiyun 		}
3844*4882a593Smuzhiyun 
3845*4882a593Smuzhiyun 		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
3846*4882a593Smuzhiyun 				    rx_q->dma_rx_phy, desc_size);
3847*4882a593Smuzhiyun 	}
3848*4882a593Smuzhiyun 	while (count < limit) {
3849*4882a593Smuzhiyun 		unsigned int buf1_len = 0, buf2_len = 0;
3850*4882a593Smuzhiyun 		enum pkt_hash_types hash_type;
3851*4882a593Smuzhiyun 		struct stmmac_rx_buffer *buf;
3852*4882a593Smuzhiyun 		struct dma_desc *np, *p;
3853*4882a593Smuzhiyun 		int entry;
3854*4882a593Smuzhiyun 		u32 hash;
3855*4882a593Smuzhiyun 
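		/* Resume a frame whose descriptors spanned the previous NAPI
		 * poll; otherwise start with a clean state.
		 */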
3856*4882a593Smuzhiyun 		if (!count && rx_q->state_saved) {
3857*4882a593Smuzhiyun 			skb = rx_q->state.skb;
3858*4882a593Smuzhiyun 			error = rx_q->state.error;
3859*4882a593Smuzhiyun 			len = rx_q->state.len;
3860*4882a593Smuzhiyun 		} else {
3861*4882a593Smuzhiyun 			rx_q->state_saved = false;
3862*4882a593Smuzhiyun 			skb = NULL;
3863*4882a593Smuzhiyun 			error = 0;
3864*4882a593Smuzhiyun 			len = 0;
3865*4882a593Smuzhiyun 		}
3866*4882a593Smuzhiyun 
3867*4882a593Smuzhiyun 		if ((count >= limit - 1) && limit > 1)
3868*4882a593Smuzhiyun 			break;
3869*4882a593Smuzhiyun 
3870*4882a593Smuzhiyun read_again:
3871*4882a593Smuzhiyun 		buf1_len = 0;
3872*4882a593Smuzhiyun 		buf2_len = 0;
3873*4882a593Smuzhiyun 		entry = next_entry;
3874*4882a593Smuzhiyun 		buf = &rx_q->buf_pool[entry];
3875*4882a593Smuzhiyun 
3876*4882a593Smuzhiyun 		if (priv->extend_desc)
3877*4882a593Smuzhiyun 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3878*4882a593Smuzhiyun 		else
3879*4882a593Smuzhiyun 			p = rx_q->dma_rx + entry;
3880*4882a593Smuzhiyun 
3881*4882a593Smuzhiyun 		/* read the status of the incoming frame */
3882*4882a593Smuzhiyun 		status = stmmac_rx_status(priv, &priv->dev->stats,
3883*4882a593Smuzhiyun 				&priv->xstats, p);
3884*4882a593Smuzhiyun 		/* check if the descriptor is still owned by the DMA; otherwise go ahead */
3885*4882a593Smuzhiyun 		if (unlikely(status & dma_own))
3886*4882a593Smuzhiyun 			break;
3887*4882a593Smuzhiyun 
3888*4882a593Smuzhiyun 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
3889*4882a593Smuzhiyun 						priv->dma_rx_size);
3890*4882a593Smuzhiyun 		next_entry = rx_q->cur_rx;
3891*4882a593Smuzhiyun 
3892*4882a593Smuzhiyun 		if (priv->extend_desc)
3893*4882a593Smuzhiyun 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3894*4882a593Smuzhiyun 		else
3895*4882a593Smuzhiyun 			np = rx_q->dma_rx + next_entry;
3896*4882a593Smuzhiyun 
3897*4882a593Smuzhiyun 		prefetch(np);
3898*4882a593Smuzhiyun 
3899*4882a593Smuzhiyun 		if (priv->extend_desc)
3900*4882a593Smuzhiyun 			stmmac_rx_extended_status(priv, &priv->dev->stats,
3901*4882a593Smuzhiyun 					&priv->xstats, rx_q->dma_erx + entry);
3902*4882a593Smuzhiyun 		if (unlikely(status == discard_frame)) {
3903*4882a593Smuzhiyun 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
3904*4882a593Smuzhiyun 			buf->page = NULL;
3905*4882a593Smuzhiyun 			error = 1;
3906*4882a593Smuzhiyun 			if (!priv->hwts_rx_en)
3907*4882a593Smuzhiyun 				priv->dev->stats.rx_errors++;
3908*4882a593Smuzhiyun 		}
3909*4882a593Smuzhiyun 
3910*4882a593Smuzhiyun 		if (unlikely(error && (status & rx_not_ls)))
3911*4882a593Smuzhiyun 			goto read_again;
3912*4882a593Smuzhiyun 		if (unlikely(error)) {
3913*4882a593Smuzhiyun 			dev_kfree_skb(skb);
3914*4882a593Smuzhiyun 			skb = NULL;
3915*4882a593Smuzhiyun 			count++;
3916*4882a593Smuzhiyun 			continue;
3917*4882a593Smuzhiyun 		}
3918*4882a593Smuzhiyun 
3919*4882a593Smuzhiyun 		/* Buffer is good. Go on. */
3920*4882a593Smuzhiyun 
3921*4882a593Smuzhiyun 		prefetch(page_address(buf->page));
3922*4882a593Smuzhiyun 		if (buf->sec_page)
3923*4882a593Smuzhiyun 			prefetch(page_address(buf->sec_page));
3924*4882a593Smuzhiyun 
3925*4882a593Smuzhiyun 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
3926*4882a593Smuzhiyun 		len += buf1_len;
3927*4882a593Smuzhiyun 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
3928*4882a593Smuzhiyun 		len += buf2_len;
3929*4882a593Smuzhiyun 
3930*4882a593Smuzhiyun 		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3931*4882a593Smuzhiyun 		 * Type frames (LLC/LLC-SNAP)
3932*4882a593Smuzhiyun 		 *
3933*4882a593Smuzhiyun 		 * llc_snap is never checked in GMAC >= 4, so this ACS
3934*4882a593Smuzhiyun 		 * feature is always disabled and packets need to be
3935*4882a593Smuzhiyun 		 * stripped manually.
3936*4882a593Smuzhiyun 		 */
3937*4882a593Smuzhiyun 		if (likely(!(status & rx_not_ls)) &&
3938*4882a593Smuzhiyun 		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3939*4882a593Smuzhiyun 		     unlikely(status != llc_snap))) {
3940*4882a593Smuzhiyun 			if (buf2_len)
3941*4882a593Smuzhiyun 				buf2_len -= ETH_FCS_LEN;
3942*4882a593Smuzhiyun 			else
3943*4882a593Smuzhiyun 				buf1_len -= ETH_FCS_LEN;
3944*4882a593Smuzhiyun 
3945*4882a593Smuzhiyun 			len -= ETH_FCS_LEN;
3946*4882a593Smuzhiyun 		}
3947*4882a593Smuzhiyun 
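		/* The first buffer is copied into the skb linear area (with
		 * split header it may hold only the header); any further
		 * buffers are attached as page fragments.
		 */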
3948*4882a593Smuzhiyun 		if (!skb) {
3949*4882a593Smuzhiyun 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
3950*4882a593Smuzhiyun 			if (!skb) {
3951*4882a593Smuzhiyun 				priv->dev->stats.rx_dropped++;
3952*4882a593Smuzhiyun 				count++;
3953*4882a593Smuzhiyun 				goto drain_data;
3954*4882a593Smuzhiyun 			}
3955*4882a593Smuzhiyun 
3956*4882a593Smuzhiyun 			dma_sync_single_for_cpu(priv->device, buf->addr,
3957*4882a593Smuzhiyun 						buf1_len, DMA_FROM_DEVICE);
3958*4882a593Smuzhiyun 			skb_copy_to_linear_data(skb, page_address(buf->page),
3959*4882a593Smuzhiyun 						buf1_len);
3960*4882a593Smuzhiyun 			skb_put(skb, buf1_len);
3961*4882a593Smuzhiyun 
3962*4882a593Smuzhiyun 			/* Data payload copied into SKB, page ready for recycle */
3963*4882a593Smuzhiyun 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
3964*4882a593Smuzhiyun 			buf->page = NULL;
3965*4882a593Smuzhiyun 		} else if (buf1_len) {
3966*4882a593Smuzhiyun 			dma_sync_single_for_cpu(priv->device, buf->addr,
3967*4882a593Smuzhiyun 						buf1_len, DMA_FROM_DEVICE);
3968*4882a593Smuzhiyun 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3969*4882a593Smuzhiyun 					buf->page, 0, buf1_len,
3970*4882a593Smuzhiyun 					priv->dma_buf_sz);
3971*4882a593Smuzhiyun 
3972*4882a593Smuzhiyun 			/* Data payload appended into SKB */
3973*4882a593Smuzhiyun 			page_pool_release_page(rx_q->page_pool, buf->page);
3974*4882a593Smuzhiyun 			buf->page = NULL;
3975*4882a593Smuzhiyun 		}
3976*4882a593Smuzhiyun 
3977*4882a593Smuzhiyun 		if (buf2_len) {
3978*4882a593Smuzhiyun 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
3979*4882a593Smuzhiyun 						buf2_len, DMA_FROM_DEVICE);
3980*4882a593Smuzhiyun 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3981*4882a593Smuzhiyun 					buf->sec_page, 0, buf2_len,
3982*4882a593Smuzhiyun 					priv->dma_buf_sz);
3983*4882a593Smuzhiyun 
3984*4882a593Smuzhiyun 			/* Data payload appended into SKB */
3985*4882a593Smuzhiyun 			page_pool_release_page(rx_q->page_pool, buf->sec_page);
3986*4882a593Smuzhiyun 			buf->sec_page = NULL;
3987*4882a593Smuzhiyun 		}
3988*4882a593Smuzhiyun 
3989*4882a593Smuzhiyun drain_data:
3990*4882a593Smuzhiyun 		if (likely(status & rx_not_ls))
3991*4882a593Smuzhiyun 			goto read_again;
3992*4882a593Smuzhiyun 		if (!skb)
3993*4882a593Smuzhiyun 			continue;
3994*4882a593Smuzhiyun 
3995*4882a593Smuzhiyun 		/* Got entire packet into SKB. Finish it. */
3996*4882a593Smuzhiyun 
3997*4882a593Smuzhiyun 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
3998*4882a593Smuzhiyun 		stmmac_rx_vlan(priv->dev, skb);
3999*4882a593Smuzhiyun 		skb->protocol = eth_type_trans(skb, priv->dev);
4000*4882a593Smuzhiyun 
4001*4882a593Smuzhiyun 		if (unlikely(!coe))
4002*4882a593Smuzhiyun 			skb_checksum_none_assert(skb);
4003*4882a593Smuzhiyun 		else
4004*4882a593Smuzhiyun 			skb->ip_summed = CHECKSUM_UNNECESSARY;
4005*4882a593Smuzhiyun 
4006*4882a593Smuzhiyun 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4007*4882a593Smuzhiyun 			skb_set_hash(skb, hash, hash_type);
4008*4882a593Smuzhiyun 
4009*4882a593Smuzhiyun 		skb_record_rx_queue(skb, queue);
4010*4882a593Smuzhiyun 		napi_gro_receive(&ch->rx_napi, skb);
4011*4882a593Smuzhiyun 		skb = NULL;
4012*4882a593Smuzhiyun 
4013*4882a593Smuzhiyun 		priv->dev->stats.rx_packets++;
4014*4882a593Smuzhiyun 		priv->dev->stats.rx_bytes += len;
4015*4882a593Smuzhiyun 		count++;
4016*4882a593Smuzhiyun 	}
4017*4882a593Smuzhiyun 
4018*4882a593Smuzhiyun 	if (status & rx_not_ls || skb) {
4019*4882a593Smuzhiyun 		rx_q->state_saved = true;
4020*4882a593Smuzhiyun 		rx_q->state.skb = skb;
4021*4882a593Smuzhiyun 		rx_q->state.error = error;
4022*4882a593Smuzhiyun 		rx_q->state.len = len;
4023*4882a593Smuzhiyun 	}
4024*4882a593Smuzhiyun 
4025*4882a593Smuzhiyun 	stmmac_rx_refill(priv, queue);
4026*4882a593Smuzhiyun 
4027*4882a593Smuzhiyun 	priv->xstats.rx_pkt_n += count;
4028*4882a593Smuzhiyun 
4029*4882a593Smuzhiyun 	return count;
4030*4882a593Smuzhiyun }
4031*4882a593Smuzhiyun 
4032*4882a593Smuzhiyun static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
4033*4882a593Smuzhiyun {
4034*4882a593Smuzhiyun 	struct stmmac_channel *ch =
4035*4882a593Smuzhiyun 		container_of(napi, struct stmmac_channel, rx_napi);
4036*4882a593Smuzhiyun 	struct stmmac_priv *priv = ch->priv_data;
4037*4882a593Smuzhiyun 	u32 chan = ch->index;
4038*4882a593Smuzhiyun 	int work_done;
4039*4882a593Smuzhiyun 
4040*4882a593Smuzhiyun 	priv->xstats.napi_poll++;
4041*4882a593Smuzhiyun 
4042*4882a593Smuzhiyun 	work_done = stmmac_rx(priv, budget, chan);
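	/* If the budget was not exhausted, RX work is done: complete NAPI and
	 * re-enable the RX interrupt for this DMA channel.
	 */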
4043*4882a593Smuzhiyun 	if (work_done < budget && napi_complete_done(napi, work_done)) {
4044*4882a593Smuzhiyun 		unsigned long flags;
4045*4882a593Smuzhiyun 
4046*4882a593Smuzhiyun 		spin_lock_irqsave(&ch->lock, flags);
4047*4882a593Smuzhiyun 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
4048*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ch->lock, flags);
4049*4882a593Smuzhiyun 	}
4050*4882a593Smuzhiyun 
4051*4882a593Smuzhiyun 	return work_done;
4052*4882a593Smuzhiyun }
4053*4882a593Smuzhiyun 
4054*4882a593Smuzhiyun static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
4055*4882a593Smuzhiyun {
4056*4882a593Smuzhiyun 	struct stmmac_channel *ch =
4057*4882a593Smuzhiyun 		container_of(napi, struct stmmac_channel, tx_napi);
4058*4882a593Smuzhiyun 	struct stmmac_priv *priv = ch->priv_data;
4059*4882a593Smuzhiyun 	u32 chan = ch->index;
4060*4882a593Smuzhiyun 	int work_done;
4061*4882a593Smuzhiyun 
4062*4882a593Smuzhiyun 	priv->xstats.napi_poll++;
4063*4882a593Smuzhiyun 
4064*4882a593Smuzhiyun 	work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
4065*4882a593Smuzhiyun 	work_done = min(work_done, budget);
4066*4882a593Smuzhiyun 
4067*4882a593Smuzhiyun 	if (work_done < budget && napi_complete_done(napi, work_done)) {
4068*4882a593Smuzhiyun 		unsigned long flags;
4069*4882a593Smuzhiyun 
4070*4882a593Smuzhiyun 		spin_lock_irqsave(&ch->lock, flags);
4071*4882a593Smuzhiyun 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
4072*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ch->lock, flags);
4073*4882a593Smuzhiyun 	}
4074*4882a593Smuzhiyun 
4075*4882a593Smuzhiyun 	return work_done;
4076*4882a593Smuzhiyun }
4077*4882a593Smuzhiyun 
4078*4882a593Smuzhiyun /**
4079*4882a593Smuzhiyun  *  stmmac_tx_timeout
4080*4882a593Smuzhiyun  *  @dev : Pointer to net device structure
4081*4882a593Smuzhiyun  *  @txqueue: the index of the hanging transmit queue
4082*4882a593Smuzhiyun  *  Description: this function is called when a packet transmission fails to
4083*4882a593Smuzhiyun  *   complete within a reasonable time. The driver will mark the error in the
4084*4882a593Smuzhiyun  *   netdev structure and arrange for the device to be reset to a sane state
4085*4882a593Smuzhiyun  *   in order to transmit a new packet.
4086*4882a593Smuzhiyun  */
4087*4882a593Smuzhiyun static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
4088*4882a593Smuzhiyun {
4089*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
4090*4882a593Smuzhiyun 
4091*4882a593Smuzhiyun 	stmmac_global_err(priv);
4092*4882a593Smuzhiyun }
4093*4882a593Smuzhiyun 
4094*4882a593Smuzhiyun /**
4095*4882a593Smuzhiyun  *  stmmac_set_rx_mode - entry point for multicast addressing
4096*4882a593Smuzhiyun  *  @dev : pointer to the device structure
4097*4882a593Smuzhiyun  *  Description:
4098*4882a593Smuzhiyun  *  This function is a driver entry point which gets called by the kernel
4099*4882a593Smuzhiyun  *  whenever multicast addresses must be enabled/disabled.
4100*4882a593Smuzhiyun  *  Return value:
4101*4882a593Smuzhiyun  *  void.
4102*4882a593Smuzhiyun  */
4103*4882a593Smuzhiyun static void stmmac_set_rx_mode(struct net_device *dev)
4104*4882a593Smuzhiyun {
4105*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
4106*4882a593Smuzhiyun 
4107*4882a593Smuzhiyun 	stmmac_set_filter(priv, priv->hw, dev);
4108*4882a593Smuzhiyun }
4109*4882a593Smuzhiyun 
4110*4882a593Smuzhiyun /**
4111*4882a593Smuzhiyun  *  stmmac_change_mtu - entry point to change MTU size for the device.
4112*4882a593Smuzhiyun  *  @dev : device pointer.
4113*4882a593Smuzhiyun  *  @new_mtu : the new MTU size for the device.
4114*4882a593Smuzhiyun  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
4115*4882a593Smuzhiyun  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
4116*4882a593Smuzhiyun  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
4117*4882a593Smuzhiyun  *  Return value:
4118*4882a593Smuzhiyun  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4119*4882a593Smuzhiyun  *  file on failure.
4120*4882a593Smuzhiyun  */
4121*4882a593Smuzhiyun static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
4122*4882a593Smuzhiyun {
4123*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
4124*4882a593Smuzhiyun 	int txfifosz = priv->plat->tx_fifo_size;
4125*4882a593Smuzhiyun 	const int mtu = new_mtu;
4126*4882a593Smuzhiyun 
4127*4882a593Smuzhiyun 	if (txfifosz == 0)
4128*4882a593Smuzhiyun 		txfifosz = priv->dma_cap.tx_fifo_size;
4129*4882a593Smuzhiyun 
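	/* The Tx FIFO is shared between queues; each queue's share must be
	 * able to hold a frame of the new MTU.
	 */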
4130*4882a593Smuzhiyun 	txfifosz /= priv->plat->tx_queues_to_use;
4131*4882a593Smuzhiyun 
4132*4882a593Smuzhiyun 	if (netif_running(dev)) {
4133*4882a593Smuzhiyun 		netdev_err(priv->dev, "must be stopped to change its MTU\n");
4134*4882a593Smuzhiyun 		return -EBUSY;
4135*4882a593Smuzhiyun 	}
4136*4882a593Smuzhiyun 
4137*4882a593Smuzhiyun 	new_mtu = STMMAC_ALIGN(new_mtu);
4138*4882a593Smuzhiyun 
4139*4882a593Smuzhiyun 	/* If condition true, FIFO is too small or MTU too large */
4140*4882a593Smuzhiyun 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
4141*4882a593Smuzhiyun 		return -EINVAL;
4142*4882a593Smuzhiyun 
4143*4882a593Smuzhiyun 	dev->mtu = mtu;
4144*4882a593Smuzhiyun 
4145*4882a593Smuzhiyun 	netdev_update_features(dev);
4146*4882a593Smuzhiyun 
4147*4882a593Smuzhiyun 	return 0;
4148*4882a593Smuzhiyun }
4149*4882a593Smuzhiyun 
4150*4882a593Smuzhiyun static netdev_features_t stmmac_fix_features(struct net_device *dev,
4151*4882a593Smuzhiyun 					     netdev_features_t features)
4152*4882a593Smuzhiyun {
4153*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
4154*4882a593Smuzhiyun 
4155*4882a593Smuzhiyun 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
4156*4882a593Smuzhiyun 		features &= ~NETIF_F_RXCSUM;
4157*4882a593Smuzhiyun 
4158*4882a593Smuzhiyun 	if (!priv->plat->tx_coe)
4159*4882a593Smuzhiyun 		features &= ~NETIF_F_CSUM_MASK;
4160*4882a593Smuzhiyun 
4161*4882a593Smuzhiyun 	/* Some GMAC devices have buggy Jumbo frame support that
4162*4882a593Smuzhiyun 	 * needs the Tx COE disabled for oversized frames
4163*4882a593Smuzhiyun 	 * (due to limited buffer sizes). In this case we disable
4164*4882a593Smuzhiyun 	 * the TX csum insertion in the TDES and do not use SF.
4165*4882a593Smuzhiyun 	 */
4166*4882a593Smuzhiyun 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
4167*4882a593Smuzhiyun 		features &= ~NETIF_F_CSUM_MASK;
4168*4882a593Smuzhiyun 
4169*4882a593Smuzhiyun 	/* Disable tso if asked by ethtool */
4170*4882a593Smuzhiyun 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4171*4882a593Smuzhiyun 		if (features & NETIF_F_TSO)
4172*4882a593Smuzhiyun 			priv->tso = true;
4173*4882a593Smuzhiyun 		else
4174*4882a593Smuzhiyun 			priv->tso = false;
4175*4882a593Smuzhiyun 	}
4176*4882a593Smuzhiyun 
4177*4882a593Smuzhiyun 	return features;
4178*4882a593Smuzhiyun }
4179*4882a593Smuzhiyun 
4180*4882a593Smuzhiyun static int stmmac_set_features(struct net_device *netdev,
4181*4882a593Smuzhiyun 			       netdev_features_t features)
4182*4882a593Smuzhiyun {
4183*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(netdev);
4184*4882a593Smuzhiyun 	bool sph_en;
4185*4882a593Smuzhiyun 	u32 chan;
4186*4882a593Smuzhiyun 
4187*4882a593Smuzhiyun 	/* Keep the COE Type if checksum offload is supported */
4188*4882a593Smuzhiyun 	if (features & NETIF_F_RXCSUM)
4189*4882a593Smuzhiyun 		priv->hw->rx_csum = priv->plat->rx_coe;
4190*4882a593Smuzhiyun 	else
4191*4882a593Smuzhiyun 		priv->hw->rx_csum = 0;
4192*4882a593Smuzhiyun 	/* No check is needed because rx_coe has been set before and will be
4193*4882a593Smuzhiyun 	 * fixed in case of issue.
4194*4882a593Smuzhiyun 	 */
4195*4882a593Smuzhiyun 	stmmac_rx_ipc(priv, priv->hw);
4196*4882a593Smuzhiyun 
4197*4882a593Smuzhiyun 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
4198*4882a593Smuzhiyun 	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
4199*4882a593Smuzhiyun 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
4200*4882a593Smuzhiyun 
4201*4882a593Smuzhiyun 	return 0;
4202*4882a593Smuzhiyun }
4203*4882a593Smuzhiyun 
4204*4882a593Smuzhiyun /**
4205*4882a593Smuzhiyun  *  stmmac_interrupt - main ISR
4206*4882a593Smuzhiyun  *  @irq: interrupt number.
4207*4882a593Smuzhiyun  *  @dev_id: to pass the net device pointer (must be valid).
4208*4882a593Smuzhiyun  *  Description: this is the main driver interrupt service routine.
4209*4882a593Smuzhiyun  *  It can call:
4210*4882a593Smuzhiyun  *  o DMA service routine (to manage incoming frame reception and transmission
4211*4882a593Smuzhiyun  *    status)
4212*4882a593Smuzhiyun  *  o Core interrupts to manage: remote wake-up, management counter, LPI
4213*4882a593Smuzhiyun  *    interrupts.
4214*4882a593Smuzhiyun  */
4215*4882a593Smuzhiyun static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
4216*4882a593Smuzhiyun {
4217*4882a593Smuzhiyun 	struct net_device *dev = (struct net_device *)dev_id;
4218*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
4219*4882a593Smuzhiyun 	u32 rx_cnt = priv->plat->rx_queues_to_use;
4220*4882a593Smuzhiyun 	u32 tx_cnt = priv->plat->tx_queues_to_use;
4221*4882a593Smuzhiyun 	u32 queues_count;
4222*4882a593Smuzhiyun 	u32 queue;
4223*4882a593Smuzhiyun 	bool xmac;
4224*4882a593Smuzhiyun 
4225*4882a593Smuzhiyun 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
4226*4882a593Smuzhiyun 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
4227*4882a593Smuzhiyun 
4228*4882a593Smuzhiyun 	if (priv->irq_wake)
4229*4882a593Smuzhiyun 		pm_wakeup_event(priv->device, 0);
4230*4882a593Smuzhiyun 
4231*4882a593Smuzhiyun 	/* Check if adapter is up */
4232*4882a593Smuzhiyun 	if (test_bit(STMMAC_DOWN, &priv->state))
4233*4882a593Smuzhiyun 		return IRQ_HANDLED;
4234*4882a593Smuzhiyun 	/* Check if a fatal error happened */
4235*4882a593Smuzhiyun 	if (stmmac_safety_feat_interrupt(priv))
4236*4882a593Smuzhiyun 		return IRQ_HANDLED;
4237*4882a593Smuzhiyun 
4238*4882a593Smuzhiyun 	/* To handle GMAC own interrupts */
4239*4882a593Smuzhiyun 	if ((priv->plat->has_gmac) || xmac) {
4240*4882a593Smuzhiyun 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
4241*4882a593Smuzhiyun 
4242*4882a593Smuzhiyun 		if (unlikely(status)) {
4243*4882a593Smuzhiyun 			/* For LPI we need to save the tx status */
4244*4882a593Smuzhiyun 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
4245*4882a593Smuzhiyun 				priv->tx_path_in_lpi_mode = true;
4246*4882a593Smuzhiyun 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
4247*4882a593Smuzhiyun 				priv->tx_path_in_lpi_mode = false;
4248*4882a593Smuzhiyun 		}
4249*4882a593Smuzhiyun 
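		/* Check the per-queue MTL interrupt status
		 * (e.g. RX FIFO overflow).
		 */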
4250*4882a593Smuzhiyun 		for (queue = 0; queue < queues_count; queue++) {
4251*4882a593Smuzhiyun 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
4252*4882a593Smuzhiyun 							    queue);
4253*4882a593Smuzhiyun 		}
4254*4882a593Smuzhiyun 
4255*4882a593Smuzhiyun 		/* PCS link status */
4256*4882a593Smuzhiyun 		if (priv->hw->pcs) {
4257*4882a593Smuzhiyun 			if (priv->xstats.pcs_link)
4258*4882a593Smuzhiyun 				netif_carrier_on(dev);
4259*4882a593Smuzhiyun 			else
4260*4882a593Smuzhiyun 				netif_carrier_off(dev);
4261*4882a593Smuzhiyun 		}
4262*4882a593Smuzhiyun 	}
4263*4882a593Smuzhiyun 
4264*4882a593Smuzhiyun 	/* To handle DMA interrupts */
4265*4882a593Smuzhiyun 	stmmac_dma_interrupt(priv);
4266*4882a593Smuzhiyun 
4267*4882a593Smuzhiyun 	return IRQ_HANDLED;
4268*4882a593Smuzhiyun }
4269*4882a593Smuzhiyun 
4270*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
4271*4882a593Smuzhiyun /* Polling receive - used by NETCONSOLE and other diagnostic tools
4272*4882a593Smuzhiyun  * to allow network I/O with interrupts disabled.
4273*4882a593Smuzhiyun  */
4274*4882a593Smuzhiyun static void stmmac_poll_controller(struct net_device *dev)
4275*4882a593Smuzhiyun {
4276*4882a593Smuzhiyun 	disable_irq(dev->irq);
4277*4882a593Smuzhiyun 	stmmac_interrupt(dev->irq, dev);
4278*4882a593Smuzhiyun 	enable_irq(dev->irq);
4279*4882a593Smuzhiyun }
4280*4882a593Smuzhiyun #endif
4281*4882a593Smuzhiyun 
4282*4882a593Smuzhiyun /**
4283*4882a593Smuzhiyun  *  stmmac_ioctl - Entry point for the Ioctl
4284*4882a593Smuzhiyun  *  @dev: Device pointer.
4285*4882a593Smuzhiyun  *  @rq: An IOCTL-specific structure that can contain a pointer to
4286*4882a593Smuzhiyun  *  a proprietary structure used to pass information to the driver.
4287*4882a593Smuzhiyun  *  @cmd: IOCTL command
4288*4882a593Smuzhiyun  *  Description:
4289*4882a593Smuzhiyun  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
4290*4882a593Smuzhiyun  */
4291*4882a593Smuzhiyun static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4292*4882a593Smuzhiyun {
4293*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
4294*4882a593Smuzhiyun 	int ret = -EOPNOTSUPP;
4295*4882a593Smuzhiyun 
4296*4882a593Smuzhiyun 	if (!netif_running(dev))
4297*4882a593Smuzhiyun 		return -EINVAL;
4298*4882a593Smuzhiyun 
4299*4882a593Smuzhiyun 	switch (cmd) {
4300*4882a593Smuzhiyun 	case SIOCGMIIPHY:
4301*4882a593Smuzhiyun 	case SIOCGMIIREG:
4302*4882a593Smuzhiyun 	case SIOCSMIIREG:
4303*4882a593Smuzhiyun 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
4304*4882a593Smuzhiyun 		break;
4305*4882a593Smuzhiyun 	case SIOCSHWTSTAMP:
4306*4882a593Smuzhiyun 		ret = stmmac_hwtstamp_set(dev, rq);
4307*4882a593Smuzhiyun 		break;
4308*4882a593Smuzhiyun 	case SIOCGHWTSTAMP:
4309*4882a593Smuzhiyun 		ret = stmmac_hwtstamp_get(dev, rq);
4310*4882a593Smuzhiyun 		break;
4311*4882a593Smuzhiyun 	default:
4312*4882a593Smuzhiyun 		break;
4313*4882a593Smuzhiyun 	}
4314*4882a593Smuzhiyun 
4315*4882a593Smuzhiyun 	return ret;
4316*4882a593Smuzhiyun }
4317*4882a593Smuzhiyun 
4318*4882a593Smuzhiyun static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4319*4882a593Smuzhiyun 				    void *cb_priv)
4320*4882a593Smuzhiyun {
4321*4882a593Smuzhiyun 	struct stmmac_priv *priv = cb_priv;
4322*4882a593Smuzhiyun 	int ret = -EOPNOTSUPP;
4323*4882a593Smuzhiyun 
4324*4882a593Smuzhiyun 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
4325*4882a593Smuzhiyun 		return ret;
4326*4882a593Smuzhiyun 
4327*4882a593Smuzhiyun 	stmmac_disable_all_queues(priv);
4328*4882a593Smuzhiyun 
4329*4882a593Smuzhiyun 	switch (type) {
4330*4882a593Smuzhiyun 	case TC_SETUP_CLSU32:
4331*4882a593Smuzhiyun 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
4332*4882a593Smuzhiyun 		break;
4333*4882a593Smuzhiyun 	case TC_SETUP_CLSFLOWER:
4334*4882a593Smuzhiyun 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
4335*4882a593Smuzhiyun 		break;
4336*4882a593Smuzhiyun 	default:
4337*4882a593Smuzhiyun 		break;
4338*4882a593Smuzhiyun 	}
4339*4882a593Smuzhiyun 
4340*4882a593Smuzhiyun 	stmmac_enable_all_queues(priv);
4341*4882a593Smuzhiyun 	return ret;
4342*4882a593Smuzhiyun }
4343*4882a593Smuzhiyun 
4344*4882a593Smuzhiyun static LIST_HEAD(stmmac_block_cb_list);
4345*4882a593Smuzhiyun 
4346*4882a593Smuzhiyun static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
4347*4882a593Smuzhiyun 			   void *type_data)
4348*4882a593Smuzhiyun {
4349*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(ndev);
4350*4882a593Smuzhiyun 
4351*4882a593Smuzhiyun 	switch (type) {
4352*4882a593Smuzhiyun 	case TC_SETUP_BLOCK:
4353*4882a593Smuzhiyun 		return flow_block_cb_setup_simple(type_data,
4354*4882a593Smuzhiyun 						  &stmmac_block_cb_list,
4355*4882a593Smuzhiyun 						  stmmac_setup_tc_block_cb,
4356*4882a593Smuzhiyun 						  priv, priv, true);
4357*4882a593Smuzhiyun 	case TC_SETUP_QDISC_CBS:
4358*4882a593Smuzhiyun 		return stmmac_tc_setup_cbs(priv, priv, type_data);
4359*4882a593Smuzhiyun 	case TC_SETUP_QDISC_TAPRIO:
4360*4882a593Smuzhiyun 		return stmmac_tc_setup_taprio(priv, priv, type_data);
4361*4882a593Smuzhiyun 	case TC_SETUP_QDISC_ETF:
4362*4882a593Smuzhiyun 		return stmmac_tc_setup_etf(priv, priv, type_data);
4363*4882a593Smuzhiyun 	default:
4364*4882a593Smuzhiyun 		return -EOPNOTSUPP;
4365*4882a593Smuzhiyun 	}
4366*4882a593Smuzhiyun }
4367*4882a593Smuzhiyun 
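/**
 *  stmmac_select_queue - ndo_select_queue entry point
 *  @dev: net device structure
 *  @skb: socket buffer to be transmitted
 *  @sb_dev: subordinate device (unused here)
 *  Description: force TSO/USO traffic onto queue 0, which is always
 *  TSO/USO capable when the feature is supported, and use the default
 *  netdev_pick_tx() selection for everything else.
 */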
4368*4882a593Smuzhiyun static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
4369*4882a593Smuzhiyun 			       struct net_device *sb_dev)
4370*4882a593Smuzhiyun {
4371*4882a593Smuzhiyun 	int gso = skb_shinfo(skb)->gso_type;
4372*4882a593Smuzhiyun 
4373*4882a593Smuzhiyun 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
4374*4882a593Smuzhiyun 		/*
4375*4882a593Smuzhiyun 		 * There is no way to determine the number of TSO/USO
4376*4882a593Smuzhiyun 		 * capable Queues. Let's always use Queue 0
4377*4882a593Smuzhiyun 		 * because if TSO/USO is supported then at least this
4378*4882a593Smuzhiyun 		 * one will be capable.
4379*4882a593Smuzhiyun 		 */
4380*4882a593Smuzhiyun 		return 0;
4381*4882a593Smuzhiyun 	}
4382*4882a593Smuzhiyun 
4383*4882a593Smuzhiyun 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
4384*4882a593Smuzhiyun }
4385*4882a593Smuzhiyun 
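/**
 *  stmmac_set_mac_address - ndo_set_mac_address entry point
 *  @ndev: net device structure
 *  @addr: new MAC address
 *  Description: runtime-resume the device, update the netdev address and
 *  program it into MAC address register 0.
 */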
4386*4882a593Smuzhiyun static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
4387*4882a593Smuzhiyun {
4388*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(ndev);
4389*4882a593Smuzhiyun 	int ret = 0;
4390*4882a593Smuzhiyun 
4391*4882a593Smuzhiyun 	ret = pm_runtime_get_sync(priv->device);
4392*4882a593Smuzhiyun 	if (ret < 0) {
4393*4882a593Smuzhiyun 		pm_runtime_put_noidle(priv->device);
4394*4882a593Smuzhiyun 		return ret;
4395*4882a593Smuzhiyun 	}
4396*4882a593Smuzhiyun 
4397*4882a593Smuzhiyun 	ret = eth_mac_addr(ndev, addr);
4398*4882a593Smuzhiyun 	if (ret)
4399*4882a593Smuzhiyun 		goto set_mac_error;
4400*4882a593Smuzhiyun 
4401*4882a593Smuzhiyun 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4402*4882a593Smuzhiyun 
4403*4882a593Smuzhiyun set_mac_error:
4404*4882a593Smuzhiyun 	pm_runtime_put(priv->device);
4405*4882a593Smuzhiyun 
4406*4882a593Smuzhiyun 	return ret;
4407*4882a593Smuzhiyun }
4408*4882a593Smuzhiyun 
4409*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_FS
4410*4882a593Smuzhiyun static struct dentry *stmmac_fs_dir;
4411*4882a593Smuzhiyun 
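/**
 *  sysfs_display_ring - dump a DMA descriptor ring to a seq_file
 *  @head: ring base address (normal or extended descriptors)
 *  @size: number of descriptors in the ring
 *  @extend_desc: non-zero if @head points to extended descriptors
 *  @seq: debugfs seq_file used for output
 *  @dma_phy_addr: DMA (physical) base address of the ring
 */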
4412*4882a593Smuzhiyun static void sysfs_display_ring(void *head, int size, int extend_desc,
4413*4882a593Smuzhiyun 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
4414*4882a593Smuzhiyun {
4415*4882a593Smuzhiyun 	int i;
4416*4882a593Smuzhiyun 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
4417*4882a593Smuzhiyun 	struct dma_desc *p = (struct dma_desc *)head;
4418*4882a593Smuzhiyun 	dma_addr_t dma_addr;
4419*4882a593Smuzhiyun 
4420*4882a593Smuzhiyun 	for (i = 0; i < size; i++) {
4421*4882a593Smuzhiyun 		if (extend_desc) {
4422*4882a593Smuzhiyun 			dma_addr = dma_phy_addr + i * sizeof(*ep);
4423*4882a593Smuzhiyun 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4424*4882a593Smuzhiyun 				   i, &dma_addr,
4425*4882a593Smuzhiyun 				   le32_to_cpu(ep->basic.des0),
4426*4882a593Smuzhiyun 				   le32_to_cpu(ep->basic.des1),
4427*4882a593Smuzhiyun 				   le32_to_cpu(ep->basic.des2),
4428*4882a593Smuzhiyun 				   le32_to_cpu(ep->basic.des3));
4429*4882a593Smuzhiyun 			ep++;
4430*4882a593Smuzhiyun 		} else {
4431*4882a593Smuzhiyun 			dma_addr = dma_phy_addr + i * sizeof(*p);
4432*4882a593Smuzhiyun 			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4433*4882a593Smuzhiyun 				   i, &dma_addr,
4434*4882a593Smuzhiyun 				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
4435*4882a593Smuzhiyun 				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
4436*4882a593Smuzhiyun 			p++;
4437*4882a593Smuzhiyun 		}
4438*4882a593Smuzhiyun 		seq_printf(seq, "\n");
4439*4882a593Smuzhiyun 	}
4440*4882a593Smuzhiyun }
4441*4882a593Smuzhiyun 
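/**
 *  stmmac_rings_status_show - debugfs dump of all RX/TX descriptor rings
 *  @seq: debugfs seq_file
 *  @v: unused
 *  Description: only produces output while the interface is up.
 */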
4442*4882a593Smuzhiyun static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4443*4882a593Smuzhiyun {
4444*4882a593Smuzhiyun 	struct net_device *dev = seq->private;
4445*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
4446*4882a593Smuzhiyun 	u32 rx_count = priv->plat->rx_queues_to_use;
4447*4882a593Smuzhiyun 	u32 tx_count = priv->plat->tx_queues_to_use;
4448*4882a593Smuzhiyun 	u32 queue;
4449*4882a593Smuzhiyun 
4450*4882a593Smuzhiyun 	if ((dev->flags & IFF_UP) == 0)
4451*4882a593Smuzhiyun 		return 0;
4452*4882a593Smuzhiyun 
4453*4882a593Smuzhiyun 	for (queue = 0; queue < rx_count; queue++) {
4454*4882a593Smuzhiyun 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4455*4882a593Smuzhiyun 
4456*4882a593Smuzhiyun 		seq_printf(seq, "RX Queue %d:\n", queue);
4457*4882a593Smuzhiyun 
4458*4882a593Smuzhiyun 		if (priv->extend_desc) {
4459*4882a593Smuzhiyun 			seq_printf(seq, "Extended descriptor ring:\n");
4460*4882a593Smuzhiyun 			sysfs_display_ring((void *)rx_q->dma_erx,
4461*4882a593Smuzhiyun 					   priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
4462*4882a593Smuzhiyun 		} else {
4463*4882a593Smuzhiyun 			seq_printf(seq, "Descriptor ring:\n");
4464*4882a593Smuzhiyun 			sysfs_display_ring((void *)rx_q->dma_rx,
4465*4882a593Smuzhiyun 					   priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
4466*4882a593Smuzhiyun 		}
4467*4882a593Smuzhiyun 	}
4468*4882a593Smuzhiyun 
4469*4882a593Smuzhiyun 	for (queue = 0; queue < tx_count; queue++) {
4470*4882a593Smuzhiyun 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4471*4882a593Smuzhiyun 
4472*4882a593Smuzhiyun 		seq_printf(seq, "TX Queue %d:\n", queue);
4473*4882a593Smuzhiyun 
4474*4882a593Smuzhiyun 		if (priv->extend_desc) {
4475*4882a593Smuzhiyun 			seq_printf(seq, "Extended descriptor ring:\n");
4476*4882a593Smuzhiyun 			sysfs_display_ring((void *)tx_q->dma_etx,
4477*4882a593Smuzhiyun 					   priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
4478*4882a593Smuzhiyun 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
4479*4882a593Smuzhiyun 			seq_printf(seq, "Descriptor ring:\n");
4480*4882a593Smuzhiyun 			sysfs_display_ring((void *)tx_q->dma_tx,
4481*4882a593Smuzhiyun 					   priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
4482*4882a593Smuzhiyun 		}
4483*4882a593Smuzhiyun 	}
4484*4882a593Smuzhiyun 
4485*4882a593Smuzhiyun 	return 0;
4486*4882a593Smuzhiyun }
4487*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
4488*4882a593Smuzhiyun 
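/**
 *  stmmac_dma_cap_show - debugfs dump of the DMA HW capability register
 *  @seq: debugfs seq_file
 *  @v: unused
 */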
4489*4882a593Smuzhiyun static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
4490*4882a593Smuzhiyun {
4491*4882a593Smuzhiyun 	struct net_device *dev = seq->private;
4492*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
4493*4882a593Smuzhiyun 
4494*4882a593Smuzhiyun 	if (!priv->hw_cap_support) {
4495*4882a593Smuzhiyun 		seq_printf(seq, "DMA HW features not supported\n");
4496*4882a593Smuzhiyun 		return 0;
4497*4882a593Smuzhiyun 	}
4498*4882a593Smuzhiyun 
4499*4882a593Smuzhiyun 	seq_printf(seq, "==============================\n");
4500*4882a593Smuzhiyun 	seq_printf(seq, "\tDMA HW features\n");
4501*4882a593Smuzhiyun 	seq_printf(seq, "==============================\n");
4502*4882a593Smuzhiyun 
4503*4882a593Smuzhiyun 	seq_printf(seq, "\t10/100 Mbps: %s\n",
4504*4882a593Smuzhiyun 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
4505*4882a593Smuzhiyun 	seq_printf(seq, "\t1000 Mbps: %s\n",
4506*4882a593Smuzhiyun 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
4507*4882a593Smuzhiyun 	seq_printf(seq, "\tHalf duplex: %s\n",
4508*4882a593Smuzhiyun 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
4509*4882a593Smuzhiyun 	seq_printf(seq, "\tHash Filter: %s\n",
4510*4882a593Smuzhiyun 		   (priv->dma_cap.hash_filter) ? "Y" : "N");
4511*4882a593Smuzhiyun 	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
4512*4882a593Smuzhiyun 		   (priv->dma_cap.multi_addr) ? "Y" : "N");
4513*4882a593Smuzhiyun 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
4514*4882a593Smuzhiyun 		   (priv->dma_cap.pcs) ? "Y" : "N");
4515*4882a593Smuzhiyun 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
4516*4882a593Smuzhiyun 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
4517*4882a593Smuzhiyun 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
4518*4882a593Smuzhiyun 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
4519*4882a593Smuzhiyun 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
4520*4882a593Smuzhiyun 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4521*4882a593Smuzhiyun 	seq_printf(seq, "\tRMON module: %s\n",
4522*4882a593Smuzhiyun 		   (priv->dma_cap.rmon) ? "Y" : "N");
4523*4882a593Smuzhiyun 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4524*4882a593Smuzhiyun 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
4525*4882a593Smuzhiyun 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4526*4882a593Smuzhiyun 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
4527*4882a593Smuzhiyun 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4528*4882a593Smuzhiyun 		   (priv->dma_cap.eee) ? "Y" : "N");
4529*4882a593Smuzhiyun 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4530*4882a593Smuzhiyun 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4531*4882a593Smuzhiyun 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
4532*4882a593Smuzhiyun 	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4533*4882a593Smuzhiyun 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4534*4882a593Smuzhiyun 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
4535*4882a593Smuzhiyun 	} else {
4536*4882a593Smuzhiyun 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4537*4882a593Smuzhiyun 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4538*4882a593Smuzhiyun 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4539*4882a593Smuzhiyun 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4540*4882a593Smuzhiyun 	}
4541*4882a593Smuzhiyun 	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4542*4882a593Smuzhiyun 		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4543*4882a593Smuzhiyun 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4544*4882a593Smuzhiyun 		   priv->dma_cap.number_rx_channel);
4545*4882a593Smuzhiyun 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4546*4882a593Smuzhiyun 		   priv->dma_cap.number_tx_channel);
4547*4882a593Smuzhiyun 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
4548*4882a593Smuzhiyun 		   priv->dma_cap.number_rx_queues);
4549*4882a593Smuzhiyun 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
4550*4882a593Smuzhiyun 		   priv->dma_cap.number_tx_queues);
4551*4882a593Smuzhiyun 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
4552*4882a593Smuzhiyun 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
4553*4882a593Smuzhiyun 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
4554*4882a593Smuzhiyun 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
4555*4882a593Smuzhiyun 	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
4556*4882a593Smuzhiyun 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
4557*4882a593Smuzhiyun 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
4558*4882a593Smuzhiyun 		   priv->dma_cap.pps_out_num);
4559*4882a593Smuzhiyun 	seq_printf(seq, "\tSafety Features: %s\n",
4560*4882a593Smuzhiyun 		   priv->dma_cap.asp ? "Y" : "N");
4561*4882a593Smuzhiyun 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
4562*4882a593Smuzhiyun 		   priv->dma_cap.frpsel ? "Y" : "N");
4563*4882a593Smuzhiyun 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
4564*4882a593Smuzhiyun 		   priv->dma_cap.addr64);
4565*4882a593Smuzhiyun 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
4566*4882a593Smuzhiyun 		   priv->dma_cap.rssen ? "Y" : "N");
4567*4882a593Smuzhiyun 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
4568*4882a593Smuzhiyun 		   priv->dma_cap.vlhash ? "Y" : "N");
4569*4882a593Smuzhiyun 	seq_printf(seq, "\tSplit Header: %s\n",
4570*4882a593Smuzhiyun 		   priv->dma_cap.sphen ? "Y" : "N");
4571*4882a593Smuzhiyun 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
4572*4882a593Smuzhiyun 		   priv->dma_cap.vlins ? "Y" : "N");
4573*4882a593Smuzhiyun 	seq_printf(seq, "\tDouble VLAN: %s\n",
4574*4882a593Smuzhiyun 		   priv->dma_cap.dvlan ? "Y" : "N");
4575*4882a593Smuzhiyun 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
4576*4882a593Smuzhiyun 		   priv->dma_cap.l3l4fnum);
4577*4882a593Smuzhiyun 	seq_printf(seq, "\tARP Offloading: %s\n",
4578*4882a593Smuzhiyun 		   priv->dma_cap.arpoffsel ? "Y" : "N");
4579*4882a593Smuzhiyun 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
4580*4882a593Smuzhiyun 		   priv->dma_cap.estsel ? "Y" : "N");
4581*4882a593Smuzhiyun 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
4582*4882a593Smuzhiyun 		   priv->dma_cap.fpesel ? "Y" : "N");
4583*4882a593Smuzhiyun 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
4584*4882a593Smuzhiyun 		   priv->dma_cap.tbssel ? "Y" : "N");
4585*4882a593Smuzhiyun 	return 0;
4586*4882a593Smuzhiyun }
4587*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4588*4882a593Smuzhiyun 
4589*4882a593Smuzhiyun /* Use network device events to rename debugfs file entries.
4590*4882a593Smuzhiyun  */
4591*4882a593Smuzhiyun static int stmmac_device_event(struct notifier_block *unused,
4592*4882a593Smuzhiyun 			       unsigned long event, void *ptr)
4593*4882a593Smuzhiyun {
4594*4882a593Smuzhiyun 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4595*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
4596*4882a593Smuzhiyun 
4597*4882a593Smuzhiyun 	if (dev->netdev_ops != &stmmac_netdev_ops)
4598*4882a593Smuzhiyun 		goto done;
4599*4882a593Smuzhiyun 
4600*4882a593Smuzhiyun 	switch (event) {
4601*4882a593Smuzhiyun 	case NETDEV_CHANGENAME:
4602*4882a593Smuzhiyun 		if (priv->dbgfs_dir)
4603*4882a593Smuzhiyun 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
4604*4882a593Smuzhiyun 							 priv->dbgfs_dir,
4605*4882a593Smuzhiyun 							 stmmac_fs_dir,
4606*4882a593Smuzhiyun 							 dev->name);
4607*4882a593Smuzhiyun 		break;
4608*4882a593Smuzhiyun 	}
4609*4882a593Smuzhiyun done:
4610*4882a593Smuzhiyun 	return NOTIFY_DONE;
4611*4882a593Smuzhiyun }
4612*4882a593Smuzhiyun 
4613*4882a593Smuzhiyun static struct notifier_block stmmac_notifier = {
4614*4882a593Smuzhiyun 	.notifier_call = stmmac_device_event,
4615*4882a593Smuzhiyun };
4616*4882a593Smuzhiyun 
4617*4882a593Smuzhiyun static void stmmac_init_fs(struct net_device *dev)
4618*4882a593Smuzhiyun {
4619*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
4620*4882a593Smuzhiyun 
4621*4882a593Smuzhiyun 	rtnl_lock();
4622*4882a593Smuzhiyun 
4623*4882a593Smuzhiyun 	/* Create per netdev entries */
4624*4882a593Smuzhiyun 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4625*4882a593Smuzhiyun 
4626*4882a593Smuzhiyun 	/* Entry to report DMA RX/TX rings */
4627*4882a593Smuzhiyun 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
4628*4882a593Smuzhiyun 			    &stmmac_rings_status_fops);
4629*4882a593Smuzhiyun 
4630*4882a593Smuzhiyun 	/* Entry to report the DMA HW features */
4631*4882a593Smuzhiyun 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
4632*4882a593Smuzhiyun 			    &stmmac_dma_cap_fops);
4633*4882a593Smuzhiyun 
4634*4882a593Smuzhiyun 	rtnl_unlock();
4635*4882a593Smuzhiyun }
4636*4882a593Smuzhiyun 
4637*4882a593Smuzhiyun static void stmmac_exit_fs(struct net_device *dev)
4638*4882a593Smuzhiyun {
4639*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
4640*4882a593Smuzhiyun 
4641*4882a593Smuzhiyun 	debugfs_remove_recursive(priv->dbgfs_dir);
4642*4882a593Smuzhiyun }
4643*4882a593Smuzhiyun #endif /* CONFIG_DEBUG_FS */
4644*4882a593Smuzhiyun 
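/**
 *  stmmac_vid_crc32_le - CRC-32 of a little-endian VLAN ID
 *  @vid_le: VLAN ID in little-endian byte order
 *  Description: bit-wise CRC-32 (reflected polynomial 0xedb88320) over the
 *  12 VID bits, used to derive the VLAN hash filter bin.
 */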
4645*4882a593Smuzhiyun static u32 stmmac_vid_crc32_le(__le16 vid_le)
4646*4882a593Smuzhiyun {
4647*4882a593Smuzhiyun 	unsigned char *data = (unsigned char *)&vid_le;
4648*4882a593Smuzhiyun 	unsigned char data_byte = 0;
4649*4882a593Smuzhiyun 	u32 crc = ~0x0;
4650*4882a593Smuzhiyun 	u32 temp = 0;
4651*4882a593Smuzhiyun 	int i, bits;
4652*4882a593Smuzhiyun 
4653*4882a593Smuzhiyun 	bits = get_bitmask_order(VLAN_VID_MASK);
4654*4882a593Smuzhiyun 	for (i = 0; i < bits; i++) {
4655*4882a593Smuzhiyun 		if ((i % 8) == 0)
4656*4882a593Smuzhiyun 			data_byte = data[i / 8];
4657*4882a593Smuzhiyun 
4658*4882a593Smuzhiyun 		temp = ((crc & 1) ^ data_byte) & 1;
4659*4882a593Smuzhiyun 		crc >>= 1;
4660*4882a593Smuzhiyun 		data_byte >>= 1;
4661*4882a593Smuzhiyun 
4662*4882a593Smuzhiyun 		if (temp)
4663*4882a593Smuzhiyun 			crc ^= 0xedb88320;
4664*4882a593Smuzhiyun 	}
4665*4882a593Smuzhiyun 
4666*4882a593Smuzhiyun 	return crc;
4667*4882a593Smuzhiyun }
4668*4882a593Smuzhiyun 
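/**
 *  stmmac_vlan_update - reprogram the VLAN RX filter
 *  @priv: driver private structure
 *  @is_double: true for double (802.1AD) tagged VLANs
 *  Description: rebuild the 16-bin VLAN hash from the active VIDs; if the
 *  HW has no VLAN hash support, fall back to a single perfect-match entry
 *  and reject configurations that would need more.
 */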
4669*4882a593Smuzhiyun static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
4670*4882a593Smuzhiyun {
4671*4882a593Smuzhiyun 	u32 crc, hash = 0;
4672*4882a593Smuzhiyun 	__le16 pmatch = 0;
4673*4882a593Smuzhiyun 	int count = 0;
4674*4882a593Smuzhiyun 	u16 vid = 0;
4675*4882a593Smuzhiyun 
4676*4882a593Smuzhiyun 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
4677*4882a593Smuzhiyun 		__le16 vid_le = cpu_to_le16(vid);
4678*4882a593Smuzhiyun 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
4679*4882a593Smuzhiyun 		hash |= (1 << crc);
4680*4882a593Smuzhiyun 		count++;
4681*4882a593Smuzhiyun 	}
4682*4882a593Smuzhiyun 
4683*4882a593Smuzhiyun 	if (!priv->dma_cap.vlhash) {
4684*4882a593Smuzhiyun 		if (count > 2) /* VID = 0 always passes filter */
4685*4882a593Smuzhiyun 			return -EOPNOTSUPP;
4686*4882a593Smuzhiyun 
4687*4882a593Smuzhiyun 		pmatch = cpu_to_le16(vid);
4688*4882a593Smuzhiyun 		hash = 0;
4689*4882a593Smuzhiyun 	}
4690*4882a593Smuzhiyun 
4691*4882a593Smuzhiyun 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
4692*4882a593Smuzhiyun }
4693*4882a593Smuzhiyun 
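/**
 *  stmmac_vlan_rx_add_vid - ndo_vlan_rx_add_vid entry point
 *  @ndev: net device structure
 *  @proto: VLAN protocol (802.1Q or 802.1AD)
 *  @vid: VLAN ID to accept
 *  Description: track the VID, update the VLAN hash/perfect filter and,
 *  when the MAC provides per-VID filter registers, the HW RX filter entries.
 */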
4694*4882a593Smuzhiyun static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
4695*4882a593Smuzhiyun {
4696*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(ndev);
4697*4882a593Smuzhiyun 	bool is_double = false;
4698*4882a593Smuzhiyun 	int ret;
4699*4882a593Smuzhiyun 
4700*4882a593Smuzhiyun 	if (be16_to_cpu(proto) == ETH_P_8021AD)
4701*4882a593Smuzhiyun 		is_double = true;
4702*4882a593Smuzhiyun 
4703*4882a593Smuzhiyun 	set_bit(vid, priv->active_vlans);
4704*4882a593Smuzhiyun 	ret = stmmac_vlan_update(priv, is_double);
4705*4882a593Smuzhiyun 	if (ret) {
4706*4882a593Smuzhiyun 		clear_bit(vid, priv->active_vlans);
4707*4882a593Smuzhiyun 		return ret;
4708*4882a593Smuzhiyun 	}
4709*4882a593Smuzhiyun 
4710*4882a593Smuzhiyun 	if (priv->hw->num_vlan) {
4711*4882a593Smuzhiyun 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4712*4882a593Smuzhiyun 		if (ret)
4713*4882a593Smuzhiyun 			return ret;
4714*4882a593Smuzhiyun 	}
4715*4882a593Smuzhiyun 
4716*4882a593Smuzhiyun 	return 0;
4717*4882a593Smuzhiyun }
4718*4882a593Smuzhiyun 
4719*4882a593Smuzhiyun static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
4720*4882a593Smuzhiyun {
4721*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(ndev);
4722*4882a593Smuzhiyun 	bool is_double = false;
4723*4882a593Smuzhiyun 	int ret;
4724*4882a593Smuzhiyun 
4725*4882a593Smuzhiyun 	ret = pm_runtime_get_sync(priv->device);
4726*4882a593Smuzhiyun 	if (ret < 0) {
4727*4882a593Smuzhiyun 		pm_runtime_put_noidle(priv->device);
4728*4882a593Smuzhiyun 		return ret;
4729*4882a593Smuzhiyun 	}
4730*4882a593Smuzhiyun 
4731*4882a593Smuzhiyun 	if (be16_to_cpu(proto) == ETH_P_8021AD)
4732*4882a593Smuzhiyun 		is_double = true;
4733*4882a593Smuzhiyun 
4734*4882a593Smuzhiyun 	clear_bit(vid, priv->active_vlans);
4735*4882a593Smuzhiyun 
4736*4882a593Smuzhiyun 	if (priv->hw->num_vlan) {
4737*4882a593Smuzhiyun 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4738*4882a593Smuzhiyun 		if (ret)
4739*4882a593Smuzhiyun 			goto del_vlan_error;
4740*4882a593Smuzhiyun 	}
4741*4882a593Smuzhiyun 
4742*4882a593Smuzhiyun 	ret = stmmac_vlan_update(priv, is_double);
4743*4882a593Smuzhiyun 
4744*4882a593Smuzhiyun del_vlan_error:
4745*4882a593Smuzhiyun 	pm_runtime_put(priv->device);
4746*4882a593Smuzhiyun 
4747*4882a593Smuzhiyun 	return ret;
4748*4882a593Smuzhiyun }
4749*4882a593Smuzhiyun 
4750*4882a593Smuzhiyun static const struct net_device_ops stmmac_netdev_ops = {
4751*4882a593Smuzhiyun 	.ndo_open = stmmac_open,
4752*4882a593Smuzhiyun 	.ndo_start_xmit = stmmac_xmit,
4753*4882a593Smuzhiyun 	.ndo_stop = stmmac_release,
4754*4882a593Smuzhiyun 	.ndo_change_mtu = stmmac_change_mtu,
4755*4882a593Smuzhiyun 	.ndo_fix_features = stmmac_fix_features,
4756*4882a593Smuzhiyun 	.ndo_set_features = stmmac_set_features,
4757*4882a593Smuzhiyun 	.ndo_set_rx_mode = stmmac_set_rx_mode,
4758*4882a593Smuzhiyun 	.ndo_tx_timeout = stmmac_tx_timeout,
4759*4882a593Smuzhiyun 	.ndo_do_ioctl = stmmac_ioctl,
4760*4882a593Smuzhiyun 	.ndo_setup_tc = stmmac_setup_tc,
4761*4882a593Smuzhiyun 	.ndo_select_queue = stmmac_select_queue,
4762*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
4763*4882a593Smuzhiyun 	.ndo_poll_controller = stmmac_poll_controller,
4764*4882a593Smuzhiyun #endif
4765*4882a593Smuzhiyun 	.ndo_set_mac_address = stmmac_set_mac_address,
4766*4882a593Smuzhiyun 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
4767*4882a593Smuzhiyun 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
4768*4882a593Smuzhiyun };
4769*4882a593Smuzhiyun 
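/**
 *  stmmac_reset_subtask - restart the interface after a fatal error
 *  @priv: driver private structure
 *  Description: run from the service task when STMMAC_RESET_REQUESTED is
 *  set; closes and re-opens the device under the rtnl lock.
 */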
4770*4882a593Smuzhiyun static void stmmac_reset_subtask(struct stmmac_priv *priv)
4771*4882a593Smuzhiyun {
4772*4882a593Smuzhiyun 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4773*4882a593Smuzhiyun 		return;
4774*4882a593Smuzhiyun 	if (test_bit(STMMAC_DOWN, &priv->state))
4775*4882a593Smuzhiyun 		return;
4776*4882a593Smuzhiyun 
4777*4882a593Smuzhiyun 	netdev_err(priv->dev, "Reset adapter.\n");
4778*4882a593Smuzhiyun 
4779*4882a593Smuzhiyun 	rtnl_lock();
4780*4882a593Smuzhiyun 	netif_trans_update(priv->dev);
4781*4882a593Smuzhiyun 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4782*4882a593Smuzhiyun 		usleep_range(1000, 2000);
4783*4882a593Smuzhiyun 
4784*4882a593Smuzhiyun 	set_bit(STMMAC_DOWN, &priv->state);
4785*4882a593Smuzhiyun 	dev_close(priv->dev);
4786*4882a593Smuzhiyun 	dev_open(priv->dev, NULL);
4787*4882a593Smuzhiyun 	clear_bit(STMMAC_DOWN, &priv->state);
4788*4882a593Smuzhiyun 	clear_bit(STMMAC_RESETING, &priv->state);
4789*4882a593Smuzhiyun 	rtnl_unlock();
4790*4882a593Smuzhiyun }
4791*4882a593Smuzhiyun 
4792*4882a593Smuzhiyun static void stmmac_service_task(struct work_struct *work)
4793*4882a593Smuzhiyun {
4794*4882a593Smuzhiyun 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4795*4882a593Smuzhiyun 			service_task);
4796*4882a593Smuzhiyun 
4797*4882a593Smuzhiyun 	stmmac_reset_subtask(priv);
4798*4882a593Smuzhiyun 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4799*4882a593Smuzhiyun }
4800*4882a593Smuzhiyun 
4801*4882a593Smuzhiyun /**
4802*4882a593Smuzhiyun  *  stmmac_hw_init - Init the MAC device
4803*4882a593Smuzhiyun  *  @priv: driver private structure
4804*4882a593Smuzhiyun  *  Description: this function is to configure the MAC device according to
4805*4882a593Smuzhiyun  *  some platform parameters or the HW capability register. It prepares the
4806*4882a593Smuzhiyun  *  driver to use either ring or chain modes and to setup either enhanced or
4807*4882a593Smuzhiyun  *  normal descriptors.
4808*4882a593Smuzhiyun  */
4809*4882a593Smuzhiyun static int stmmac_hw_init(struct stmmac_priv *priv)
4810*4882a593Smuzhiyun {
4811*4882a593Smuzhiyun 	int ret;
4812*4882a593Smuzhiyun 
4813*4882a593Smuzhiyun 	/* dwmac-sun8i only works in chain mode */
4814*4882a593Smuzhiyun 	if (priv->plat->has_sun8i)
4815*4882a593Smuzhiyun 		chain_mode = 1;
4816*4882a593Smuzhiyun 	priv->chain_mode = chain_mode;
4817*4882a593Smuzhiyun 
4818*4882a593Smuzhiyun 	/* Initialize HW Interface */
4819*4882a593Smuzhiyun 	ret = stmmac_hwif_init(priv);
4820*4882a593Smuzhiyun 	if (ret)
4821*4882a593Smuzhiyun 		return ret;
4822*4882a593Smuzhiyun 
4823*4882a593Smuzhiyun 	/* Get the HW capability (new GMAC newer than 3.50a) */
4824*4882a593Smuzhiyun 	priv->hw_cap_support = stmmac_get_hw_features(priv);
4825*4882a593Smuzhiyun 	if (priv->hw_cap_support) {
4826*4882a593Smuzhiyun 		dev_info(priv->device, "DMA HW capability register supported\n");
4827*4882a593Smuzhiyun 
4828*4882a593Smuzhiyun 		/* We can override some gmac/dma configuration fields: e.g.
4829*4882a593Smuzhiyun 		 * enh_desc, tx_coe (e.g. that are passed through the
4830*4882a593Smuzhiyun 		 * platform) with the values from the HW capability
4831*4882a593Smuzhiyun 		 * register (if supported).
4832*4882a593Smuzhiyun 		 */
4833*4882a593Smuzhiyun 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
4834*4882a593Smuzhiyun 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4835*4882a593Smuzhiyun 		priv->hw->pmt = priv->plat->pmt;
4836*4882a593Smuzhiyun 		if (priv->dma_cap.hash_tb_sz) {
4837*4882a593Smuzhiyun 			priv->hw->multicast_filter_bins =
4838*4882a593Smuzhiyun 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
4839*4882a593Smuzhiyun 			priv->hw->mcast_bits_log2 =
4840*4882a593Smuzhiyun 					ilog2(priv->hw->multicast_filter_bins);
4841*4882a593Smuzhiyun 		}
4842*4882a593Smuzhiyun 
4843*4882a593Smuzhiyun 		/* TXCOE doesn't work in thresh DMA mode */
4844*4882a593Smuzhiyun 		if (priv->plat->force_thresh_dma_mode)
4845*4882a593Smuzhiyun 			priv->plat->tx_coe = 0;
4846*4882a593Smuzhiyun 		else
4847*4882a593Smuzhiyun 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
4848*4882a593Smuzhiyun 
4849*4882a593Smuzhiyun 		/* In case of GMAC4 rx_coe is from HW cap register. */
4850*4882a593Smuzhiyun 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4851*4882a593Smuzhiyun 
4852*4882a593Smuzhiyun 		if (priv->dma_cap.rx_coe_type2)
4853*4882a593Smuzhiyun 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4854*4882a593Smuzhiyun 		else if (priv->dma_cap.rx_coe_type1)
4855*4882a593Smuzhiyun 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4856*4882a593Smuzhiyun 
4857*4882a593Smuzhiyun 	} else {
4858*4882a593Smuzhiyun 		dev_info(priv->device, "No HW DMA feature register supported\n");
4859*4882a593Smuzhiyun 	}
4860*4882a593Smuzhiyun 
4861*4882a593Smuzhiyun 	if (priv->plat->rx_coe) {
4862*4882a593Smuzhiyun 		priv->hw->rx_csum = priv->plat->rx_coe;
4863*4882a593Smuzhiyun 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4864*4882a593Smuzhiyun 		if (priv->synopsys_id < DWMAC_CORE_4_00)
4865*4882a593Smuzhiyun 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4866*4882a593Smuzhiyun 	}
4867*4882a593Smuzhiyun 	if (priv->plat->tx_coe)
4868*4882a593Smuzhiyun 		dev_info(priv->device, "TX Checksum insertion supported\n");
4869*4882a593Smuzhiyun 
4870*4882a593Smuzhiyun 	if (priv->plat->pmt) {
4871*4882a593Smuzhiyun 		dev_info(priv->device, "Wake-Up On Lan supported\n");
4872*4882a593Smuzhiyun 		device_set_wakeup_capable(priv->device, 1);
4873*4882a593Smuzhiyun 	}
4874*4882a593Smuzhiyun 
4875*4882a593Smuzhiyun 	if (priv->dma_cap.tsoen)
4876*4882a593Smuzhiyun 		dev_info(priv->device, "TSO supported\n");
4877*4882a593Smuzhiyun 
4878*4882a593Smuzhiyun 	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
4879*4882a593Smuzhiyun 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
4880*4882a593Smuzhiyun 
4881*4882a593Smuzhiyun 	/* Run HW quirks, if any */
4882*4882a593Smuzhiyun 	if (priv->hwif_quirks) {
4883*4882a593Smuzhiyun 		ret = priv->hwif_quirks(priv);
4884*4882a593Smuzhiyun 		if (ret)
4885*4882a593Smuzhiyun 			return ret;
4886*4882a593Smuzhiyun 	}
4887*4882a593Smuzhiyun 
4888*4882a593Smuzhiyun 	/* Rx Watchdog is available in COREs newer than 3.40.
4889*4882a593Smuzhiyun 	 * In some cases, for example on buggy HW, this feature
4890*4882a593Smuzhiyun 	 * has to be disabled and this can be done by passing the
4891*4882a593Smuzhiyun 	 * riwt_off field from the platform.
4892*4882a593Smuzhiyun 	 */
4893*4882a593Smuzhiyun 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4894*4882a593Smuzhiyun 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4895*4882a593Smuzhiyun 		priv->use_riwt = 1;
4896*4882a593Smuzhiyun 		dev_info(priv->device,
4897*4882a593Smuzhiyun 			 "Enable RX Mitigation via HW Watchdog Timer\n");
4898*4882a593Smuzhiyun 	}
4899*4882a593Smuzhiyun 
4900*4882a593Smuzhiyun 	return 0;
4901*4882a593Smuzhiyun }
4902*4882a593Smuzhiyun 
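/**
 *  stmmac_napi_add - register the per-channel NAPI contexts
 *  @dev: net device structure
 *  Description: add one RX and/or TX NAPI instance per channel, capping
 *  the poll budget at the DMA ring size when that is smaller than
 *  NAPI_POLL_WEIGHT.
 */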
4903*4882a593Smuzhiyun static void stmmac_napi_add(struct net_device *dev)
4904*4882a593Smuzhiyun {
4905*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
4906*4882a593Smuzhiyun 	u32 queue, maxq;
4907*4882a593Smuzhiyun 
4908*4882a593Smuzhiyun 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4909*4882a593Smuzhiyun 
4910*4882a593Smuzhiyun 	for (queue = 0; queue < maxq; queue++) {
4911*4882a593Smuzhiyun 		struct stmmac_channel *ch = &priv->channel[queue];
4912*4882a593Smuzhiyun 		int rx_budget = ((priv->plat->dma_rx_size < NAPI_POLL_WEIGHT) &&
4913*4882a593Smuzhiyun 				 (priv->plat->dma_rx_size > 0)) ?
4914*4882a593Smuzhiyun 				 priv->plat->dma_rx_size : NAPI_POLL_WEIGHT;
4915*4882a593Smuzhiyun 		int tx_budget = ((priv->plat->dma_tx_size < NAPI_POLL_WEIGHT) &&
4916*4882a593Smuzhiyun 				 (priv->plat->dma_tx_size > 0)) ?
4917*4882a593Smuzhiyun 				 priv->plat->dma_tx_size : NAPI_POLL_WEIGHT;
4918*4882a593Smuzhiyun 
4919*4882a593Smuzhiyun 		ch->priv_data = priv;
4920*4882a593Smuzhiyun 		ch->index = queue;
4921*4882a593Smuzhiyun 		spin_lock_init(&ch->lock);
4922*4882a593Smuzhiyun 
4923*4882a593Smuzhiyun 		if (queue < priv->plat->rx_queues_to_use) {
4924*4882a593Smuzhiyun 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
4925*4882a593Smuzhiyun 				       rx_budget);
4926*4882a593Smuzhiyun 		}
4927*4882a593Smuzhiyun 		if (queue < priv->plat->tx_queues_to_use) {
4928*4882a593Smuzhiyun 			netif_tx_napi_add(dev, &ch->tx_napi,
4929*4882a593Smuzhiyun 					  stmmac_napi_poll_tx, tx_budget);
4930*4882a593Smuzhiyun 		}
4931*4882a593Smuzhiyun 	}
4932*4882a593Smuzhiyun }
4933*4882a593Smuzhiyun 
4934*4882a593Smuzhiyun static void stmmac_napi_del(struct net_device *dev)
4935*4882a593Smuzhiyun {
4936*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
4937*4882a593Smuzhiyun 	u32 queue, maxq;
4938*4882a593Smuzhiyun 
4939*4882a593Smuzhiyun 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4940*4882a593Smuzhiyun 
4941*4882a593Smuzhiyun 	for (queue = 0; queue < maxq; queue++) {
4942*4882a593Smuzhiyun 		struct stmmac_channel *ch = &priv->channel[queue];
4943*4882a593Smuzhiyun 
4944*4882a593Smuzhiyun 		if (queue < priv->plat->rx_queues_to_use)
4945*4882a593Smuzhiyun 			netif_napi_del(&ch->rx_napi);
4946*4882a593Smuzhiyun 		if (queue < priv->plat->tx_queues_to_use)
4947*4882a593Smuzhiyun 			netif_napi_del(&ch->tx_napi);
4948*4882a593Smuzhiyun 	}
4949*4882a593Smuzhiyun }
4950*4882a593Smuzhiyun 
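/**
 *  stmmac_reinit_queues - change the number of RX/TX queues at runtime
 *  @dev: net device structure
 *  @rx_cnt: new number of RX queues
 *  @tx_cnt: new number of TX queues
 *  Description: release the interface if it is running, update the queue
 *  counts, rebuild the NAPI contexts and re-open the interface.
 */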
4951*4882a593Smuzhiyun int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
4952*4882a593Smuzhiyun {
4953*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
4954*4882a593Smuzhiyun 	int ret = 0;
4955*4882a593Smuzhiyun 
4956*4882a593Smuzhiyun 	if (netif_running(dev))
4957*4882a593Smuzhiyun 		stmmac_release(dev);
4958*4882a593Smuzhiyun 
4959*4882a593Smuzhiyun 	stmmac_napi_del(dev);
4960*4882a593Smuzhiyun 
4961*4882a593Smuzhiyun 	priv->plat->rx_queues_to_use = rx_cnt;
4962*4882a593Smuzhiyun 	priv->plat->tx_queues_to_use = tx_cnt;
4963*4882a593Smuzhiyun 
4964*4882a593Smuzhiyun 	stmmac_napi_add(dev);
4965*4882a593Smuzhiyun 
4966*4882a593Smuzhiyun 	if (netif_running(dev))
4967*4882a593Smuzhiyun 		ret = stmmac_open(dev);
4968*4882a593Smuzhiyun 
4969*4882a593Smuzhiyun 	return ret;
4970*4882a593Smuzhiyun }
4971*4882a593Smuzhiyun 
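/**
 *  stmmac_reinit_ringparam - change the DMA ring sizes at runtime
 *  @dev: net device structure
 *  @rx_size: new RX descriptor ring size
 *  @tx_size: new TX descriptor ring size
 *  Description: release the interface if it is running, update the ring
 *  sizes and re-open it so the descriptor rings are re-allocated.
 */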
4972*4882a593Smuzhiyun int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
4973*4882a593Smuzhiyun {
4974*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(dev);
4975*4882a593Smuzhiyun 	int ret = 0;
4976*4882a593Smuzhiyun 
4977*4882a593Smuzhiyun 	if (netif_running(dev))
4978*4882a593Smuzhiyun 		stmmac_release(dev);
4979*4882a593Smuzhiyun 
4980*4882a593Smuzhiyun 	priv->dma_rx_size = rx_size;
4981*4882a593Smuzhiyun 	priv->dma_tx_size = tx_size;
4982*4882a593Smuzhiyun 
4983*4882a593Smuzhiyun 	if (netif_running(dev))
4984*4882a593Smuzhiyun 		ret = stmmac_open(dev);
4985*4882a593Smuzhiyun 
4986*4882a593Smuzhiyun 	return ret;
4987*4882a593Smuzhiyun }
4988*4882a593Smuzhiyun 
4989*4882a593Smuzhiyun /**
4990*4882a593Smuzhiyun  * stmmac_dvr_probe
4991*4882a593Smuzhiyun  * @device: device pointer
4992*4882a593Smuzhiyun  * @plat_dat: platform data pointer
4993*4882a593Smuzhiyun  * @res: stmmac resource pointer
4994*4882a593Smuzhiyun  * Description: this is the main probe function used to
4995*4882a593Smuzhiyun  * call alloc_etherdev and allocate the priv structure.
4996*4882a593Smuzhiyun  * Return:
4997*4882a593Smuzhiyun  * returns 0 on success, otherwise errno.
4998*4882a593Smuzhiyun  */
4999*4882a593Smuzhiyun int stmmac_dvr_probe(struct device *device,
5000*4882a593Smuzhiyun 		     struct plat_stmmacenet_data *plat_dat,
5001*4882a593Smuzhiyun 		     struct stmmac_resources *res)
5002*4882a593Smuzhiyun {
5003*4882a593Smuzhiyun 	struct net_device *ndev = NULL;
5004*4882a593Smuzhiyun 	struct stmmac_priv *priv;
5005*4882a593Smuzhiyun 	u32 rxq;
5006*4882a593Smuzhiyun 	int i, ret = 0;
5007*4882a593Smuzhiyun 
5008*4882a593Smuzhiyun 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
5009*4882a593Smuzhiyun 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
5010*4882a593Smuzhiyun 	if (!ndev)
5011*4882a593Smuzhiyun 		return -ENOMEM;
5012*4882a593Smuzhiyun 
5013*4882a593Smuzhiyun 	SET_NETDEV_DEV(ndev, device);
5014*4882a593Smuzhiyun 
5015*4882a593Smuzhiyun 	priv = netdev_priv(ndev);
5016*4882a593Smuzhiyun 	priv->device = device;
5017*4882a593Smuzhiyun 	priv->dev = ndev;
5018*4882a593Smuzhiyun 
5019*4882a593Smuzhiyun 	stmmac_set_ethtool_ops(ndev);
5020*4882a593Smuzhiyun 	priv->pause = pause;
5021*4882a593Smuzhiyun 	priv->plat = plat_dat;
5022*4882a593Smuzhiyun 	priv->ioaddr = res->addr;
5023*4882a593Smuzhiyun 	priv->dev->base_addr = (unsigned long)res->addr;
5024*4882a593Smuzhiyun 
5025*4882a593Smuzhiyun 	priv->dev->irq = res->irq;
5026*4882a593Smuzhiyun 	priv->wol_irq = res->wol_irq;
5027*4882a593Smuzhiyun 	priv->lpi_irq = res->lpi_irq;
5028*4882a593Smuzhiyun 
5029*4882a593Smuzhiyun 	if (!IS_ERR_OR_NULL(res->mac))
5030*4882a593Smuzhiyun 		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
5031*4882a593Smuzhiyun 
5032*4882a593Smuzhiyun 	dev_set_drvdata(device, priv->dev);
5033*4882a593Smuzhiyun 
5034*4882a593Smuzhiyun 	/* Verify driver arguments */
5035*4882a593Smuzhiyun 	stmmac_verify_args();
5036*4882a593Smuzhiyun 
5037*4882a593Smuzhiyun 	/* Allocate workqueue */
5038*4882a593Smuzhiyun 	priv->wq = create_singlethread_workqueue("stmmac_wq");
5039*4882a593Smuzhiyun 	if (!priv->wq) {
5040*4882a593Smuzhiyun 		dev_err(priv->device, "failed to create workqueue\n");
5041*4882a593Smuzhiyun 		return -ENOMEM;
5042*4882a593Smuzhiyun 	}
5043*4882a593Smuzhiyun 
5044*4882a593Smuzhiyun 	INIT_WORK(&priv->service_task, stmmac_service_task);
5045*4882a593Smuzhiyun 
5046*4882a593Smuzhiyun 	/* Override with kernel parameters if supplied XXX CRS XXX
5047*4882a593Smuzhiyun 	 * this needs to have multiple instances
5048*4882a593Smuzhiyun 	 */
5049*4882a593Smuzhiyun 	if ((phyaddr >= 0) && (phyaddr <= 31))
5050*4882a593Smuzhiyun 		priv->plat->phy_addr = phyaddr;
5051*4882a593Smuzhiyun 
5052*4882a593Smuzhiyun 	if (priv->plat->stmmac_rst) {
5053*4882a593Smuzhiyun 		ret = reset_control_assert(priv->plat->stmmac_rst);
5054*4882a593Smuzhiyun 		reset_control_deassert(priv->plat->stmmac_rst);
5055*4882a593Smuzhiyun 		/* Some reset controllers have only a reset callback instead of
5056*4882a593Smuzhiyun 		 * an assert + deassert callback pair.
5057*4882a593Smuzhiyun 		 */
5058*4882a593Smuzhiyun 		if (ret == -ENOTSUPP)
5059*4882a593Smuzhiyun 			reset_control_reset(priv->plat->stmmac_rst);
5060*4882a593Smuzhiyun 	}
5061*4882a593Smuzhiyun 
5062*4882a593Smuzhiyun 	/* Init MAC and get the capabilities */
5063*4882a593Smuzhiyun 	ret = stmmac_hw_init(priv);
5064*4882a593Smuzhiyun 	if (ret)
5065*4882a593Smuzhiyun 		goto error_hw_init;
5066*4882a593Smuzhiyun 
5067*4882a593Smuzhiyun 	stmmac_check_ether_addr(priv);
5068*4882a593Smuzhiyun 
5069*4882a593Smuzhiyun 	ndev->netdev_ops = &stmmac_netdev_ops;
5070*4882a593Smuzhiyun 
5071*4882a593Smuzhiyun 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5072*4882a593Smuzhiyun 			    NETIF_F_RXCSUM;
5073*4882a593Smuzhiyun 
5074*4882a593Smuzhiyun 	ret = stmmac_tc_init(priv, priv);
5075*4882a593Smuzhiyun 	if (!ret) {
5076*4882a593Smuzhiyun 		ndev->hw_features |= NETIF_F_HW_TC;
5077*4882a593Smuzhiyun 	}
5078*4882a593Smuzhiyun 
5079*4882a593Smuzhiyun 	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5080*4882a593Smuzhiyun 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
5081*4882a593Smuzhiyun 		if (priv->plat->has_gmac4)
5082*4882a593Smuzhiyun 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
5083*4882a593Smuzhiyun 		priv->tso = true;
5084*4882a593Smuzhiyun 		dev_info(priv->device, "TSO feature enabled\n");
5085*4882a593Smuzhiyun 	}
5086*4882a593Smuzhiyun 
5087*4882a593Smuzhiyun 	if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
5088*4882a593Smuzhiyun 		ndev->hw_features |= NETIF_F_GRO;
5089*4882a593Smuzhiyun 		if (!priv->plat->sph_disable) {
5090*4882a593Smuzhiyun 			priv->sph = true;
5091*4882a593Smuzhiyun 			dev_info(priv->device, "SPH feature enabled\n");
5092*4882a593Smuzhiyun 		}
5093*4882a593Smuzhiyun 	}
5094*4882a593Smuzhiyun 
5095*4882a593Smuzhiyun 	/* The current IP register MAC_HW_Feature1[ADDR64] only defines
5096*4882a593Smuzhiyun 	 * 32/40/64 bit widths, but some SoCs support other widths: i.MX8MP,
5097*4882a593Smuzhiyun 	 * for example, supports 34 bits but maps to a 40-bit width in MAC_HW_Feature1[ADDR64].
5098*4882a593Smuzhiyun 	 * So overwrite dma_cap.addr64 according to the real HW design.
5099*4882a593Smuzhiyun 	 */
5100*4882a593Smuzhiyun 	if (priv->plat->addr64)
5101*4882a593Smuzhiyun 		priv->dma_cap.addr64 = priv->plat->addr64;
5102*4882a593Smuzhiyun 
5103*4882a593Smuzhiyun 	if (priv->dma_cap.addr64) {
5104*4882a593Smuzhiyun 		ret = dma_set_mask_and_coherent(device,
5105*4882a593Smuzhiyun 				DMA_BIT_MASK(priv->dma_cap.addr64));
5106*4882a593Smuzhiyun 		if (!ret) {
5107*4882a593Smuzhiyun 			dev_info(priv->device, "Using %d bits DMA width\n",
5108*4882a593Smuzhiyun 				 priv->dma_cap.addr64);
5109*4882a593Smuzhiyun 
5110*4882a593Smuzhiyun 			/*
5111*4882a593Smuzhiyun 			 * If more than 32 bits can be addressed, make sure to
5112*4882a593Smuzhiyun 			 * enable enhanced addressing mode.
5113*4882a593Smuzhiyun 			 */
5114*4882a593Smuzhiyun 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
5115*4882a593Smuzhiyun 				priv->plat->dma_cfg->eame = true;
5116*4882a593Smuzhiyun 		} else {
5117*4882a593Smuzhiyun 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
5118*4882a593Smuzhiyun 			if (ret) {
5119*4882a593Smuzhiyun 				dev_err(priv->device, "Failed to set DMA Mask\n");
5120*4882a593Smuzhiyun 				goto error_hw_init;
5121*4882a593Smuzhiyun 			}
5122*4882a593Smuzhiyun 
5123*4882a593Smuzhiyun 			priv->dma_cap.addr64 = 32;
5124*4882a593Smuzhiyun 		}
5125*4882a593Smuzhiyun 	}
5126*4882a593Smuzhiyun 
5127*4882a593Smuzhiyun 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
5128*4882a593Smuzhiyun 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
5129*4882a593Smuzhiyun #ifdef STMMAC_VLAN_TAG_USED
5130*4882a593Smuzhiyun 	/* Both mac100 and gmac support receive VLAN tag detection */
5131*4882a593Smuzhiyun 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
5132*4882a593Smuzhiyun 	if (priv->dma_cap.vlhash) {
5133*4882a593Smuzhiyun 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5134*4882a593Smuzhiyun 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
5135*4882a593Smuzhiyun 	}
5136*4882a593Smuzhiyun 	if (priv->dma_cap.vlins) {
5137*4882a593Smuzhiyun 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
5138*4882a593Smuzhiyun 		if (priv->dma_cap.dvlan)
5139*4882a593Smuzhiyun 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
5140*4882a593Smuzhiyun 	}
5141*4882a593Smuzhiyun #endif
5142*4882a593Smuzhiyun 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
5143*4882a593Smuzhiyun 
5144*4882a593Smuzhiyun 	/* Initialize RSS */
5145*4882a593Smuzhiyun 	rxq = priv->plat->rx_queues_to_use;
5146*4882a593Smuzhiyun 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
5147*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
5148*4882a593Smuzhiyun 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
5149*4882a593Smuzhiyun 
5150*4882a593Smuzhiyun 	if (priv->dma_cap.rssen && priv->plat->rss_en)
5151*4882a593Smuzhiyun 		ndev->features |= NETIF_F_RXHASH;
5152*4882a593Smuzhiyun 
5153*4882a593Smuzhiyun 	/* MTU range: 46 - hw-specific max */
5154*4882a593Smuzhiyun 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
5155*4882a593Smuzhiyun 	if (priv->plat->has_xgmac)
5156*4882a593Smuzhiyun 		ndev->max_mtu = XGMAC_JUMBO_LEN;
5157*4882a593Smuzhiyun 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
5158*4882a593Smuzhiyun 		ndev->max_mtu = JUMBO_LEN;
5159*4882a593Smuzhiyun 	else
5160*4882a593Smuzhiyun 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
5161*4882a593Smuzhiyun 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
5162*4882a593Smuzhiyun 	 * as well as plat->maxmtu < ndev->min_mtu which is an invalid range.
5163*4882a593Smuzhiyun 	 */
5164*4882a593Smuzhiyun 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
5165*4882a593Smuzhiyun 	    (priv->plat->maxmtu >= ndev->min_mtu))
5166*4882a593Smuzhiyun 		ndev->max_mtu = priv->plat->maxmtu;
5167*4882a593Smuzhiyun 	else if (priv->plat->maxmtu < ndev->min_mtu)
5168*4882a593Smuzhiyun 		dev_warn(priv->device,
5169*4882a593Smuzhiyun 			 "%s: warning: maxmtu having invalid value (%d)\n",
5170*4882a593Smuzhiyun 			 __func__, priv->plat->maxmtu);
5171*4882a593Smuzhiyun 
5172*4882a593Smuzhiyun 	if (flow_ctrl)
5173*4882a593Smuzhiyun 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
5174*4882a593Smuzhiyun 
5175*4882a593Smuzhiyun 	/* Setup channels NAPI */
5176*4882a593Smuzhiyun 	stmmac_napi_add(ndev);
5177*4882a593Smuzhiyun 
5178*4882a593Smuzhiyun 	mutex_init(&priv->lock);
5179*4882a593Smuzhiyun 
5180*4882a593Smuzhiyun 	/* If a specific clk_csr value is passed from the platform
5181*4882a593Smuzhiyun 	 * this means that the CSR Clock Range selection cannot be
5182*4882a593Smuzhiyun 	 * changed at run-time and it is fixed. Otherwise the driver will try to
5183*4882a593Smuzhiyun 	 * set the MDC clock dynamically according to the actual CSR
5184*4882a593Smuzhiyun 	 * clock input.
5185*4882a593Smuzhiyun 	 */
5186*4882a593Smuzhiyun 	if (priv->plat->clk_csr >= 0)
5187*4882a593Smuzhiyun 		priv->clk_csr = priv->plat->clk_csr;
5188*4882a593Smuzhiyun 	else
5189*4882a593Smuzhiyun 		stmmac_clk_csr_set(priv);
5190*4882a593Smuzhiyun 
5191*4882a593Smuzhiyun 	stmmac_check_pcs_mode(priv);
5192*4882a593Smuzhiyun 
5193*4882a593Smuzhiyun 	pm_runtime_get_noresume(device);
5194*4882a593Smuzhiyun 	pm_runtime_set_active(device);
5195*4882a593Smuzhiyun 	pm_runtime_enable(device);
5196*4882a593Smuzhiyun 
5197*4882a593Smuzhiyun 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
5198*4882a593Smuzhiyun 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
5199*4882a593Smuzhiyun 		/* MDIO bus Registration */
5200*4882a593Smuzhiyun 		ret = stmmac_mdio_register(ndev);
5201*4882a593Smuzhiyun 		if (ret < 0) {
5202*4882a593Smuzhiyun 			dev_err(priv->device,
5203*4882a593Smuzhiyun 				"%s: MDIO bus (id: %d) registration failed",
5204*4882a593Smuzhiyun 				__func__, priv->plat->bus_id);
5205*4882a593Smuzhiyun 			goto error_mdio_register;
5206*4882a593Smuzhiyun 		}
5207*4882a593Smuzhiyun 	}
5208*4882a593Smuzhiyun 
5209*4882a593Smuzhiyun 	ret = stmmac_phy_setup(priv);
5210*4882a593Smuzhiyun 	if (ret) {
5211*4882a593Smuzhiyun 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
5212*4882a593Smuzhiyun 		goto error_phy_setup;
5213*4882a593Smuzhiyun 	}
5214*4882a593Smuzhiyun 
5215*4882a593Smuzhiyun 	ret = register_netdev(ndev);
5216*4882a593Smuzhiyun 	if (ret) {
5217*4882a593Smuzhiyun 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
5218*4882a593Smuzhiyun 			__func__, ret);
5219*4882a593Smuzhiyun 		goto error_netdev_register;
5220*4882a593Smuzhiyun 	}
5221*4882a593Smuzhiyun 
5222*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_FS
5223*4882a593Smuzhiyun 	stmmac_init_fs(ndev);
5224*4882a593Smuzhiyun #endif
5225*4882a593Smuzhiyun 
5226*4882a593Smuzhiyun 	/* Let pm_runtime_put() disable the clocks.
5227*4882a593Smuzhiyun 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
5228*4882a593Smuzhiyun 	 */
5229*4882a593Smuzhiyun 	pm_runtime_put(device);
5230*4882a593Smuzhiyun 
5231*4882a593Smuzhiyun 	return ret;
5232*4882a593Smuzhiyun 
5233*4882a593Smuzhiyun error_netdev_register:
5234*4882a593Smuzhiyun 	phylink_destroy(priv->phylink);
5235*4882a593Smuzhiyun error_phy_setup:
5236*4882a593Smuzhiyun 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
5237*4882a593Smuzhiyun 	    priv->hw->pcs != STMMAC_PCS_RTBI)
5238*4882a593Smuzhiyun 		stmmac_mdio_unregister(ndev);
5239*4882a593Smuzhiyun error_mdio_register:
5240*4882a593Smuzhiyun 	stmmac_napi_del(ndev);
5241*4882a593Smuzhiyun error_hw_init:
5242*4882a593Smuzhiyun 	destroy_workqueue(priv->wq);
5243*4882a593Smuzhiyun 
5244*4882a593Smuzhiyun 	return ret;
5245*4882a593Smuzhiyun }
5246*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
5247*4882a593Smuzhiyun 
5248*4882a593Smuzhiyun /**
5249*4882a593Smuzhiyun  * stmmac_dvr_remove
5250*4882a593Smuzhiyun  * @dev: device pointer
5251*4882a593Smuzhiyun  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
5252*4882a593Smuzhiyun  * changes the link status, and releases the DMA descriptor rings.
5253*4882a593Smuzhiyun  */
5254*4882a593Smuzhiyun int stmmac_dvr_remove(struct device *dev)
5255*4882a593Smuzhiyun {
5256*4882a593Smuzhiyun 	struct net_device *ndev = dev_get_drvdata(dev);
5257*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(ndev);
5258*4882a593Smuzhiyun 
5259*4882a593Smuzhiyun 	netdev_info(priv->dev, "%s: removing driver", __func__);
5260*4882a593Smuzhiyun 
5261*4882a593Smuzhiyun 	stmmac_stop_all_dma(priv);
5262*4882a593Smuzhiyun 	stmmac_mac_set(priv, priv->ioaddr, false);
5263*4882a593Smuzhiyun 	netif_carrier_off(ndev);
5264*4882a593Smuzhiyun 	unregister_netdev(ndev);
5265*4882a593Smuzhiyun 
5266*4882a593Smuzhiyun 	/* Serdes power down needs to happen after the VLAN filter
5267*4882a593Smuzhiyun 	 * is deleted, which is triggered by unregister_netdev().
5268*4882a593Smuzhiyun 	 */
5269*4882a593Smuzhiyun 	if (priv->plat->serdes_powerdown)
5270*4882a593Smuzhiyun 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5271*4882a593Smuzhiyun 
5272*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_FS
5273*4882a593Smuzhiyun 	stmmac_exit_fs(ndev);
5274*4882a593Smuzhiyun #endif
5275*4882a593Smuzhiyun 	phylink_destroy(priv->phylink);
5276*4882a593Smuzhiyun 	if (priv->plat->stmmac_rst)
5277*4882a593Smuzhiyun 		reset_control_assert(priv->plat->stmmac_rst);
5278*4882a593Smuzhiyun 	pm_runtime_put(dev);
5279*4882a593Smuzhiyun 	pm_runtime_disable(dev);
5280*4882a593Smuzhiyun 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
5281*4882a593Smuzhiyun 	    priv->hw->pcs != STMMAC_PCS_RTBI)
5282*4882a593Smuzhiyun 		stmmac_mdio_unregister(ndev);
5283*4882a593Smuzhiyun 	destroy_workqueue(priv->wq);
5284*4882a593Smuzhiyun 	mutex_destroy(&priv->lock);
5285*4882a593Smuzhiyun 
5286*4882a593Smuzhiyun 	return 0;
5287*4882a593Smuzhiyun }
5288*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
5289*4882a593Smuzhiyun 
5290*4882a593Smuzhiyun /**
5291*4882a593Smuzhiyun  * stmmac_suspend - suspend callback
5292*4882a593Smuzhiyun  * @dev: device pointer
5293*4882a593Smuzhiyun  * Description: this is the function to suspend the device and it is called
5294*4882a593Smuzhiyun  * by the platform driver to stop the network queue, release the resources,
5295*4882a593Smuzhiyun  * program the PMT register (for WoL), clean and release driver resources.
5296*4882a593Smuzhiyun  */
5297*4882a593Smuzhiyun int stmmac_suspend(struct device *dev)
5298*4882a593Smuzhiyun {
5299*4882a593Smuzhiyun 	struct net_device *ndev = dev_get_drvdata(dev);
5300*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(ndev);
5301*4882a593Smuzhiyun 	u32 chan;
5302*4882a593Smuzhiyun 
5303*4882a593Smuzhiyun 	if (!ndev || !netif_running(ndev))
5304*4882a593Smuzhiyun 		return 0;
5305*4882a593Smuzhiyun 
5306*4882a593Smuzhiyun 	phylink_mac_change(priv->phylink, false);
5307*4882a593Smuzhiyun 
5308*4882a593Smuzhiyun 	mutex_lock(&priv->lock);
5309*4882a593Smuzhiyun 
5310*4882a593Smuzhiyun 	netif_device_detach(ndev);
5311*4882a593Smuzhiyun 
5312*4882a593Smuzhiyun 	stmmac_disable_all_queues(priv);
5313*4882a593Smuzhiyun 
5314*4882a593Smuzhiyun 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
5315*4882a593Smuzhiyun 		del_timer_sync(&priv->tx_queue[chan].txtimer);
5316*4882a593Smuzhiyun 
5317*4882a593Smuzhiyun 	if (priv->eee_enabled) {
5318*4882a593Smuzhiyun 		priv->tx_path_in_lpi_mode = false;
5319*4882a593Smuzhiyun 		del_timer_sync(&priv->eee_ctrl_timer);
5320*4882a593Smuzhiyun 	}
5321*4882a593Smuzhiyun 
5322*4882a593Smuzhiyun 	/* Stop TX/RX DMA */
5323*4882a593Smuzhiyun 	stmmac_stop_all_dma(priv);
5324*4882a593Smuzhiyun 
5325*4882a593Smuzhiyun 	if (priv->plat->serdes_powerdown)
5326*4882a593Smuzhiyun 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5327*4882a593Smuzhiyun 
5328*4882a593Smuzhiyun 	/* Enable Power down mode by programming the PMT regs */
5329*4882a593Smuzhiyun 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
5330*4882a593Smuzhiyun 		stmmac_pmt(priv, priv->hw, priv->wolopts);
5331*4882a593Smuzhiyun 		priv->irq_wake = 1;
5332*4882a593Smuzhiyun 	} else {
5333*4882a593Smuzhiyun 		mutex_unlock(&priv->lock);
5334*4882a593Smuzhiyun 		rtnl_lock();
5335*4882a593Smuzhiyun 		if (device_may_wakeup(priv->device))
5336*4882a593Smuzhiyun 			phylink_speed_down(priv->phylink, false);
5337*4882a593Smuzhiyun 		if (priv->plat->integrated_phy_power)
5338*4882a593Smuzhiyun 			priv->plat->integrated_phy_power(priv->plat->bsp_priv,
5339*4882a593Smuzhiyun 							 false);
5340*4882a593Smuzhiyun 		phylink_stop(priv->phylink);
5341*4882a593Smuzhiyun 		rtnl_unlock();
5342*4882a593Smuzhiyun 		mutex_lock(&priv->lock);
5343*4882a593Smuzhiyun 
5344*4882a593Smuzhiyun 		stmmac_mac_set(priv, priv->ioaddr, false);
5345*4882a593Smuzhiyun 		pinctrl_pm_select_sleep_state(priv->device);
5346*4882a593Smuzhiyun 	}
5347*4882a593Smuzhiyun 	mutex_unlock(&priv->lock);
5348*4882a593Smuzhiyun 
5349*4882a593Smuzhiyun 	priv->speed = SPEED_UNKNOWN;
5350*4882a593Smuzhiyun 	return 0;
5351*4882a593Smuzhiyun }
5352*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(stmmac_suspend);
5353*4882a593Smuzhiyun 
5354*4882a593Smuzhiyun /**
5355*4882a593Smuzhiyun  * stmmac_reset_queues_param - reset queue parameters
5356*4882a593Smuzhiyun  * @priv: device pointer
5357*4882a593Smuzhiyun  */
5358*4882a593Smuzhiyun static void stmmac_reset_queues_param(struct stmmac_priv *priv)
5359*4882a593Smuzhiyun {
5360*4882a593Smuzhiyun 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5361*4882a593Smuzhiyun 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5362*4882a593Smuzhiyun 	u32 queue;
5363*4882a593Smuzhiyun 
5364*4882a593Smuzhiyun 	for (queue = 0; queue < rx_cnt; queue++) {
5365*4882a593Smuzhiyun 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5366*4882a593Smuzhiyun 
5367*4882a593Smuzhiyun 		rx_q->cur_rx = 0;
5368*4882a593Smuzhiyun 		rx_q->dirty_rx = 0;
5369*4882a593Smuzhiyun 	}
5370*4882a593Smuzhiyun 
5371*4882a593Smuzhiyun 	for (queue = 0; queue < tx_cnt; queue++) {
5372*4882a593Smuzhiyun 		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5373*4882a593Smuzhiyun 
5374*4882a593Smuzhiyun 		tx_q->cur_tx = 0;
5375*4882a593Smuzhiyun 		tx_q->dirty_tx = 0;
5376*4882a593Smuzhiyun 		tx_q->mss = 0;
5377*4882a593Smuzhiyun 
5378*4882a593Smuzhiyun 		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
5379*4882a593Smuzhiyun 	}
5380*4882a593Smuzhiyun }
5381*4882a593Smuzhiyun 
5382*4882a593Smuzhiyun /**
5383*4882a593Smuzhiyun  * stmmac_resume - resume callback
5384*4882a593Smuzhiyun  * @dev: device pointer
5385*4882a593Smuzhiyun  * Description: when resuming, this function is invoked to set up the DMA and
5386*4882a593Smuzhiyun  * CORE in a usable state.
5387*4882a593Smuzhiyun  */
5388*4882a593Smuzhiyun int stmmac_resume(struct device *dev)
5389*4882a593Smuzhiyun {
5390*4882a593Smuzhiyun 	struct net_device *ndev = dev_get_drvdata(dev);
5391*4882a593Smuzhiyun 	struct stmmac_priv *priv = netdev_priv(ndev);
5392*4882a593Smuzhiyun 	int ret;
5393*4882a593Smuzhiyun 
5394*4882a593Smuzhiyun 	if (!netif_running(ndev))
5395*4882a593Smuzhiyun 		return 0;
5396*4882a593Smuzhiyun 
5397*4882a593Smuzhiyun 	/* The Power Down bit in the PM register is cleared
5398*4882a593Smuzhiyun 	 * automatically as soon as a magic packet or a Wake-up frame
5399*4882a593Smuzhiyun 	 * is received. Anyway, it's better to manually clear
5400*4882a593Smuzhiyun 	 * this bit because it can generate problems while resuming
5401*4882a593Smuzhiyun 	 * from other devices (e.g. serial console).
5402*4882a593Smuzhiyun 	 */
5403*4882a593Smuzhiyun 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
5404*4882a593Smuzhiyun 		mutex_lock(&priv->lock);
5405*4882a593Smuzhiyun 		stmmac_pmt(priv, priv->hw, 0);
5406*4882a593Smuzhiyun 		mutex_unlock(&priv->lock);
5407*4882a593Smuzhiyun 		priv->irq_wake = 0;
5408*4882a593Smuzhiyun 	} else {
5409*4882a593Smuzhiyun 		pinctrl_pm_select_default_state(priv->device);
5410*4882a593Smuzhiyun 		/* reset the phy so that it's ready */
5411*4882a593Smuzhiyun 		if (priv->mii)
5412*4882a593Smuzhiyun 			stmmac_mdio_reset(priv->mii);
5413*4882a593Smuzhiyun 		if (priv->plat->integrated_phy_power)
5414*4882a593Smuzhiyun 			priv->plat->integrated_phy_power(priv->plat->bsp_priv,
5415*4882a593Smuzhiyun 							 true);
5416*4882a593Smuzhiyun 	}
5417*4882a593Smuzhiyun 
5418*4882a593Smuzhiyun 	if (priv->plat->serdes_powerup) {
5419*4882a593Smuzhiyun 		ret = priv->plat->serdes_powerup(ndev,
5420*4882a593Smuzhiyun 						 priv->plat->bsp_priv);
5421*4882a593Smuzhiyun 
5422*4882a593Smuzhiyun 		if (ret < 0)
5423*4882a593Smuzhiyun 			return ret;
5424*4882a593Smuzhiyun 	}
5425*4882a593Smuzhiyun 
5426*4882a593Smuzhiyun 	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
5427*4882a593Smuzhiyun 		rtnl_lock();
5428*4882a593Smuzhiyun 		phylink_start(priv->phylink);
5429*4882a593Smuzhiyun 		/* We may have called phylink_speed_down before */
5430*4882a593Smuzhiyun 		phylink_speed_up(priv->phylink);
5431*4882a593Smuzhiyun 		rtnl_unlock();
5432*4882a593Smuzhiyun 	}
5433*4882a593Smuzhiyun 
5434*4882a593Smuzhiyun 	rtnl_lock();
5435*4882a593Smuzhiyun 	mutex_lock(&priv->lock);
5436*4882a593Smuzhiyun 
5437*4882a593Smuzhiyun 	stmmac_reset_queues_param(priv);
5438*4882a593Smuzhiyun 
5439*4882a593Smuzhiyun 	stmmac_free_tx_skbufs(priv);
5440*4882a593Smuzhiyun 	stmmac_clear_descriptors(priv);
5441*4882a593Smuzhiyun 
5442*4882a593Smuzhiyun 	stmmac_hw_setup(ndev, false);
5443*4882a593Smuzhiyun 	stmmac_init_coalesce(priv);
5444*4882a593Smuzhiyun 	stmmac_set_rx_mode(ndev);
5445*4882a593Smuzhiyun 
5446*4882a593Smuzhiyun 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
5447*4882a593Smuzhiyun 
5448*4882a593Smuzhiyun 	stmmac_enable_all_queues(priv);
5449*4882a593Smuzhiyun 
5450*4882a593Smuzhiyun 	mutex_unlock(&priv->lock);
5451*4882a593Smuzhiyun 	rtnl_unlock();
5452*4882a593Smuzhiyun 
5453*4882a593Smuzhiyun 	phylink_mac_change(priv->phylink, true);
5454*4882a593Smuzhiyun 
5455*4882a593Smuzhiyun 	netif_device_attach(ndev);
5456*4882a593Smuzhiyun 
5457*4882a593Smuzhiyun 	return 0;
5458*4882a593Smuzhiyun }
5459*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(stmmac_resume);
5460*4882a593Smuzhiyun 
5461*4882a593Smuzhiyun #ifndef MODULE
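/* Built-in (non-module) parsing of the "stmmaceth=" boot parameter
 * registered with __setup() below. Options are comma-separated
 * "key:value" pairs mirroring the module parameters parsed here
 * (debug, phyaddr, buf_sz, tc, watchdog, flow_ctrl, pause, eee_timer,
 * chain_mode), with integer values, e.g. (illustrative values only):
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,eee_timer:1000
 */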
5462*4882a593Smuzhiyun static int __init stmmac_cmdline_opt(char *str)
5463*4882a593Smuzhiyun {
5464*4882a593Smuzhiyun 	char *opt;
5465*4882a593Smuzhiyun 
5466*4882a593Smuzhiyun 	if (!str || !*str)
5467*4882a593Smuzhiyun 		return 1;
5468*4882a593Smuzhiyun 	while ((opt = strsep(&str, ",")) != NULL) {
5469*4882a593Smuzhiyun 		if (!strncmp(opt, "debug:", 6)) {
5470*4882a593Smuzhiyun 			if (kstrtoint(opt + 6, 0, &debug))
5471*4882a593Smuzhiyun 				goto err;
5472*4882a593Smuzhiyun 		} else if (!strncmp(opt, "phyaddr:", 8)) {
5473*4882a593Smuzhiyun 			if (kstrtoint(opt + 8, 0, &phyaddr))
5474*4882a593Smuzhiyun 				goto err;
5475*4882a593Smuzhiyun 		} else if (!strncmp(opt, "buf_sz:", 7)) {
5476*4882a593Smuzhiyun 			if (kstrtoint(opt + 7, 0, &buf_sz))
5477*4882a593Smuzhiyun 				goto err;
5478*4882a593Smuzhiyun 		} else if (!strncmp(opt, "tc:", 3)) {
5479*4882a593Smuzhiyun 			if (kstrtoint(opt + 3, 0, &tc))
5480*4882a593Smuzhiyun 				goto err;
5481*4882a593Smuzhiyun 		} else if (!strncmp(opt, "watchdog:", 9)) {
5482*4882a593Smuzhiyun 			if (kstrtoint(opt + 9, 0, &watchdog))
5483*4882a593Smuzhiyun 				goto err;
5484*4882a593Smuzhiyun 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
5485*4882a593Smuzhiyun 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
5486*4882a593Smuzhiyun 				goto err;
5487*4882a593Smuzhiyun 		} else if (!strncmp(opt, "pause:", 6)) {
5488*4882a593Smuzhiyun 			if (kstrtoint(opt + 6, 0, &pause))
5489*4882a593Smuzhiyun 				goto err;
5490*4882a593Smuzhiyun 		} else if (!strncmp(opt, "eee_timer:", 10)) {
5491*4882a593Smuzhiyun 			if (kstrtoint(opt + 10, 0, &eee_timer))
5492*4882a593Smuzhiyun 				goto err;
5493*4882a593Smuzhiyun 		} else if (!strncmp(opt, "chain_mode:", 11)) {
5494*4882a593Smuzhiyun 			if (kstrtoint(opt + 11, 0, &chain_mode))
5495*4882a593Smuzhiyun 				goto err;
5496*4882a593Smuzhiyun 		}
5497*4882a593Smuzhiyun 	}
5498*4882a593Smuzhiyun 	return 1;
5499*4882a593Smuzhiyun 
5500*4882a593Smuzhiyun err:
5501*4882a593Smuzhiyun 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
5502*4882a593Smuzhiyun 	return 1;
5503*4882a593Smuzhiyun }
5504*4882a593Smuzhiyun 
5505*4882a593Smuzhiyun __setup("stmmaceth=", stmmac_cmdline_opt);
5506*4882a593Smuzhiyun #endif /* MODULE */
5507*4882a593Smuzhiyun 
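/* Module init/exit: when CONFIG_DEBUG_FS is enabled, create the driver's
 * top-level debugfs directory (if not already present) and register the
 * netdevice notifier used by the debugfs support; both are torn down on
 * module exit.
 */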
5508*4882a593Smuzhiyun static int __init stmmac_init(void)
5509*4882a593Smuzhiyun {
5510*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_FS
5511*4882a593Smuzhiyun 	/* Create debugfs main directory if it doesn't exist yet */
5512*4882a593Smuzhiyun 	if (!stmmac_fs_dir)
5513*4882a593Smuzhiyun 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
5514*4882a593Smuzhiyun 	register_netdevice_notifier(&stmmac_notifier);
5515*4882a593Smuzhiyun #endif
5516*4882a593Smuzhiyun 
5517*4882a593Smuzhiyun 	return 0;
5518*4882a593Smuzhiyun }
5519*4882a593Smuzhiyun 
5520*4882a593Smuzhiyun static void __exit stmmac_exit(void)
5521*4882a593Smuzhiyun {
5522*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_FS
5523*4882a593Smuzhiyun 	unregister_netdevice_notifier(&stmmac_notifier);
5524*4882a593Smuzhiyun 	debugfs_remove_recursive(stmmac_fs_dir);
5525*4882a593Smuzhiyun #endif
5526*4882a593Smuzhiyun }
5527*4882a593Smuzhiyun 
5528*4882a593Smuzhiyun module_init(stmmac_init)
5529*4882a593Smuzhiyun module_exit(stmmac_exit)
5530*4882a593Smuzhiyun 
5531*4882a593Smuzhiyun MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
5532*4882a593Smuzhiyun MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
5533*4882a593Smuzhiyun MODULE_LICENSE("GPL");
5534