1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Texas Instruments Ethernet Switch Driver ethtool intf
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2019 Texas Instruments
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <linux/if_ether.h>
9*4882a593Smuzhiyun #include <linux/if_vlan.h>
10*4882a593Smuzhiyun #include <linux/kmemleak.h>
11*4882a593Smuzhiyun #include <linux/module.h>
12*4882a593Smuzhiyun #include <linux/netdevice.h>
13*4882a593Smuzhiyun #include <linux/net_tstamp.h>
14*4882a593Smuzhiyun #include <linux/phy.h>
15*4882a593Smuzhiyun #include <linux/pm_runtime.h>
16*4882a593Smuzhiyun #include <linux/skbuff.h>
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun #include "cpsw.h"
19*4882a593Smuzhiyun #include "cpts.h"
20*4882a593Smuzhiyun #include "cpsw_ale.h"
21*4882a593Smuzhiyun #include "cpsw_priv.h"
22*4882a593Smuzhiyun #include "davinci_cpdma.h"
23*4882a593Smuzhiyun
/* Mirror of the CPSW hardware statistics register block.
 *
 * Member offsets are used directly as register offsets: cpsw_get_ethtool_stats
 * does readl(cpsw->hw_stats + offsetof(struct cpsw_hw_stats, <member>)), so
 * the field order must match the hardware map exactly -- do not reorder.
 * All counters are 32-bit.
 */
struct cpsw_hw_stats {
	u32 rxgoodframes;
	u32 rxbroadcastframes;
	u32 rxmulticastframes;
	u32 rxpauseframes;
	u32 rxcrcerrors;
	u32 rxaligncodeerrors;
	u32 rxoversizedframes;
	u32 rxjabberframes;
	u32 rxundersizedframes;
	u32 rxfragments;
	u32 __pad_0[2];		/* gap in the hardware register map */
	u32 rxoctets;
	u32 txgoodframes;
	u32 txbroadcastframes;
	u32 txmulticastframes;
	u32 txpauseframes;
	u32 txdeferredframes;
	u32 txcollisionframes;
	u32 txsinglecollframes;
	u32 txmultcollframes;
	u32 txexcessivecollisions;
	u32 txlatecollisions;
	u32 txunderrun;
	u32 txcarriersenseerrors;
	u32 txoctets;
	u32 octetframes64;
	u32 octetframes65t127;
	u32 octetframes128t255;
	u32 octetframes256t511;
	u32 octetframes512t1023;
	u32 octetframes1024tup;
	u32 netoctets;
	u32 rxsofoverruns;
	u32 rxmofoverruns;
	u32 rxdmaoverruns;
};
61*4882a593Smuzhiyun
/* One ethtool statistics entry: its user-visible name plus where to read it. */
struct cpsw_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name reported via get_strings */
	int type;		/* CPSW_STATS or CPDMA_{RX,TX}_STATS source */
	int sizeof_stat;	/* size of the underlying counter field */
	int stat_offset;	/* byte offset within the source structure */
};
68*4882a593Smuzhiyun
/* Statistic sources: switch-level hardware counters vs. per-channel CPDMA
 * software counters (Rx or Tx direction).
 */
enum {
	CPSW_STATS,
	CPDMA_RX_STATS,
	CPDMA_TX_STATS,
};
74*4882a593Smuzhiyun
/* Initializer helpers for struct cpsw_stats entries: expand to the
 * {type, sizeof_stat, stat_offset} triple for a member of the corresponding
 * source structure (cpsw_hw_stats or cpdma_chan_stats).
 */
#define CPSW_STAT(m)		CPSW_STATS, \
				sizeof_field(struct cpsw_hw_stats, m), \
				offsetof(struct cpsw_hw_stats, m)
#define CPDMA_RX_STAT(m)	CPDMA_RX_STATS, \
				sizeof_field(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)
#define CPDMA_TX_STAT(m)	CPDMA_TX_STATS, \
				sizeof_field(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)
84*4882a593Smuzhiyun
/* Switch-level hardware counters exposed via ethtool -S.  Table order is the
 * order values are emitted in cpsw_get_ethtool_stats, so it must stay in sync
 * with the strings reported by cpsw_get_strings (same table, same order).
 */
static const struct cpsw_stats cpsw_gstrings_stats[] = {
	{ "Good Rx Frames", CPSW_STAT(rxgoodframes) },
	{ "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
	{ "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
	{ "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
	{ "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
	{ "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
	{ "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
	{ "Rx Jabbers", CPSW_STAT(rxjabberframes) },
	{ "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
	{ "Rx Fragments", CPSW_STAT(rxfragments) },
	{ "Rx Octets", CPSW_STAT(rxoctets) },
	{ "Good Tx Frames", CPSW_STAT(txgoodframes) },
	{ "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
	{ "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
	{ "Pause Tx Frames", CPSW_STAT(txpauseframes) },
	{ "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
	{ "Collisions", CPSW_STAT(txcollisionframes) },
	{ "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
	{ "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
	{ "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
	{ "Late Collisions", CPSW_STAT(txlatecollisions) },
	{ "Tx Underrun", CPSW_STAT(txunderrun) },
	{ "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
	{ "Tx Octets", CPSW_STAT(txoctets) },
	{ "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
	{ "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
	{ "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
	{ "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
	{ "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
	{ "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
	{ "Net Octets", CPSW_STAT(netoctets) },
	{ "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
	{ "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
	{ "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
};
121*4882a593Smuzhiyun
/* Per-channel CPDMA software counters.  All entries use CPDMA_RX_STAT, but
 * only the stat_offset into struct cpdma_chan_stats is consumed when reading,
 * so the same table serves both Rx and Tx channels (the direction prefix is
 * added in cpsw_add_ch_strings).
 */
static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
	{ "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
	{ "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
	{ "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
	{ "misqueued", CPDMA_RX_STAT(misqueued) },
	{ "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
	{ "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
	{ "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
	{ "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
	{ "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
	{ "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
	{ "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
	{ "requeue", CPDMA_RX_STAT(requeue) },
	{ "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
};
137*4882a593Smuzhiyun
/* Entry counts of the two stats tables above. */
#define CPSW_STATS_COMMON_LEN	ARRAY_SIZE(cpsw_gstrings_stats)
#define CPSW_STATS_CH_LEN	ARRAY_SIZE(cpsw_gstrings_ch_stats)
140*4882a593Smuzhiyun
cpsw_get_msglevel(struct net_device * ndev)141*4882a593Smuzhiyun u32 cpsw_get_msglevel(struct net_device *ndev)
142*4882a593Smuzhiyun {
143*4882a593Smuzhiyun struct cpsw_priv *priv = netdev_priv(ndev);
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun return priv->msg_enable;
146*4882a593Smuzhiyun }
147*4882a593Smuzhiyun
/* ethtool set_msglevel: store the netif message-enable bitmask. */
void cpsw_set_msglevel(struct net_device *ndev, u32 value)
{
	struct cpsw_priv *cpsw_priv = netdev_priv(ndev);

	cpsw_priv->msg_enable = value;
}
154*4882a593Smuzhiyun
cpsw_get_coalesce(struct net_device * ndev,struct ethtool_coalesce * coal)155*4882a593Smuzhiyun int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal)
156*4882a593Smuzhiyun {
157*4882a593Smuzhiyun struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun coal->rx_coalesce_usecs = cpsw->coal_intvl;
160*4882a593Smuzhiyun return 0;
161*4882a593Smuzhiyun }
162*4882a593Smuzhiyun
/* cpsw_set_coalesce - ethtool set_coalesce: program the interrupt pacer.
 *
 * Only rx_coalesce_usecs is honoured; the single interval drives both the
 * Rx and Tx pacers (the same count is written to rx_imax and tx_imax).
 * A value of 0 disables pacing altogether.  Always returns 0.
 */
int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 int_ctrl;
	u32 num_interrupts = 0;
	u32 prescale = 0;
	u32 addnl_dvdr = 1;
	u32 coal_intvl = 0;
	struct cpsw_common *cpsw = priv->cpsw;

	coal_intvl = coal->rx_coalesce_usecs;

	int_ctrl = readl(&cpsw->wr_regs->int_control);
	/* base prescale: bus clocks per 4us pacer pulse */
	prescale = cpsw->bus_freq_mhz * 4;

	if (!coal->rx_coalesce_usecs) {
		/* 0 usecs: disable pacing, leave imax registers untouched */
		int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
		goto update_return;
	}

	if (coal_intvl < CPSW_CMINTMIN_INTVL)
		coal_intvl = CPSW_CMINTMIN_INTVL;

	if (coal_intvl > CPSW_CMINTMAX_INTVL) {
		/* Interrupt pacer works with 4us Pulse, we can
		 * throttle further by dilating the 4us pulse.
		 */
		addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;

		if (addnl_dvdr > 1) {
			prescale *= addnl_dvdr;
			if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
				coal_intvl = (CPSW_CMINTMAX_INTVL
						* addnl_dvdr);
		} else {
			/* prescaler already maxed out: just clamp interval */
			addnl_dvdr = 1;
			coal_intvl = CPSW_CMINTMAX_INTVL;
		}
	}

	/* interrupts per millisecond for the chosen (possibly dilated) pulse */
	num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
	writel(num_interrupts, &cpsw->wr_regs->rx_imax);
	writel(num_interrupts, &cpsw->wr_regs->tx_imax);

	int_ctrl |= CPSW_INTPACEEN;
	int_ctrl &= (~CPSW_INTPRESCALE_MASK);
	int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);

update_return:
	writel(int_ctrl, &cpsw->wr_regs->int_control);

	cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
	cpsw->coal_intvl = coal_intvl;

	return 0;
}
219*4882a593Smuzhiyun
cpsw_get_sset_count(struct net_device * ndev,int sset)220*4882a593Smuzhiyun int cpsw_get_sset_count(struct net_device *ndev, int sset)
221*4882a593Smuzhiyun {
222*4882a593Smuzhiyun struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
223*4882a593Smuzhiyun
224*4882a593Smuzhiyun switch (sset) {
225*4882a593Smuzhiyun case ETH_SS_STATS:
226*4882a593Smuzhiyun return (CPSW_STATS_COMMON_LEN +
227*4882a593Smuzhiyun (cpsw->rx_ch_num + cpsw->tx_ch_num) *
228*4882a593Smuzhiyun CPSW_STATS_CH_LEN);
229*4882a593Smuzhiyun default:
230*4882a593Smuzhiyun return -EOPNOTSUPP;
231*4882a593Smuzhiyun }
232*4882a593Smuzhiyun }
233*4882a593Smuzhiyun
/* Append "<Rx|Tx> DMA chan <n>: <stat>" strings for ch_num channels to *p,
 * advancing *p by ETH_GSTRING_LEN per string.
 */
static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
{
	const char *dir = rx_dir ? "Rx" : "Tx";
	int ch, stat;

	for (ch = 0; ch < ch_num; ch++) {
		for (stat = 0; stat < CPSW_STATS_CH_LEN; stat++) {
			snprintf(*p, ETH_GSTRING_LEN,
				 "%s DMA chan %ld: %s", dir, (long)ch,
				 cpsw_gstrings_ch_stats[stat].stat_string);
			*p += ETH_GSTRING_LEN;
		}
	}
}
250*4882a593Smuzhiyun
/* ethtool get_strings: emit common stat names, then per-channel names in the
 * same order cpsw_get_ethtool_stats fills values (Rx channels, then Tx).
 */
void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	const struct cpsw_stats *stat;
	u8 *p = data;

	if (stringset != ETH_SS_STATS)
		return;

	for (stat = cpsw_gstrings_stats;
	     stat < cpsw_gstrings_stats + CPSW_STATS_COMMON_LEN; stat++) {
		memcpy(p, stat->stat_string, ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
	}

	cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
	cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
}
270*4882a593Smuzhiyun
/* ethtool get_ethtool_stats: hardware switch counters first, then the CPDMA
 * per-channel counters for every Rx channel followed by every Tx channel.
 */
void cpsw_get_ethtool_stats(struct net_device *ndev,
			    struct ethtool_stats *stats, u64 *data)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpdma_chan_stats ch_stats;
	int idx = 0;
	int i, ch;

	/* switch-level counters, read straight from the stats registers */
	for (i = 0; i < CPSW_STATS_COMMON_LEN; i++)
		data[idx++] = readl(cpsw->hw_stats +
				    cpsw_gstrings_stats[i].stat_offset);

	/* per-channel CPDMA software counters: Rx channels ... */
	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++) {
			const u8 *base = (const u8 *)&ch_stats;

			data[idx++] = *(const u32 *)(base +
					cpsw_gstrings_ch_stats[i].stat_offset);
		}
	}

	/* ... then Tx channels */
	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++) {
			const u8 *base = (const u8 *)&ch_stats;

			data[idx++] = *(const u32 *)(base +
					cpsw_gstrings_ch_stats[i].stat_offset);
		}
	}
}
302*4882a593Smuzhiyun
cpsw_get_pauseparam(struct net_device * ndev,struct ethtool_pauseparam * pause)303*4882a593Smuzhiyun void cpsw_get_pauseparam(struct net_device *ndev,
304*4882a593Smuzhiyun struct ethtool_pauseparam *pause)
305*4882a593Smuzhiyun {
306*4882a593Smuzhiyun struct cpsw_priv *priv = netdev_priv(ndev);
307*4882a593Smuzhiyun
308*4882a593Smuzhiyun pause->autoneg = AUTONEG_DISABLE;
309*4882a593Smuzhiyun pause->rx_pause = priv->rx_pause ? true : false;
310*4882a593Smuzhiyun pause->tx_pause = priv->tx_pause ? true : false;
311*4882a593Smuzhiyun }
312*4882a593Smuzhiyun
cpsw_get_wol(struct net_device * ndev,struct ethtool_wolinfo * wol)313*4882a593Smuzhiyun void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
314*4882a593Smuzhiyun {
315*4882a593Smuzhiyun struct cpsw_priv *priv = netdev_priv(ndev);
316*4882a593Smuzhiyun struct cpsw_common *cpsw = priv->cpsw;
317*4882a593Smuzhiyun int slave_no = cpsw_slave_index(cpsw, priv);
318*4882a593Smuzhiyun
319*4882a593Smuzhiyun wol->supported = 0;
320*4882a593Smuzhiyun wol->wolopts = 0;
321*4882a593Smuzhiyun
322*4882a593Smuzhiyun if (cpsw->slaves[slave_no].phy)
323*4882a593Smuzhiyun phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
324*4882a593Smuzhiyun }
325*4882a593Smuzhiyun
cpsw_set_wol(struct net_device * ndev,struct ethtool_wolinfo * wol)326*4882a593Smuzhiyun int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
327*4882a593Smuzhiyun {
328*4882a593Smuzhiyun struct cpsw_priv *priv = netdev_priv(ndev);
329*4882a593Smuzhiyun struct cpsw_common *cpsw = priv->cpsw;
330*4882a593Smuzhiyun int slave_no = cpsw_slave_index(cpsw, priv);
331*4882a593Smuzhiyun
332*4882a593Smuzhiyun if (cpsw->slaves[slave_no].phy)
333*4882a593Smuzhiyun return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
334*4882a593Smuzhiyun else
335*4882a593Smuzhiyun return -EOPNOTSUPP;
336*4882a593Smuzhiyun }
337*4882a593Smuzhiyun
cpsw_get_regs_len(struct net_device * ndev)338*4882a593Smuzhiyun int cpsw_get_regs_len(struct net_device *ndev)
339*4882a593Smuzhiyun {
340*4882a593Smuzhiyun struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
341*4882a593Smuzhiyun
342*4882a593Smuzhiyun return cpsw_ale_get_num_entries(cpsw->ale) *
343*4882a593Smuzhiyun ALE_ENTRY_WORDS * sizeof(u32);
344*4882a593Smuzhiyun }
345*4882a593Smuzhiyun
cpsw_get_regs(struct net_device * ndev,struct ethtool_regs * regs,void * p)346*4882a593Smuzhiyun void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p)
347*4882a593Smuzhiyun {
348*4882a593Smuzhiyun u32 *reg = p;
349*4882a593Smuzhiyun struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
350*4882a593Smuzhiyun
351*4882a593Smuzhiyun /* update CPSW IP version */
352*4882a593Smuzhiyun regs->version = cpsw->version;
353*4882a593Smuzhiyun
354*4882a593Smuzhiyun cpsw_ale_dump(cpsw->ale, reg);
355*4882a593Smuzhiyun }
356*4882a593Smuzhiyun
cpsw_ethtool_op_begin(struct net_device * ndev)357*4882a593Smuzhiyun int cpsw_ethtool_op_begin(struct net_device *ndev)
358*4882a593Smuzhiyun {
359*4882a593Smuzhiyun struct cpsw_priv *priv = netdev_priv(ndev);
360*4882a593Smuzhiyun struct cpsw_common *cpsw = priv->cpsw;
361*4882a593Smuzhiyun int ret;
362*4882a593Smuzhiyun
363*4882a593Smuzhiyun ret = pm_runtime_get_sync(cpsw->dev);
364*4882a593Smuzhiyun if (ret < 0) {
365*4882a593Smuzhiyun cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
366*4882a593Smuzhiyun pm_runtime_put_noidle(cpsw->dev);
367*4882a593Smuzhiyun }
368*4882a593Smuzhiyun
369*4882a593Smuzhiyun return ret;
370*4882a593Smuzhiyun }
371*4882a593Smuzhiyun
cpsw_ethtool_op_complete(struct net_device * ndev)372*4882a593Smuzhiyun void cpsw_ethtool_op_complete(struct net_device *ndev)
373*4882a593Smuzhiyun {
374*4882a593Smuzhiyun struct cpsw_priv *priv = netdev_priv(ndev);
375*4882a593Smuzhiyun int ret;
376*4882a593Smuzhiyun
377*4882a593Smuzhiyun ret = pm_runtime_put(priv->cpsw->dev);
378*4882a593Smuzhiyun if (ret < 0)
379*4882a593Smuzhiyun cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
380*4882a593Smuzhiyun }
381*4882a593Smuzhiyun
cpsw_get_channels(struct net_device * ndev,struct ethtool_channels * ch)382*4882a593Smuzhiyun void cpsw_get_channels(struct net_device *ndev, struct ethtool_channels *ch)
383*4882a593Smuzhiyun {
384*4882a593Smuzhiyun struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
385*4882a593Smuzhiyun
386*4882a593Smuzhiyun ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
387*4882a593Smuzhiyun ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
388*4882a593Smuzhiyun ch->max_combined = 0;
389*4882a593Smuzhiyun ch->max_other = 0;
390*4882a593Smuzhiyun ch->other_count = 0;
391*4882a593Smuzhiyun ch->rx_count = cpsw->rx_ch_num;
392*4882a593Smuzhiyun ch->tx_count = cpsw->tx_ch_num;
393*4882a593Smuzhiyun ch->combined_count = 0;
394*4882a593Smuzhiyun }
395*4882a593Smuzhiyun
cpsw_get_link_ksettings(struct net_device * ndev,struct ethtool_link_ksettings * ecmd)396*4882a593Smuzhiyun int cpsw_get_link_ksettings(struct net_device *ndev,
397*4882a593Smuzhiyun struct ethtool_link_ksettings *ecmd)
398*4882a593Smuzhiyun {
399*4882a593Smuzhiyun struct cpsw_priv *priv = netdev_priv(ndev);
400*4882a593Smuzhiyun struct cpsw_common *cpsw = priv->cpsw;
401*4882a593Smuzhiyun int slave_no = cpsw_slave_index(cpsw, priv);
402*4882a593Smuzhiyun
403*4882a593Smuzhiyun if (!cpsw->slaves[slave_no].phy)
404*4882a593Smuzhiyun return -EOPNOTSUPP;
405*4882a593Smuzhiyun
406*4882a593Smuzhiyun phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
407*4882a593Smuzhiyun return 0;
408*4882a593Smuzhiyun }
409*4882a593Smuzhiyun
cpsw_set_link_ksettings(struct net_device * ndev,const struct ethtool_link_ksettings * ecmd)410*4882a593Smuzhiyun int cpsw_set_link_ksettings(struct net_device *ndev,
411*4882a593Smuzhiyun const struct ethtool_link_ksettings *ecmd)
412*4882a593Smuzhiyun {
413*4882a593Smuzhiyun struct cpsw_priv *priv = netdev_priv(ndev);
414*4882a593Smuzhiyun struct cpsw_common *cpsw = priv->cpsw;
415*4882a593Smuzhiyun int slave_no = cpsw_slave_index(cpsw, priv);
416*4882a593Smuzhiyun
417*4882a593Smuzhiyun if (!cpsw->slaves[slave_no].phy)
418*4882a593Smuzhiyun return -EOPNOTSUPP;
419*4882a593Smuzhiyun
420*4882a593Smuzhiyun return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, ecmd);
421*4882a593Smuzhiyun }
422*4882a593Smuzhiyun
cpsw_get_eee(struct net_device * ndev,struct ethtool_eee * edata)423*4882a593Smuzhiyun int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
424*4882a593Smuzhiyun {
425*4882a593Smuzhiyun struct cpsw_priv *priv = netdev_priv(ndev);
426*4882a593Smuzhiyun struct cpsw_common *cpsw = priv->cpsw;
427*4882a593Smuzhiyun int slave_no = cpsw_slave_index(cpsw, priv);
428*4882a593Smuzhiyun
429*4882a593Smuzhiyun if (cpsw->slaves[slave_no].phy)
430*4882a593Smuzhiyun return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
431*4882a593Smuzhiyun else
432*4882a593Smuzhiyun return -EOPNOTSUPP;
433*4882a593Smuzhiyun }
434*4882a593Smuzhiyun
cpsw_set_eee(struct net_device * ndev,struct ethtool_eee * edata)435*4882a593Smuzhiyun int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
436*4882a593Smuzhiyun {
437*4882a593Smuzhiyun struct cpsw_priv *priv = netdev_priv(ndev);
438*4882a593Smuzhiyun struct cpsw_common *cpsw = priv->cpsw;
439*4882a593Smuzhiyun int slave_no = cpsw_slave_index(cpsw, priv);
440*4882a593Smuzhiyun
441*4882a593Smuzhiyun if (cpsw->slaves[slave_no].phy)
442*4882a593Smuzhiyun return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
443*4882a593Smuzhiyun else
444*4882a593Smuzhiyun return -EOPNOTSUPP;
445*4882a593Smuzhiyun }
446*4882a593Smuzhiyun
cpsw_nway_reset(struct net_device * ndev)447*4882a593Smuzhiyun int cpsw_nway_reset(struct net_device *ndev)
448*4882a593Smuzhiyun {
449*4882a593Smuzhiyun struct cpsw_priv *priv = netdev_priv(ndev);
450*4882a593Smuzhiyun struct cpsw_common *cpsw = priv->cpsw;
451*4882a593Smuzhiyun int slave_no = cpsw_slave_index(cpsw, priv);
452*4882a593Smuzhiyun
453*4882a593Smuzhiyun if (cpsw->slaves[slave_no].phy)
454*4882a593Smuzhiyun return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
455*4882a593Smuzhiyun else
456*4882a593Smuzhiyun return -EOPNOTSUPP;
457*4882a593Smuzhiyun }
458*4882a593Smuzhiyun
cpsw_suspend_data_pass(struct net_device * ndev)459*4882a593Smuzhiyun static void cpsw_suspend_data_pass(struct net_device *ndev)
460*4882a593Smuzhiyun {
461*4882a593Smuzhiyun struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
462*4882a593Smuzhiyun int i;
463*4882a593Smuzhiyun
464*4882a593Smuzhiyun /* Disable NAPI scheduling */
465*4882a593Smuzhiyun cpsw_intr_disable(cpsw);
466*4882a593Smuzhiyun
467*4882a593Smuzhiyun /* Stop all transmit queues for every network device.
468*4882a593Smuzhiyun */
469*4882a593Smuzhiyun for (i = 0; i < cpsw->data.slaves; i++) {
470*4882a593Smuzhiyun ndev = cpsw->slaves[i].ndev;
471*4882a593Smuzhiyun if (!(ndev && netif_running(ndev)))
472*4882a593Smuzhiyun continue;
473*4882a593Smuzhiyun
474*4882a593Smuzhiyun netif_tx_stop_all_queues(ndev);
475*4882a593Smuzhiyun
476*4882a593Smuzhiyun /* Barrier, so that stop_queue visible to other cpus */
477*4882a593Smuzhiyun smp_mb__after_atomic();
478*4882a593Smuzhiyun }
479*4882a593Smuzhiyun
480*4882a593Smuzhiyun /* Handle rest of tx packets and stop cpdma channels */
481*4882a593Smuzhiyun cpdma_ctlr_stop(cpsw->dma);
482*4882a593Smuzhiyun }
483*4882a593Smuzhiyun
cpsw_resume_data_pass(struct net_device * ndev)484*4882a593Smuzhiyun static int cpsw_resume_data_pass(struct net_device *ndev)
485*4882a593Smuzhiyun {
486*4882a593Smuzhiyun struct cpsw_priv *priv = netdev_priv(ndev);
487*4882a593Smuzhiyun struct cpsw_common *cpsw = priv->cpsw;
488*4882a593Smuzhiyun int i, ret;
489*4882a593Smuzhiyun
490*4882a593Smuzhiyun /* After this receive is started */
491*4882a593Smuzhiyun if (cpsw->usage_count) {
492*4882a593Smuzhiyun ret = cpsw_fill_rx_channels(priv);
493*4882a593Smuzhiyun if (ret)
494*4882a593Smuzhiyun return ret;
495*4882a593Smuzhiyun
496*4882a593Smuzhiyun cpdma_ctlr_start(cpsw->dma);
497*4882a593Smuzhiyun cpsw_intr_enable(cpsw);
498*4882a593Smuzhiyun }
499*4882a593Smuzhiyun
500*4882a593Smuzhiyun /* Resume transmit for every affected interface */
501*4882a593Smuzhiyun for (i = 0; i < cpsw->data.slaves; i++) {
502*4882a593Smuzhiyun ndev = cpsw->slaves[i].ndev;
503*4882a593Smuzhiyun if (ndev && netif_running(ndev))
504*4882a593Smuzhiyun netif_tx_start_all_queues(ndev);
505*4882a593Smuzhiyun }
506*4882a593Smuzhiyun
507*4882a593Smuzhiyun return 0;
508*4882a593Smuzhiyun }
509*4882a593Smuzhiyun
cpsw_check_ch_settings(struct cpsw_common * cpsw,struct ethtool_channels * ch)510*4882a593Smuzhiyun static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
511*4882a593Smuzhiyun struct ethtool_channels *ch)
512*4882a593Smuzhiyun {
513*4882a593Smuzhiyun if (cpsw->quirk_irq) {
514*4882a593Smuzhiyun dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed");
515*4882a593Smuzhiyun return -EOPNOTSUPP;
516*4882a593Smuzhiyun }
517*4882a593Smuzhiyun
518*4882a593Smuzhiyun if (ch->combined_count)
519*4882a593Smuzhiyun return -EINVAL;
520*4882a593Smuzhiyun
521*4882a593Smuzhiyun /* verify we have at least one channel in each direction */
522*4882a593Smuzhiyun if (!ch->rx_count || !ch->tx_count)
523*4882a593Smuzhiyun return -EINVAL;
524*4882a593Smuzhiyun
525*4882a593Smuzhiyun if (ch->rx_count > cpsw->data.channels ||
526*4882a593Smuzhiyun ch->tx_count > cpsw->data.channels)
527*4882a593Smuzhiyun return -EINVAL;
528*4882a593Smuzhiyun
529*4882a593Smuzhiyun return 0;
530*4882a593Smuzhiyun }
531*4882a593Smuzhiyun
/* cpsw_update_channels_res - grow or shrink one direction's CPDMA channels.
 * @priv:	per-netdev private data (logging and txq lookup)
 * @ch_num:	desired channel count for this direction
 * @rx:		non-zero for Rx channels, zero for Tx
 * @rx_handler:	completion handler installed on newly created Rx channels
 *		(Tx channels always get cpsw_tx_handler)
 *
 * Creates channels until the live count (cpsw->rx_ch_num / tx_ch_num,
 * updated in place) reaches @ch_num, or destroys the highest-numbered
 * channels until it drops to @ch_num.  Returns 0 on success or a negative
 * errno from channel create/destroy.
 */
static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx,
				    cpdma_handler_fn rx_handler)
{
	struct cpsw_common *cpsw = priv->cpsw;
	void (*handler)(void *, int, int);
	struct netdev_queue *queue;
	struct cpsw_vector *vec;
	int ret, *ch, vch;

	if (rx) {
		ch = &cpsw->rx_ch_num;
		vec = cpsw->rxv;
		handler = rx_handler;
	} else {
		ch = &cpsw->tx_ch_num;
		vec = cpsw->txv;
		handler = cpsw_tx_handler;
	}

	/* Create any missing channels. */
	while (*ch < ch_num) {
		/* Tx hardware channels are allocated from 7 downwards --
		 * NOTE(review): presumably for descending hardware priority;
		 * confirm against the CPDMA documentation.
		 */
		vch = rx ? *ch : 7 - *ch;
		vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
		queue = netdev_get_tx_queue(priv->ndev, *ch);
		queue->tx_maxrate = 0;	/* fresh queue starts unthrottled */

		if (IS_ERR(vec[*ch].ch))
			return PTR_ERR(vec[*ch].ch);

		if (!vec[*ch].ch)
			return -EINVAL;

		cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
		(*ch)++;
	}

	/* Destroy surplus channels, highest index first. */
	while (*ch > ch_num) {
		(*ch)--;

		ret = cpdma_chan_destroy(vec[*ch].ch);
		if (ret)
			return ret;

		cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
	}

	return 0;
}
581*4882a593Smuzhiyun
cpsw_fail(struct cpsw_common * cpsw)582*4882a593Smuzhiyun static void cpsw_fail(struct cpsw_common *cpsw)
583*4882a593Smuzhiyun {
584*4882a593Smuzhiyun struct net_device *ndev;
585*4882a593Smuzhiyun int i;
586*4882a593Smuzhiyun
587*4882a593Smuzhiyun for (i = 0; i < cpsw->data.slaves; i++) {
588*4882a593Smuzhiyun ndev = cpsw->slaves[i].ndev;
589*4882a593Smuzhiyun if (ndev)
590*4882a593Smuzhiyun dev_close(ndev);
591*4882a593Smuzhiyun }
592*4882a593Smuzhiyun }
593*4882a593Smuzhiyun
/* cpsw_set_channels_common - ethtool set_channels implementation shared by
 * the cpsw variants.
 * @ndev:	netdev the request arrived on
 * @chs:	requested Rx/Tx channel counts (validated here)
 * @rx_handler:	Rx completion handler for newly created channels
 *
 * Suspends the data path, resizes the Rx and Tx CPDMA channel sets, updates
 * every running slave's real queue counts, rebalances channel budgets and,
 * if the Rx count changed while the hardware is open, rebuilds the XDP Rx
 * queues.  On any failure all slave netdevs are closed (cpsw_fail) because
 * the channel state can no longer be trusted.  Returns 0 or a negative errno.
 */
int cpsw_set_channels_common(struct net_device *ndev,
			     struct ethtool_channels *chs,
			     cpdma_handler_fn rx_handler)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct net_device *sl_ndev;
	int i, new_pools, ret;

	ret = cpsw_check_ch_settings(cpsw, chs);
	if (ret < 0)
		return ret;

	cpsw_suspend_data_pass(ndev);

	/* XDP Rx queues must be rebuilt only when the Rx channel count
	 * changes and the hardware is actually in use.
	 */
	new_pools = (chs->rx_count != cpsw->rx_ch_num) && cpsw->usage_count;

	ret = cpsw_update_channels_res(priv, chs->rx_count, 1, rx_handler);
	if (ret)
		goto err;

	ret = cpsw_update_channels_res(priv, chs->tx_count, 0, rx_handler);
	if (ret)
		goto err;

	for (i = 0; i < cpsw->data.slaves; i++) {
		sl_ndev = cpsw->slaves[i].ndev;
		if (!(sl_ndev && netif_running(sl_ndev)))
			continue;

		/* Inform stack about new count of queues */
		ret = netif_set_real_num_tx_queues(sl_ndev, cpsw->tx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of tx queues\n");
			goto err;
		}

		ret = netif_set_real_num_rx_queues(sl_ndev, cpsw->rx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of rx queues\n");
			goto err;
		}
	}

	/* redistribute budgets across the new channel set */
	cpsw_split_res(cpsw);

	if (new_pools) {
		cpsw_destroy_xdp_rxqs(cpsw);
		ret = cpsw_create_xdp_rxqs(cpsw);
		if (ret)
			goto err;
	}

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;
err:
	dev_err(priv->dev, "cannot update channels number, closing device\n");
	cpsw_fail(cpsw);
	return ret;
}
655*4882a593Smuzhiyun
cpsw_get_ringparam(struct net_device * ndev,struct ethtool_ringparam * ering)656*4882a593Smuzhiyun void cpsw_get_ringparam(struct net_device *ndev,
657*4882a593Smuzhiyun struct ethtool_ringparam *ering)
658*4882a593Smuzhiyun {
659*4882a593Smuzhiyun struct cpsw_priv *priv = netdev_priv(ndev);
660*4882a593Smuzhiyun struct cpsw_common *cpsw = priv->cpsw;
661*4882a593Smuzhiyun
662*4882a593Smuzhiyun /* not supported */
663*4882a593Smuzhiyun ering->tx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
664*4882a593Smuzhiyun ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
665*4882a593Smuzhiyun ering->rx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
666*4882a593Smuzhiyun ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
667*4882a593Smuzhiyun }
668*4882a593Smuzhiyun
/* ethtool .set_ringparam handler.
 *
 * Only the RX descriptor count can be changed; ering->tx_pending is
 * deliberately ignored.  Resizing requires quiescing traffic, updating
 * the CPDMA pool split and, if the interface is in use, recreating the
 * XDP RX queues (presumably because they reference per-channel
 * resources sized for the old descriptor count - confirm against
 * cpsw_create_xdp_rxqs()).
 *
 * Returns 0 on success or -EINVAL for an out-of-range request.  On an
 * unrecoverable mid-sequence failure the old count is restored and the
 * device is closed via cpsw_fail().
 */
int cpsw_set_ringparam(struct net_device *ndev,
		       struct ethtool_ringparam *ering)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	int descs_num, ret;

	/* ignore ering->tx_pending - only rx_pending adjustment is supported */

	/* Keep at least CPSW_MAX_QUEUES descriptors in the shared pool
	 * for the TX side; mini/jumbo rings are not supported.
	 */
	if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
	    ering->rx_pending < CPSW_MAX_QUEUES ||
	    ering->rx_pending > (cpsw->descs_pool_size - CPSW_MAX_QUEUES))
		return -EINVAL;

	/* Remember the current count so the err path can restore it */
	descs_num = cpdma_get_num_rx_descs(cpsw->dma);
	if (ering->rx_pending == descs_num)
		return 0;

	cpsw_suspend_data_pass(ndev);

	ret = cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
	if (ret) {
		/* Resize failed: try to resume with the old settings and
		 * report the resize error; only fall into the fatal path
		 * if the resume itself also fails.
		 */
		if (cpsw_resume_data_pass(ndev))
			goto err;

		return ret;
	}

	if (cpsw->usage_count) {
		/* Interface is up: rebuild XDP RX queues for the new
		 * descriptor count.
		 */
		cpsw_destroy_xdp_rxqs(cpsw);
		ret = cpsw_create_xdp_rxqs(cpsw);
		if (ret)
			goto err;
	}

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;
err:
	/* Best-effort restore of the previous count; the device is being
	 * closed anyway, so the return value is intentionally ignored.
	 */
	cpdma_set_num_rx_descs(cpsw->dma, descs_num);
	dev_err(cpsw->dev, "cannot set ring params, closing device\n");
	cpsw_fail(cpsw);
	return ret;
}
712*4882a593Smuzhiyun
713*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_TI_CPTS)
cpsw_get_ts_info(struct net_device * ndev,struct ethtool_ts_info * info)714*4882a593Smuzhiyun int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
715*4882a593Smuzhiyun {
716*4882a593Smuzhiyun struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
717*4882a593Smuzhiyun
718*4882a593Smuzhiyun info->so_timestamping =
719*4882a593Smuzhiyun SOF_TIMESTAMPING_TX_HARDWARE |
720*4882a593Smuzhiyun SOF_TIMESTAMPING_TX_SOFTWARE |
721*4882a593Smuzhiyun SOF_TIMESTAMPING_RX_HARDWARE |
722*4882a593Smuzhiyun SOF_TIMESTAMPING_RX_SOFTWARE |
723*4882a593Smuzhiyun SOF_TIMESTAMPING_SOFTWARE |
724*4882a593Smuzhiyun SOF_TIMESTAMPING_RAW_HARDWARE;
725*4882a593Smuzhiyun info->phc_index = cpsw->cpts->phc_index;
726*4882a593Smuzhiyun info->tx_types =
727*4882a593Smuzhiyun (1 << HWTSTAMP_TX_OFF) |
728*4882a593Smuzhiyun (1 << HWTSTAMP_TX_ON);
729*4882a593Smuzhiyun info->rx_filters =
730*4882a593Smuzhiyun (1 << HWTSTAMP_FILTER_NONE) |
731*4882a593Smuzhiyun (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
732*4882a593Smuzhiyun return 0;
733*4882a593Smuzhiyun }
734*4882a593Smuzhiyun #else
cpsw_get_ts_info(struct net_device * ndev,struct ethtool_ts_info * info)735*4882a593Smuzhiyun int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
736*4882a593Smuzhiyun {
737*4882a593Smuzhiyun info->so_timestamping =
738*4882a593Smuzhiyun SOF_TIMESTAMPING_TX_SOFTWARE |
739*4882a593Smuzhiyun SOF_TIMESTAMPING_RX_SOFTWARE |
740*4882a593Smuzhiyun SOF_TIMESTAMPING_SOFTWARE;
741*4882a593Smuzhiyun info->phc_index = -1;
742*4882a593Smuzhiyun info->tx_types = 0;
743*4882a593Smuzhiyun info->rx_filters = 0;
744*4882a593Smuzhiyun return 0;
745*4882a593Smuzhiyun }
746*4882a593Smuzhiyun #endif
747