/* Copyright 2008-2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/string.h>
#include <linux/of_platform.h>
#include <linux/net_tstamp.h>
#include <linux/fsl/ptp_qoriq.h>

#include "dpaa_eth.h"
#include "mac.h"

static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
        "interrupts",
        "rx packets",
        "tx packets",
        "tx confirm",
        "tx S/G",
        "tx error",
        "rx error",
        "rx dropped",
        "tx dropped",
};

static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
        /* dpa rx errors */
        "rx dma error",
        "rx frame physical error",
        "rx frame size error",
        "rx header error",

        /* demultiplexing errors */
        "qman cg_tdrop",
        "qman wred",
        "qman error cond",
        "qman early window",
        "qman late window",
        "qman fq tdrop",
        "qman fq retired",
        "qman orp disabled",

        /* congestion related stats */
        "congestion time (ms)",
        "entered congestion",
        "congested (0/1)"
};

#define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu)
#define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global)

static int dpaa_get_link_ksettings(struct net_device *net_dev,
                                   struct ethtool_link_ksettings *cmd)
{
        if (!net_dev->phydev)
                return 0;

        phy_ethtool_ksettings_get(net_dev->phydev, cmd);

        return 0;
}

static int dpaa_set_link_ksettings(struct net_device *net_dev,
                                   const struct ethtool_link_ksettings *cmd)
{
        int err;

        if (!net_dev->phydev)
                return -ENODEV;

        err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
        if (err < 0)
                netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err);

        return err;
}

static void dpaa_get_drvinfo(struct net_device *net_dev,
                             struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, KBUILD_MODNAME,
                sizeof(drvinfo->driver));
        strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
                sizeof(drvinfo->bus_info));
}

static u32 dpaa_get_msglevel(struct net_device *net_dev)
{
        return ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable;
}

static void dpaa_set_msglevel(struct net_device *net_dev,
                              u32 msg_enable)
{
        ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable = msg_enable;
}

static int dpaa_nway_reset(struct net_device *net_dev)
{
        int err;

        if (!net_dev->phydev)
                return -ENODEV;

        err = 0;
        if (net_dev->phydev->autoneg) {
                err = phy_start_aneg(net_dev->phydev);
                if (err < 0)
                        netdev_err(net_dev, "phy_start_aneg() = %d\n",
                                   err);
        }

        return err;
}

static void dpaa_get_pauseparam(struct net_device *net_dev,
                                struct ethtool_pauseparam *epause)
{
        struct mac_device *mac_dev;
        struct dpaa_priv *priv;

        priv = netdev_priv(net_dev);
        mac_dev = priv->mac_dev;

        if (!net_dev->phydev)
                return;

        epause->autoneg = mac_dev->autoneg_pause;
        epause->rx_pause = mac_dev->rx_pause_active;
        epause->tx_pause = mac_dev->tx_pause_active;
}

static int dpaa_set_pauseparam(struct net_device *net_dev,
                               struct ethtool_pauseparam *epause)
{
        struct mac_device *mac_dev;
        struct phy_device *phydev;
        bool rx_pause, tx_pause;
        struct dpaa_priv *priv;
        int err;

        priv = netdev_priv(net_dev);
        mac_dev = priv->mac_dev;

        phydev = net_dev->phydev;
        if (!phydev) {
                netdev_err(net_dev, "phy device not initialized\n");
                return -ENODEV;
        }

        if (!phy_validate_pause(phydev, epause))
                return -EINVAL;

        /* The MAC should know how to handle PAUSE frame autonegotiation before
         * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
         * settings.
         */
        mac_dev->autoneg_pause = !!epause->autoneg;
        mac_dev->rx_pause_req = !!epause->rx_pause;
        mac_dev->tx_pause_req = !!epause->tx_pause;

        /* Determine the sym/asym advertised PAUSE capabilities from the desired
         * rx/tx pause settings.
         */

        phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);

        fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
        err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
        if (err < 0)
                netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);

        return err;
}

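/* Per-CPU counters are exported once per online CPU plus a [TOTAL]
 * column, with one extra row for the per-CPU buffer pool count; the
 * global counters are appended at the end of the array.
 */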
static int dpaa_get_sset_count(struct net_device *net_dev, int type)
{
        unsigned int total_stats, num_stats;

        num_stats = num_online_cpus() + 1;
        total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + 1) +
                      DPAA_STATS_GLOBAL_LEN;

        switch (type) {
        case ETH_SS_STATS:
                return total_stats;
        default:
                return -EOPNOTSUPP;
        }
}

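/* Fill in one CPU's column of the statistics table. The data array
 * holds (num_cpus + 1) values per counter: one per CPU, followed by a
 * running [TOTAL] value accumulated across all CPUs.
 */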
static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
                       int crr_cpu, u64 bp_count, u64 *data)
{
        int num_values = num_cpus + 1;
        int crr = 0;

        /* update current CPU's stats and also add them to the total values */
        data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
        data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;

        data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
        data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;

        data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
        data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;

        data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
        data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;

        data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
        data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;

        data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
        data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;

        data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
        data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;

        data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_dropped;
        data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_dropped;

        data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_dropped;
        data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_dropped;

        data[crr * num_values + crr_cpu] = bp_count;
        data[crr++ * num_values + num_cpus] += bp_count;
}

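/* Export the counters in the order advertised by dpaa_get_strings():
 * the per-CPU/[TOTAL] table first, then the aggregated Rx error and
 * QMan ERN counters, and finally the congestion group statistics.
 */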
static void dpaa_get_ethtool_stats(struct net_device *net_dev,
                                   struct ethtool_stats *stats, u64 *data)
{
        struct dpaa_percpu_priv *percpu_priv;
        struct dpaa_rx_errors rx_errors;
        unsigned int num_cpus, offset;
        u64 bp_count, cg_time, cg_num;
        struct dpaa_ern_cnt ern_cnt;
        struct dpaa_bp *dpaa_bp;
        struct dpaa_priv *priv;
        int total_stats, i;
        bool cg_status;

        total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
        priv = netdev_priv(net_dev);
        num_cpus = num_online_cpus();

        memset(&bp_count, 0, sizeof(bp_count));
        memset(&rx_errors, 0, sizeof(struct dpaa_rx_errors));
        memset(&ern_cnt, 0, sizeof(struct dpaa_ern_cnt));
        memset(data, 0, total_stats * sizeof(u64));

        for_each_online_cpu(i) {
                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
                dpaa_bp = priv->dpaa_bp;
                if (!dpaa_bp->percpu_count)
                        continue;
                bp_count = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
                rx_errors.dme += percpu_priv->rx_errors.dme;
                rx_errors.fpe += percpu_priv->rx_errors.fpe;
                rx_errors.fse += percpu_priv->rx_errors.fse;
                rx_errors.phe += percpu_priv->rx_errors.phe;

                ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
                ern_cnt.wred += percpu_priv->ern_cnt.wred;
                ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
                ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
                ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
                ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
                ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
                ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;

                copy_stats(percpu_priv, num_cpus, i, bp_count, data);
        }

        offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + 1);
        memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));

        offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
        memcpy(data + offset, &ern_cnt, sizeof(struct dpaa_ern_cnt));

        /* gather congestion related counters */
        cg_num = 0;
        cg_status = false;
        cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
        if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) {
                cg_num = priv->cgr_data.cgr_congested_count;

                /* reset congestion stats (like the QMan API does) */
                priv->cgr_data.congested_jiffies = 0;
                priv->cgr_data.cgr_congested_count = 0;
        }

        offset += sizeof(struct dpaa_ern_cnt) / sizeof(u64);
        data[offset++] = cg_time;
        data[offset++] = cg_num;
        data[offset++] = cg_status;
}

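/* Build the statistic names; the layout must stay in sync with
 * dpaa_get_sset_count() and dpaa_get_ethtool_stats().
 */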
static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
                             u8 *data)
{
        unsigned int i, j, num_cpus, size;
        char string_cpu[ETH_GSTRING_LEN];
        u8 *strings;

        memset(string_cpu, 0, sizeof(string_cpu));
        strings = data;
        num_cpus = num_online_cpus();
        size = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;

        for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
                for (j = 0; j < num_cpus; j++) {
                        snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
                                 dpaa_stats_percpu[i], j);
                        memcpy(strings, string_cpu, ETH_GSTRING_LEN);
                        strings += ETH_GSTRING_LEN;
                }
                snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
                         dpaa_stats_percpu[i]);
                memcpy(strings, string_cpu, ETH_GSTRING_LEN);
                strings += ETH_GSTRING_LEN;
        }
        for (j = 0; j < num_cpus; j++) {
                snprintf(string_cpu, ETH_GSTRING_LEN,
                         "bpool [CPU %d]", j);
                memcpy(strings, string_cpu, ETH_GSTRING_LEN);
                strings += ETH_GSTRING_LEN;
        }
        snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]");
        memcpy(strings, string_cpu, ETH_GSTRING_LEN);
        strings += ETH_GSTRING_LEN;

        memcpy(strings, dpaa_stats_global, size);
}

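/* Report which header fields feed the Rx hash: with the FMan KeyGen
 * scheme in use, TCP/UDP flows hash on the IP addresses and L4 ports,
 * while the other supported flow types hash on the IP addresses only.
 */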
static int dpaa_get_hash_opts(struct net_device *dev,
                              struct ethtool_rxnfc *cmd)
{
        struct dpaa_priv *priv = netdev_priv(dev);

        cmd->data = 0;

        switch (cmd->flow_type) {
        case TCP_V4_FLOW:
        case TCP_V6_FLOW:
        case UDP_V4_FLOW:
        case UDP_V6_FLOW:
                if (priv->keygen_in_use)
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                fallthrough;
        case IPV4_FLOW:
        case IPV6_FLOW:
        case SCTP_V4_FLOW:
        case SCTP_V6_FLOW:
        case AH_ESP_V4_FLOW:
        case AH_ESP_V6_FLOW:
        case AH_V4_FLOW:
        case AH_V6_FLOW:
        case ESP_V4_FLOW:
        case ESP_V6_FLOW:
                if (priv->keygen_in_use)
                        cmd->data |= RXH_IP_SRC | RXH_IP_DST;
                break;
        default:
                cmd->data = 0;
                break;
        }

        return 0;
}

static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
                          u32 *unused)
{
        int ret = -EOPNOTSUPP;

        switch (cmd->cmd) {
        case ETHTOOL_GRXFH:
                ret = dpaa_get_hash_opts(dev, cmd);
                break;
        default:
                break;
        }

        return ret;
}

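/* Enable or disable KeyGen hashing on the Rx port and record the
 * current state so it can be reported back by dpaa_get_hash_opts().
 */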
static void dpaa_set_hash(struct net_device *net_dev, bool enable)
{
        struct mac_device *mac_dev;
        struct fman_port *rxport;
        struct dpaa_priv *priv;

        priv = netdev_priv(net_dev);
        mac_dev = priv->mac_dev;
        rxport = mac_dev->port[0];

        fman_port_use_kg_hash(rxport, enable);
        priv->keygen_in_use = enable;
}

static int dpaa_set_hash_opts(struct net_device *dev,
                              struct ethtool_rxnfc *nfc)
{
        int ret = -EINVAL;

        /* we support hashing on IPv4/v6 src/dest IP and L4 src/dest port */
        if (nfc->data &
            ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
                return -EINVAL;

        switch (nfc->flow_type) {
        case TCP_V4_FLOW:
        case TCP_V6_FLOW:
        case UDP_V4_FLOW:
        case UDP_V6_FLOW:
        case IPV4_FLOW:
        case IPV6_FLOW:
        case SCTP_V4_FLOW:
        case SCTP_V6_FLOW:
        case AH_ESP_V4_FLOW:
        case AH_ESP_V6_FLOW:
        case AH_V4_FLOW:
        case AH_V6_FLOW:
        case ESP_V4_FLOW:
        case ESP_V6_FLOW:
                dpaa_set_hash(dev, !!nfc->data);
                ret = 0;
                break;
        default:
                break;
        }

        return ret;
}

static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
        int ret = -EOPNOTSUPP;

        switch (cmd->cmd) {
        case ETHTOOL_SRXFH:
                ret = dpaa_set_hash_opts(dev, cmd);
                break;
        default:
                break;
        }

        return ret;
}

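/* Locate the ptp_qoriq clock by walking the device tree: the MAC
 * node's FMan parent carries a "ptimer-handle" phandle to the timer
 * node. If any step of the lookup fails, phc_index is left at -1.
 */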
static int dpaa_get_ts_info(struct net_device *net_dev,
                            struct ethtool_ts_info *info)
{
        struct device *dev = net_dev->dev.parent;
        struct device_node *mac_node = dev->of_node;
        struct device_node *fman_node = NULL, *ptp_node = NULL;
        struct platform_device *ptp_dev = NULL;
        struct ptp_qoriq *ptp = NULL;

        info->phc_index = -1;

        fman_node = of_get_parent(mac_node);
        if (fman_node) {
                ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
                of_node_put(fman_node);
        }

        if (ptp_node) {
                ptp_dev = of_find_device_by_node(ptp_node);
                of_node_put(ptp_node);
        }

        if (ptp_dev)
                ptp = platform_get_drvdata(ptp_dev);

        if (ptp)
                info->phc_index = ptp->phc_index;

        info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
                                SOF_TIMESTAMPING_RX_HARDWARE |
                                SOF_TIMESTAMPING_RAW_HARDWARE;
        info->tx_types = (1 << HWTSTAMP_TX_OFF) |
                         (1 << HWTSTAMP_TX_ON);
        info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
                           (1 << HWTSTAMP_FILTER_ALL);

        return 0;
}

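/* Interrupt coalescing is configured per QMan portal; dpaa_set_coalesce()
 * programs all affine portals with the same values, so the current
 * CPU's portal is representative.
 */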
static int dpaa_get_coalesce(struct net_device *dev,
                             struct ethtool_coalesce *c)
{
        struct qman_portal *portal;
        u32 period;
        u8 thresh;

        portal = qman_get_affine_portal(smp_processor_id());
        qman_portal_get_iperiod(portal, &period);
        qman_dqrr_get_ithresh(portal, &thresh);

        c->rx_coalesce_usecs = period;
        c->rx_max_coalesced_frames = thresh;

        return 0;
}

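/* Apply the new coalescing parameters to every affine, online portal.
 * If programming any portal fails, revert the portals that were
 * already updated to their previous settings.
 */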
static int dpaa_set_coalesce(struct net_device *dev,
                             struct ethtool_coalesce *c)
{
        const cpumask_t *cpus = qman_affine_cpus();
        bool needs_revert[NR_CPUS] = {false};
        struct qman_portal *portal;
        u32 period, prev_period;
        u8 thresh, prev_thresh;
        int cpu, res;

        period = c->rx_coalesce_usecs;
        thresh = c->rx_max_coalesced_frames;

        /* save previous values */
        portal = qman_get_affine_portal(smp_processor_id());
        qman_portal_get_iperiod(portal, &prev_period);
        qman_dqrr_get_ithresh(portal, &prev_thresh);

        /* set new values */
        for_each_cpu_and(cpu, cpus, cpu_online_mask) {
                portal = qman_get_affine_portal(cpu);
                res = qman_portal_set_iperiod(portal, period);
                if (res)
                        goto revert_values;
                res = qman_dqrr_set_ithresh(portal, thresh);
                if (res) {
                        qman_portal_set_iperiod(portal, prev_period);
                        goto revert_values;
                }
                needs_revert[cpu] = true;
        }

        return 0;

revert_values:
        /* restore previous values */
        for_each_cpu_and(cpu, cpus, cpu_online_mask) {
                if (!needs_revert[cpu])
                        continue;
                portal = qman_get_affine_portal(cpu);
                /* restoring previous values cannot fail, ignore return value */
                qman_portal_set_iperiod(portal, prev_period);
                qman_dqrr_set_ithresh(portal, prev_thresh);
        }

        return res;
}

const struct ethtool_ops dpaa_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
                                     ETHTOOL_COALESCE_RX_MAX_FRAMES,
        .get_drvinfo = dpaa_get_drvinfo,
        .get_msglevel = dpaa_get_msglevel,
        .set_msglevel = dpaa_set_msglevel,
        .nway_reset = dpaa_nway_reset,
        .get_pauseparam = dpaa_get_pauseparam,
        .set_pauseparam = dpaa_set_pauseparam,
        .get_link = ethtool_op_get_link,
        .get_sset_count = dpaa_get_sset_count,
        .get_ethtool_stats = dpaa_get_ethtool_stats,
        .get_strings = dpaa_get_strings,
        .get_link_ksettings = dpaa_get_link_ksettings,
        .set_link_ksettings = dpaa_set_link_ksettings,
        .get_rxnfc = dpaa_get_rxnfc,
        .set_rxnfc = dpaa_set_rxnfc,
        .get_ts_info = dpaa_get_ts_info,
        .get_coalesce = dpaa_get_coalesce,
        .set_coalesce = dpaa_set_coalesce,
};