// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include <linux/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#include "../nfpcore/nfp_cpp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"
#include "main.h"

static bool nfp_abm_qdisc_is_red(struct nfp_qdisc *qdisc)
{
	return qdisc->type == NFP_QDISC_RED || qdisc->type == NFP_QDISC_GRED;
}

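/* Slots in a tracked qdisc's children array hold either pointers to other
 * tracked qdiscs or the NFP_QDISC_UNTRACKED sentinel for qdiscs we don't
 * follow.
 */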
static bool nfp_abm_qdisc_child_valid(struct nfp_qdisc *qdisc, unsigned int id)
{
	return qdisc->children[id] &&
	       qdisc->children[id] != NFP_QDISC_UNTRACKED;
}

static void *nfp_abm_qdisc_tree_deref_slot(void __rcu **slot)
{
	return rtnl_dereference(*slot);
}

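/* Accumulate a child's counters into its parent's; used when summing
 * per-band RED/GRED stats up into the MQ level.
 */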
32*4882a593Smuzhiyun static void
nfp_abm_stats_propagate(struct nfp_alink_stats * parent,struct nfp_alink_stats * child)33*4882a593Smuzhiyun nfp_abm_stats_propagate(struct nfp_alink_stats *parent,
34*4882a593Smuzhiyun 			struct nfp_alink_stats *child)
35*4882a593Smuzhiyun {
36*4882a593Smuzhiyun 	parent->tx_pkts		+= child->tx_pkts;
37*4882a593Smuzhiyun 	parent->tx_bytes	+= child->tx_bytes;
38*4882a593Smuzhiyun 	parent->backlog_pkts	+= child->backlog_pkts;
39*4882a593Smuzhiyun 	parent->backlog_bytes	+= child->backlog_bytes;
40*4882a593Smuzhiyun 	parent->overlimits	+= child->overlimits;
41*4882a593Smuzhiyun 	parent->drops		+= child->drops;
42*4882a593Smuzhiyun }
43*4882a593Smuzhiyun 
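/* Refresh the cached per-band stats and xstats of an offloaded RED/GRED
 * qdisc from the device; read failures are logged but not propagated.
 */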
static void
nfp_abm_stats_update_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
			 unsigned int queue)
{
	struct nfp_cpp *cpp = alink->abm->app->cpp;
	unsigned int i;
	int err;

	if (!qdisc->offloaded)
		return;

	for (i = 0; i < qdisc->red.num_bands; i++) {
		err = nfp_abm_ctrl_read_q_stats(alink, i, queue,
						&qdisc->red.band[i].stats);
		if (err)
			nfp_err(cpp, "RED stats (%d, %d) read failed with error %d\n",
				i, queue, err);

		err = nfp_abm_ctrl_read_q_xstats(alink, i, queue,
						 &qdisc->red.band[i].xstats);
		if (err)
			nfp_err(cpp, "RED xstats (%d, %d) read failed with error %d\n",
				i, queue, err);
	}
}

static void
nfp_abm_stats_update_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
{
	unsigned int i;

	if (qdisc->type != NFP_QDISC_MQ)
		return;

	for (i = 0; i < alink->total_queues; i++)
		if (nfp_abm_qdisc_child_valid(qdisc, i))
			nfp_abm_stats_update_red(alink, qdisc->children[i], i);
}

static void __nfp_abm_stats_update(struct nfp_abm_link *alink, u64 time_now)
{
	alink->last_stats_update = time_now;
	if (alink->root_qdisc)
		nfp_abm_stats_update_mq(alink, alink->root_qdisc);
}

static void nfp_abm_stats_update(struct nfp_abm_link *alink)
{
	u64 now;

	/* Limit the frequency of updates - stats of non-leaf qdiscs are a sum
	 * of all their leaves, so we would read the same stat multiple times
	 * for every dump.
	 */
	now = ktime_get();
	if (now - alink->last_stats_update < NFP_ABM_STATS_REFRESH_IVAL)
		return;

	__nfp_abm_stats_update(alink, now);
}

static void
nfp_abm_qdisc_unlink_children(struct nfp_qdisc *qdisc,
			      unsigned int start, unsigned int end)
{
	unsigned int i;

	for (i = start; i < end; i++)
		if (nfp_abm_qdisc_child_valid(qdisc, i)) {
			qdisc->children[i]->use_cnt--;
			qdisc->children[i] = NULL;
		}
}

static void
nfp_abm_qdisc_offload_stop(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
{
	unsigned int i;

	/* Don't complain when qdisc is getting unlinked */
	if (qdisc->use_cnt)
		nfp_warn(alink->abm->app->cpp, "Offload of '%08x' stopped\n",
			 qdisc->handle);

	if (!nfp_abm_qdisc_is_red(qdisc))
		return;

	for (i = 0; i < qdisc->red.num_bands; i++) {
		qdisc->red.band[i].stats.backlog_pkts = 0;
		qdisc->red.band[i].stats.backlog_bytes = 0;
	}
}

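/* Snapshot the device counters into prev_stats/prev_xstats so that later
 * dumps report only the deltas accumulated while offloaded.
 */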
static int
__nfp_abm_stats_init(struct nfp_abm_link *alink, unsigned int band,
		     unsigned int queue, struct nfp_alink_stats *prev_stats,
		     struct nfp_alink_xstats *prev_xstats)
{
	u64 backlog_pkts, backlog_bytes;
	int err;

	/* Don't touch the backlog - it can only be reset after it has
	 * been reported back to the tc qdisc stats.
	 */
	backlog_pkts = prev_stats->backlog_pkts;
	backlog_bytes = prev_stats->backlog_bytes;

	err = nfp_abm_ctrl_read_q_stats(alink, band, queue, prev_stats);
	if (err) {
		nfp_err(alink->abm->app->cpp,
			"RED stats init (%d, %d) failed with error %d\n",
			band, queue, err);
		return err;
	}

	err = nfp_abm_ctrl_read_q_xstats(alink, band, queue, prev_xstats);
	if (err) {
		nfp_err(alink->abm->app->cpp,
			"RED xstats init (%d, %d) failed with error %d\n",
			band, queue, err);
		return err;
	}

	prev_stats->backlog_pkts = backlog_pkts;
	prev_stats->backlog_bytes = backlog_bytes;
	return 0;
}

static int
nfp_abm_stats_init(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
		   unsigned int queue)
{
	unsigned int i;
	int err;

	for (i = 0; i < qdisc->red.num_bands; i++) {
		err = __nfp_abm_stats_init(alink, i, queue,
					   &qdisc->red.band[i].prev_stats,
					   &qdisc->red.band[i].prev_xstats);
		if (err)
			return err;
	}

	return 0;
}

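/* Decide whether a RED/GRED qdisc is eligible for offload and, if so,
 * program the per-band thresholds and drop/mark actions for its queue.
 */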
static void
nfp_abm_offload_compile_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
			    unsigned int queue)
{
	bool good_red, good_gred;
	unsigned int i;

	good_red = qdisc->type == NFP_QDISC_RED &&
		   qdisc->params_ok &&
		   qdisc->use_cnt == 1 &&
		   !alink->has_prio &&
		   !qdisc->children[0];
	good_gred = qdisc->type == NFP_QDISC_GRED &&
		    qdisc->params_ok &&
		    qdisc->use_cnt == 1;
	qdisc->offload_mark = good_red || good_gred;

	/* If we are starting offload, init prev_stats */
	if (qdisc->offload_mark && !qdisc->offloaded)
		if (nfp_abm_stats_init(alink, qdisc, queue))
			qdisc->offload_mark = false;

	if (!qdisc->offload_mark)
		return;

	for (i = 0; i < alink->abm->num_bands; i++) {
		enum nfp_abm_q_action act;

		nfp_abm_ctrl_set_q_lvl(alink, i, queue,
				       qdisc->red.band[i].threshold);
		act = qdisc->red.band[i].ecn ?
			NFP_ABM_ACT_MARK_DROP : NFP_ABM_ACT_DROP;
		nfp_abm_ctrl_set_q_act(alink, i, queue, act);
	}
}

static void
nfp_abm_offload_compile_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
{
	unsigned int i;

	qdisc->offload_mark = qdisc->type == NFP_QDISC_MQ;
	if (!qdisc->offload_mark)
		return;

	for (i = 0; i < alink->total_queues; i++) {
		struct nfp_qdisc *child = qdisc->children[i];

		if (!nfp_abm_qdisc_child_valid(qdisc, i))
			continue;

		nfp_abm_offload_compile_red(alink, child, i);
	}
}

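/* Recompute the offload state of the whole qdisc hierarchy: clear all
 * offload marks, walk the tree from the root MQ marking offloadable
 * qdiscs, stop offload of qdiscs which lost their mark, and reset any
 * thresholds left unconfigured to "infinity" (no marking/dropping).
 */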
void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink)
{
	struct nfp_abm *abm = alink->abm;
	struct radix_tree_iter iter;
	struct nfp_qdisc *qdisc;
	void __rcu **slot;
	size_t i;

	/* Mark all thresholds as unconfigured */
	for (i = 0; i < abm->num_bands; i++)
		__bitmap_set(abm->threshold_undef,
			     i * NFP_NET_MAX_RX_RINGS + alink->queue_base,
			     alink->total_queues);

	/* Clear offload marks */
	radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
		qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
		qdisc->offload_mark = false;
	}

	if (alink->root_qdisc)
		nfp_abm_offload_compile_mq(alink, alink->root_qdisc);

	/* Refresh offload status */
	radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
		qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
		if (!qdisc->offload_mark && qdisc->offloaded)
			nfp_abm_qdisc_offload_stop(alink, qdisc);
		qdisc->offloaded = qdisc->offload_mark;
	}

	/* Reset the unconfigured thresholds */
	for (i = 0; i < abm->num_thresholds; i++)
		if (test_bit(i, abm->threshold_undef))
			__nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);

	__nfp_abm_stats_update(alink, ktime_get());
}

static void
nfp_abm_qdisc_clear_mq(struct net_device *netdev, struct nfp_abm_link *alink,
		       struct nfp_qdisc *qdisc)
{
	struct radix_tree_iter iter;
	unsigned int mq_refs = 0;
	void __rcu **slot;

	if (!qdisc->use_cnt)
		return;
	/* MQ doesn't notify well on destruction; we need special handling of
	 * MQ's children.
	 */
	if (qdisc->type == NFP_QDISC_MQ &&
	    qdisc == alink->root_qdisc &&
	    netdev->reg_state == NETREG_UNREGISTERING)
		return;

	/* Count refs held by MQ instances and clear pointers */
	radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
		struct nfp_qdisc *mq = nfp_abm_qdisc_tree_deref_slot(slot);
		unsigned int i;

		if (mq->type != NFP_QDISC_MQ || mq->netdev != netdev)
			continue;
		for (i = 0; i < mq->num_children; i++)
			if (mq->children[i] == qdisc) {
				mq->children[i] = NULL;
				mq_refs++;
			}
	}

	WARN(qdisc->use_cnt != mq_refs, "non-zero qdisc use count: %d (- %d)\n",
	     qdisc->use_cnt, mq_refs);
}

static void
nfp_abm_qdisc_free(struct net_device *netdev, struct nfp_abm_link *alink,
		   struct nfp_qdisc *qdisc)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);

	if (!qdisc)
		return;
	nfp_abm_qdisc_clear_mq(netdev, alink, qdisc);
	WARN_ON(radix_tree_delete(&alink->qdiscs,
				  TC_H_MAJ(qdisc->handle)) != qdisc);

	kfree(qdisc->children);
	kfree(qdisc);

	port->tc_offload_cnt--;
}

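/* Allocate a tracked qdisc and insert it into the per-link radix tree,
 * which is keyed by the major part of the TC handle.
 */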
static struct nfp_qdisc *
nfp_abm_qdisc_alloc(struct net_device *netdev, struct nfp_abm_link *alink,
		    enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
		    unsigned int children)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_qdisc *qdisc;
	int err;

	qdisc = kzalloc(sizeof(*qdisc), GFP_KERNEL);
	if (!qdisc)
		return NULL;

	if (children) {
		qdisc->children = kcalloc(children, sizeof(void *), GFP_KERNEL);
		if (!qdisc->children)
			goto err_free_qdisc;
	}

	qdisc->netdev = netdev;
	qdisc->type = type;
	qdisc->parent_handle = parent_handle;
	qdisc->handle = handle;
	qdisc->num_children = children;

	err = radix_tree_insert(&alink->qdiscs, TC_H_MAJ(qdisc->handle), qdisc);
	if (err) {
		nfp_err(alink->abm->app->cpp,
			"Qdisc insertion into radix tree failed: %d\n", err);
		goto err_free_child_tbl;
	}

	port->tc_offload_cnt++;
	return qdisc;

err_free_child_tbl:
	kfree(qdisc->children);
err_free_qdisc:
	kfree(qdisc);
	return NULL;
}

static struct nfp_qdisc *
nfp_abm_qdisc_find(struct nfp_abm_link *alink, u32 handle)
{
	return radix_tree_lookup(&alink->qdiscs, TC_H_MAJ(handle));
}

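/* Find or create a tracked qdisc. Returns 1 if an existing qdisc was
 * found, 0 if a new one was allocated, or a negative error code.
 */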
static int
nfp_abm_qdisc_replace(struct net_device *netdev, struct nfp_abm_link *alink,
		      enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
		      unsigned int children, struct nfp_qdisc **qdisc)
{
	*qdisc = nfp_abm_qdisc_find(alink, handle);
	if (*qdisc) {
		if (WARN_ON((*qdisc)->type != type))
			return -EINVAL;
		return 1;
	}

	*qdisc = nfp_abm_qdisc_alloc(netdev, alink, type, parent_handle, handle,
				     children);
	return *qdisc ? 0 : -ENOMEM;
}

static void
nfp_abm_qdisc_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
		      u32 handle)
{
	struct nfp_qdisc *qdisc;

	qdisc = nfp_abm_qdisc_find(alink, handle);
	if (!qdisc)
		return;

	/* We don't get TC_SETUP_ROOT_QDISC w/ MQ when netdev is unregistered */
	if (alink->root_qdisc == qdisc)
		qdisc->use_cnt--;

	nfp_abm_qdisc_unlink_children(qdisc, 0, qdisc->num_children);
	nfp_abm_qdisc_free(netdev, alink, qdisc);

	if (alink->root_qdisc == qdisc) {
		alink->root_qdisc = NULL;
		/* Only a root change matters; other changes are acted upon on
		 * the graft notification.
		 */
		nfp_abm_qdisc_offload_update(alink);
	}
}

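/* Link the qdisc identified by child_handle under slot @id of its parent,
 * adjusting use counts; children we don't track are recorded with the
 * NFP_QDISC_UNTRACKED sentinel.
 */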
static int
nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
		    unsigned int id)
{
	struct nfp_qdisc *parent, *child;

	parent = nfp_abm_qdisc_find(alink, handle);
	if (!parent)
		return 0;

	if (WARN(id >= parent->num_children,
		 "graft child out of bound %d >= %d\n",
		 id, parent->num_children))
		return -EINVAL;

	nfp_abm_qdisc_unlink_children(parent, id, id + 1);

	child = nfp_abm_qdisc_find(alink, child_handle);
	if (child)
		child->use_cnt++;
	else
		child = NFP_QDISC_UNTRACKED;
	parent->children[id] = child;

	nfp_abm_qdisc_offload_update(alink);

	return 0;
}

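/* Convert absolute device counters into deltas against the previously
 * reported values and fold them into the tc bstats/qstats.
 */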
static void
nfp_abm_stats_calculate(struct nfp_alink_stats *new,
			struct nfp_alink_stats *old,
			struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_queue *qstats)
{
	_bstats_update(bstats, new->tx_bytes - old->tx_bytes,
		       new->tx_pkts - old->tx_pkts);
	qstats->qlen += new->backlog_pkts - old->backlog_pkts;
	qstats->backlog += new->backlog_bytes - old->backlog_bytes;
	qstats->overlimits += new->overlimits - old->overlimits;
	qstats->drops += new->drops - old->drops;
}

static void
nfp_abm_stats_red_calculate(struct nfp_alink_xstats *new,
			    struct nfp_alink_xstats *old,
			    struct red_stats *stats)
{
	stats->forced_mark += new->ecn_marked - old->ecn_marked;
	stats->pdrop += new->pdrop - old->pdrop;
}

static int
nfp_abm_gred_stats(struct nfp_abm_link *alink, u32 handle,
		   struct tc_gred_qopt_offload_stats *stats)
{
	struct nfp_qdisc *qdisc;
	unsigned int i;

	nfp_abm_stats_update(alink);

	qdisc = nfp_abm_qdisc_find(alink, handle);
	if (!qdisc)
		return -EOPNOTSUPP;
	/* If the qdisc offload has stopped, we may need to adjust the backlog
	 * counters back, so carry on even if the qdisc is not currently
	 * offloaded.
	 */

	for (i = 0; i < qdisc->red.num_bands; i++) {
		if (!stats->xstats[i])
			continue;

		nfp_abm_stats_calculate(&qdisc->red.band[i].stats,
					&qdisc->red.band[i].prev_stats,
					&stats->bstats[i], &stats->qstats[i]);
		qdisc->red.band[i].prev_stats = qdisc->red.band[i].stats;

		nfp_abm_stats_red_calculate(&qdisc->red.band[i].xstats,
					    &qdisc->red.band[i].prev_xstats,
					    stats->xstats[i]);
		qdisc->red.band[i].prev_xstats = qdisc->red.band[i].xstats;
	}

	return qdisc->offloaded ? 0 : -EOPNOTSUPP;
}

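/* Validate GRED parameters against device capabilities: GRIO/WRED and
 * harddrop are not supported, the default band and band count must match
 * the device, and each band must use a single threshold (min == max)
 * which fits in the device's range.
 */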
static bool
nfp_abm_gred_check_params(struct nfp_abm_link *alink,
			  struct tc_gred_qopt_offload *opt)
{
	struct nfp_cpp *cpp = alink->abm->app->cpp;
	struct nfp_abm *abm = alink->abm;
	unsigned int i;

	if (opt->set.grio_on || opt->set.wred_on) {
		nfp_warn(cpp, "GRED offload failed - GRIO and WRED not supported (p:%08x h:%08x)\n",
			 opt->parent, opt->handle);
		return false;
	}
	if (opt->set.dp_def != alink->def_band) {
		nfp_warn(cpp, "GRED offload failed - default band must be %d (p:%08x h:%08x)\n",
			 alink->def_band, opt->parent, opt->handle);
		return false;
	}
	if (opt->set.dp_cnt != abm->num_bands) {
		nfp_warn(cpp, "GRED offload failed - band count must be %d (p:%08x h:%08x)\n",
			 abm->num_bands, opt->parent, opt->handle);
		return false;
	}

	for (i = 0; i < abm->num_bands; i++) {
		struct tc_gred_vq_qopt_offload_params *band = &opt->set.tab[i];

		if (!band->present)
			return false;
		if (!band->is_ecn && !nfp_abm_has_drop(abm)) {
			nfp_warn(cpp, "GRED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x vq:%d)\n",
				 opt->parent, opt->handle, i);
			return false;
		}
		if (band->is_ecn && !nfp_abm_has_mark(abm)) {
			nfp_warn(cpp, "GRED offload failed - ECN marking not supported (p:%08x h:%08x vq:%d)\n",
				 opt->parent, opt->handle, i);
			return false;
		}
		if (band->is_harddrop) {
			nfp_warn(cpp, "GRED offload failed - harddrop is not supported (p:%08x h:%08x vq:%d)\n",
				 opt->parent, opt->handle, i);
			return false;
		}
		if (band->min != band->max) {
			nfp_warn(cpp, "GRED offload failed - threshold mismatch (p:%08x h:%08x vq:%d)\n",
				 opt->parent, opt->handle, i);
			return false;
		}
		if (band->min > S32_MAX) {
			nfp_warn(cpp, "GRED offload failed - threshold too large %d > %d (p:%08x h:%08x vq:%d)\n",
				 band->min, S32_MAX, opt->parent, opt->handle,
				 i);
			return false;
		}
	}

	return true;
}

static int
nfp_abm_gred_replace(struct net_device *netdev, struct nfp_abm_link *alink,
		     struct tc_gred_qopt_offload *opt)
{
	struct nfp_qdisc *qdisc;
	unsigned int i;
	int ret;

	ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_GRED, opt->parent,
				    opt->handle, 0, &qdisc);
	if (ret < 0)
		return ret;

	qdisc->params_ok = nfp_abm_gred_check_params(alink, opt);
	if (qdisc->params_ok) {
		qdisc->red.num_bands = opt->set.dp_cnt;
		for (i = 0; i < qdisc->red.num_bands; i++) {
			qdisc->red.band[i].ecn = opt->set.tab[i].is_ecn;
			qdisc->red.band[i].threshold = opt->set.tab[i].min;
		}
	}

	if (qdisc->use_cnt)
		nfp_abm_qdisc_offload_update(alink);

	return 0;
}

int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
			  struct tc_gred_qopt_offload *opt)
{
	switch (opt->command) {
	case TC_GRED_REPLACE:
		return nfp_abm_gred_replace(netdev, alink, opt);
	case TC_GRED_DESTROY:
		nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
		return 0;
	case TC_GRED_STATS:
		return nfp_abm_gred_stats(alink, opt->handle, &opt->stats);
	default:
		return -EOPNOTSUPP;
	}
}

static int
nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
{
	struct nfp_qdisc *qdisc;

	nfp_abm_stats_update(alink);

	qdisc = nfp_abm_qdisc_find(alink, opt->handle);
	if (!qdisc || !qdisc->offloaded)
		return -EOPNOTSUPP;

	nfp_abm_stats_red_calculate(&qdisc->red.band[0].xstats,
				    &qdisc->red.band[0].prev_xstats,
				    opt->xstats);
	qdisc->red.band[0].prev_xstats = qdisc->red.band[0].xstats;
	return 0;
}

static int
nfp_abm_red_stats(struct nfp_abm_link *alink, u32 handle,
		  struct tc_qopt_offload_stats *stats)
{
	struct nfp_qdisc *qdisc;

	nfp_abm_stats_update(alink);

	qdisc = nfp_abm_qdisc_find(alink, handle);
	if (!qdisc)
		return -EOPNOTSUPP;
	/* If the qdisc offload has stopped, we may need to adjust the backlog
	 * counters back, so carry on even if the qdisc is not currently
	 * offloaded.
	 */

	nfp_abm_stats_calculate(&qdisc->red.band[0].stats,
				&qdisc->red.band[0].prev_stats,
				stats->bstats, stats->qstats);
	qdisc->red.band[0].prev_stats = qdisc->red.band[0].stats;

	return qdisc->offloaded ? 0 : -EOPNOTSUPP;
}

static bool
nfp_abm_red_check_params(struct nfp_abm_link *alink,
			 struct tc_red_qopt_offload *opt)
{
	struct nfp_cpp *cpp = alink->abm->app->cpp;
	struct nfp_abm *abm = alink->abm;

	if (!opt->set.is_ecn && !nfp_abm_has_drop(abm)) {
		nfp_warn(cpp, "RED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x)\n",
			 opt->parent, opt->handle);
		return false;
	}
	if (opt->set.is_ecn && !nfp_abm_has_mark(abm)) {
		nfp_warn(cpp, "RED offload failed - ECN marking not supported (p:%08x h:%08x)\n",
			 opt->parent, opt->handle);
		return false;
	}
	if (opt->set.is_harddrop) {
		nfp_warn(cpp, "RED offload failed - harddrop is not supported (p:%08x h:%08x)\n",
			 opt->parent, opt->handle);
		return false;
	}
	if (opt->set.min != opt->set.max) {
		nfp_warn(cpp, "RED offload failed - unsupported min/max parameters (p:%08x h:%08x)\n",
			 opt->parent, opt->handle);
		return false;
	}
	if (opt->set.min > NFP_ABM_LVL_INFINITY) {
		nfp_warn(cpp, "RED offload failed - threshold too large %d > %d (p:%08x h:%08x)\n",
			 opt->set.min, NFP_ABM_LVL_INFINITY, opt->parent,
			 opt->handle);
		return false;
	}

	return true;
}

static int
nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
		    struct tc_red_qopt_offload *opt)
{
	struct nfp_qdisc *qdisc;
	int ret;

	ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_RED, opt->parent,
				    opt->handle, 1, &qdisc);
	if (ret < 0)
		return ret;

	/* If limit != 0 the child gets reset */
	if (opt->set.limit) {
		if (nfp_abm_qdisc_child_valid(qdisc, 0))
			qdisc->children[0]->use_cnt--;
		qdisc->children[0] = NULL;
	} else {
		/* A qdisc which was just allocated without a limit will use
		 * noop_qdisc, i.e. a black hole.
		 */
		if (!ret)
			qdisc->children[0] = NFP_QDISC_UNTRACKED;
	}

	qdisc->params_ok = nfp_abm_red_check_params(alink, opt);
	if (qdisc->params_ok) {
		qdisc->red.num_bands = 1;
		qdisc->red.band[0].ecn = opt->set.is_ecn;
		qdisc->red.band[0].threshold = opt->set.min;
	}

	if (qdisc->use_cnt == 1)
		nfp_abm_qdisc_offload_update(alink);

	return 0;
}

int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
			 struct tc_red_qopt_offload *opt)
{
	switch (opt->command) {
	case TC_RED_REPLACE:
		return nfp_abm_red_replace(netdev, alink, opt);
	case TC_RED_DESTROY:
		nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
		return 0;
	case TC_RED_STATS:
		return nfp_abm_red_stats(alink, opt->handle, &opt->stats);
	case TC_RED_XSTATS:
		return nfp_abm_red_xstats(alink, opt);
	case TC_RED_GRAFT:
		return nfp_abm_qdisc_graft(alink, opt->handle,
					   opt->child_handle, 0);
	default:
		return -EOPNOTSUPP;
	}
}

static int
nfp_abm_mq_create(struct net_device *netdev, struct nfp_abm_link *alink,
		  struct tc_mq_qopt_offload *opt)
{
	struct nfp_qdisc *qdisc;
	int ret;

	ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_MQ,
				    TC_H_ROOT, opt->handle, alink->total_queues,
				    &qdisc);
	if (ret < 0)
		return ret;

	qdisc->params_ok = true;
	qdisc->offloaded = true;
	nfp_abm_qdisc_offload_update(alink);
	return 0;
}

static int
nfp_abm_mq_stats(struct nfp_abm_link *alink, u32 handle,
		 struct tc_qopt_offload_stats *stats)
{
	struct nfp_qdisc *qdisc, *red;
	unsigned int i, j;

	qdisc = nfp_abm_qdisc_find(alink, handle);
	if (!qdisc)
		return -EOPNOTSUPP;

	nfp_abm_stats_update(alink);

	/* MQ stats are summed over the children in the core, so we need
	 * to add up the unreported child values.
	 */
	memset(&qdisc->mq.stats, 0, sizeof(qdisc->mq.stats));
	memset(&qdisc->mq.prev_stats, 0, sizeof(qdisc->mq.prev_stats));

	for (i = 0; i < qdisc->num_children; i++) {
		if (!nfp_abm_qdisc_child_valid(qdisc, i))
			continue;

		if (!nfp_abm_qdisc_is_red(qdisc->children[i]))
			continue;
		red = qdisc->children[i];

		for (j = 0; j < red->red.num_bands; j++) {
			nfp_abm_stats_propagate(&qdisc->mq.stats,
						&red->red.band[j].stats);
			nfp_abm_stats_propagate(&qdisc->mq.prev_stats,
						&red->red.band[j].prev_stats);
		}
	}

	nfp_abm_stats_calculate(&qdisc->mq.stats, &qdisc->mq.prev_stats,
				stats->bstats, stats->qstats);

	return qdisc->offloaded ? 0 : -EOPNOTSUPP;
}

int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
			struct tc_mq_qopt_offload *opt)
{
	switch (opt->command) {
	case TC_MQ_CREATE:
		return nfp_abm_mq_create(netdev, alink, opt);
	case TC_MQ_DESTROY:
		nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
		return 0;
	case TC_MQ_STATS:
		return nfp_abm_mq_stats(alink, opt->handle, &opt->stats);
	case TC_MQ_GRAFT:
		return nfp_abm_qdisc_graft(alink, opt->handle,
					   opt->graft_params.child_handle,
					   opt->graft_params.queue);
	default:
		return -EOPNOTSUPP;
	}
}

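/* Handler for TC_SETUP_ROOT_QDISC - track which qdisc sits at the root
 * of the TX hierarchy and recompute the offload state accordingly.
 */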
int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
		       struct tc_root_qopt_offload *opt)
{
	if (opt->ingress)
		return -EOPNOTSUPP;
	if (alink->root_qdisc)
		alink->root_qdisc->use_cnt--;
	alink->root_qdisc = nfp_abm_qdisc_find(alink, opt->handle);
	if (alink->root_qdisc)
		alink->root_qdisc->use_cnt++;

	nfp_abm_qdisc_offload_update(alink);

	return 0;
}