xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2*4882a593Smuzhiyun /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
3*4882a593Smuzhiyun 
4*4882a593Smuzhiyun #include <linux/kernel.h>
5*4882a593Smuzhiyun #include <linux/errno.h>
6*4882a593Smuzhiyun #include <linux/netdevice.h>
7*4882a593Smuzhiyun #include <net/pkt_cls.h>
8*4882a593Smuzhiyun #include <net/red.h>
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include "spectrum.h"
11*4882a593Smuzhiyun #include "spectrum_span.h"
12*4882a593Smuzhiyun #include "reg.h"
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun #define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
15*4882a593Smuzhiyun #define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
16*4882a593Smuzhiyun 	MLXSW_SP_PRIO_BAND_TO_TCLASS((child - 1))
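/* For illustration, with IEEE_8021QAZ_MAX_TCS == 8 the mapping above is
 * inverted: PRIO/ETS band 0 maps to traffic class 7 and band 7 maps to
 * traffic class 0. Child qdiscs are addressed by the 1-based band number in
 * the minor part of the parent handle, so child 1 likewise maps to traffic
 * class 7.
 */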
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun enum mlxsw_sp_qdisc_type {
19*4882a593Smuzhiyun 	MLXSW_SP_QDISC_NO_QDISC,
20*4882a593Smuzhiyun 	MLXSW_SP_QDISC_RED,
21*4882a593Smuzhiyun 	MLXSW_SP_QDISC_PRIO,
22*4882a593Smuzhiyun 	MLXSW_SP_QDISC_ETS,
23*4882a593Smuzhiyun 	MLXSW_SP_QDISC_TBF,
24*4882a593Smuzhiyun 	MLXSW_SP_QDISC_FIFO,
25*4882a593Smuzhiyun };
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun struct mlxsw_sp_qdisc;
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun struct mlxsw_sp_qdisc_ops {
30*4882a593Smuzhiyun 	enum mlxsw_sp_qdisc_type type;
31*4882a593Smuzhiyun 	int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
32*4882a593Smuzhiyun 			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
33*4882a593Smuzhiyun 			    void *params);
34*4882a593Smuzhiyun 	int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
35*4882a593Smuzhiyun 		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
36*4882a593Smuzhiyun 	int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
37*4882a593Smuzhiyun 		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
38*4882a593Smuzhiyun 	int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
39*4882a593Smuzhiyun 			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
40*4882a593Smuzhiyun 			 struct tc_qopt_offload_stats *stats_ptr);
41*4882a593Smuzhiyun 	int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
42*4882a593Smuzhiyun 			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
43*4882a593Smuzhiyun 			  void *xstats_ptr);
44*4882a593Smuzhiyun 	void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
45*4882a593Smuzhiyun 			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
46*4882a593Smuzhiyun 	/* unoffload - to be used for a qdisc that stops being offloaded without
47*4882a593Smuzhiyun 	 * being destroyed.
48*4882a593Smuzhiyun 	 */
49*4882a593Smuzhiyun 	void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
50*4882a593Smuzhiyun 			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
51*4882a593Smuzhiyun };
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun struct mlxsw_sp_qdisc {
54*4882a593Smuzhiyun 	u32 handle;
55*4882a593Smuzhiyun 	u8 tclass_num;
56*4882a593Smuzhiyun 	u8 prio_bitmap;
57*4882a593Smuzhiyun 	union {
58*4882a593Smuzhiyun 		struct red_stats red;
59*4882a593Smuzhiyun 	} xstats_base;
60*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc_stats {
61*4882a593Smuzhiyun 		u64 tx_bytes;
62*4882a593Smuzhiyun 		u64 tx_packets;
63*4882a593Smuzhiyun 		u64 drops;
64*4882a593Smuzhiyun 		u64 overlimits;
65*4882a593Smuzhiyun 		u64 backlog;
66*4882a593Smuzhiyun 	} stats_base;
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc_ops *ops;
69*4882a593Smuzhiyun };
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun struct mlxsw_sp_qdisc_state {
72*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc root_qdisc;
73*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc tclass_qdiscs[IEEE_8021QAZ_MAX_TCS];
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun 	/* When a PRIO or ETS are added, the invisible FIFOs in their bands are
76*4882a593Smuzhiyun 	 * created first. When notifications for these FIFOs arrive, it is not
77*4882a593Smuzhiyun 	 * known what qdisc their parent handle refers to. It could be a
78*4882a593Smuzhiyun 	 * newly-created PRIO that will replace the currently-offloaded one, or
79*4882a593Smuzhiyun 	 * it could be e.g. a RED that will be attached below it.
80*4882a593Smuzhiyun 	 *
81*4882a593Smuzhiyun 	 * As the notifications start to arrive, use them to note what the
82*4882a593Smuzhiyun 	 * future parent handle is, and keep track of which child FIFOs were
83*4882a593Smuzhiyun 	 * seen. Then when the parent is known, retroactively offload those
84*4882a593Smuzhiyun 	 * FIFOs.
85*4882a593Smuzhiyun 	 */
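	/* For example (handles illustrative): when an 8-band PRIO with handle
	 * 1: is being installed as root, the kernel may first send FIFO
	 * replace notifications whose parents are 1:1 .. 1:8. future_handle
	 * then records TC_H_MAJ of those parents and future_fifos marks the
	 * bands seen, so that the FIFOs can be offloaded retroactively once
	 * the PRIO replace notification itself arrives.
	 */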
86*4882a593Smuzhiyun 	u32 future_handle;
87*4882a593Smuzhiyun 	bool future_fifos[IEEE_8021QAZ_MAX_TCS];
88*4882a593Smuzhiyun };
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun static bool
91*4882a593Smuzhiyun mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
92*4882a593Smuzhiyun 		       enum mlxsw_sp_qdisc_type type)
93*4882a593Smuzhiyun {
94*4882a593Smuzhiyun 	return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
95*4882a593Smuzhiyun 	       mlxsw_sp_qdisc->ops->type == type &&
96*4882a593Smuzhiyun 	       mlxsw_sp_qdisc->handle == handle;
97*4882a593Smuzhiyun }
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun static struct mlxsw_sp_qdisc *
100*4882a593Smuzhiyun mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
101*4882a593Smuzhiyun 		    bool root_only)
102*4882a593Smuzhiyun {
103*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
104*4882a593Smuzhiyun 	int tclass, child_index;
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun 	if (parent == TC_H_ROOT)
107*4882a593Smuzhiyun 		return &qdisc_state->root_qdisc;
108*4882a593Smuzhiyun 
109*4882a593Smuzhiyun 	if (root_only || !qdisc_state ||
110*4882a593Smuzhiyun 	    !qdisc_state->root_qdisc.ops ||
111*4882a593Smuzhiyun 	    TC_H_MAJ(parent) != qdisc_state->root_qdisc.handle ||
112*4882a593Smuzhiyun 	    TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
113*4882a593Smuzhiyun 		return NULL;
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun 	child_index = TC_H_MIN(parent);
116*4882a593Smuzhiyun 	tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
117*4882a593Smuzhiyun 	return &qdisc_state->tclass_qdiscs[tclass];
118*4882a593Smuzhiyun }
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun static struct mlxsw_sp_qdisc *
121*4882a593Smuzhiyun mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
122*4882a593Smuzhiyun {
123*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
124*4882a593Smuzhiyun 	int i;
125*4882a593Smuzhiyun 
126*4882a593Smuzhiyun 	if (qdisc_state->root_qdisc.handle == handle)
127*4882a593Smuzhiyun 		return &qdisc_state->root_qdisc;
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun 	if (qdisc_state->root_qdisc.handle == TC_H_UNSPEC)
130*4882a593Smuzhiyun 		return NULL;
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
133*4882a593Smuzhiyun 		if (qdisc_state->tclass_qdiscs[i].handle == handle)
134*4882a593Smuzhiyun 			return &qdisc_state->tclass_qdiscs[i];
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun 	return NULL;
137*4882a593Smuzhiyun }
138*4882a593Smuzhiyun 
139*4882a593Smuzhiyun static int
140*4882a593Smuzhiyun mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
141*4882a593Smuzhiyun 		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
142*4882a593Smuzhiyun {
143*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
144*4882a593Smuzhiyun 	int err_hdroom = 0;
145*4882a593Smuzhiyun 	int err = 0;
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun 	if (!mlxsw_sp_qdisc)
148*4882a593Smuzhiyun 		return 0;
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun 	if (root_qdisc == mlxsw_sp_qdisc) {
151*4882a593Smuzhiyun 		struct mlxsw_sp_hdroom hdroom = *mlxsw_sp_port->hdroom;
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun 		hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
154*4882a593Smuzhiyun 		mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
155*4882a593Smuzhiyun 		mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
156*4882a593Smuzhiyun 		mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
157*4882a593Smuzhiyun 		err_hdroom = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
158*4882a593Smuzhiyun 	}
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun 	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
161*4882a593Smuzhiyun 		err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
162*4882a593Smuzhiyun 						   mlxsw_sp_qdisc);
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun 	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
165*4882a593Smuzhiyun 	mlxsw_sp_qdisc->ops = NULL;
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun 	return err_hdroom ?: err;
168*4882a593Smuzhiyun }
169*4882a593Smuzhiyun 
170*4882a593Smuzhiyun static int
171*4882a593Smuzhiyun mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
172*4882a593Smuzhiyun 		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
173*4882a593Smuzhiyun 		       struct mlxsw_sp_qdisc_ops *ops, void *params)
174*4882a593Smuzhiyun {
175*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
176*4882a593Smuzhiyun 	struct mlxsw_sp_hdroom orig_hdroom;
177*4882a593Smuzhiyun 	int err;
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun 	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
180*4882a593Smuzhiyun 		/* If this location already holds a qdisc of the same type, we
181*4882a593Smuzhiyun 		 * can simply override the old qdisc configuration. Otherwise,
182*4882a593Smuzhiyun 		 * the old qdisc needs to be removed before the new one is set
183*4882a593Smuzhiyun 		 * up.
184*4882a593Smuzhiyun 		 */
185*4882a593Smuzhiyun 		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun 	orig_hdroom = *mlxsw_sp_port->hdroom;
188*4882a593Smuzhiyun 	if (root_qdisc == mlxsw_sp_qdisc) {
189*4882a593Smuzhiyun 		struct mlxsw_sp_hdroom hdroom = orig_hdroom;
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun 		hdroom.mode = MLXSW_SP_HDROOM_MODE_TC;
192*4882a593Smuzhiyun 		mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
193*4882a593Smuzhiyun 		mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
194*4882a593Smuzhiyun 		mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun 		err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
197*4882a593Smuzhiyun 		if (err)
198*4882a593Smuzhiyun 			goto err_hdroom_configure;
199*4882a593Smuzhiyun 	}
200*4882a593Smuzhiyun 
201*4882a593Smuzhiyun 	err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
202*4882a593Smuzhiyun 	if (err)
203*4882a593Smuzhiyun 		goto err_bad_param;
204*4882a593Smuzhiyun 
205*4882a593Smuzhiyun 	err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
206*4882a593Smuzhiyun 	if (err)
207*4882a593Smuzhiyun 		goto err_config;
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun 	/* Check if the Qdisc changed. That includes a situation where an
210*4882a593Smuzhiyun 	 * invisible Qdisc replaces another one, or is being added for the
211*4882a593Smuzhiyun 	 * first time.
212*4882a593Smuzhiyun 	 */
213*4882a593Smuzhiyun 	if (mlxsw_sp_qdisc->handle != handle || handle == TC_H_UNSPEC) {
214*4882a593Smuzhiyun 		mlxsw_sp_qdisc->ops = ops;
215*4882a593Smuzhiyun 		if (ops->clean_stats)
216*4882a593Smuzhiyun 			ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
217*4882a593Smuzhiyun 	}
218*4882a593Smuzhiyun 
219*4882a593Smuzhiyun 	mlxsw_sp_qdisc->handle = handle;
220*4882a593Smuzhiyun 	return 0;
221*4882a593Smuzhiyun 
222*4882a593Smuzhiyun err_bad_param:
223*4882a593Smuzhiyun err_config:
224*4882a593Smuzhiyun 	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
225*4882a593Smuzhiyun err_hdroom_configure:
226*4882a593Smuzhiyun 	if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
227*4882a593Smuzhiyun 		ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun 	mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
230*4882a593Smuzhiyun 	return err;
231*4882a593Smuzhiyun }
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun static int
234*4882a593Smuzhiyun mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
235*4882a593Smuzhiyun 			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
236*4882a593Smuzhiyun 			 struct tc_qopt_offload_stats *stats_ptr)
237*4882a593Smuzhiyun {
238*4882a593Smuzhiyun 	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
239*4882a593Smuzhiyun 	    mlxsw_sp_qdisc->ops->get_stats)
240*4882a593Smuzhiyun 		return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
241*4882a593Smuzhiyun 						      mlxsw_sp_qdisc,
242*4882a593Smuzhiyun 						      stats_ptr);
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun 	return -EOPNOTSUPP;
245*4882a593Smuzhiyun }
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun static int
248*4882a593Smuzhiyun mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
249*4882a593Smuzhiyun 			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
250*4882a593Smuzhiyun 			  void *xstats_ptr)
251*4882a593Smuzhiyun {
252*4882a593Smuzhiyun 	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
253*4882a593Smuzhiyun 	    mlxsw_sp_qdisc->ops->get_xstats)
254*4882a593Smuzhiyun 		return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
255*4882a593Smuzhiyun 						      mlxsw_sp_qdisc,
256*4882a593Smuzhiyun 						      xstats_ptr);
257*4882a593Smuzhiyun 
258*4882a593Smuzhiyun 	return -EOPNOTSUPP;
259*4882a593Smuzhiyun }
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun static u64
262*4882a593Smuzhiyun mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
263*4882a593Smuzhiyun {
264*4882a593Smuzhiyun 	return xstats->backlog[tclass_num] +
265*4882a593Smuzhiyun 	       xstats->backlog[tclass_num + 8];
266*4882a593Smuzhiyun }
267*4882a593Smuzhiyun 
268*4882a593Smuzhiyun static u64
269*4882a593Smuzhiyun mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
270*4882a593Smuzhiyun {
271*4882a593Smuzhiyun 	return xstats->tail_drop[tclass_num] +
272*4882a593Smuzhiyun 	       xstats->tail_drop[tclass_num + 8];
273*4882a593Smuzhiyun }
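/* In the two helpers above, the per-port xstats arrays are assumed to hold
 * the unicast traffic-class counters at indices 0..7 and their multicast
 * counterparts at indices 8..15, which is why the values at tclass_num and
 * tclass_num + 8 are summed.
 */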
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun static void
276*4882a593Smuzhiyun mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
277*4882a593Smuzhiyun 				       u8 prio_bitmap, u64 *tx_packets,
278*4882a593Smuzhiyun 				       u64 *tx_bytes)
279*4882a593Smuzhiyun {
280*4882a593Smuzhiyun 	int i;
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun 	*tx_packets = 0;
283*4882a593Smuzhiyun 	*tx_bytes = 0;
284*4882a593Smuzhiyun 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
285*4882a593Smuzhiyun 		if (prio_bitmap & BIT(i)) {
286*4882a593Smuzhiyun 			*tx_packets += xstats->tx_packets[i];
287*4882a593Smuzhiyun 			*tx_bytes += xstats->tx_bytes[i];
288*4882a593Smuzhiyun 		}
289*4882a593Smuzhiyun 	}
290*4882a593Smuzhiyun }
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun static void
293*4882a593Smuzhiyun mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
294*4882a593Smuzhiyun 				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
295*4882a593Smuzhiyun 				u64 *p_tx_bytes, u64 *p_tx_packets,
296*4882a593Smuzhiyun 				u64 *p_drops, u64 *p_backlog)
297*4882a593Smuzhiyun {
298*4882a593Smuzhiyun 	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
299*4882a593Smuzhiyun 	struct mlxsw_sp_port_xstats *xstats;
300*4882a593Smuzhiyun 	u64 tx_bytes, tx_packets;
301*4882a593Smuzhiyun 
302*4882a593Smuzhiyun 	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
303*4882a593Smuzhiyun 	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
304*4882a593Smuzhiyun 					       mlxsw_sp_qdisc->prio_bitmap,
305*4882a593Smuzhiyun 					       &tx_packets, &tx_bytes);
306*4882a593Smuzhiyun 
307*4882a593Smuzhiyun 	*p_tx_packets += tx_packets;
308*4882a593Smuzhiyun 	*p_tx_bytes += tx_bytes;
309*4882a593Smuzhiyun 	*p_drops += xstats->wred_drop[tclass_num] +
310*4882a593Smuzhiyun 		    mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
311*4882a593Smuzhiyun 	*p_backlog += mlxsw_sp_xstats_backlog(xstats, tclass_num);
312*4882a593Smuzhiyun }
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun static void
315*4882a593Smuzhiyun mlxsw_sp_qdisc_update_stats(struct mlxsw_sp *mlxsw_sp,
316*4882a593Smuzhiyun 			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
317*4882a593Smuzhiyun 			    u64 tx_bytes, u64 tx_packets,
318*4882a593Smuzhiyun 			    u64 drops, u64 backlog,
319*4882a593Smuzhiyun 			    struct tc_qopt_offload_stats *stats_ptr)
320*4882a593Smuzhiyun {
321*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc_stats *stats_base = &mlxsw_sp_qdisc->stats_base;
322*4882a593Smuzhiyun 
323*4882a593Smuzhiyun 	tx_bytes -= stats_base->tx_bytes;
324*4882a593Smuzhiyun 	tx_packets -= stats_base->tx_packets;
325*4882a593Smuzhiyun 	drops -= stats_base->drops;
326*4882a593Smuzhiyun 	backlog -= stats_base->backlog;
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
329*4882a593Smuzhiyun 	stats_ptr->qstats->drops += drops;
330*4882a593Smuzhiyun 	stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog);
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 	stats_base->backlog += backlog;
333*4882a593Smuzhiyun 	stats_base->drops += drops;
334*4882a593Smuzhiyun 	stats_base->tx_bytes += tx_bytes;
335*4882a593Smuzhiyun 	stats_base->tx_packets += tx_packets;
336*4882a593Smuzhiyun }
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun static void
339*4882a593Smuzhiyun mlxsw_sp_qdisc_get_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
340*4882a593Smuzhiyun 			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
341*4882a593Smuzhiyun 			    struct tc_qopt_offload_stats *stats_ptr)
342*4882a593Smuzhiyun {
343*4882a593Smuzhiyun 	u64 tx_packets = 0;
344*4882a593Smuzhiyun 	u64 tx_bytes = 0;
345*4882a593Smuzhiyun 	u64 backlog = 0;
346*4882a593Smuzhiyun 	u64 drops = 0;
347*4882a593Smuzhiyun 
348*4882a593Smuzhiyun 	mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
349*4882a593Smuzhiyun 					&tx_bytes, &tx_packets,
350*4882a593Smuzhiyun 					&drops, &backlog);
351*4882a593Smuzhiyun 	mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
352*4882a593Smuzhiyun 				    tx_bytes, tx_packets, drops, backlog,
353*4882a593Smuzhiyun 				    stats_ptr);
354*4882a593Smuzhiyun }
355*4882a593Smuzhiyun 
356*4882a593Smuzhiyun static int
357*4882a593Smuzhiyun mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
358*4882a593Smuzhiyun 				  int tclass_num, u32 min, u32 max,
359*4882a593Smuzhiyun 				  u32 probability, bool is_wred, bool is_ecn)
360*4882a593Smuzhiyun {
361*4882a593Smuzhiyun 	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
362*4882a593Smuzhiyun 	char cwtp_cmd[MLXSW_REG_CWTP_LEN];
363*4882a593Smuzhiyun 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
364*4882a593Smuzhiyun 	int err;
365*4882a593Smuzhiyun 
366*4882a593Smuzhiyun 	mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
367*4882a593Smuzhiyun 	mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
368*4882a593Smuzhiyun 				    roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
369*4882a593Smuzhiyun 				    roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
370*4882a593Smuzhiyun 				    probability);
371*4882a593Smuzhiyun 
372*4882a593Smuzhiyun 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
373*4882a593Smuzhiyun 	if (err)
374*4882a593Smuzhiyun 		return err;
375*4882a593Smuzhiyun 
376*4882a593Smuzhiyun 	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
377*4882a593Smuzhiyun 			     MLXSW_REG_CWTP_DEFAULT_PROFILE, is_wred, is_ecn);
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
380*4882a593Smuzhiyun }
381*4882a593Smuzhiyun 
382*4882a593Smuzhiyun static int
383*4882a593Smuzhiyun mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
384*4882a593Smuzhiyun 				   int tclass_num)
385*4882a593Smuzhiyun {
386*4882a593Smuzhiyun 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
387*4882a593Smuzhiyun 	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
388*4882a593Smuzhiyun 
389*4882a593Smuzhiyun 	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
390*4882a593Smuzhiyun 			     MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
391*4882a593Smuzhiyun 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
392*4882a593Smuzhiyun }
393*4882a593Smuzhiyun 
394*4882a593Smuzhiyun static void
395*4882a593Smuzhiyun mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
396*4882a593Smuzhiyun 					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
397*4882a593Smuzhiyun {
398*4882a593Smuzhiyun 	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
399*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc_stats *stats_base;
400*4882a593Smuzhiyun 	struct mlxsw_sp_port_xstats *xstats;
401*4882a593Smuzhiyun 	struct red_stats *red_base;
402*4882a593Smuzhiyun 
403*4882a593Smuzhiyun 	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
404*4882a593Smuzhiyun 	stats_base = &mlxsw_sp_qdisc->stats_base;
405*4882a593Smuzhiyun 	red_base = &mlxsw_sp_qdisc->xstats_base.red;
406*4882a593Smuzhiyun 
407*4882a593Smuzhiyun 	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
408*4882a593Smuzhiyun 					       mlxsw_sp_qdisc->prio_bitmap,
409*4882a593Smuzhiyun 					       &stats_base->tx_packets,
410*4882a593Smuzhiyun 					       &stats_base->tx_bytes);
411*4882a593Smuzhiyun 	red_base->prob_drop = xstats->wred_drop[tclass_num];
412*4882a593Smuzhiyun 	red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
413*4882a593Smuzhiyun 
414*4882a593Smuzhiyun 	stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
415*4882a593Smuzhiyun 	stats_base->drops = red_base->prob_drop + red_base->pdrop;
416*4882a593Smuzhiyun 
417*4882a593Smuzhiyun 	stats_base->backlog = 0;
418*4882a593Smuzhiyun }
419*4882a593Smuzhiyun 
420*4882a593Smuzhiyun static int
421*4882a593Smuzhiyun mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
422*4882a593Smuzhiyun 			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
423*4882a593Smuzhiyun {
424*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
425*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
426*4882a593Smuzhiyun 
427*4882a593Smuzhiyun 	if (root_qdisc != mlxsw_sp_qdisc)
428*4882a593Smuzhiyun 		root_qdisc->stats_base.backlog -=
429*4882a593Smuzhiyun 					mlxsw_sp_qdisc->stats_base.backlog;
430*4882a593Smuzhiyun 
431*4882a593Smuzhiyun 	return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
432*4882a593Smuzhiyun 						  mlxsw_sp_qdisc->tclass_num);
433*4882a593Smuzhiyun }
434*4882a593Smuzhiyun 
435*4882a593Smuzhiyun static int
436*4882a593Smuzhiyun mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
437*4882a593Smuzhiyun 				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
438*4882a593Smuzhiyun 				void *params)
439*4882a593Smuzhiyun {
440*4882a593Smuzhiyun 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
441*4882a593Smuzhiyun 	struct tc_red_qopt_offload_params *p = params;
442*4882a593Smuzhiyun 
443*4882a593Smuzhiyun 	if (p->min > p->max) {
444*4882a593Smuzhiyun 		dev_err(mlxsw_sp->bus_info->dev,
445*4882a593Smuzhiyun 			"spectrum: RED: min %u is bigger than max %u\n", p->min,
446*4882a593Smuzhiyun 			p->max);
447*4882a593Smuzhiyun 		return -EINVAL;
448*4882a593Smuzhiyun 	}
449*4882a593Smuzhiyun 	if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core,
450*4882a593Smuzhiyun 					GUARANTEED_SHARED_BUFFER)) {
451*4882a593Smuzhiyun 		dev_err(mlxsw_sp->bus_info->dev,
452*4882a593Smuzhiyun 			"spectrum: RED: max value %u is too big\n", p->max);
453*4882a593Smuzhiyun 		return -EINVAL;
454*4882a593Smuzhiyun 	}
455*4882a593Smuzhiyun 	if (p->min == 0 || p->max == 0) {
456*4882a593Smuzhiyun 		dev_err(mlxsw_sp->bus_info->dev,
457*4882a593Smuzhiyun 			"spectrum: RED: 0 value is illegal for min and max\n");
458*4882a593Smuzhiyun 		return -EINVAL;
459*4882a593Smuzhiyun 	}
460*4882a593Smuzhiyun 	return 0;
461*4882a593Smuzhiyun }
462*4882a593Smuzhiyun 
463*4882a593Smuzhiyun static int
464*4882a593Smuzhiyun mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
465*4882a593Smuzhiyun 			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
466*4882a593Smuzhiyun 			   void *params)
467*4882a593Smuzhiyun {
468*4882a593Smuzhiyun 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
469*4882a593Smuzhiyun 	struct tc_red_qopt_offload_params *p = params;
470*4882a593Smuzhiyun 	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
471*4882a593Smuzhiyun 	u32 min, max;
472*4882a593Smuzhiyun 	u64 prob;
473*4882a593Smuzhiyun 
474*4882a593Smuzhiyun 	/* calculate probability in percentage */
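	/* p->probability is RED's max_P, a fixed-point probability scaled by
	 * 2^32; multiplying by 100 and dividing by 2^16 twice turns it into a
	 * percentage, e.g. 0x80000000 (p = 0.5) becomes 50.
	 */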
475*4882a593Smuzhiyun 	prob = p->probability;
476*4882a593Smuzhiyun 	prob *= 100;
477*4882a593Smuzhiyun 	prob = DIV_ROUND_UP(prob, 1 << 16);
478*4882a593Smuzhiyun 	prob = DIV_ROUND_UP(prob, 1 << 16);
479*4882a593Smuzhiyun 	min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
480*4882a593Smuzhiyun 	max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
481*4882a593Smuzhiyun 	return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num,
482*4882a593Smuzhiyun 						 min, max, prob,
483*4882a593Smuzhiyun 						 !p->is_nodrop, p->is_ecn);
484*4882a593Smuzhiyun }
485*4882a593Smuzhiyun 
486*4882a593Smuzhiyun static void
487*4882a593Smuzhiyun mlxsw_sp_qdisc_leaf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
488*4882a593Smuzhiyun 			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
489*4882a593Smuzhiyun 			      struct gnet_stats_queue *qstats)
490*4882a593Smuzhiyun {
491*4882a593Smuzhiyun 	u64 backlog;
492*4882a593Smuzhiyun 
493*4882a593Smuzhiyun 	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
494*4882a593Smuzhiyun 				       mlxsw_sp_qdisc->stats_base.backlog);
495*4882a593Smuzhiyun 	qstats->backlog -= backlog;
496*4882a593Smuzhiyun 	mlxsw_sp_qdisc->stats_base.backlog = 0;
497*4882a593Smuzhiyun }
498*4882a593Smuzhiyun 
499*4882a593Smuzhiyun static void
500*4882a593Smuzhiyun mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
501*4882a593Smuzhiyun 			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
502*4882a593Smuzhiyun 			     void *params)
503*4882a593Smuzhiyun {
504*4882a593Smuzhiyun 	struct tc_red_qopt_offload_params *p = params;
505*4882a593Smuzhiyun 
506*4882a593Smuzhiyun 	mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
507*4882a593Smuzhiyun }
508*4882a593Smuzhiyun 
509*4882a593Smuzhiyun static int
510*4882a593Smuzhiyun mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
511*4882a593Smuzhiyun 			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
512*4882a593Smuzhiyun 			      void *xstats_ptr)
513*4882a593Smuzhiyun {
514*4882a593Smuzhiyun 	struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
515*4882a593Smuzhiyun 	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
516*4882a593Smuzhiyun 	struct mlxsw_sp_port_xstats *xstats;
517*4882a593Smuzhiyun 	struct red_stats *res = xstats_ptr;
518*4882a593Smuzhiyun 	int early_drops, pdrops;
519*4882a593Smuzhiyun 
520*4882a593Smuzhiyun 	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
521*4882a593Smuzhiyun 
522*4882a593Smuzhiyun 	early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
523*4882a593Smuzhiyun 	pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
524*4882a593Smuzhiyun 		 xstats_base->pdrop;
525*4882a593Smuzhiyun 
526*4882a593Smuzhiyun 	res->pdrop += pdrops;
527*4882a593Smuzhiyun 	res->prob_drop += early_drops;
528*4882a593Smuzhiyun 
529*4882a593Smuzhiyun 	xstats_base->pdrop += pdrops;
530*4882a593Smuzhiyun 	xstats_base->prob_drop += early_drops;
531*4882a593Smuzhiyun 	return 0;
532*4882a593Smuzhiyun }
533*4882a593Smuzhiyun 
534*4882a593Smuzhiyun static int
535*4882a593Smuzhiyun mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
536*4882a593Smuzhiyun 			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
537*4882a593Smuzhiyun 			     struct tc_qopt_offload_stats *stats_ptr)
538*4882a593Smuzhiyun {
539*4882a593Smuzhiyun 	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
540*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc_stats *stats_base;
541*4882a593Smuzhiyun 	struct mlxsw_sp_port_xstats *xstats;
542*4882a593Smuzhiyun 	u64 overlimits;
543*4882a593Smuzhiyun 
544*4882a593Smuzhiyun 	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
545*4882a593Smuzhiyun 	stats_base = &mlxsw_sp_qdisc->stats_base;
546*4882a593Smuzhiyun 
547*4882a593Smuzhiyun 	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
548*4882a593Smuzhiyun 	overlimits = xstats->wred_drop[tclass_num] - stats_base->overlimits;
549*4882a593Smuzhiyun 
550*4882a593Smuzhiyun 	stats_ptr->qstats->overlimits += overlimits;
551*4882a593Smuzhiyun 	stats_base->overlimits += overlimits;
552*4882a593Smuzhiyun 
553*4882a593Smuzhiyun 	return 0;
554*4882a593Smuzhiyun }
555*4882a593Smuzhiyun 
556*4882a593Smuzhiyun #define MLXSW_SP_PORT_DEFAULT_TCLASS 0
557*4882a593Smuzhiyun 
558*4882a593Smuzhiyun static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
559*4882a593Smuzhiyun 	.type = MLXSW_SP_QDISC_RED,
560*4882a593Smuzhiyun 	.check_params = mlxsw_sp_qdisc_red_check_params,
561*4882a593Smuzhiyun 	.replace = mlxsw_sp_qdisc_red_replace,
562*4882a593Smuzhiyun 	.unoffload = mlxsw_sp_qdisc_red_unoffload,
563*4882a593Smuzhiyun 	.destroy = mlxsw_sp_qdisc_red_destroy,
564*4882a593Smuzhiyun 	.get_stats = mlxsw_sp_qdisc_get_red_stats,
565*4882a593Smuzhiyun 	.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
566*4882a593Smuzhiyun 	.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
567*4882a593Smuzhiyun };
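/* A command that exercises this RED offload could look something like the
 * following (device name and values purely illustrative):
 *
 *   tc qdisc replace dev swp1 root handle 1: red limit 400000 \
 *      min 30000 max 300000 avpkt 1000 burst 120 probability 0.1 ecn
 */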
568*4882a593Smuzhiyun 
569*4882a593Smuzhiyun int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
570*4882a593Smuzhiyun 			  struct tc_red_qopt_offload *p)
571*4882a593Smuzhiyun {
572*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
573*4882a593Smuzhiyun 
574*4882a593Smuzhiyun 	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
575*4882a593Smuzhiyun 	if (!mlxsw_sp_qdisc)
576*4882a593Smuzhiyun 		return -EOPNOTSUPP;
577*4882a593Smuzhiyun 
578*4882a593Smuzhiyun 	if (p->command == TC_RED_REPLACE)
579*4882a593Smuzhiyun 		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
580*4882a593Smuzhiyun 					      mlxsw_sp_qdisc,
581*4882a593Smuzhiyun 					      &mlxsw_sp_qdisc_ops_red,
582*4882a593Smuzhiyun 					      &p->set);
583*4882a593Smuzhiyun 
584*4882a593Smuzhiyun 	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
585*4882a593Smuzhiyun 				    MLXSW_SP_QDISC_RED))
586*4882a593Smuzhiyun 		return -EOPNOTSUPP;
587*4882a593Smuzhiyun 
588*4882a593Smuzhiyun 	switch (p->command) {
589*4882a593Smuzhiyun 	case TC_RED_DESTROY:
590*4882a593Smuzhiyun 		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
591*4882a593Smuzhiyun 	case TC_RED_XSTATS:
592*4882a593Smuzhiyun 		return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
593*4882a593Smuzhiyun 						 p->xstats);
594*4882a593Smuzhiyun 	case TC_RED_STATS:
595*4882a593Smuzhiyun 		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
596*4882a593Smuzhiyun 						&p->stats);
597*4882a593Smuzhiyun 	default:
598*4882a593Smuzhiyun 		return -EOPNOTSUPP;
599*4882a593Smuzhiyun 	}
600*4882a593Smuzhiyun }
601*4882a593Smuzhiyun 
602*4882a593Smuzhiyun static void
603*4882a593Smuzhiyun mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
604*4882a593Smuzhiyun 					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
605*4882a593Smuzhiyun {
606*4882a593Smuzhiyun 	u64 backlog_cells = 0;
607*4882a593Smuzhiyun 	u64 tx_packets = 0;
608*4882a593Smuzhiyun 	u64 tx_bytes = 0;
609*4882a593Smuzhiyun 	u64 drops = 0;
610*4882a593Smuzhiyun 
611*4882a593Smuzhiyun 	mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
612*4882a593Smuzhiyun 					&tx_bytes, &tx_packets,
613*4882a593Smuzhiyun 					&drops, &backlog_cells);
614*4882a593Smuzhiyun 
615*4882a593Smuzhiyun 	mlxsw_sp_qdisc->stats_base.tx_packets = tx_packets;
616*4882a593Smuzhiyun 	mlxsw_sp_qdisc->stats_base.tx_bytes = tx_bytes;
617*4882a593Smuzhiyun 	mlxsw_sp_qdisc->stats_base.drops = drops;
618*4882a593Smuzhiyun 	mlxsw_sp_qdisc->stats_base.backlog = 0;
619*4882a593Smuzhiyun }
620*4882a593Smuzhiyun 
621*4882a593Smuzhiyun static int
622*4882a593Smuzhiyun mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
623*4882a593Smuzhiyun 			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
624*4882a593Smuzhiyun {
625*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
626*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
627*4882a593Smuzhiyun 
628*4882a593Smuzhiyun 	if (root_qdisc != mlxsw_sp_qdisc)
629*4882a593Smuzhiyun 		root_qdisc->stats_base.backlog -=
630*4882a593Smuzhiyun 					mlxsw_sp_qdisc->stats_base.backlog;
631*4882a593Smuzhiyun 
632*4882a593Smuzhiyun 	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
633*4882a593Smuzhiyun 					     MLXSW_REG_QEEC_HR_SUBGROUP,
634*4882a593Smuzhiyun 					     mlxsw_sp_qdisc->tclass_num, 0,
635*4882a593Smuzhiyun 					     MLXSW_REG_QEEC_MAS_DIS, 0);
636*4882a593Smuzhiyun }
637*4882a593Smuzhiyun 
638*4882a593Smuzhiyun static int
639*4882a593Smuzhiyun mlxsw_sp_qdisc_tbf_bs(struct mlxsw_sp_port *mlxsw_sp_port,
640*4882a593Smuzhiyun 		      u32 max_size, u8 *p_burst_size)
641*4882a593Smuzhiyun {
642*4882a593Smuzhiyun 	/* TBF burst size is configured in bytes. The ASIC burst size value is
643*4882a593Smuzhiyun 	 * (2 ^ bs) * 512 bits. Convert the TBF bytes to 512-bit units.
644*4882a593Smuzhiyun 	 */
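	/* For example, max_size = 131072 bytes gives bs512 = 2048 = 2^11, so
	 * the resulting burst size value is bs = 11.
	 */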
645*4882a593Smuzhiyun 	u32 bs512 = max_size / 64;
646*4882a593Smuzhiyun 	u8 bs = fls(bs512);
647*4882a593Smuzhiyun 
648*4882a593Smuzhiyun 	if (!bs)
649*4882a593Smuzhiyun 		return -EINVAL;
650*4882a593Smuzhiyun 	--bs;
651*4882a593Smuzhiyun 
652*4882a593Smuzhiyun 	/* Demand a power of two. */
653*4882a593Smuzhiyun 	if ((1 << bs) != bs512)
654*4882a593Smuzhiyun 		return -EINVAL;
655*4882a593Smuzhiyun 
656*4882a593Smuzhiyun 	if (bs < mlxsw_sp_port->mlxsw_sp->lowest_shaper_bs ||
657*4882a593Smuzhiyun 	    bs > MLXSW_REG_QEEC_HIGHEST_SHAPER_BS)
658*4882a593Smuzhiyun 		return -EINVAL;
659*4882a593Smuzhiyun 
660*4882a593Smuzhiyun 	*p_burst_size = bs;
661*4882a593Smuzhiyun 	return 0;
662*4882a593Smuzhiyun }
663*4882a593Smuzhiyun 
664*4882a593Smuzhiyun static u32
665*4882a593Smuzhiyun mlxsw_sp_qdisc_tbf_max_size(u8 bs)
666*4882a593Smuzhiyun {
667*4882a593Smuzhiyun 	return (1U << bs) * 64;
668*4882a593Smuzhiyun }
669*4882a593Smuzhiyun 
670*4882a593Smuzhiyun static u64
671*4882a593Smuzhiyun mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
672*4882a593Smuzhiyun {
673*4882a593Smuzhiyun 	/* TBF interface is in bytes/s, whereas Spectrum ASIC is configured in
674*4882a593Smuzhiyun 	 * Kbits/s.
675*4882a593Smuzhiyun 	 */
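	/* For example, rate_bytes_ps = 12500000 (100 Mbit/s) becomes
	 * 12500000 / 1000 * 8 = 100000 Kbits/s.
	 */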
676*4882a593Smuzhiyun 	return div_u64(p->rate.rate_bytes_ps, 1000) * 8;
677*4882a593Smuzhiyun }
678*4882a593Smuzhiyun 
679*4882a593Smuzhiyun static int
680*4882a593Smuzhiyun mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
681*4882a593Smuzhiyun 				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
682*4882a593Smuzhiyun 				void *params)
683*4882a593Smuzhiyun {
684*4882a593Smuzhiyun 	struct tc_tbf_qopt_offload_replace_params *p = params;
685*4882a593Smuzhiyun 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
686*4882a593Smuzhiyun 	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
687*4882a593Smuzhiyun 	u8 burst_size;
688*4882a593Smuzhiyun 	int err;
689*4882a593Smuzhiyun 
690*4882a593Smuzhiyun 	if (rate_kbps >= MLXSW_REG_QEEC_MAS_DIS) {
691*4882a593Smuzhiyun 		dev_err(mlxsw_sp_port->mlxsw_sp->bus_info->dev,
692*4882a593Smuzhiyun 			"spectrum: TBF: rate of %lluKbps must be below %u\n",
693*4882a593Smuzhiyun 			rate_kbps, MLXSW_REG_QEEC_MAS_DIS);
694*4882a593Smuzhiyun 		return -EINVAL;
695*4882a593Smuzhiyun 	}
696*4882a593Smuzhiyun 
697*4882a593Smuzhiyun 	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
698*4882a593Smuzhiyun 	if (err) {
699*4882a593Smuzhiyun 		u8 highest_shaper_bs = MLXSW_REG_QEEC_HIGHEST_SHAPER_BS;
700*4882a593Smuzhiyun 
701*4882a593Smuzhiyun 		dev_err(mlxsw_sp->bus_info->dev,
702*4882a593Smuzhiyun 			"spectrum: TBF: invalid burst size of %u, must be a power of two between %u and %u",
703*4882a593Smuzhiyun 			p->max_size,
704*4882a593Smuzhiyun 			mlxsw_sp_qdisc_tbf_max_size(mlxsw_sp->lowest_shaper_bs),
705*4882a593Smuzhiyun 			mlxsw_sp_qdisc_tbf_max_size(highest_shaper_bs));
706*4882a593Smuzhiyun 		return -EINVAL;
707*4882a593Smuzhiyun 	}
708*4882a593Smuzhiyun 
709*4882a593Smuzhiyun 	return 0;
710*4882a593Smuzhiyun }
711*4882a593Smuzhiyun 
712*4882a593Smuzhiyun static int
713*4882a593Smuzhiyun mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
714*4882a593Smuzhiyun 			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
715*4882a593Smuzhiyun 			   void *params)
716*4882a593Smuzhiyun {
717*4882a593Smuzhiyun 	struct tc_tbf_qopt_offload_replace_params *p = params;
718*4882a593Smuzhiyun 	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
719*4882a593Smuzhiyun 	u8 burst_size;
720*4882a593Smuzhiyun 	int err;
721*4882a593Smuzhiyun 
722*4882a593Smuzhiyun 	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
723*4882a593Smuzhiyun 	if (WARN_ON_ONCE(err))
724*4882a593Smuzhiyun 		/* check_params above was supposed to reject this value. */
725*4882a593Smuzhiyun 		return -EINVAL;
726*4882a593Smuzhiyun 
727*4882a593Smuzhiyun 	/* Configure a subgroup shaper, so that both UC and MC traffic is
728*4882a593Smuzhiyun 	 * subject to shaping. That is unlike RED, where UC queue lengths are
729*4882a593Smuzhiyun 	 * going to differ from MC ones due to different pool and quota
730*4882a593Smuzhiyun 	 * configurations, so a shared configuration is not applicable. For a
731*4882a593Smuzhiyun 	 * shaper, on the other hand, subjecting the overall stream to the
732*4882a593Smuzhiyun 	 * configured rate makes sense. Note that this is also what we do for
733*4882a593Smuzhiyun 	 * ieee_setmaxrate().
734*4882a593Smuzhiyun 	 */
735*4882a593Smuzhiyun 	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
736*4882a593Smuzhiyun 					     MLXSW_REG_QEEC_HR_SUBGROUP,
737*4882a593Smuzhiyun 					     mlxsw_sp_qdisc->tclass_num, 0,
738*4882a593Smuzhiyun 					     rate_kbps, burst_size);
739*4882a593Smuzhiyun }
740*4882a593Smuzhiyun 
741*4882a593Smuzhiyun static void
742*4882a593Smuzhiyun mlxsw_sp_qdisc_tbf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
743*4882a593Smuzhiyun 			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
744*4882a593Smuzhiyun 			     void *params)
745*4882a593Smuzhiyun {
746*4882a593Smuzhiyun 	struct tc_tbf_qopt_offload_replace_params *p = params;
747*4882a593Smuzhiyun 
748*4882a593Smuzhiyun 	mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
749*4882a593Smuzhiyun }
750*4882a593Smuzhiyun 
751*4882a593Smuzhiyun static int
752*4882a593Smuzhiyun mlxsw_sp_qdisc_get_tbf_stats(struct mlxsw_sp_port *mlxsw_sp_port,
753*4882a593Smuzhiyun 			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
754*4882a593Smuzhiyun 			     struct tc_qopt_offload_stats *stats_ptr)
755*4882a593Smuzhiyun {
756*4882a593Smuzhiyun 	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
757*4882a593Smuzhiyun 				    stats_ptr);
758*4882a593Smuzhiyun 	return 0;
759*4882a593Smuzhiyun }
760*4882a593Smuzhiyun 
761*4882a593Smuzhiyun static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
762*4882a593Smuzhiyun 	.type = MLXSW_SP_QDISC_TBF,
763*4882a593Smuzhiyun 	.check_params = mlxsw_sp_qdisc_tbf_check_params,
764*4882a593Smuzhiyun 	.replace = mlxsw_sp_qdisc_tbf_replace,
765*4882a593Smuzhiyun 	.unoffload = mlxsw_sp_qdisc_tbf_unoffload,
766*4882a593Smuzhiyun 	.destroy = mlxsw_sp_qdisc_tbf_destroy,
767*4882a593Smuzhiyun 	.get_stats = mlxsw_sp_qdisc_get_tbf_stats,
768*4882a593Smuzhiyun 	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
769*4882a593Smuzhiyun };
770*4882a593Smuzhiyun 
771*4882a593Smuzhiyun int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
772*4882a593Smuzhiyun 			  struct tc_tbf_qopt_offload *p)
773*4882a593Smuzhiyun {
774*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
775*4882a593Smuzhiyun 
776*4882a593Smuzhiyun 	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
777*4882a593Smuzhiyun 	if (!mlxsw_sp_qdisc)
778*4882a593Smuzhiyun 		return -EOPNOTSUPP;
779*4882a593Smuzhiyun 
780*4882a593Smuzhiyun 	if (p->command == TC_TBF_REPLACE)
781*4882a593Smuzhiyun 		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
782*4882a593Smuzhiyun 					      mlxsw_sp_qdisc,
783*4882a593Smuzhiyun 					      &mlxsw_sp_qdisc_ops_tbf,
784*4882a593Smuzhiyun 					      &p->replace_params);
785*4882a593Smuzhiyun 
786*4882a593Smuzhiyun 	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
787*4882a593Smuzhiyun 				    MLXSW_SP_QDISC_TBF))
788*4882a593Smuzhiyun 		return -EOPNOTSUPP;
789*4882a593Smuzhiyun 
790*4882a593Smuzhiyun 	switch (p->command) {
791*4882a593Smuzhiyun 	case TC_TBF_DESTROY:
792*4882a593Smuzhiyun 		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
793*4882a593Smuzhiyun 	case TC_TBF_STATS:
794*4882a593Smuzhiyun 		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
795*4882a593Smuzhiyun 						&p->stats);
796*4882a593Smuzhiyun 	default:
797*4882a593Smuzhiyun 		return -EOPNOTSUPP;
798*4882a593Smuzhiyun 	}
799*4882a593Smuzhiyun }
800*4882a593Smuzhiyun 
801*4882a593Smuzhiyun static int
802*4882a593Smuzhiyun mlxsw_sp_qdisc_fifo_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
803*4882a593Smuzhiyun 			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
804*4882a593Smuzhiyun {
805*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
806*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
807*4882a593Smuzhiyun 
808*4882a593Smuzhiyun 	if (root_qdisc != mlxsw_sp_qdisc)
809*4882a593Smuzhiyun 		root_qdisc->stats_base.backlog -=
810*4882a593Smuzhiyun 					mlxsw_sp_qdisc->stats_base.backlog;
811*4882a593Smuzhiyun 	return 0;
812*4882a593Smuzhiyun }
813*4882a593Smuzhiyun 
814*4882a593Smuzhiyun static int
815*4882a593Smuzhiyun mlxsw_sp_qdisc_fifo_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
816*4882a593Smuzhiyun 				 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
817*4882a593Smuzhiyun 				 void *params)
818*4882a593Smuzhiyun {
819*4882a593Smuzhiyun 	return 0;
820*4882a593Smuzhiyun }
821*4882a593Smuzhiyun 
822*4882a593Smuzhiyun static int
823*4882a593Smuzhiyun mlxsw_sp_qdisc_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
824*4882a593Smuzhiyun 			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
825*4882a593Smuzhiyun 			    void *params)
826*4882a593Smuzhiyun {
827*4882a593Smuzhiyun 	return 0;
828*4882a593Smuzhiyun }
829*4882a593Smuzhiyun 
830*4882a593Smuzhiyun static int
831*4882a593Smuzhiyun mlxsw_sp_qdisc_get_fifo_stats(struct mlxsw_sp_port *mlxsw_sp_port,
832*4882a593Smuzhiyun 			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
833*4882a593Smuzhiyun 			      struct tc_qopt_offload_stats *stats_ptr)
834*4882a593Smuzhiyun {
835*4882a593Smuzhiyun 	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
836*4882a593Smuzhiyun 				    stats_ptr);
837*4882a593Smuzhiyun 	return 0;
838*4882a593Smuzhiyun }
839*4882a593Smuzhiyun 
840*4882a593Smuzhiyun static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
841*4882a593Smuzhiyun 	.type = MLXSW_SP_QDISC_FIFO,
842*4882a593Smuzhiyun 	.check_params = mlxsw_sp_qdisc_fifo_check_params,
843*4882a593Smuzhiyun 	.replace = mlxsw_sp_qdisc_fifo_replace,
844*4882a593Smuzhiyun 	.destroy = mlxsw_sp_qdisc_fifo_destroy,
845*4882a593Smuzhiyun 	.get_stats = mlxsw_sp_qdisc_get_fifo_stats,
846*4882a593Smuzhiyun 	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
847*4882a593Smuzhiyun };
848*4882a593Smuzhiyun 
849*4882a593Smuzhiyun int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
850*4882a593Smuzhiyun 			   struct tc_fifo_qopt_offload *p)
851*4882a593Smuzhiyun {
852*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
853*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
854*4882a593Smuzhiyun 	int tclass, child_index;
855*4882a593Smuzhiyun 	u32 parent_handle;
856*4882a593Smuzhiyun 
857*4882a593Smuzhiyun 	/* Invisible FIFOs are tracked in future_handle and future_fifos. Make
858*4882a593Smuzhiyun 	 * sure that not more than one qdisc is created for a port at a time.
859*4882a593Smuzhiyun 	 * RTNL is a simple proxy for that.
860*4882a593Smuzhiyun 	 */
861*4882a593Smuzhiyun 	ASSERT_RTNL();
862*4882a593Smuzhiyun 
863*4882a593Smuzhiyun 	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
864*4882a593Smuzhiyun 	if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
865*4882a593Smuzhiyun 		parent_handle = TC_H_MAJ(p->parent);
866*4882a593Smuzhiyun 		if (parent_handle != qdisc_state->future_handle) {
867*4882a593Smuzhiyun 			/* This notification is for a different Qdisc than
868*4882a593Smuzhiyun 			 * the previous one. Wipe the future cache.
869*4882a593Smuzhiyun 			 */
870*4882a593Smuzhiyun 			memset(qdisc_state->future_fifos, 0,
871*4882a593Smuzhiyun 			       sizeof(qdisc_state->future_fifos));
872*4882a593Smuzhiyun 			qdisc_state->future_handle = parent_handle;
873*4882a593Smuzhiyun 		}
874*4882a593Smuzhiyun 
875*4882a593Smuzhiyun 		child_index = TC_H_MIN(p->parent);
876*4882a593Smuzhiyun 		tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
877*4882a593Smuzhiyun 		if (tclass < IEEE_8021QAZ_MAX_TCS) {
878*4882a593Smuzhiyun 			if (p->command == TC_FIFO_REPLACE)
879*4882a593Smuzhiyun 				qdisc_state->future_fifos[tclass] = true;
880*4882a593Smuzhiyun 			else if (p->command == TC_FIFO_DESTROY)
881*4882a593Smuzhiyun 				qdisc_state->future_fifos[tclass] = false;
882*4882a593Smuzhiyun 		}
883*4882a593Smuzhiyun 	}
884*4882a593Smuzhiyun 	if (!mlxsw_sp_qdisc)
885*4882a593Smuzhiyun 		return -EOPNOTSUPP;
886*4882a593Smuzhiyun 
887*4882a593Smuzhiyun 	if (p->command == TC_FIFO_REPLACE) {
888*4882a593Smuzhiyun 		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
889*4882a593Smuzhiyun 					      mlxsw_sp_qdisc,
890*4882a593Smuzhiyun 					      &mlxsw_sp_qdisc_ops_fifo, NULL);
891*4882a593Smuzhiyun 	}
892*4882a593Smuzhiyun 
893*4882a593Smuzhiyun 	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
894*4882a593Smuzhiyun 				    MLXSW_SP_QDISC_FIFO))
895*4882a593Smuzhiyun 		return -EOPNOTSUPP;
896*4882a593Smuzhiyun 
897*4882a593Smuzhiyun 	switch (p->command) {
898*4882a593Smuzhiyun 	case TC_FIFO_DESTROY:
899*4882a593Smuzhiyun 		if (p->handle == mlxsw_sp_qdisc->handle)
900*4882a593Smuzhiyun 			return mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
901*4882a593Smuzhiyun 						      mlxsw_sp_qdisc);
902*4882a593Smuzhiyun 		return 0;
903*4882a593Smuzhiyun 	case TC_FIFO_STATS:
904*4882a593Smuzhiyun 		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
905*4882a593Smuzhiyun 						&p->stats);
906*4882a593Smuzhiyun 	case TC_FIFO_REPLACE: /* Handled above. */
907*4882a593Smuzhiyun 		break;
908*4882a593Smuzhiyun 	}
909*4882a593Smuzhiyun 
910*4882a593Smuzhiyun 	return -EOPNOTSUPP;
911*4882a593Smuzhiyun }
912*4882a593Smuzhiyun 
913*4882a593Smuzhiyun static int
914*4882a593Smuzhiyun __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
915*4882a593Smuzhiyun {
916*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
917*4882a593Smuzhiyun 	int i;
918*4882a593Smuzhiyun 
919*4882a593Smuzhiyun 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
920*4882a593Smuzhiyun 		mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
921*4882a593Smuzhiyun 					  MLXSW_SP_PORT_DEFAULT_TCLASS);
922*4882a593Smuzhiyun 		mlxsw_sp_port_ets_set(mlxsw_sp_port,
923*4882a593Smuzhiyun 				      MLXSW_REG_QEEC_HR_SUBGROUP,
924*4882a593Smuzhiyun 				      i, 0, false, 0);
925*4882a593Smuzhiyun 		mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
926*4882a593Smuzhiyun 				       &qdisc_state->tclass_qdiscs[i]);
927*4882a593Smuzhiyun 		qdisc_state->tclass_qdiscs[i].prio_bitmap = 0;
928*4882a593Smuzhiyun 	}
929*4882a593Smuzhiyun 
930*4882a593Smuzhiyun 	return 0;
931*4882a593Smuzhiyun }
932*4882a593Smuzhiyun 
933*4882a593Smuzhiyun static int
934*4882a593Smuzhiyun mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
935*4882a593Smuzhiyun 			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
936*4882a593Smuzhiyun {
937*4882a593Smuzhiyun 	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
938*4882a593Smuzhiyun }
939*4882a593Smuzhiyun 
940*4882a593Smuzhiyun static int
941*4882a593Smuzhiyun __mlxsw_sp_qdisc_ets_check_params(unsigned int nbands)
942*4882a593Smuzhiyun {
943*4882a593Smuzhiyun 	if (nbands > IEEE_8021QAZ_MAX_TCS)
944*4882a593Smuzhiyun 		return -EOPNOTSUPP;
945*4882a593Smuzhiyun 
946*4882a593Smuzhiyun 	return 0;
947*4882a593Smuzhiyun }
948*4882a593Smuzhiyun 
949*4882a593Smuzhiyun static int
950*4882a593Smuzhiyun mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
951*4882a593Smuzhiyun 				 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
952*4882a593Smuzhiyun 				 void *params)
953*4882a593Smuzhiyun {
954*4882a593Smuzhiyun 	struct tc_prio_qopt_offload_params *p = params;
955*4882a593Smuzhiyun 
956*4882a593Smuzhiyun 	return __mlxsw_sp_qdisc_ets_check_params(p->bands);
957*4882a593Smuzhiyun }
958*4882a593Smuzhiyun 
959*4882a593Smuzhiyun static int
960*4882a593Smuzhiyun __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
961*4882a593Smuzhiyun 			     unsigned int nbands,
962*4882a593Smuzhiyun 			     const unsigned int *quanta,
963*4882a593Smuzhiyun 			     const unsigned int *weights,
964*4882a593Smuzhiyun 			     const u8 *priomap)
965*4882a593Smuzhiyun {
966*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
967*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc *child_qdisc;
968*4882a593Smuzhiyun 	int tclass, i, band, backlog;
969*4882a593Smuzhiyun 	u8 old_priomap;
970*4882a593Smuzhiyun 	int err;
971*4882a593Smuzhiyun 
972*4882a593Smuzhiyun 	for (band = 0; band < nbands; band++) {
973*4882a593Smuzhiyun 		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
974*4882a593Smuzhiyun 		child_qdisc = &qdisc_state->tclass_qdiscs[tclass];
975*4882a593Smuzhiyun 		old_priomap = child_qdisc->prio_bitmap;
976*4882a593Smuzhiyun 		child_qdisc->prio_bitmap = 0;
977*4882a593Smuzhiyun 
978*4882a593Smuzhiyun 		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
979*4882a593Smuzhiyun 					    MLXSW_REG_QEEC_HR_SUBGROUP,
980*4882a593Smuzhiyun 					    tclass, 0, !!quanta[band],
981*4882a593Smuzhiyun 					    weights[band]);
982*4882a593Smuzhiyun 		if (err)
983*4882a593Smuzhiyun 			return err;
984*4882a593Smuzhiyun 
985*4882a593Smuzhiyun 		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
986*4882a593Smuzhiyun 			if (priomap[i] == band) {
987*4882a593Smuzhiyun 				child_qdisc->prio_bitmap |= BIT(i);
988*4882a593Smuzhiyun 				if (BIT(i) & old_priomap)
989*4882a593Smuzhiyun 					continue;
990*4882a593Smuzhiyun 				err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
991*4882a593Smuzhiyun 								i, tclass);
992*4882a593Smuzhiyun 				if (err)
993*4882a593Smuzhiyun 					return err;
994*4882a593Smuzhiyun 			}
995*4882a593Smuzhiyun 		}
996*4882a593Smuzhiyun 		if (old_priomap != child_qdisc->prio_bitmap &&
997*4882a593Smuzhiyun 		    child_qdisc->ops && child_qdisc->ops->clean_stats) {
998*4882a593Smuzhiyun 			backlog = child_qdisc->stats_base.backlog;
999*4882a593Smuzhiyun 			child_qdisc->ops->clean_stats(mlxsw_sp_port,
1000*4882a593Smuzhiyun 						      child_qdisc);
1001*4882a593Smuzhiyun 			child_qdisc->stats_base.backlog = backlog;
1002*4882a593Smuzhiyun 		}
1003*4882a593Smuzhiyun 
1004*4882a593Smuzhiyun 		if (handle == qdisc_state->future_handle &&
1005*4882a593Smuzhiyun 		    qdisc_state->future_fifos[tclass]) {
1006*4882a593Smuzhiyun 			err = mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
1007*4882a593Smuzhiyun 						     child_qdisc,
1008*4882a593Smuzhiyun 						     &mlxsw_sp_qdisc_ops_fifo,
1009*4882a593Smuzhiyun 						     NULL);
1010*4882a593Smuzhiyun 			if (err)
1011*4882a593Smuzhiyun 				return err;
1012*4882a593Smuzhiyun 		}
1013*4882a593Smuzhiyun 	}
1014*4882a593Smuzhiyun 	for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
1015*4882a593Smuzhiyun 		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
1016*4882a593Smuzhiyun 		child_qdisc = &qdisc_state->tclass_qdiscs[tclass];
1017*4882a593Smuzhiyun 		child_qdisc->prio_bitmap = 0;
1018*4882a593Smuzhiyun 		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
1019*4882a593Smuzhiyun 		mlxsw_sp_port_ets_set(mlxsw_sp_port,
1020*4882a593Smuzhiyun 				      MLXSW_REG_QEEC_HR_SUBGROUP,
1021*4882a593Smuzhiyun 				      tclass, 0, false, 0);
1022*4882a593Smuzhiyun 	}
1023*4882a593Smuzhiyun 
1024*4882a593Smuzhiyun 	qdisc_state->future_handle = TC_H_UNSPEC;
1025*4882a593Smuzhiyun 	memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
1026*4882a593Smuzhiyun 	return 0;
1027*4882a593Smuzhiyun }
1028*4882a593Smuzhiyun 
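/* PRIO is offloaded as a degenerate ETS: mlxsw_sp_qdisc_prio_replace() below
 * feeds all-zero quanta and weights into __mlxsw_sp_qdisc_ets_replace(), so
 * every band ends up as a strict-priority subgroup. Illustrative example
 * (the port name is arbitrary):
 *
 *  # tc qdisc replace dev swp1 root handle 1: prio bands 3
 *
 * reaches the handler with p->bands == 3; band 0 (highest priority) is mapped
 * to the highest traffic class by MLXSW_SP_PRIO_BAND_TO_TCLASS().
 */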
1029*4882a593Smuzhiyun static int
1030*4882a593Smuzhiyun mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
1031*4882a593Smuzhiyun 			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1032*4882a593Smuzhiyun 			    void *params)
1033*4882a593Smuzhiyun {
1034*4882a593Smuzhiyun 	struct tc_prio_qopt_offload_params *p = params;
1035*4882a593Smuzhiyun 	unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};
1036*4882a593Smuzhiyun 
1037*4882a593Smuzhiyun 	return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands,
1038*4882a593Smuzhiyun 					    zeroes, zeroes, p->priomap);
1039*4882a593Smuzhiyun }
1040*4882a593Smuzhiyun 
1041*4882a593Smuzhiyun static void
1042*4882a593Smuzhiyun __mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1043*4882a593Smuzhiyun 			       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1044*4882a593Smuzhiyun 			       struct gnet_stats_queue *qstats)
1045*4882a593Smuzhiyun {
1046*4882a593Smuzhiyun 	u64 backlog;
1047*4882a593Smuzhiyun 
1048*4882a593Smuzhiyun 	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
1049*4882a593Smuzhiyun 				       mlxsw_sp_qdisc->stats_base.backlog);
1050*4882a593Smuzhiyun 	qstats->backlog -= backlog;
1051*4882a593Smuzhiyun }
1052*4882a593Smuzhiyun 
1053*4882a593Smuzhiyun static void
1054*4882a593Smuzhiyun mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1055*4882a593Smuzhiyun 			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1056*4882a593Smuzhiyun 			      void *params)
1057*4882a593Smuzhiyun {
1058*4882a593Smuzhiyun 	struct tc_prio_qopt_offload_params *p = params;
1059*4882a593Smuzhiyun 
1060*4882a593Smuzhiyun 	__mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
1061*4882a593Smuzhiyun 				       p->qstats);
1062*4882a593Smuzhiyun }
1063*4882a593Smuzhiyun 
1064*4882a593Smuzhiyun static int
1065*4882a593Smuzhiyun mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
1066*4882a593Smuzhiyun 			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1067*4882a593Smuzhiyun 			      struct tc_qopt_offload_stats *stats_ptr)
1068*4882a593Smuzhiyun {
1069*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
1070*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc *tc_qdisc;
1071*4882a593Smuzhiyun 	u64 tx_packets = 0;
1072*4882a593Smuzhiyun 	u64 tx_bytes = 0;
1073*4882a593Smuzhiyun 	u64 backlog = 0;
1074*4882a593Smuzhiyun 	u64 drops = 0;
1075*4882a593Smuzhiyun 	int i;
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1078*4882a593Smuzhiyun 		tc_qdisc = &qdisc_state->tclass_qdiscs[i];
1079*4882a593Smuzhiyun 		mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
1080*4882a593Smuzhiyun 						&tx_bytes, &tx_packets,
1081*4882a593Smuzhiyun 						&drops, &backlog);
1082*4882a593Smuzhiyun 	}
1083*4882a593Smuzhiyun 
1084*4882a593Smuzhiyun 	mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
1085*4882a593Smuzhiyun 				    tx_bytes, tx_packets, drops, backlog,
1086*4882a593Smuzhiyun 				    stats_ptr);
1087*4882a593Smuzhiyun 	return 0;
1088*4882a593Smuzhiyun }
1089*4882a593Smuzhiyun 
1090*4882a593Smuzhiyun static void
1091*4882a593Smuzhiyun mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
1092*4882a593Smuzhiyun 					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
1093*4882a593Smuzhiyun {
1094*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc_stats *stats_base;
1095*4882a593Smuzhiyun 	struct mlxsw_sp_port_xstats *xstats;
1096*4882a593Smuzhiyun 	struct rtnl_link_stats64 *stats;
1097*4882a593Smuzhiyun 	int i;
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun 	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
1100*4882a593Smuzhiyun 	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
1101*4882a593Smuzhiyun 	stats_base = &mlxsw_sp_qdisc->stats_base;
1102*4882a593Smuzhiyun 
1103*4882a593Smuzhiyun 	stats_base->tx_packets = stats->tx_packets;
1104*4882a593Smuzhiyun 	stats_base->tx_bytes = stats->tx_bytes;
1105*4882a593Smuzhiyun 
1106*4882a593Smuzhiyun 	stats_base->drops = 0;
1107*4882a593Smuzhiyun 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1108*4882a593Smuzhiyun 		stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
1109*4882a593Smuzhiyun 		stats_base->drops += xstats->wred_drop[i];
1110*4882a593Smuzhiyun 	}
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun 	mlxsw_sp_qdisc->stats_base.backlog = 0;
1113*4882a593Smuzhiyun }
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
1116*4882a593Smuzhiyun 	.type = MLXSW_SP_QDISC_PRIO,
1117*4882a593Smuzhiyun 	.check_params = mlxsw_sp_qdisc_prio_check_params,
1118*4882a593Smuzhiyun 	.replace = mlxsw_sp_qdisc_prio_replace,
1119*4882a593Smuzhiyun 	.unoffload = mlxsw_sp_qdisc_prio_unoffload,
1120*4882a593Smuzhiyun 	.destroy = mlxsw_sp_qdisc_prio_destroy,
1121*4882a593Smuzhiyun 	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
1122*4882a593Smuzhiyun 	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
1123*4882a593Smuzhiyun };
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun static int
1126*4882a593Smuzhiyun mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
1127*4882a593Smuzhiyun 				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1128*4882a593Smuzhiyun 				void *params)
1129*4882a593Smuzhiyun {
1130*4882a593Smuzhiyun 	struct tc_ets_qopt_offload_replace_params *p = params;
1131*4882a593Smuzhiyun 
1132*4882a593Smuzhiyun 	return __mlxsw_sp_qdisc_ets_check_params(p->bands);
1133*4882a593Smuzhiyun }
1134*4882a593Smuzhiyun 
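/* Unlike PRIO, ETS passes the per-band quanta and weights through, so bands
 * with a non-zero quantum are offloaded as DWRR subgroups (the !!quanta[band]
 * check in __mlxsw_sp_qdisc_ets_replace()) while zero-quantum bands remain
 * strict. Illustrative example, assuming the usual sch_ets syntax:
 *
 *  # tc qdisc replace dev swp1 root handle 1: ets bands 8 strict 4 quanta 1000 2000 3000 4000
 *
 * i.e. four strict bands followed by four bandwidth-sharing bands.
 */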
1135*4882a593Smuzhiyun static int
1136*4882a593Smuzhiyun mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
1137*4882a593Smuzhiyun 			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1138*4882a593Smuzhiyun 			   void *params)
1139*4882a593Smuzhiyun {
1140*4882a593Smuzhiyun 	struct tc_ets_qopt_offload_replace_params *p = params;
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 	return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands,
1143*4882a593Smuzhiyun 					    p->quanta, p->weights, p->priomap);
1144*4882a593Smuzhiyun }
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun static void
1147*4882a593Smuzhiyun mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
1148*4882a593Smuzhiyun 			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1149*4882a593Smuzhiyun 			     void *params)
1150*4882a593Smuzhiyun {
1151*4882a593Smuzhiyun 	struct tc_ets_qopt_offload_replace_params *p = params;
1152*4882a593Smuzhiyun 
1153*4882a593Smuzhiyun 	__mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
1154*4882a593Smuzhiyun 				       p->qstats);
1155*4882a593Smuzhiyun }
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun static int
1158*4882a593Smuzhiyun mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
1159*4882a593Smuzhiyun 			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
1160*4882a593Smuzhiyun {
1161*4882a593Smuzhiyun 	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
1162*4882a593Smuzhiyun }
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
1165*4882a593Smuzhiyun 	.type = MLXSW_SP_QDISC_ETS,
1166*4882a593Smuzhiyun 	.check_params = mlxsw_sp_qdisc_ets_check_params,
1167*4882a593Smuzhiyun 	.replace = mlxsw_sp_qdisc_ets_replace,
1168*4882a593Smuzhiyun 	.unoffload = mlxsw_sp_qdisc_ets_unoffload,
1169*4882a593Smuzhiyun 	.destroy = mlxsw_sp_qdisc_ets_destroy,
1170*4882a593Smuzhiyun 	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
1171*4882a593Smuzhiyun 	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
1172*4882a593Smuzhiyun };
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun /* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting
1175*4882a593Smuzhiyun  * graph is free of cycles). These operations do not change the parent handle
1176*4882a593Smuzhiyun  * though, which means it can be incomplete (if there is more than one class
1177*4882a593Smuzhiyun  * where the Qdisc in question is grafted) or outright wrong (if the Qdisc was
1178*4882a593Smuzhiyun  * linked to a different class and then removed from the original class).
1179*4882a593Smuzhiyun  *
1180*4882a593Smuzhiyun  * E.g. consider this sequence of operations:
1181*4882a593Smuzhiyun  *
1182*4882a593Smuzhiyun  *  # tc qdisc add dev swp1 root handle 1: prio
1183*4882a593Smuzhiyun  *  # tc qdisc add dev swp1 parent 1:3 handle 13: red limit 1000000 avpkt 10000
1184*4882a593Smuzhiyun  *  RED: set bandwidth to 10Mbit
1185*4882a593Smuzhiyun  *  # tc qdisc link dev swp1 handle 13: parent 1:2
1186*4882a593Smuzhiyun  *
1187*4882a593Smuzhiyun  * At this point, both 1:2 and 1:3 have the same RED Qdisc instance as their
1188*4882a593Smuzhiyun  * child. But RED will still only claim that 1:3 is its parent. If it's removed
1189*4882a593Smuzhiyun  * from that band, its only parent will be 1:2, but it will continue to claim
1190*4882a593Smuzhiyun  * that it is in fact 1:3.
1191*4882a593Smuzhiyun  *
1192*4882a593Smuzhiyun  * The notification for child Qdisc replace (e.g. TC_RED_REPLACE) comes before
1193*4882a593Smuzhiyun  * the notification for parent graft (e.g. TC_PRIO_GRAFT). We take the replace
1194*4882a593Smuzhiyun  * notification to offload the child Qdisc, based on its parent handle, and use
1195*4882a593Smuzhiyun  * the graft operation to validate that the class where the child is actually
1196*4882a593Smuzhiyun  * grafted corresponds to the parent handle. If the two don't match, we
1197*4882a593Smuzhiyun  * unoffload the child.
1198*4882a593Smuzhiyun  */
1199*4882a593Smuzhiyun static int
1200*4882a593Smuzhiyun __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
1201*4882a593Smuzhiyun 			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1202*4882a593Smuzhiyun 			   u8 band, u32 child_handle)
1203*4882a593Smuzhiyun {
1204*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
1205*4882a593Smuzhiyun 	int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
1206*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc *old_qdisc;
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun 	if (band < IEEE_8021QAZ_MAX_TCS &&
1209*4882a593Smuzhiyun 	    qdisc_state->tclass_qdiscs[tclass_num].handle == child_handle)
1210*4882a593Smuzhiyun 		return 0;
1211*4882a593Smuzhiyun 
1212*4882a593Smuzhiyun 	if (!child_handle) {
1213*4882a593Smuzhiyun 		/* This is an invisible FIFO replacing the original Qdisc.
1214*4882a593Smuzhiyun 		 * Ignore it--the original Qdisc's destroy will follow.
1215*4882a593Smuzhiyun 		 */
1216*4882a593Smuzhiyun 		return 0;
1217*4882a593Smuzhiyun 	}
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun 	/* See if the grafted qdisc is already offloaded on any tclass. If so,
1220*4882a593Smuzhiyun 	 * unoffload it.
1221*4882a593Smuzhiyun 	 */
1222*4882a593Smuzhiyun 	old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
1223*4882a593Smuzhiyun 						  child_handle);
1224*4882a593Smuzhiyun 	if (old_qdisc)
1225*4882a593Smuzhiyun 		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun 	mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
1228*4882a593Smuzhiyun 			       &qdisc_state->tclass_qdiscs[tclass_num]);
1229*4882a593Smuzhiyun 	return -EOPNOTSUPP;
1230*4882a593Smuzhiyun }
1231*4882a593Smuzhiyun 
1232*4882a593Smuzhiyun static int
1233*4882a593Smuzhiyun mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
1234*4882a593Smuzhiyun 			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1235*4882a593Smuzhiyun 			  struct tc_prio_qopt_offload_graft_params *p)
1236*4882a593Smuzhiyun {
1237*4882a593Smuzhiyun 	return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
1238*4882a593Smuzhiyun 					  p->band, p->child_handle);
1239*4882a593Smuzhiyun }
1240*4882a593Smuzhiyun 
1241*4882a593Smuzhiyun int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
1242*4882a593Smuzhiyun 			   struct tc_prio_qopt_offload *p)
1243*4882a593Smuzhiyun {
1244*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
1247*4882a593Smuzhiyun 	if (!mlxsw_sp_qdisc)
1248*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun 	if (p->command == TC_PRIO_REPLACE)
1251*4882a593Smuzhiyun 		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
1252*4882a593Smuzhiyun 					      mlxsw_sp_qdisc,
1253*4882a593Smuzhiyun 					      &mlxsw_sp_qdisc_ops_prio,
1254*4882a593Smuzhiyun 					      &p->replace_params);
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
1257*4882a593Smuzhiyun 				    MLXSW_SP_QDISC_PRIO))
1258*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun 	switch (p->command) {
1261*4882a593Smuzhiyun 	case TC_PRIO_DESTROY:
1262*4882a593Smuzhiyun 		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
1263*4882a593Smuzhiyun 	case TC_PRIO_STATS:
1264*4882a593Smuzhiyun 		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
1265*4882a593Smuzhiyun 						&p->stats);
1266*4882a593Smuzhiyun 	case TC_PRIO_GRAFT:
1267*4882a593Smuzhiyun 		return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
1268*4882a593Smuzhiyun 						 &p->graft_params);
1269*4882a593Smuzhiyun 	default:
1270*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1271*4882a593Smuzhiyun 	}
1272*4882a593Smuzhiyun }
1273*4882a593Smuzhiyun 
1274*4882a593Smuzhiyun int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
1275*4882a593Smuzhiyun 			  struct tc_ets_qopt_offload *p)
1276*4882a593Smuzhiyun {
1277*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
1280*4882a593Smuzhiyun 	if (!mlxsw_sp_qdisc)
1281*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun 	if (p->command == TC_ETS_REPLACE)
1284*4882a593Smuzhiyun 		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
1285*4882a593Smuzhiyun 					      mlxsw_sp_qdisc,
1286*4882a593Smuzhiyun 					      &mlxsw_sp_qdisc_ops_ets,
1287*4882a593Smuzhiyun 					      &p->replace_params);
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun 	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
1290*4882a593Smuzhiyun 				    MLXSW_SP_QDISC_ETS))
1291*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1292*4882a593Smuzhiyun 
1293*4882a593Smuzhiyun 	switch (p->command) {
1294*4882a593Smuzhiyun 	case TC_ETS_DESTROY:
1295*4882a593Smuzhiyun 		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
1296*4882a593Smuzhiyun 	case TC_ETS_STATS:
1297*4882a593Smuzhiyun 		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
1298*4882a593Smuzhiyun 						&p->stats);
1299*4882a593Smuzhiyun 	case TC_ETS_GRAFT:
1300*4882a593Smuzhiyun 		return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
1301*4882a593Smuzhiyun 						  p->graft_params.band,
1302*4882a593Smuzhiyun 						  p->graft_params.child_handle);
1303*4882a593Smuzhiyun 	default:
1304*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1305*4882a593Smuzhiyun 	}
1306*4882a593Smuzhiyun }
1307*4882a593Smuzhiyun 
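/* Queue events ("qevents") let tc attach a filter block to a point in a
 * qdisc's datapath, e.g. the early_drop qevent of RED. The structures below
 * track one such block and its per-port bindings; a matchall rule installed
 * in the block is offloaded as a SPAN mirror or trap that fires on the
 * corresponding hardware trigger. Illustrative setup (handles, block index
 * and port names are arbitrary):
 *
 *  # tc qdisc replace dev swp1 root handle 1: red limit 1000000 avpkt 10000 qevent early_drop block 10
 *  # tc filter add block 10 matchall skip_sw action mirred egress mirror dev swp2 hw_stats disabled
 */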
1308*4882a593Smuzhiyun struct mlxsw_sp_qevent_block {
1309*4882a593Smuzhiyun 	struct list_head binding_list;
1310*4882a593Smuzhiyun 	struct list_head mall_entry_list;
1311*4882a593Smuzhiyun 	struct mlxsw_sp *mlxsw_sp;
1312*4882a593Smuzhiyun };
1313*4882a593Smuzhiyun 
1314*4882a593Smuzhiyun struct mlxsw_sp_qevent_binding {
1315*4882a593Smuzhiyun 	struct list_head list;
1316*4882a593Smuzhiyun 	struct mlxsw_sp_port *mlxsw_sp_port;
1317*4882a593Smuzhiyun 	u32 handle;
1318*4882a593Smuzhiyun 	int tclass_num;
1319*4882a593Smuzhiyun 	enum mlxsw_sp_span_trigger span_trigger;
1320*4882a593Smuzhiyun };
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun static LIST_HEAD(mlxsw_sp_qevent_block_cb_list);
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
1325*4882a593Smuzhiyun 					  struct mlxsw_sp_mall_entry *mall_entry,
1326*4882a593Smuzhiyun 					  struct mlxsw_sp_qevent_binding *qevent_binding,
1327*4882a593Smuzhiyun 					  const struct mlxsw_sp_span_agent_parms *agent_parms,
1328*4882a593Smuzhiyun 					  int *p_span_id)
1329*4882a593Smuzhiyun {
1330*4882a593Smuzhiyun 	struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
1331*4882a593Smuzhiyun 	struct mlxsw_sp_span_trigger_parms trigger_parms = {};
1332*4882a593Smuzhiyun 	int span_id;
1333*4882a593Smuzhiyun 	int err;
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun 	err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, agent_parms);
1336*4882a593Smuzhiyun 	if (err)
1337*4882a593Smuzhiyun 		return err;
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun 	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, true);
1340*4882a593Smuzhiyun 	if (err)
1341*4882a593Smuzhiyun 		goto err_analyzed_port_get;
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun 	trigger_parms.span_id = span_id;
1344*4882a593Smuzhiyun 	err = mlxsw_sp_span_agent_bind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
1345*4882a593Smuzhiyun 				       &trigger_parms);
1346*4882a593Smuzhiyun 	if (err)
1347*4882a593Smuzhiyun 		goto err_agent_bind;
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun 	err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, qevent_binding->span_trigger,
1350*4882a593Smuzhiyun 					   qevent_binding->tclass_num);
1351*4882a593Smuzhiyun 	if (err)
1352*4882a593Smuzhiyun 		goto err_trigger_enable;
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun 	*p_span_id = span_id;
1355*4882a593Smuzhiyun 	return 0;
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun err_trigger_enable:
1358*4882a593Smuzhiyun 	mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
1359*4882a593Smuzhiyun 				   &trigger_parms);
1360*4882a593Smuzhiyun err_agent_bind:
1361*4882a593Smuzhiyun 	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
1362*4882a593Smuzhiyun err_analyzed_port_get:
1363*4882a593Smuzhiyun 	mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
1364*4882a593Smuzhiyun 	return err;
1365*4882a593Smuzhiyun }
1366*4882a593Smuzhiyun 
1367*4882a593Smuzhiyun static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp,
1368*4882a593Smuzhiyun 					     struct mlxsw_sp_qevent_binding *qevent_binding,
1369*4882a593Smuzhiyun 					     int span_id)
1370*4882a593Smuzhiyun {
1371*4882a593Smuzhiyun 	struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
1372*4882a593Smuzhiyun 	struct mlxsw_sp_span_trigger_parms trigger_parms = {
1373*4882a593Smuzhiyun 		.span_id = span_id,
1374*4882a593Smuzhiyun 	};
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun 	mlxsw_sp_span_trigger_disable(mlxsw_sp_port, qevent_binding->span_trigger,
1377*4882a593Smuzhiyun 				      qevent_binding->tclass_num);
1378*4882a593Smuzhiyun 	mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
1379*4882a593Smuzhiyun 				   &trigger_parms);
1380*4882a593Smuzhiyun 	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
1381*4882a593Smuzhiyun 	mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
1382*4882a593Smuzhiyun }
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp,
1385*4882a593Smuzhiyun 					    struct mlxsw_sp_mall_entry *mall_entry,
1386*4882a593Smuzhiyun 					    struct mlxsw_sp_qevent_binding *qevent_binding)
1387*4882a593Smuzhiyun {
1388*4882a593Smuzhiyun 	struct mlxsw_sp_span_agent_parms agent_parms = {
1389*4882a593Smuzhiyun 		.to_dev = mall_entry->mirror.to_dev,
1390*4882a593Smuzhiyun 	};
1391*4882a593Smuzhiyun 
1392*4882a593Smuzhiyun 	return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
1393*4882a593Smuzhiyun 					      &agent_parms, &mall_entry->mirror.span_id);
1394*4882a593Smuzhiyun }
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp,
1397*4882a593Smuzhiyun 					       struct mlxsw_sp_mall_entry *mall_entry,
1398*4882a593Smuzhiyun 					       struct mlxsw_sp_qevent_binding *qevent_binding)
1399*4882a593Smuzhiyun {
1400*4882a593Smuzhiyun 	mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->mirror.span_id);
1401*4882a593Smuzhiyun }
1402*4882a593Smuzhiyun 
1403*4882a593Smuzhiyun static int mlxsw_sp_qevent_trap_configure(struct mlxsw_sp *mlxsw_sp,
1404*4882a593Smuzhiyun 					  struct mlxsw_sp_mall_entry *mall_entry,
1405*4882a593Smuzhiyun 					  struct mlxsw_sp_qevent_binding *qevent_binding)
1406*4882a593Smuzhiyun {
1407*4882a593Smuzhiyun 	struct mlxsw_sp_span_agent_parms agent_parms = {};
1408*4882a593Smuzhiyun 	int err;
1409*4882a593Smuzhiyun 
1410*4882a593Smuzhiyun 	err = mlxsw_sp_trap_group_policer_hw_id_get(mlxsw_sp,
1411*4882a593Smuzhiyun 						    DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS,
1412*4882a593Smuzhiyun 						    &agent_parms.policer_enable,
1413*4882a593Smuzhiyun 						    &agent_parms.policer_id);
1414*4882a593Smuzhiyun 	if (err)
1415*4882a593Smuzhiyun 		return err;
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun 	return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
1418*4882a593Smuzhiyun 					      &agent_parms, &mall_entry->trap.span_id);
1419*4882a593Smuzhiyun }
1420*4882a593Smuzhiyun 
1421*4882a593Smuzhiyun static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp,
1422*4882a593Smuzhiyun 					     struct mlxsw_sp_mall_entry *mall_entry,
1423*4882a593Smuzhiyun 					     struct mlxsw_sp_qevent_binding *qevent_binding)
1424*4882a593Smuzhiyun {
1425*4882a593Smuzhiyun 	mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id);
1426*4882a593Smuzhiyun }
1427*4882a593Smuzhiyun 
1428*4882a593Smuzhiyun static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
1429*4882a593Smuzhiyun 					   struct mlxsw_sp_mall_entry *mall_entry,
1430*4882a593Smuzhiyun 					   struct mlxsw_sp_qevent_binding *qevent_binding)
1431*4882a593Smuzhiyun {
1432*4882a593Smuzhiyun 	switch (mall_entry->type) {
1433*4882a593Smuzhiyun 	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
1434*4882a593Smuzhiyun 		return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding);
1435*4882a593Smuzhiyun 	case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
1436*4882a593Smuzhiyun 		return mlxsw_sp_qevent_trap_configure(mlxsw_sp, mall_entry, qevent_binding);
1437*4882a593Smuzhiyun 	default:
1438*4882a593Smuzhiyun 		/* This should have been validated away. */
1439*4882a593Smuzhiyun 		WARN_ON(1);
1440*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1441*4882a593Smuzhiyun 	}
1442*4882a593Smuzhiyun }
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp,
1445*4882a593Smuzhiyun 					      struct mlxsw_sp_mall_entry *mall_entry,
1446*4882a593Smuzhiyun 					      struct mlxsw_sp_qevent_binding *qevent_binding)
1447*4882a593Smuzhiyun {
1448*4882a593Smuzhiyun 	switch (mall_entry->type) {
1449*4882a593Smuzhiyun 	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
1450*4882a593Smuzhiyun 		return mlxsw_sp_qevent_mirror_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
1451*4882a593Smuzhiyun 	case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
1452*4882a593Smuzhiyun 		return mlxsw_sp_qevent_trap_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
1453*4882a593Smuzhiyun 	default:
1454*4882a593Smuzhiyun 		WARN_ON(1);
1455*4882a593Smuzhiyun 		return;
1456*4882a593Smuzhiyun 	}
1457*4882a593Smuzhiyun }
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun static int mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
1460*4882a593Smuzhiyun 					     struct mlxsw_sp_qevent_binding *qevent_binding)
1461*4882a593Smuzhiyun {
1462*4882a593Smuzhiyun 	struct mlxsw_sp_mall_entry *mall_entry;
1463*4882a593Smuzhiyun 	int err;
1464*4882a593Smuzhiyun 
1465*4882a593Smuzhiyun 	list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) {
1466*4882a593Smuzhiyun 		err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry,
1467*4882a593Smuzhiyun 						      qevent_binding);
1468*4882a593Smuzhiyun 		if (err)
1469*4882a593Smuzhiyun 			goto err_entry_configure;
1470*4882a593Smuzhiyun 	}
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun 	return 0;
1473*4882a593Smuzhiyun 
1474*4882a593Smuzhiyun err_entry_configure:
1475*4882a593Smuzhiyun 	list_for_each_entry_continue_reverse(mall_entry, &qevent_block->mall_entry_list, list)
1476*4882a593Smuzhiyun 		mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
1477*4882a593Smuzhiyun 						  qevent_binding);
1478*4882a593Smuzhiyun 	return err;
1479*4882a593Smuzhiyun }
1480*4882a593Smuzhiyun 
1481*4882a593Smuzhiyun static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qevent_block,
1482*4882a593Smuzhiyun 						struct mlxsw_sp_qevent_binding *qevent_binding)
1483*4882a593Smuzhiyun {
1484*4882a593Smuzhiyun 	struct mlxsw_sp_mall_entry *mall_entry;
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun 	list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list)
1487*4882a593Smuzhiyun 		mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
1488*4882a593Smuzhiyun 						  qevent_binding);
1489*4882a593Smuzhiyun }
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun static int mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block)
1492*4882a593Smuzhiyun {
1493*4882a593Smuzhiyun 	struct mlxsw_sp_qevent_binding *qevent_binding;
1494*4882a593Smuzhiyun 	int err;
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun 	list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) {
1497*4882a593Smuzhiyun 		err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
1498*4882a593Smuzhiyun 		if (err)
1499*4882a593Smuzhiyun 			goto err_binding_configure;
1500*4882a593Smuzhiyun 	}
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun 	return 0;
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun err_binding_configure:
1505*4882a593Smuzhiyun 	list_for_each_entry_continue_reverse(qevent_binding, &qevent_block->binding_list, list)
1506*4882a593Smuzhiyun 		mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
1507*4882a593Smuzhiyun 	return err;
1508*4882a593Smuzhiyun }
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun static void mlxsw_sp_qevent_block_deconfigure(struct mlxsw_sp_qevent_block *qevent_block)
1511*4882a593Smuzhiyun {
1512*4882a593Smuzhiyun 	struct mlxsw_sp_qevent_binding *qevent_binding;
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	list_for_each_entry(qevent_binding, &qevent_block->binding_list, list)
1515*4882a593Smuzhiyun 		mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
1516*4882a593Smuzhiyun }
1517*4882a593Smuzhiyun 
1518*4882a593Smuzhiyun static struct mlxsw_sp_mall_entry *
1519*4882a593Smuzhiyun mlxsw_sp_qevent_mall_entry_find(struct mlxsw_sp_qevent_block *block, unsigned long cookie)
1520*4882a593Smuzhiyun {
1521*4882a593Smuzhiyun 	struct mlxsw_sp_mall_entry *mall_entry;
1522*4882a593Smuzhiyun 
1523*4882a593Smuzhiyun 	list_for_each_entry(mall_entry, &block->mall_entry_list, list)
1524*4882a593Smuzhiyun 		if (mall_entry->cookie == cookie)
1525*4882a593Smuzhiyun 			return mall_entry;
1526*4882a593Smuzhiyun 
1527*4882a593Smuzhiyun 	return NULL;
1528*4882a593Smuzhiyun }
1529*4882a593Smuzhiyun 
1530*4882a593Smuzhiyun static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp,
1531*4882a593Smuzhiyun 					struct mlxsw_sp_qevent_block *qevent_block,
1532*4882a593Smuzhiyun 					struct tc_cls_matchall_offload *f)
1533*4882a593Smuzhiyun {
1534*4882a593Smuzhiyun 	struct mlxsw_sp_mall_entry *mall_entry;
1535*4882a593Smuzhiyun 	struct flow_action_entry *act;
1536*4882a593Smuzhiyun 	int err;
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun 	/* It should not currently be possible to replace a matchall rule. So
1539*4882a593Smuzhiyun 	 * this must be a new rule.
1540*4882a593Smuzhiyun 	 */
1541*4882a593Smuzhiyun 	if (!list_empty(&qevent_block->mall_entry_list)) {
1542*4882a593Smuzhiyun 		NL_SET_ERR_MSG(f->common.extack, "At most one filter supported");
1543*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1544*4882a593Smuzhiyun 	}
1545*4882a593Smuzhiyun 	if (f->rule->action.num_entries != 1) {
1546*4882a593Smuzhiyun 		NL_SET_ERR_MSG(f->common.extack, "Only singular actions supported");
1547*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1548*4882a593Smuzhiyun 	}
1549*4882a593Smuzhiyun 	if (f->common.chain_index) {
1550*4882a593Smuzhiyun 		NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
1551*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1552*4882a593Smuzhiyun 	}
1553*4882a593Smuzhiyun 	if (f->common.protocol != htons(ETH_P_ALL)) {
1554*4882a593Smuzhiyun 		NL_SET_ERR_MSG(f->common.extack, "Protocol matching not supported");
1555*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1556*4882a593Smuzhiyun 	}
1557*4882a593Smuzhiyun 
1558*4882a593Smuzhiyun 	act = &f->rule->action.entries[0];
1559*4882a593Smuzhiyun 	if (!(act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED)) {
1560*4882a593Smuzhiyun 		NL_SET_ERR_MSG(f->common.extack, "HW counters not supported on qevents");
1561*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1562*4882a593Smuzhiyun 	}
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun 	mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
1565*4882a593Smuzhiyun 	if (!mall_entry)
1566*4882a593Smuzhiyun 		return -ENOMEM;
1567*4882a593Smuzhiyun 	mall_entry->cookie = f->cookie;
1568*4882a593Smuzhiyun 
1569*4882a593Smuzhiyun 	if (act->id == FLOW_ACTION_MIRRED) {
1570*4882a593Smuzhiyun 		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
1571*4882a593Smuzhiyun 		mall_entry->mirror.to_dev = act->dev;
1572*4882a593Smuzhiyun 	} else if (act->id == FLOW_ACTION_TRAP) {
1573*4882a593Smuzhiyun 		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_TRAP;
1574*4882a593Smuzhiyun 	} else {
1575*4882a593Smuzhiyun 		NL_SET_ERR_MSG(f->common.extack, "Unsupported action");
1576*4882a593Smuzhiyun 		err = -EOPNOTSUPP;
1577*4882a593Smuzhiyun 		goto err_unsupported_action;
1578*4882a593Smuzhiyun 	}
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun 	list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list);
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun 	err = mlxsw_sp_qevent_block_configure(qevent_block);
1583*4882a593Smuzhiyun 	if (err)
1584*4882a593Smuzhiyun 		goto err_block_configure;
1585*4882a593Smuzhiyun 
1586*4882a593Smuzhiyun 	return 0;
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun err_block_configure:
1589*4882a593Smuzhiyun 	list_del(&mall_entry->list);
1590*4882a593Smuzhiyun err_unsupported_action:
1591*4882a593Smuzhiyun 	kfree(mall_entry);
1592*4882a593Smuzhiyun 	return err;
1593*4882a593Smuzhiyun }
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun static void mlxsw_sp_qevent_mall_destroy(struct mlxsw_sp_qevent_block *qevent_block,
1596*4882a593Smuzhiyun 					 struct tc_cls_matchall_offload *f)
1597*4882a593Smuzhiyun {
1598*4882a593Smuzhiyun 	struct mlxsw_sp_mall_entry *mall_entry;
1599*4882a593Smuzhiyun 
1600*4882a593Smuzhiyun 	mall_entry = mlxsw_sp_qevent_mall_entry_find(qevent_block, f->cookie);
1601*4882a593Smuzhiyun 	if (!mall_entry)
1602*4882a593Smuzhiyun 		return;
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 	mlxsw_sp_qevent_block_deconfigure(qevent_block);
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun 	list_del(&mall_entry->list);
1607*4882a593Smuzhiyun 	kfree(mall_entry);
1608*4882a593Smuzhiyun }
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun static int mlxsw_sp_qevent_block_mall_cb(struct mlxsw_sp_qevent_block *qevent_block,
1611*4882a593Smuzhiyun 					 struct tc_cls_matchall_offload *f)
1612*4882a593Smuzhiyun {
1613*4882a593Smuzhiyun 	struct mlxsw_sp *mlxsw_sp = qevent_block->mlxsw_sp;
1614*4882a593Smuzhiyun 
1615*4882a593Smuzhiyun 	switch (f->command) {
1616*4882a593Smuzhiyun 	case TC_CLSMATCHALL_REPLACE:
1617*4882a593Smuzhiyun 		return mlxsw_sp_qevent_mall_replace(mlxsw_sp, qevent_block, f);
1618*4882a593Smuzhiyun 	case TC_CLSMATCHALL_DESTROY:
1619*4882a593Smuzhiyun 		mlxsw_sp_qevent_mall_destroy(qevent_block, f);
1620*4882a593Smuzhiyun 		return 0;
1621*4882a593Smuzhiyun 	default:
1622*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1623*4882a593Smuzhiyun 	}
1624*4882a593Smuzhiyun }
1625*4882a593Smuzhiyun 
1626*4882a593Smuzhiyun static int mlxsw_sp_qevent_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
1627*4882a593Smuzhiyun {
1628*4882a593Smuzhiyun 	struct mlxsw_sp_qevent_block *qevent_block = cb_priv;
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun 	switch (type) {
1631*4882a593Smuzhiyun 	case TC_SETUP_CLSMATCHALL:
1632*4882a593Smuzhiyun 		return mlxsw_sp_qevent_block_mall_cb(qevent_block, type_data);
1633*4882a593Smuzhiyun 	default:
1634*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1635*4882a593Smuzhiyun 	}
1636*4882a593Smuzhiyun }
1637*4882a593Smuzhiyun 
1638*4882a593Smuzhiyun static struct mlxsw_sp_qevent_block *mlxsw_sp_qevent_block_create(struct mlxsw_sp *mlxsw_sp,
1639*4882a593Smuzhiyun 								  struct net *net)
1640*4882a593Smuzhiyun {
1641*4882a593Smuzhiyun 	struct mlxsw_sp_qevent_block *qevent_block;
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 	qevent_block = kzalloc(sizeof(*qevent_block), GFP_KERNEL);
1644*4882a593Smuzhiyun 	if (!qevent_block)
1645*4882a593Smuzhiyun 		return NULL;
1646*4882a593Smuzhiyun 
1647*4882a593Smuzhiyun 	INIT_LIST_HEAD(&qevent_block->binding_list);
1648*4882a593Smuzhiyun 	INIT_LIST_HEAD(&qevent_block->mall_entry_list);
1649*4882a593Smuzhiyun 	qevent_block->mlxsw_sp = mlxsw_sp;
1650*4882a593Smuzhiyun 	return qevent_block;
1651*4882a593Smuzhiyun }
1652*4882a593Smuzhiyun 
1653*4882a593Smuzhiyun static void
1654*4882a593Smuzhiyun mlxsw_sp_qevent_block_destroy(struct mlxsw_sp_qevent_block *qevent_block)
1655*4882a593Smuzhiyun {
1656*4882a593Smuzhiyun 	WARN_ON(!list_empty(&qevent_block->binding_list));
1657*4882a593Smuzhiyun 	WARN_ON(!list_empty(&qevent_block->mall_entry_list));
1658*4882a593Smuzhiyun 	kfree(qevent_block);
1659*4882a593Smuzhiyun }
1660*4882a593Smuzhiyun 
1661*4882a593Smuzhiyun static void mlxsw_sp_qevent_block_release(void *cb_priv)
1662*4882a593Smuzhiyun {
1663*4882a593Smuzhiyun 	struct mlxsw_sp_qevent_block *qevent_block = cb_priv;
1664*4882a593Smuzhiyun 
1665*4882a593Smuzhiyun 	mlxsw_sp_qevent_block_destroy(qevent_block);
1666*4882a593Smuzhiyun }
1667*4882a593Smuzhiyun 
1668*4882a593Smuzhiyun static struct mlxsw_sp_qevent_binding *
1669*4882a593Smuzhiyun mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num,
1670*4882a593Smuzhiyun 			       enum mlxsw_sp_span_trigger span_trigger)
1671*4882a593Smuzhiyun {
1672*4882a593Smuzhiyun 	struct mlxsw_sp_qevent_binding *binding;
1673*4882a593Smuzhiyun 
1674*4882a593Smuzhiyun 	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
1675*4882a593Smuzhiyun 	if (!binding)
1676*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
1677*4882a593Smuzhiyun 
1678*4882a593Smuzhiyun 	binding->mlxsw_sp_port = mlxsw_sp_port;
1679*4882a593Smuzhiyun 	binding->handle = handle;
1680*4882a593Smuzhiyun 	binding->tclass_num = tclass_num;
1681*4882a593Smuzhiyun 	binding->span_trigger = span_trigger;
1682*4882a593Smuzhiyun 	return binding;
1683*4882a593Smuzhiyun }
1684*4882a593Smuzhiyun 
1685*4882a593Smuzhiyun static void
1686*4882a593Smuzhiyun mlxsw_sp_qevent_binding_destroy(struct mlxsw_sp_qevent_binding *binding)
1687*4882a593Smuzhiyun {
1688*4882a593Smuzhiyun 	kfree(binding);
1689*4882a593Smuzhiyun }
1690*4882a593Smuzhiyun 
1691*4882a593Smuzhiyun static struct mlxsw_sp_qevent_binding *
1692*4882a593Smuzhiyun mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block,
1693*4882a593Smuzhiyun 			       struct mlxsw_sp_port *mlxsw_sp_port,
1694*4882a593Smuzhiyun 			       u32 handle,
1695*4882a593Smuzhiyun 			       enum mlxsw_sp_span_trigger span_trigger)
1696*4882a593Smuzhiyun {
1697*4882a593Smuzhiyun 	struct mlxsw_sp_qevent_binding *qevent_binding;
1698*4882a593Smuzhiyun 
1699*4882a593Smuzhiyun 	list_for_each_entry(qevent_binding, &block->binding_list, list)
1700*4882a593Smuzhiyun 		if (qevent_binding->mlxsw_sp_port == mlxsw_sp_port &&
1701*4882a593Smuzhiyun 		    qevent_binding->handle == handle &&
1702*4882a593Smuzhiyun 		    qevent_binding->span_trigger == span_trigger)
1703*4882a593Smuzhiyun 			return qevent_binding;
1704*4882a593Smuzhiyun 	return NULL;
1705*4882a593Smuzhiyun }
1706*4882a593Smuzhiyun 
1707*4882a593Smuzhiyun static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
1708*4882a593Smuzhiyun 					       struct flow_block_offload *f,
1709*4882a593Smuzhiyun 					       enum mlxsw_sp_span_trigger span_trigger)
1710*4882a593Smuzhiyun {
1711*4882a593Smuzhiyun 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1712*4882a593Smuzhiyun 	struct mlxsw_sp_qevent_binding *qevent_binding;
1713*4882a593Smuzhiyun 	struct mlxsw_sp_qevent_block *qevent_block;
1714*4882a593Smuzhiyun 	struct flow_block_cb *block_cb;
1715*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc *qdisc;
1716*4882a593Smuzhiyun 	bool register_block = false;
1717*4882a593Smuzhiyun 	int err;
1718*4882a593Smuzhiyun 
1719*4882a593Smuzhiyun 	block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
1720*4882a593Smuzhiyun 	if (!block_cb) {
1721*4882a593Smuzhiyun 		qevent_block = mlxsw_sp_qevent_block_create(mlxsw_sp, f->net);
1722*4882a593Smuzhiyun 		if (!qevent_block)
1723*4882a593Smuzhiyun 			return -ENOMEM;
1724*4882a593Smuzhiyun 		block_cb = flow_block_cb_alloc(mlxsw_sp_qevent_block_cb, mlxsw_sp, qevent_block,
1725*4882a593Smuzhiyun 					       mlxsw_sp_qevent_block_release);
1726*4882a593Smuzhiyun 		if (IS_ERR(block_cb)) {
1727*4882a593Smuzhiyun 			mlxsw_sp_qevent_block_destroy(qevent_block);
1728*4882a593Smuzhiyun 			return PTR_ERR(block_cb);
1729*4882a593Smuzhiyun 		}
1730*4882a593Smuzhiyun 		register_block = true;
1731*4882a593Smuzhiyun 	} else {
1732*4882a593Smuzhiyun 		qevent_block = flow_block_cb_priv(block_cb);
1733*4882a593Smuzhiyun 	}
1734*4882a593Smuzhiyun 	flow_block_cb_incref(block_cb);
1735*4882a593Smuzhiyun 
1736*4882a593Smuzhiyun 	qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port, f->sch->handle);
1737*4882a593Smuzhiyun 	if (!qdisc) {
1738*4882a593Smuzhiyun 		NL_SET_ERR_MSG(f->extack, "Qdisc not offloaded");
1739*4882a593Smuzhiyun 		err = -ENOENT;
1740*4882a593Smuzhiyun 		goto err_find_qdisc;
1741*4882a593Smuzhiyun 	}
1742*4882a593Smuzhiyun 
1743*4882a593Smuzhiyun 	if (WARN_ON(mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
1744*4882a593Smuzhiyun 						   span_trigger))) {
1745*4882a593Smuzhiyun 		err = -EEXIST;
1746*4882a593Smuzhiyun 		goto err_binding_exists;
1747*4882a593Smuzhiyun 	}
1748*4882a593Smuzhiyun 
1749*4882a593Smuzhiyun 	qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port, f->sch->handle,
1750*4882a593Smuzhiyun 							qdisc->tclass_num, span_trigger);
1751*4882a593Smuzhiyun 	if (IS_ERR(qevent_binding)) {
1752*4882a593Smuzhiyun 		err = PTR_ERR(qevent_binding);
1753*4882a593Smuzhiyun 		goto err_binding_create;
1754*4882a593Smuzhiyun 	}
1755*4882a593Smuzhiyun 
1756*4882a593Smuzhiyun 	err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
1757*4882a593Smuzhiyun 	if (err)
1758*4882a593Smuzhiyun 		goto err_binding_configure;
1759*4882a593Smuzhiyun 
1760*4882a593Smuzhiyun 	list_add(&qevent_binding->list, &qevent_block->binding_list);
1761*4882a593Smuzhiyun 
1762*4882a593Smuzhiyun 	if (register_block) {
1763*4882a593Smuzhiyun 		flow_block_cb_add(block_cb, f);
1764*4882a593Smuzhiyun 		list_add_tail(&block_cb->driver_list, &mlxsw_sp_qevent_block_cb_list);
1765*4882a593Smuzhiyun 	}
1766*4882a593Smuzhiyun 
1767*4882a593Smuzhiyun 	return 0;
1768*4882a593Smuzhiyun 
1769*4882a593Smuzhiyun err_binding_configure:
1770*4882a593Smuzhiyun 	mlxsw_sp_qevent_binding_destroy(qevent_binding);
1771*4882a593Smuzhiyun err_binding_create:
1772*4882a593Smuzhiyun err_binding_exists:
1773*4882a593Smuzhiyun err_find_qdisc:
1774*4882a593Smuzhiyun 	if (!flow_block_cb_decref(block_cb))
1775*4882a593Smuzhiyun 		flow_block_cb_free(block_cb);
1776*4882a593Smuzhiyun 	return err;
1777*4882a593Smuzhiyun }
1778*4882a593Smuzhiyun 
1779*4882a593Smuzhiyun static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
1780*4882a593Smuzhiyun 						  struct flow_block_offload *f,
1781*4882a593Smuzhiyun 						  enum mlxsw_sp_span_trigger span_trigger)
1782*4882a593Smuzhiyun {
1783*4882a593Smuzhiyun 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1784*4882a593Smuzhiyun 	struct mlxsw_sp_qevent_binding *qevent_binding;
1785*4882a593Smuzhiyun 	struct mlxsw_sp_qevent_block *qevent_block;
1786*4882a593Smuzhiyun 	struct flow_block_cb *block_cb;
1787*4882a593Smuzhiyun 
1788*4882a593Smuzhiyun 	block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
1789*4882a593Smuzhiyun 	if (!block_cb)
1790*4882a593Smuzhiyun 		return;
1791*4882a593Smuzhiyun 	qevent_block = flow_block_cb_priv(block_cb);
1792*4882a593Smuzhiyun 
1793*4882a593Smuzhiyun 	qevent_binding = mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
1794*4882a593Smuzhiyun 							span_trigger);
1795*4882a593Smuzhiyun 	if (!qevent_binding)
1796*4882a593Smuzhiyun 		return;
1797*4882a593Smuzhiyun 
1798*4882a593Smuzhiyun 	list_del(&qevent_binding->list);
1799*4882a593Smuzhiyun 	mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
1800*4882a593Smuzhiyun 	mlxsw_sp_qevent_binding_destroy(qevent_binding);
1801*4882a593Smuzhiyun 
1802*4882a593Smuzhiyun 	if (!flow_block_cb_decref(block_cb)) {
1803*4882a593Smuzhiyun 		flow_block_cb_remove(block_cb, f);
1804*4882a593Smuzhiyun 		list_del(&block_cb->driver_list);
1805*4882a593Smuzhiyun 	}
1806*4882a593Smuzhiyun }
1807*4882a593Smuzhiyun 
1808*4882a593Smuzhiyun static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
1809*4882a593Smuzhiyun 					  struct flow_block_offload *f,
1810*4882a593Smuzhiyun 					  enum mlxsw_sp_span_trigger span_trigger)
1811*4882a593Smuzhiyun {
1812*4882a593Smuzhiyun 	f->driver_block_list = &mlxsw_sp_qevent_block_cb_list;
1813*4882a593Smuzhiyun 
1814*4882a593Smuzhiyun 	switch (f->command) {
1815*4882a593Smuzhiyun 	case FLOW_BLOCK_BIND:
1816*4882a593Smuzhiyun 		return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f, span_trigger);
1817*4882a593Smuzhiyun 	case FLOW_BLOCK_UNBIND:
1818*4882a593Smuzhiyun 		mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger);
1819*4882a593Smuzhiyun 		return 0;
1820*4882a593Smuzhiyun 	default:
1821*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1822*4882a593Smuzhiyun 	}
1823*4882a593Smuzhiyun }
1824*4882a593Smuzhiyun 
1825*4882a593Smuzhiyun int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
1826*4882a593Smuzhiyun 					      struct flow_block_offload *f)
1827*4882a593Smuzhiyun {
1828*4882a593Smuzhiyun 	return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f, MLXSW_SP_SPAN_TRIGGER_EARLY_DROP);
1829*4882a593Smuzhiyun }
1830*4882a593Smuzhiyun 
1831*4882a593Smuzhiyun int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
1832*4882a593Smuzhiyun {
1833*4882a593Smuzhiyun 	struct mlxsw_sp_qdisc_state *qdisc_state;
1834*4882a593Smuzhiyun 	int i;
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun 	qdisc_state = kzalloc(sizeof(*qdisc_state), GFP_KERNEL);
1837*4882a593Smuzhiyun 	if (!qdisc_state)
1838*4882a593Smuzhiyun 		return -ENOMEM;
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun 	qdisc_state->root_qdisc.prio_bitmap = 0xff;
1841*4882a593Smuzhiyun 	qdisc_state->root_qdisc.tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
1842*4882a593Smuzhiyun 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1843*4882a593Smuzhiyun 		qdisc_state->tclass_qdiscs[i].tclass_num = i;
1844*4882a593Smuzhiyun 
1845*4882a593Smuzhiyun 	mlxsw_sp_port->qdisc = qdisc_state;
1846*4882a593Smuzhiyun 	return 0;
1847*4882a593Smuzhiyun }
1848*4882a593Smuzhiyun 
1849*4882a593Smuzhiyun void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
1850*4882a593Smuzhiyun {
1851*4882a593Smuzhiyun 	kfree(mlxsw_sp_port->qdisc);
1852*4882a593Smuzhiyun }
1853