// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 */
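
/*
 * Illustrative userspace usage (a sketch only; "eth0" and the queue
 * layout below are assumptions, not taken from this file):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 3 \
 *           map 2 2 1 0 1 2 2 2 2 2 2 2 2 2 2 2 \
 *           queues 4@0 4@4 4@8 hw 0
 *
 * This creates three traffic classes, maps the sixteen skb priorities
 * onto them, and gives each class a contiguous count@offset block of
 * tx queues.  With "hw 1" the queue mapping is handed to the driver
 * via ndo_setup_tc() instead of being programmed in software.
 */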

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>

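/*
 * Per-qdisc private state.  @qdiscs holds the pre-allocated per-queue
 * child qdiscs until ->attach() grafts them and frees the array.  @mode
 * and @shaper mirror TCA_MQPRIO_MODE/TCA_MQPRIO_SHAPER, @hw_offload is
 * the offload level actually reported back by the driver, @flags tracks
 * which optional TC_MQPRIO_F_* attributes were supplied, and the rate
 * arrays carry per-traffic-class limits for the bw_rate shaper.
 */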
struct mqprio_sched {
	struct Qdisc		**qdiscs;
	u16 mode;
	u16 shaper;
	int hw_offload;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

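/*
 * Tear-down path: release any child qdiscs that were never attached,
 * then either ask the driver to remove its hardware configuration (a
 * zeroed tc_mqprio_qopt_offload acts as a delete request) or clear the
 * software traffic-class mapping on the device.
 */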
static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_put(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
		struct tc_mqprio_qopt_offload mqprio = { { 0 } };

		switch (priv->mode) {
		case TC_MQPRIO_MODE_DCB:
		case TC_MQPRIO_MODE_CHANNEL:
			dev->netdev_ops->ndo_setup_tc(dev,
						      TC_SETUP_QDISC_MQPRIO,
						      &mqprio);
			break;
		default:
			return;
		}
	} else {
		netdev_set_num_tc(dev, 0);
	}
}

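/*
 * Validate the user-supplied tc_mqprio_qopt.  In the software path the
 * offset/count pairs must carve real_num_tx_queues into non-overlapping,
 * ascending ranges.  For example (values illustrative only), with eight
 * real tx queues "queues 4@0 4@4" passes, while "queues 4@0 4@2" is
 * rejected because tc 0 ends at queue 4 but tc 1 starts at queue 2.
 */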
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int i, j;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc)
			return -EINVAL;
	}

	/* Limit qopt->hw to maximum supported offload value.  Drivers have
	 * the option of overriding this later if they don't support a
	 * given offload type.
	 */
	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;

	/* If hardware offload is requested we will leave it to the device
	 * to either populate the queue counts itself or to validate the
	 * provided queue counts.  If ndo_setup_tc is not present then
	 * hardware doesn't support offload and we should return an error.
	 */
	if (qopt->hw)
		return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL;

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is within the tx queue range; a
		 * count equal to real_num_tx_queues indicates the last
		 * queue is in use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues)
			return -EINVAL;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j])
				return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
	[TCA_MQPRIO_MODE]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_SHAPER]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_MIN_RATE64]	= { .type = NLA_NESTED },
	[TCA_MQPRIO_MAX_RATE64]	= { .type = NLA_NESTED },
};

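/*
 * TCA_OPTIONS for mqprio is a legacy struct tc_mqprio_qopt optionally
 * followed by netlink attributes, so it cannot be parsed with the usual
 * nested helpers: skip over the aligned struct first, then parse
 * whatever remains against the policy.
 */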
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

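/*
 * Set up the root qdisc: validate the request, pre-allocate one child
 * qdisc per tx queue (so that the later ->attach() cannot fail), and
 * then either hand the queue mapping to the driver (hw offload) or
 * program it into the netdev with netdev_set_num_tc() and friends.
 */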
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;
	struct nlattr *tb[TCA_MQPRIO_MAX + 1];
	struct nlattr *attr;
	int rem;
	int len;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* make certain we can allocate enough classids to handle queues */
	if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)
		return -ENOMEM;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
	if (len > 0) {
		err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
				 sizeof(*qopt));
		if (err < 0)
			return err;

		if (!qopt->hw)
			return -EINVAL;

		if (tb[TCA_MQPRIO_MODE]) {
			priv->flags |= TC_MQPRIO_F_MODE;
			priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
		}

		if (tb[TCA_MQPRIO_SHAPER]) {
			priv->flags |= TC_MQPRIO_F_SHAPER;
			priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
		}

		if (tb[TCA_MQPRIO_MIN_RATE64]) {
			if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
				return -EINVAL;
			i = 0;
			nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
					    rem) {
				if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64)
					return -EINVAL;
				if (i >= qopt->num_tc)
					break;
				priv->min_rate[i] = *(u64 *)nla_data(attr);
				i++;
			}
			priv->flags |= TC_MQPRIO_F_MIN_RATE;
		}

		if (tb[TCA_MQPRIO_MAX_RATE64]) {
			if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
				return -EINVAL;
			i = 0;
			nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
					    rem) {
				if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64)
					return -EINVAL;
				if (i >= qopt->num_tc)
					break;
				priv->max_rate[i] = *(u64 *)nla_data(attr);
				i++;
			}
			priv->flags |= TC_MQPRIO_F_MAX_RATE;
		}
	}

	/* pre-allocate qdiscs so that attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

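	/* Each tx queue gets a default child qdisc; queue i is addressed
	 * as class minor (i + 1), since minor 0 would read as TC_H_UNSPEC.
	 */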
	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  get_default_qdisc_ops(dev, i),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)), extack);
		if (!qdisc)
			return -ENOMEM;

		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	/* If the mqprio options indicate that hardware should own the
	 * queue mapping then run ndo_setup_tc; otherwise use the supplied
	 * and verified mapping.
	 */
	if (qopt->hw) {
		struct tc_mqprio_qopt_offload mqprio = {.qopt = *qopt};

		switch (priv->mode) {
		case TC_MQPRIO_MODE_DCB:
			if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
				return -EINVAL;
			break;
		case TC_MQPRIO_MODE_CHANNEL:
			mqprio.flags = priv->flags;
			if (priv->flags & TC_MQPRIO_F_MODE)
				mqprio.mode = priv->mode;
			if (priv->flags & TC_MQPRIO_F_SHAPER)
				mqprio.shaper = priv->shaper;
			if (priv->flags & TC_MQPRIO_F_MIN_RATE)
				for (i = 0; i < mqprio.qopt.num_tc; i++)
					mqprio.min_rate[i] = priv->min_rate[i];
			if (priv->flags & TC_MQPRIO_F_MAX_RATE)
				for (i = 0; i < mqprio.qopt.num_tc; i++)
					mqprio.max_rate[i] = priv->max_rate[i];
			break;
		default:
			return -EINVAL;
		}
		err = dev->netdev_ops->ndo_setup_tc(dev,
						    TC_SETUP_QDISC_MQPRIO,
						    &mqprio);
		if (err)
			return err;

		priv->hw_offload = mqprio.qopt.hw;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}

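/*
 * Called once init has succeeded and the device is quiesced: graft each
 * pre-allocated child onto its tx queue, hash the ones backing active
 * queues so they are visible to dumps, and drop the temporary array
 * (the queues now own the references).
 */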
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

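/*
 * Emit the per-traffic-class rates as nested TCA_MQPRIO_MIN_RATE64 /
 * TCA_MQPRIO_MAX_RATE64 attribute lists, one u64 per configured tc.
 */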
static int dump_rates(struct mqprio_sched *priv,
		      struct tc_mqprio_qopt *opt, struct sk_buff *skb)
{
	struct nlattr *nest;
	int i;

	if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MIN_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
				    sizeof(priv->min_rate[i]),
				    &priv->min_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}

	if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MAX_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
				    sizeof(priv->max_rate[i]),
				    &priv->max_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

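/*
 * Dump the root qdisc: fold every child's statistics into this qdisc's
 * counters, then emit the legacy tc_mqprio_qopt as TCA_OPTIONS followed
 * by the extended mode/shaper/rate attributes when they were set.
 */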
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int ntx, tc;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locked
	 * qdisc totals are added at the end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));

		if (qdisc_is_percpu_stats(qdisc)) {
			__u32 qlen = qdisc_qlen_sum(qdisc);

			__gnet_stats_copy_basic(NULL, &sch->bstats,
						qdisc->cpu_bstats,
						&qdisc->bstats);
			__gnet_stats_copy_queue(&sch->qstats,
						qdisc->cpu_qstats,
						&qdisc->qstats, qlen);
			sch->q.qlen		+= qlen;
		} else {
			sch->q.qlen		+= qdisc->q.qlen;
			sch->bstats.bytes	+= qdisc->bstats.bytes;
			sch->bstats.packets	+= qdisc->bstats.packets;
			sch->qstats.backlog	+= qdisc->qstats.backlog;
			sch->qstats.drops	+= qdisc->qstats.drops;
			sch->qstats.requeues	+= qdisc->qstats.requeues;
			sch->qstats.overlimits	+= qdisc->qstats.overlimits;
		}

		spin_unlock_bh(qdisc_lock(qdisc));
	}

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
	opt.hw = priv->hw_offload;

	for (tc = 0; tc < netdev_get_num_tc(dev); tc++) {
		opt.count[tc] = dev->tc_to_txq[tc].count;
		opt.offset[tc] = dev->tc_to_txq[tc].offset;
	}

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MODE) &&
	    nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_SHAPER) &&
	    nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MIN_RATE ||
	     priv->flags & TC_MQPRIO_F_MAX_RATE) &&
	    (dump_rates(priv, &opt, skb) != 0))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);
nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	/* There are essentially two regions here that have valid classid
	 * values. The first region will have a classid value of 1 through
	 * num_tx_queues. All of these are backed by actual Qdiscs.
	 */
	if (ntx < TC_H_MIN_PRIORITY)
		return (ntx <= dev->num_tx_queues) ? ntx : 0;

	/* The second region represents the hardware traffic classes. These
	 * are represented by classid values of TC_H_MIN_PRIORITY through
	 * TC_H_MIN_PRIORITY + netdev_get_num_tc - 1
	 */
	return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ? ntx : 0;
}

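/*
 * Queue classes (minor 1..num_tx_queues) report the traffic class they
 * belong to as their parent; traffic-class classes (minor >=
 * TC_H_MIN_PRIORITY) are parented directly to the root.
 */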
static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			 struct sk_buff *skb, struct tcmsg *tcm)
{
	if (cl < TC_H_MIN_PRIORITY) {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
		struct net_device *dev = qdisc_dev(sch);
		int tc = netdev_txq_to_tc(dev, cl - 1);

		tcm->tcm_parent = (tc < 0) ? 0 :
			TC_H_MAKE(TC_H_MAJ(sch->handle),
				  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	} else {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	if (cl >= TC_H_MIN_PRIORITY) {
		int i;
		__u32 qlen = 0;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_packed bstats = {0};
		struct net_device *dev = qdisc_dev(sch);
		struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];

		/* Drop the lock here; it will be reclaimed before touching
		 * statistics.  This is required because the d->lock we hold
		 * here is the lock on dev_queue->qdisc_sleeping, which is
		 * also acquired below.
		 */
		if (d->lock)
			spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);
			struct Qdisc *qdisc = rtnl_dereference(q->qdisc);

			spin_lock_bh(qdisc_lock(qdisc));

			if (qdisc_is_percpu_stats(qdisc)) {
				qlen = qdisc_qlen_sum(qdisc);

				__gnet_stats_copy_basic(NULL, &bstats,
							qdisc->cpu_bstats,
							&qdisc->bstats);
				__gnet_stats_copy_queue(&qstats,
							qdisc->cpu_qstats,
							&qdisc->qstats,
							qlen);
			} else {
				qlen		+= qdisc->q.qlen;
				bstats.bytes	+= qdisc->bstats.bytes;
				bstats.packets	+= qdisc->bstats.packets;
				qstats.backlog	+= qdisc->qstats.backlog;
				qstats.drops	+= qdisc->qstats.drops;
				qstats.requeues	+= qdisc->qstats.requeues;
				qstats.overlimits += qdisc->qstats.overlimits;
			}
			spin_unlock_bh(qdisc_lock(qdisc));
		}

		/* Reclaim root sleeping lock before completing stats */
		if (d->lock)
			spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(NULL, d, NULL, &bstats) < 0 ||
		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d,
					  sch->cpu_bstats, &sch->bstats) < 0 ||
		    qdisc_qstats_copy(d, sch) < 0)
			return -1;
	}
	return 0;
}

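/*
 * Walk the virtual class hierarchy: first one class per traffic class
 * (offset by TC_H_MIN_PRIORITY), then one class per tx queue.  The
 * count is padded to TC_MAX_QUEUE so the per-queue entries sit at a
 * fixed cursor offset regardless of how many tcs are configured.
 */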
static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
		if (arg->fn(sch, ntx + TC_H_MIN_PRIORITY, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}

	/* Pad the values and skip over unused traffic classes */
	if (ntx < TC_MAX_QUEUE) {
		arg->count = TC_MAX_QUEUE;
		ntx = TC_MAX_QUEUE;
	}

	/* Reset offset, sort out remaining per-queue qdiscs */
	for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}

static struct netdev_queue *mqprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return mqprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.find		= mqprio_find,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
	.select_queue	= mqprio_select_queue,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");