xref: /OK3568_Linux_fs/kernel/net/sched/sch_multiq.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2008, Intel Corporation.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Author: Alexander Duyck <alexander.h.duyck@intel.com>
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <linux/module.h>
9*4882a593Smuzhiyun #include <linux/slab.h>
10*4882a593Smuzhiyun #include <linux/types.h>
11*4882a593Smuzhiyun #include <linux/kernel.h>
12*4882a593Smuzhiyun #include <linux/string.h>
13*4882a593Smuzhiyun #include <linux/errno.h>
14*4882a593Smuzhiyun #include <linux/skbuff.h>
15*4882a593Smuzhiyun #include <net/netlink.h>
16*4882a593Smuzhiyun #include <net/pkt_sched.h>
17*4882a593Smuzhiyun #include <net/pkt_cls.h>
18*4882a593Smuzhiyun 
/* Per-qdisc state for multiq: one child qdisc per hardware TX queue
 * ("band"), serviced round-robin on dequeue.
 */
struct multiq_sched_data {
	u16 bands;		/* number of currently active bands */
	u16 max_bands;		/* capacity of queues[] (dev->num_tx_queues) */
	u16 curband;		/* last band serviced, for round-robin */
	struct tcf_proto __rcu *filter_list;	/* classifier chain (RCU) */
	struct tcf_block *block;		/* tcf block owning the filters */
	struct Qdisc **queues;	/* max_bands child qdiscs */
};
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun 
/* Pick the child qdisc for @skb.
 *
 * Runs the attached classifier chain (under RCU-bh); with
 * CONFIG_NET_CLS_ACT an action verdict of stolen/queued/trap/shot
 * makes us return NULL, with *qerr describing how the caller should
 * account the packet.  Otherwise the band is taken from the skb's
 * queue mapping, clamped to band 0 if it exceeds the active count.
 */
static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	u32 band;
	struct tcf_result res;
	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
	int err;

	/* Default: a NULL return counts as a bypass drop. */
	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		/* Packet consumed by an action: success, but not enqueued. */
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		fallthrough;
	case TC_ACT_SHOT:
		return NULL;
	}
#endif
	band = skb_get_queue_mapping(skb);

	if (band >= q->bands)
		return q->queues[0];

	return q->queues[band];
}
58*4882a593Smuzhiyun 
/* Classify @skb, enqueue it on the selected band's child qdisc and
 * account the outcome on @sch.
 *
 * Returns NET_XMIT_SUCCESS on success; otherwise the child's return
 * code is propagated, bumping the drop counter only for real drops.
 */
static int
multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
	       struct sk_buff **to_free)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = multiq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {
		/* Classifier shot or stole the packet; drop/free here. */
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	/* e.g. __NET_XMIT_STOLEN is not a genuine drop. */
	if (net_xmit_drop_count(ret))
		qdisc_qstats_drop(sch);
	return ret;
}
86*4882a593Smuzhiyun 
/* Round-robin dequeue: advance curband and pull from the first band
 * whose underlying hardware TX queue is not stopped.  Returns NULL if
 * every band is empty or stopped.
 */
static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!netif_xmit_stopped(
		    netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				qdisc_bstats_update(sch, skb);
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;

}
117*4882a593Smuzhiyun 
/* Like multiq_dequeue() but non-destructive: walks the bands from a
 * local copy of curband (so the round-robin position is untouched)
 * and peeks instead of dequeuing.
 */
static struct sk_buff *multiq_peek(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned int curband = q->curband;
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		curband++;
		if (curband >= q->bands)
			curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!netif_xmit_stopped(
		    netdev_get_tx_queue(qdisc_dev(sch), curband))) {
			qdisc = q->queues[curband];
			skb = qdisc->ops->peek(qdisc);
			if (skb)
				return skb;
		}
	}
	return NULL;

}
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun static void
multiq_reset(struct Qdisc * sch)148*4882a593Smuzhiyun multiq_reset(struct Qdisc *sch)
149*4882a593Smuzhiyun {
150*4882a593Smuzhiyun 	u16 band;
151*4882a593Smuzhiyun 	struct multiq_sched_data *q = qdisc_priv(sch);
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun 	for (band = 0; band < q->bands; band++)
154*4882a593Smuzhiyun 		qdisc_reset(q->queues[band]);
155*4882a593Smuzhiyun 	q->curband = 0;
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun 
/* Tear down the qdisc: release the filter block, then every child
 * qdisc, then the band array itself.
 */
static void
multiq_destroy(struct Qdisc *sch)
{
	int band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	for (band = 0; band < q->bands; band++)
		qdisc_put(q->queues[band]);

	kfree(q->queues);
}
170*4882a593Smuzhiyun 
multiq_tune(struct Qdisc * sch,struct nlattr * opt,struct netlink_ext_ack * extack)171*4882a593Smuzhiyun static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
172*4882a593Smuzhiyun 		       struct netlink_ext_ack *extack)
173*4882a593Smuzhiyun {
174*4882a593Smuzhiyun 	struct multiq_sched_data *q = qdisc_priv(sch);
175*4882a593Smuzhiyun 	struct tc_multiq_qopt *qopt;
176*4882a593Smuzhiyun 	struct Qdisc **removed;
177*4882a593Smuzhiyun 	int i, n_removed = 0;
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun 	if (!netif_is_multiqueue(qdisc_dev(sch)))
180*4882a593Smuzhiyun 		return -EOPNOTSUPP;
181*4882a593Smuzhiyun 	if (nla_len(opt) < sizeof(*qopt))
182*4882a593Smuzhiyun 		return -EINVAL;
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun 	qopt = nla_data(opt);
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun 	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 	removed = kmalloc(sizeof(*removed) * (q->max_bands - q->bands),
189*4882a593Smuzhiyun 			  GFP_KERNEL);
190*4882a593Smuzhiyun 	if (!removed)
191*4882a593Smuzhiyun 		return -ENOMEM;
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun 	sch_tree_lock(sch);
194*4882a593Smuzhiyun 	q->bands = qopt->bands;
195*4882a593Smuzhiyun 	for (i = q->bands; i < q->max_bands; i++) {
196*4882a593Smuzhiyun 		if (q->queues[i] != &noop_qdisc) {
197*4882a593Smuzhiyun 			struct Qdisc *child = q->queues[i];
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun 			q->queues[i] = &noop_qdisc;
200*4882a593Smuzhiyun 			qdisc_purge_queue(child);
201*4882a593Smuzhiyun 			removed[n_removed++] = child;
202*4882a593Smuzhiyun 		}
203*4882a593Smuzhiyun 	}
204*4882a593Smuzhiyun 
205*4882a593Smuzhiyun 	sch_tree_unlock(sch);
206*4882a593Smuzhiyun 
207*4882a593Smuzhiyun 	for (i = 0; i < n_removed; i++)
208*4882a593Smuzhiyun 		qdisc_put(removed[i]);
209*4882a593Smuzhiyun 	kfree(removed);
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun 	for (i = 0; i < q->bands; i++) {
212*4882a593Smuzhiyun 		if (q->queues[i] == &noop_qdisc) {
213*4882a593Smuzhiyun 			struct Qdisc *child, *old;
214*4882a593Smuzhiyun 			child = qdisc_create_dflt(sch->dev_queue,
215*4882a593Smuzhiyun 						  &pfifo_qdisc_ops,
216*4882a593Smuzhiyun 						  TC_H_MAKE(sch->handle,
217*4882a593Smuzhiyun 							    i + 1), extack);
218*4882a593Smuzhiyun 			if (child) {
219*4882a593Smuzhiyun 				sch_tree_lock(sch);
220*4882a593Smuzhiyun 				old = q->queues[i];
221*4882a593Smuzhiyun 				q->queues[i] = child;
222*4882a593Smuzhiyun 				if (child != &noop_qdisc)
223*4882a593Smuzhiyun 					qdisc_hash_add(child, true);
224*4882a593Smuzhiyun 
225*4882a593Smuzhiyun 				if (old != &noop_qdisc)
226*4882a593Smuzhiyun 					qdisc_purge_queue(old);
227*4882a593Smuzhiyun 				sch_tree_unlock(sch);
228*4882a593Smuzhiyun 				qdisc_put(old);
229*4882a593Smuzhiyun 			}
230*4882a593Smuzhiyun 		}
231*4882a593Smuzhiyun 	}
232*4882a593Smuzhiyun 	return 0;
233*4882a593Smuzhiyun }
234*4882a593Smuzhiyun 
/* One-time setup: acquire the tcf block, size the child array to the
 * device's num_tx_queues, fill it with &noop_qdisc placeholders, then
 * apply the initial configuration via multiq_tune().
 *
 * Returns 0 on success or a negative errno.
 */
static int multiq_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int i, err;

	q->queues = NULL;

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	q->max_bands = qdisc_dev(sch)->num_tx_queues;

	q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
	if (!q->queues)
		return -ENOBUFS;
	/* Start every band detached; multiq_tune() attaches real children. */
	for (i = 0; i < q->max_bands; i++)
		q->queues[i] = &noop_qdisc;

	return multiq_tune(sch, opt, extack);
}
260*4882a593Smuzhiyun 
multiq_dump(struct Qdisc * sch,struct sk_buff * skb)261*4882a593Smuzhiyun static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
262*4882a593Smuzhiyun {
263*4882a593Smuzhiyun 	struct multiq_sched_data *q = qdisc_priv(sch);
264*4882a593Smuzhiyun 	unsigned char *b = skb_tail_pointer(skb);
265*4882a593Smuzhiyun 	struct tc_multiq_qopt opt;
266*4882a593Smuzhiyun 
267*4882a593Smuzhiyun 	opt.bands = q->bands;
268*4882a593Smuzhiyun 	opt.max_bands = q->max_bands;
269*4882a593Smuzhiyun 
270*4882a593Smuzhiyun 	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
271*4882a593Smuzhiyun 		goto nla_put_failure;
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun 	return skb->len;
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun nla_put_failure:
276*4882a593Smuzhiyun 	nlmsg_trim(skb, b);
277*4882a593Smuzhiyun 	return -1;
278*4882a593Smuzhiyun }
279*4882a593Smuzhiyun 
multiq_graft(struct Qdisc * sch,unsigned long arg,struct Qdisc * new,struct Qdisc ** old,struct netlink_ext_ack * extack)280*4882a593Smuzhiyun static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
281*4882a593Smuzhiyun 			struct Qdisc **old, struct netlink_ext_ack *extack)
282*4882a593Smuzhiyun {
283*4882a593Smuzhiyun 	struct multiq_sched_data *q = qdisc_priv(sch);
284*4882a593Smuzhiyun 	unsigned long band = arg - 1;
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun 	if (new == NULL)
287*4882a593Smuzhiyun 		new = &noop_qdisc;
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun 	*old = qdisc_replace(sch, new, &q->queues[band]);
290*4882a593Smuzhiyun 	return 0;
291*4882a593Smuzhiyun }
292*4882a593Smuzhiyun 
293*4882a593Smuzhiyun static struct Qdisc *
multiq_leaf(struct Qdisc * sch,unsigned long arg)294*4882a593Smuzhiyun multiq_leaf(struct Qdisc *sch, unsigned long arg)
295*4882a593Smuzhiyun {
296*4882a593Smuzhiyun 	struct multiq_sched_data *q = qdisc_priv(sch);
297*4882a593Smuzhiyun 	unsigned long band = arg - 1;
298*4882a593Smuzhiyun 
299*4882a593Smuzhiyun 	return q->queues[band];
300*4882a593Smuzhiyun }
301*4882a593Smuzhiyun 
/* Map @classid to a class handle: the 1-based band number, or 0 when
 * out of range.  The unsigned (band - 1) comparison also rejects
 * band == 0, which wraps to ULONG_MAX.
 */
static unsigned long multiq_find(struct Qdisc *sch, u32 classid)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	return (band - 1 < q->bands) ? band : 0;
}
311*4882a593Smuzhiyun 
/* Binding a tc filter to a class needs no refcounting here; simply
 * validate the classid like multiq_find().
 */
static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
				 u32 classid)
{
	return multiq_find(sch, classid);
}
317*4882a593Smuzhiyun 
318*4882a593Smuzhiyun 
/* No per-class state to release when a filter unbinds. */
static void multiq_unbind(struct Qdisc *q, unsigned long cl)
{
}
322*4882a593Smuzhiyun 
/* Fill @tcm with class @cl's handle and its child qdisc's handle. */
static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	/* cl is 1-based; the band index is cl - 1. */
	tcm->tcm_info = q->queues[cl - 1]->handle;
	return 0;
}
332*4882a593Smuzhiyun 
/* Copy band (@cl - 1)'s basic and queue statistics into dump @d.
 * Returns 0 on success, -1 if the dump buffer is exhausted.
 */
static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
	    qdisc_qstats_copy(d, cl_q) < 0)
		return -1;

	return 0;
}
347*4882a593Smuzhiyun 
/* Walk all classes (bands), calling arg->fn for each one past
 * arg->skip and honouring the walker's stop flag.
 */
static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;

	if (arg->stop)
		return;

	for (band = 0; band < q->bands; band++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		/* Class ids are 1-based band numbers. */
		if (arg->fn(sch, band + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
368*4882a593Smuzhiyun 
multiq_tcf_block(struct Qdisc * sch,unsigned long cl,struct netlink_ext_ack * extack)369*4882a593Smuzhiyun static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl,
370*4882a593Smuzhiyun 					  struct netlink_ext_ack *extack)
371*4882a593Smuzhiyun {
372*4882a593Smuzhiyun 	struct multiq_sched_data *q = qdisc_priv(sch);
373*4882a593Smuzhiyun 
374*4882a593Smuzhiyun 	if (cl)
375*4882a593Smuzhiyun 		return NULL;
376*4882a593Smuzhiyun 	return q->block;
377*4882a593Smuzhiyun }
378*4882a593Smuzhiyun 
/* Class operations: one class per band, identified by 1-based ids. */
static const struct Qdisc_class_ops multiq_class_ops = {
	.graft		=	multiq_graft,
	.leaf		=	multiq_leaf,
	.find		=	multiq_find,
	.walk		=	multiq_walk,
	.tcf_block	=	multiq_tcf_block,
	.bind_tcf	=	multiq_bind,
	.unbind_tcf	=	multiq_unbind,
	.dump		=	multiq_dump_class,
	.dump_stats	=	multiq_dump_class_stats,
};
390*4882a593Smuzhiyun 
/* Qdisc operations table registered with the packet scheduler core. */
static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&multiq_class_ops,
	.id		=	"multiq",
	.priv_size	=	sizeof(struct multiq_sched_data),
	.enqueue	=	multiq_enqueue,
	.dequeue	=	multiq_dequeue,
	.peek		=	multiq_peek,
	.init		=	multiq_init,
	.reset		=	multiq_reset,
	.destroy	=	multiq_destroy,
	.change		=	multiq_tune,	/* runtime reconfiguration */
	.dump		=	multiq_dump,
	.owner		=	THIS_MODULE,
};
406*4882a593Smuzhiyun 
/* Register the multiq qdisc with the packet scheduler on module load. */
static int __init multiq_module_init(void)
{
	return register_qdisc(&multiq_qdisc_ops);
}
411*4882a593Smuzhiyun 
/* Unregister the multiq qdisc on module unload. */
static void __exit multiq_module_exit(void)
{
	unregister_qdisc(&multiq_qdisc_ops);
}
416*4882a593Smuzhiyun 
/* Hook the entry/exit points into the kernel module loader. */
module_init(multiq_module_init)
module_exit(multiq_module_exit)

MODULE_LICENSE("GPL");
421