// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_fifo.c	The simplest FIFO queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* 1 band FIFO pseudo-"scheduler" */
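/*
 * bfifo accounts its limit in bytes of backlog, pfifo in packets.
 * E.g. from userspace (assuming a standard iproute2 install):
 *   tc qdisc add dev eth0 root pfifo limit 100
 *   tc qdisc add dev eth0 root bfifo limit 100kb
 */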
static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	return qdisc_drop(skb, sch, to_free);
}

static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	if (likely(sch->q.qlen < sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	return qdisc_drop(skb, sch, to_free);
}
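/*
 * Head-drop variant: when the queue is full, the packet at the head
 * (the oldest one) is dropped to make room for the new skb, and the
 * caller is told the queue is congested (NET_XMIT_CN).
 */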
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			      struct sk_buff **to_free)
{
	unsigned int prev_backlog;

	if (likely(sch->q.qlen < sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	prev_backlog = sch->qstats.backlog;
	/* queue full, remove one skb to fulfill the limit */
	__qdisc_queue_drop_head(sch, &sch->q, to_free);
	qdisc_qstats_drop(sch);
	qdisc_enqueue_tail(skb, sch);

	qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
	return NET_XMIT_CN;
}
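/*
 * Offload helpers: mirror the qdisc's lifetime and stats to hardware
 * through ndo_setup_tc(TC_SETUP_QDISC_FIFO) when the driver supports it.
 */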
static void fifo_offload_init(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_fifo_qopt_offload qopt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_FIFO_REPLACE;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
}

static void fifo_offload_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_fifo_qopt_offload qopt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_FIFO_DESTROY;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
}

static int fifo_offload_dump(struct Qdisc *sch)
{
	struct tc_fifo_qopt_offload qopt;

	qopt.command = TC_FIFO_STATS;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.stats.bstats = &sch->bstats;
	qopt.stats.qstats = &sch->qstats;

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_FIFO, &qopt);
}
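/*
 * Without an explicit limit the queue defaults to the device's
 * tx_queue_len (scaled by the MTU for bfifo).  A limit large enough to
 * always admit at least one packet lets the fast path bypass the qdisc
 * (TCQ_F_CAN_BYPASS) while it is empty.
 */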
static int __fifo_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	bool bypass;
	bool is_bfifo = sch->ops == &bfifo_qdisc_ops;

	if (opt == NULL) {
		u32 limit = qdisc_dev(sch)->tx_queue_len;

		if (is_bfifo)
			limit *= psched_mtu(qdisc_dev(sch));

		sch->limit = limit;
	} else {
		struct tc_fifo_qopt *ctl = nla_data(opt);

		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		sch->limit = ctl->limit;
	}

	if (is_bfifo)
		bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
	else
		bypass = sch->limit >= 1;

	if (bypass)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;

	return 0;
}

static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	int err;

	err = __fifo_init(sch, opt, extack);
	if (err)
		return err;

	fifo_offload_init(sch);
	return 0;
}

static int fifo_hd_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	return __fifo_init(sch, opt, extack);
}
static void fifo_destroy(struct Qdisc *sch)
{
	fifo_offload_destroy(sch);
}

static int __fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tc_fifo_qopt opt = { .limit = sch->limit };

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	return -1;
}

static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	int err;

	err = fifo_offload_dump(sch);
	if (err)
		return err;

	return __fifo_dump(sch, skb);
}

static int fifo_hd_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	return __fifo_dump(sch, skb);
}
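/*
 * pfifo and bfifo are exported so other qdiscs can use them as embedded
 * default queues; pfifo_head_drop is not exported and skips the offload
 * hooks (fifo_hd_init/fifo_hd_dump call only the plain __fifo_* helpers).
 */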
struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
	.id		=	"pfifo",
	.priv_size	=	0,
	.enqueue	=	pfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.destroy	=	fifo_destroy,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_qdisc_ops);

struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
	.id		=	"bfifo",
	.priv_size	=	0,
	.enqueue	=	bfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.destroy	=	fifo_destroy,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(bfifo_qdisc_ops);

struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
	.id		=	"pfifo_head_drop",
	.priv_size	=	0,
	.enqueue	=	pfifo_tail_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_hd_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_hd_init,
	.dump		=	fifo_hd_dump,
	.owner		=	THIS_MODULE,
};
/* Pass size change message down to embedded FIFO */
int fifo_set_limit(struct Qdisc *q, unsigned int limit)
{
	struct nlattr *nla;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	if (!q->ops->change)
		return 0;

	nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (nla) {
		nla->nla_type = RTM_NEWQDISC;
		nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;

		ret = q->ops->change(q, nla, NULL);
		kfree(nla);
	}
	return ret;
}
EXPORT_SYMBOL(fifo_set_limit);
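/*
 * fifo_create_dflt() builds a default child FIFO for a parent qdisc
 * (used e.g. by shaping qdiscs such as TBF) and applies @limit to it.
 */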
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
			       unsigned int limit,
			       struct netlink_ext_ack *extack)
{
	struct Qdisc *q;
	int err = -ENOMEM;

	q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
			      extack);
	if (q) {
		err = fifo_set_limit(q, limit);
		if (err < 0) {
			qdisc_put(q);
			q = NULL;
		}
	}

	return q ? : ERR_PTR(err);
}
EXPORT_SYMBOL(fifo_create_dflt);