xref: /OK3568_Linux_fs/kernel/net/sched/sch_plug.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * sch_plug.c Queue traffic until an explicit release command
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * There are two ways to use this qdisc:
6*4882a593Smuzhiyun  * 1. A simple "instantaneous" plug/unplug operation, by issuing an alternating
7*4882a593Smuzhiyun  *    sequence of TCQ_PLUG_BUFFER & TCQ_PLUG_RELEASE_INDEFINITE commands.
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * 2. For network output buffering (a.k.a output commit) functionality.
10*4882a593Smuzhiyun  *    Output commit property is commonly used by applications using checkpoint
11*4882a593Smuzhiyun  *    based fault-tolerance to ensure that the checkpoint from which a system
12*4882a593Smuzhiyun  *    is being restored is consistent w.r.t outside world.
13*4882a593Smuzhiyun  *
14*4882a593Smuzhiyun  *    Consider for e.g. Remus - a Virtual Machine checkpointing system,
15*4882a593Smuzhiyun  *    wherein a VM is checkpointed, say every 50ms. The checkpoint is replicated
16*4882a593Smuzhiyun  *    asynchronously to the backup host, while the VM continues executing the
17*4882a593Smuzhiyun  *    next epoch speculatively.
18*4882a593Smuzhiyun  *
19*4882a593Smuzhiyun  *    The following is a typical sequence of output buffer operations:
20*4882a593Smuzhiyun  *       1.At epoch i, start_buffer(i)
21*4882a593Smuzhiyun  *       2. At end of epoch i (i.e. after 50ms):
22*4882a593Smuzhiyun  *          2.1 Stop VM and take checkpoint(i).
23*4882a593Smuzhiyun  *          2.2 start_buffer(i+1) and Resume VM
24*4882a593Smuzhiyun  *       3. While speculatively executing epoch(i+1), asynchronously replicate
25*4882a593Smuzhiyun  *          checkpoint(i) to backup host.
26*4882a593Smuzhiyun  *       4. When checkpoint_ack(i) is received from backup, release_buffer(i)
27*4882a593Smuzhiyun  *    Thus, this Qdisc would receive the following sequence of commands:
28*4882a593Smuzhiyun  *       TCQ_PLUG_BUFFER (epoch i)
29*4882a593Smuzhiyun  *       .. TCQ_PLUG_BUFFER (epoch i+1)
30*4882a593Smuzhiyun  *       ....TCQ_PLUG_RELEASE_ONE (epoch i)
31*4882a593Smuzhiyun  *       ......TCQ_PLUG_BUFFER (epoch i+2)
32*4882a593Smuzhiyun  *       ........
33*4882a593Smuzhiyun  */
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun #include <linux/module.h>
36*4882a593Smuzhiyun #include <linux/types.h>
37*4882a593Smuzhiyun #include <linux/kernel.h>
38*4882a593Smuzhiyun #include <linux/errno.h>
39*4882a593Smuzhiyun #include <linux/netdevice.h>
40*4882a593Smuzhiyun #include <linux/skbuff.h>
41*4882a593Smuzhiyun #include <net/pkt_sched.h>
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun /*
44*4882a593Smuzhiyun  * State of the queue, when used for network output buffering:
45*4882a593Smuzhiyun  *
46*4882a593Smuzhiyun  *                 plug(i+1)            plug(i)          head
47*4882a593Smuzhiyun  * ------------------+--------------------+---------------->
48*4882a593Smuzhiyun  *                   |                    |
49*4882a593Smuzhiyun  *                   |                    |
50*4882a593Smuzhiyun  * pkts_current_epoch| pkts_last_epoch    |pkts_to_release
51*4882a593Smuzhiyun  * ----------------->|<--------+--------->|+--------------->
52*4882a593Smuzhiyun  *                   v                    v
53*4882a593Smuzhiyun  *
54*4882a593Smuzhiyun  */
55*4882a593Smuzhiyun 
/* Per-qdisc private state for the plug scheduler.
 * See the diagram above for how the epoch counters partition the queue.
 */
struct plug_sched_data {
	/* If true, the dequeue function releases all packets
	 * from head to end of the queue. The queue turns into
	 * a pass-through queue for newly arriving packets.
	 */
	bool unplug_indefinite;

	/* If true, dequeue returns NULL until a release command
	 * (TCQ_PLUG_RELEASE_ONE/INDEFINITE) clears it and reschedules
	 * the queue. Set at init and whenever pkts_to_release hits 0.
	 */
	bool throttled;

	/* Queue Limit in bytes: enqueue drops packets that would push
	 * the backlog past this value.
	 */
	u32 limit;

	/* Number of packets (output) from the current speculatively
	 * executing epoch.
	 */
	u32 pkts_current_epoch;

	/* Number of packets corresponding to the recently finished
	 * epoch. These will be released when we receive a
	 * TCQ_PLUG_RELEASE_ONE command. This command is typically
	 * issued after committing a checkpoint at the target.
	 */
	u32 pkts_last_epoch;

	/*
	 * Number of packets from the head of the queue, that can
	 * be released (committed checkpoint).
	 */
	u32 pkts_to_release;
};
86*4882a593Smuzhiyun 
plug_enqueue(struct sk_buff * skb,struct Qdisc * sch,struct sk_buff ** to_free)87*4882a593Smuzhiyun static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch,
88*4882a593Smuzhiyun 			struct sk_buff **to_free)
89*4882a593Smuzhiyun {
90*4882a593Smuzhiyun 	struct plug_sched_data *q = qdisc_priv(sch);
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun 	if (likely(sch->qstats.backlog + skb->len <= q->limit)) {
93*4882a593Smuzhiyun 		if (!q->unplug_indefinite)
94*4882a593Smuzhiyun 			q->pkts_current_epoch++;
95*4882a593Smuzhiyun 		return qdisc_enqueue_tail(skb, sch);
96*4882a593Smuzhiyun 	}
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun 	return qdisc_drop(skb, sch, to_free);
99*4882a593Smuzhiyun }
100*4882a593Smuzhiyun 
plug_dequeue(struct Qdisc * sch)101*4882a593Smuzhiyun static struct sk_buff *plug_dequeue(struct Qdisc *sch)
102*4882a593Smuzhiyun {
103*4882a593Smuzhiyun 	struct plug_sched_data *q = qdisc_priv(sch);
104*4882a593Smuzhiyun 
105*4882a593Smuzhiyun 	if (q->throttled)
106*4882a593Smuzhiyun 		return NULL;
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun 	if (!q->unplug_indefinite) {
109*4882a593Smuzhiyun 		if (!q->pkts_to_release) {
110*4882a593Smuzhiyun 			/* No more packets to dequeue. Block the queue
111*4882a593Smuzhiyun 			 * and wait for the next release command.
112*4882a593Smuzhiyun 			 */
113*4882a593Smuzhiyun 			q->throttled = true;
114*4882a593Smuzhiyun 			return NULL;
115*4882a593Smuzhiyun 		}
116*4882a593Smuzhiyun 		q->pkts_to_release--;
117*4882a593Smuzhiyun 	}
118*4882a593Smuzhiyun 
119*4882a593Smuzhiyun 	return qdisc_dequeue_head(sch);
120*4882a593Smuzhiyun }
121*4882a593Smuzhiyun 
plug_init(struct Qdisc * sch,struct nlattr * opt,struct netlink_ext_ack * extack)122*4882a593Smuzhiyun static int plug_init(struct Qdisc *sch, struct nlattr *opt,
123*4882a593Smuzhiyun 		     struct netlink_ext_ack *extack)
124*4882a593Smuzhiyun {
125*4882a593Smuzhiyun 	struct plug_sched_data *q = qdisc_priv(sch);
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun 	q->pkts_current_epoch = 0;
128*4882a593Smuzhiyun 	q->pkts_last_epoch = 0;
129*4882a593Smuzhiyun 	q->pkts_to_release = 0;
130*4882a593Smuzhiyun 	q->unplug_indefinite = false;
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun 	if (opt == NULL) {
133*4882a593Smuzhiyun 		q->limit = qdisc_dev(sch)->tx_queue_len
134*4882a593Smuzhiyun 		           * psched_mtu(qdisc_dev(sch));
135*4882a593Smuzhiyun 	} else {
136*4882a593Smuzhiyun 		struct tc_plug_qopt *ctl = nla_data(opt);
137*4882a593Smuzhiyun 
138*4882a593Smuzhiyun 		if (nla_len(opt) < sizeof(*ctl))
139*4882a593Smuzhiyun 			return -EINVAL;
140*4882a593Smuzhiyun 
141*4882a593Smuzhiyun 		q->limit = ctl->limit;
142*4882a593Smuzhiyun 	}
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun 	q->throttled = true;
145*4882a593Smuzhiyun 	return 0;
146*4882a593Smuzhiyun }
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun /* Receives 4 types of messages:
149*4882a593Smuzhiyun  * TCQ_PLUG_BUFFER: Insert a plug into the queue and
150*4882a593Smuzhiyun  *  buffer any incoming packets
151*4882a593Smuzhiyun  * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
152*4882a593Smuzhiyun  *   to beginning of the next plug.
153*4882a593Smuzhiyun  * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
154*4882a593Smuzhiyun  *   Stop buffering packets until the next TCQ_PLUG_BUFFER
155*4882a593Smuzhiyun  *   command is received (just act as a pass-thru queue).
156*4882a593Smuzhiyun  * TCQ_PLUG_LIMIT: Increase/decrease queue size
157*4882a593Smuzhiyun  */
plug_change(struct Qdisc * sch,struct nlattr * opt,struct netlink_ext_ack * extack)158*4882a593Smuzhiyun static int plug_change(struct Qdisc *sch, struct nlattr *opt,
159*4882a593Smuzhiyun 		       struct netlink_ext_ack *extack)
160*4882a593Smuzhiyun {
161*4882a593Smuzhiyun 	struct plug_sched_data *q = qdisc_priv(sch);
162*4882a593Smuzhiyun 	struct tc_plug_qopt *msg;
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun 	if (opt == NULL)
165*4882a593Smuzhiyun 		return -EINVAL;
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun 	msg = nla_data(opt);
168*4882a593Smuzhiyun 	if (nla_len(opt) < sizeof(*msg))
169*4882a593Smuzhiyun 		return -EINVAL;
170*4882a593Smuzhiyun 
171*4882a593Smuzhiyun 	switch (msg->action) {
172*4882a593Smuzhiyun 	case TCQ_PLUG_BUFFER:
173*4882a593Smuzhiyun 		/* Save size of the current buffer */
174*4882a593Smuzhiyun 		q->pkts_last_epoch = q->pkts_current_epoch;
175*4882a593Smuzhiyun 		q->pkts_current_epoch = 0;
176*4882a593Smuzhiyun 		if (q->unplug_indefinite)
177*4882a593Smuzhiyun 			q->throttled = true;
178*4882a593Smuzhiyun 		q->unplug_indefinite = false;
179*4882a593Smuzhiyun 		break;
180*4882a593Smuzhiyun 	case TCQ_PLUG_RELEASE_ONE:
181*4882a593Smuzhiyun 		/* Add packets from the last complete buffer to the
182*4882a593Smuzhiyun 		 * packets to be released set.
183*4882a593Smuzhiyun 		 */
184*4882a593Smuzhiyun 		q->pkts_to_release += q->pkts_last_epoch;
185*4882a593Smuzhiyun 		q->pkts_last_epoch = 0;
186*4882a593Smuzhiyun 		q->throttled = false;
187*4882a593Smuzhiyun 		netif_schedule_queue(sch->dev_queue);
188*4882a593Smuzhiyun 		break;
189*4882a593Smuzhiyun 	case TCQ_PLUG_RELEASE_INDEFINITE:
190*4882a593Smuzhiyun 		q->unplug_indefinite = true;
191*4882a593Smuzhiyun 		q->pkts_to_release = 0;
192*4882a593Smuzhiyun 		q->pkts_last_epoch = 0;
193*4882a593Smuzhiyun 		q->pkts_current_epoch = 0;
194*4882a593Smuzhiyun 		q->throttled = false;
195*4882a593Smuzhiyun 		netif_schedule_queue(sch->dev_queue);
196*4882a593Smuzhiyun 		break;
197*4882a593Smuzhiyun 	case TCQ_PLUG_LIMIT:
198*4882a593Smuzhiyun 		/* Limit is supplied in bytes */
199*4882a593Smuzhiyun 		q->limit = msg->limit;
200*4882a593Smuzhiyun 		break;
201*4882a593Smuzhiyun 	default:
202*4882a593Smuzhiyun 		return -EINVAL;
203*4882a593Smuzhiyun 	}
204*4882a593Smuzhiyun 
205*4882a593Smuzhiyun 	return 0;
206*4882a593Smuzhiyun }
207*4882a593Smuzhiyun 
/* Qdisc operations table registered under the "plug" id.
 * No .destroy/.dump: the qdisc keeps no resources beyond its priv area
 * and exposes no dumpable configuration.
 */
static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
	.id          =       "plug",
	.priv_size   =       sizeof(struct plug_sched_data),
	.enqueue     =       plug_enqueue,
	.dequeue     =       plug_dequeue,
	.peek        =       qdisc_peek_head,
	.init        =       plug_init,
	.change      =       plug_change,
	.reset       =	     qdisc_reset_queue,
	.owner       =       THIS_MODULE,
};
219*4882a593Smuzhiyun 
/* Module entry point: register the "plug" qdisc with the tc core. */
static int __init plug_module_init(void)
{
	return register_qdisc(&plug_qdisc_ops);
}
224*4882a593Smuzhiyun 
/* Module exit point: unregister the qdisc on module unload. */
static void __exit plug_module_exit(void)
{
	unregister_qdisc(&plug_qdisc_ops);
}
/* Module registration boilerplate; "GPL" here means GPLv2 or later,
 * matching the SPDX tag at the top of the file.
 */
module_init(plug_module_init)
module_exit(plug_module_exit)
MODULE_LICENSE("GPL");
232