// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Sample Host Bandwidth Manager (HBM) BPF program.
 *
 * A cgroup skb BPF egress program to limit cgroup output bandwidth.
 * It uses a modified virtual token bucket queue to limit average
 * egress bandwidth. The implementation uses credits instead of tokens.
 * Negative credits imply that queueing would have happened (this is
 * a virtual queue, so no queueing is done by it; however, queueing may
 * occur at the actual qdisc, which is not used for rate limiting).
 *
 * This implementation uses 3 thresholds, one to start marking packets and
 * the other two to drop packets:
 *                                  CREDIT
 *        - <--------------------------|------------------------> +
 *              |    |          |      0
 *              |  Large pkt    |
 *              |  drop thresh  |
 *   Small pkt drop             Mark threshold
 *       thresh
 *
 * The effect of marking depends on the type of packet:
 * a) If the packet is ECN enabled and it is a TCP packet, then the packet
 *    is ECN marked.
 * b) If the packet is a TCP packet, then we probabilistically call tcp_cwr
 *    to reduce the congestion window. The current implementation uses a linear
 *    distribution (0% probability at marking threshold, 100% probability
 *    at drop threshold).
 * c) If the packet is not a TCP packet, then it is dropped.
 *
 * If the credit is below the drop threshold, the packet is dropped. If it
 * is a TCP packet, then it also calls tcp_cwr since packets dropped by
 * a cgroup skb BPF program do not automatically trigger a call to
 * tcp_cwr in the current kernel code.
 *
 * This BPF program actually uses 2 drop thresholds, one threshold
 * for larger packets (>= 120 bytes) and another for smaller packets. This
 * protects smaller packets such as SYNs, ACKs, etc.
 *
 * The default bandwidth limit is set at 1 Gbps but this can be changed by
 * a user program through a shared BPF map. In addition, by default this BPF
 * program does not limit connections using loopback. This behavior can be
 * overridden by the user program. There is also an option to calculate
 * some statistics, such as percent of packets marked or dropped, which
 * a user program, such as hbm, can access.
 */
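
/*
 * Rough arithmetic sketch (the exact constants live in hbm_kern.h):
 * at the default rate of about 1 Gbps, a 1500 byte packet is
 * 1500 * 8 = 12000 bits, i.e. roughly 12 us of transmission time, so each
 * such packet pushes the virtual queue's lasttime about 12 us further into
 * the future. Once lasttime leads the current time by more than
 * MARK_THRESH_NS packets start being marked, and once it leads by more than
 * DROP_THRESH_NS (or LARGE_PKT_DROP_THRESH_NS for packets larger than
 * LARGE_PKT_THRESH bytes) packets start being dropped.
 */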

#include "hbm_kern.h"

SEC("cgroup_skb/egress")
int _hbm_out_cg(struct __sk_buff *skb)
{
	long long delta = 0, delta_send;
	unsigned long long curtime, sendtime;
	struct hbm_queue_stats *qsp = NULL;
	unsigned int queue_index = 0;
	bool congestion_flag = false;
	bool ecn_ce_flag = false;
	struct hbm_pkt_info pkti = {};
	struct hbm_vqueue *qdp;
	bool drop_flag = false;
	bool cwr_flag = false;
	int len = skb->len;
	int rv = ALLOW_PKT;

	qsp = bpf_map_lookup_elem(&queue_stats, &queue_index);

	// Check if we should ignore loopback traffic
	if (qsp != NULL && !qsp->loopback && (skb->ifindex == 1))
		return ALLOW_PKT;

	hbm_get_pkt_info(skb, &pkti);

	// We may want to account for the length of headers in the len
	// calculation, like ETH header + overhead, especially if it
	// is a GSO packet. But that is not done here.

	qdp = bpf_get_local_storage(&queue_state, 0);
	if (!qdp)
		return ALLOW_PKT;
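	// A lasttime of zero means this cgroup's virtual queue has not been
	// initialized yet. The 1024 passed below is presumably the default
	// rate in Mbps (roughly the 1 Gbps default mentioned in the header);
	// see hbm_init_edt_vqueue() in hbm_kern.h for the exact semantics.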
	if (qdp->lasttime == 0)
		hbm_init_edt_vqueue(qdp, 1024);

	curtime = bpf_ktime_get_ns();

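	// The virtual queue state lives in cgroup local storage and can be
	// updated concurrently by tasks on different CPUs, so the
	// read-modify-write of lasttime below is done under the BPF spin lock.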
	// Begin critical section
	bpf_spin_lock(&qdp->lock);
	delta = qdp->lasttime - curtime;
	// bound bursts to 100us
	if (delta < -BURST_SIZE_NS) {
		// negative delta is a credit that allows bursts
		qdp->lasttime = curtime - BURST_SIZE_NS;
		delta = -BURST_SIZE_NS;
	}
	sendtime = qdp->lasttime;
	delta_send = BYTES_TO_NS(len, qdp->rate);
	__sync_add_and_fetch(&(qdp->lasttime), delta_send);
	bpf_spin_unlock(&qdp->lock);
	// End critical section

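	// With EDT the program never delays packets itself; it stamps each
	// skb with the departure time computed above and relies on an
	// EDT-aware qdisc (such as sch_fq) to hold the packet until then.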
	// Set EDT of packet
	skb->tstamp = sendtime;

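	// qsp->rate is the user-visible rate in Mbps; qdp->rate is the
	// fixed-point form consumed by BYTES_TO_NS(). Assuming BYTES_TO_NS()
	// computes (bytes << 20) / rate, the factor of 128 charges
	// 8192 ns per byte per Mbps instead of the exact 8000, trading a few
	// percent of accuracy for power-of-two arithmetic.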
	// Check if we should update rate
	if (qsp != NULL && (qsp->rate * 128) != qdp->rate)
		qdp->rate = qsp->rate * 128;

	// Set flags (drop, congestion, cwr)
	// delta > 0 means the last queued byte departs that far in the
	// future; bound the latency this implies
	if (delta > DROP_THRESH_NS || (delta > LARGE_PKT_DROP_THRESH_NS &&
				       len > LARGE_PKT_THRESH)) {
		drop_flag = true;
		if (pkti.is_tcp && pkti.ecn == 0)
			cwr_flag = true;
	} else if (delta > MARK_THRESH_NS) {
		if (pkti.is_tcp)
			congestion_flag = true;
		else
			drop_flag = true;
	}

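	// Try ECN first: bpf_skb_ecn_set_ce() returns nonzero only if the CE
	// mark is (or was already) set on an ECN-capable packet. Otherwise
	// fall back to probabilistic cwr for TCP, with probability growing
	// linearly from 0 at MARK_THRESH_NS to 1 at
	// MARK_THRESH_NS + MARK_REGION_SIZE_NS, or to dropping large
	// non-TCP packets.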
	if (congestion_flag) {
		if (bpf_skb_ecn_set_ce(skb)) {
			ecn_ce_flag = true;
		} else {
			if (pkti.is_tcp) {
				unsigned int rand = bpf_get_prandom_u32();

				if (delta >= MARK_THRESH_NS +
				    (rand % MARK_REGION_SIZE_NS)) {
					// Do congestion control
					cwr_flag = true;
				}
			} else if (len > LARGE_PKT_THRESH) {
				// Problem if too many small packets?
				drop_flag = true;
				congestion_flag = false;
			}
		}
	}

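	// If this is the only packet the flow has outstanding (for example a
	// lone retransmission or the tail of a flow), dropping it could stall
	// the connection, so let it through and signal cwr instead.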
	if (pkti.is_tcp && drop_flag && pkti.packets_out <= 1) {
		drop_flag = false;
		cwr_flag = true;
		congestion_flag = false;
	}

	if (qsp != NULL && qsp->no_cn)
		cwr_flag = false;

	hbm_update_stats(qsp, len, curtime, congestion_flag, drop_flag,
			 cwr_flag, ecn_ce_flag, &pkti, (int) delta);

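	// The packet will not be transmitted, so give back the send time that
	// was charged to the virtual queue for it above.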
	if (drop_flag) {
		__sync_add_and_fetch(&(qdp->lasttime), -delta_send);
		rv = DROP_PKT;
	}

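	// The CWR bit in the return value asks the stack to treat the verdict
	// as a congestion notification, so a local TCP sender reduces its
	// congestion window (the tcp_cwr behavior described in the header)
	// even when the packet is allowed through.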
	if (cwr_flag)
		rv |= CWR;
	return rv;
}
char _license[] SEC("license") = "GPL";