// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Sample Host Bandwidth Manager (HBM) BPF program.
 *
 * A cgroup skb BPF egress program to limit cgroup output bandwidth.
 * It uses a modified virtual token bucket queue to limit average
 * egress bandwidth. The implementation uses credits instead of tokens.
 * Negative credits imply that queueing would have happened (this is
 * a virtual queue, so no queueing is done by it). However, queueing may
 * occur at the actual qdisc (which is not used for rate limiting).
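 *
 * As a rough sketch of the logic implemented in _hbm_out_cg() below, the
 * per-packet credit update is:
 *
 *     credit += CREDIT_PER_NS(now - lasttime, rate);  // replenish for elapsed time
 *     credit  = min(credit, MAX_CREDIT);              // cap the bucket size
 *     credit -= skb->len;                             // charge this packet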
 *
 * This implementation uses 3 thresholds, one to start marking packets and
 * the other two to drop packets:
 *                                  CREDIT
 *        - <--------------------------|------------------------> +
 *              |    |          |      0
 *              |  Large pkt    |
 *              |  drop thresh  |
 *   Small pkt drop             Mark threshold
 *       thresh
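 *
 * (The threshold constants MARK_THRESH, DROP_THRESH, LARGE_PKT_DROP_THRESH
 * and LARGE_PKT_THRESH used below come from hbm_kern.h.)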
 *
 * The effect of marking depends on the type of packet:
 * a) If the packet is ECN enabled and it is a TCP packet, then the packet
 *    is ECN marked.
 * b) If the packet is a TCP packet, then we probabilistically call tcp_cwr
 *    to reduce the congestion window. The current implementation uses a linear
 *    distribution (0% probability at marking threshold, 100% probability
 *    at drop threshold).
 * c) If the packet is not a TCP packet, then it is dropped.
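 *
 * The probabilistic cwr decision in (b) above corresponds to this check in
 * the code below:
 *
 *     if (-credit >= MARK_THRESH + (rand % MARK_REGION_SIZE))
 *             cwr_flag = true;
 *
 * where MARK_REGION_SIZE (per hbm_kern.h) is the width of the region
 * between the mark and large-packet drop thresholds, so the probability
 * grows linearly from 0 at the mark threshold to 1 at the drop threshold.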
 *
 * If the credit is below the drop threshold, the packet is dropped. If it
 * is a TCP packet, then it also calls tcp_cwr since packets dropped by
 * a cgroup skb BPF program do not automatically trigger a call to
 * tcp_cwr in the current kernel code.
 *
 * This BPF program actually uses 2 drop thresholds, one threshold
 * for larger packets (>= 120 bytes) and another for smaller packets. This
 * protects smaller packets such as SYNs, ACKs, etc.
 *
 * The default bandwidth limit is set at 1Gbps but this can be changed by
 * a user program through a shared BPF map. In addition, by default this BPF
 * program does not limit connections using loopback. This behavior can be
 * overridden by the user program. There is also an option to calculate
 * some statistics, such as percent of packets marked or dropped, which
 * the user program can access.
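 *
 * For illustration only, a user program could adjust the shared queue_stats
 * map roughly as follows (a sketch, assuming map key 0 as used below and a
 * rate expressed in Mbps; the map fd is hypothetical here, hbm.c is the
 * real example):
 *
 *     struct hbm_queue_stats qs = {};
 *     __u32 key = 0;
 *
 *     qs.rate = 10000;        // hypothetical 10 Gbps limit
 *     qs.loopback = 1;        // also rate limit loopback traffic
 *     bpf_map_update_elem(map_fd, &key, &qs, BPF_ANY);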
 *
 * A later patch provides such a program (hbm.c).
 */

#include "hbm_kern.h"

SEC("cgroup_skb/egress")
int _hbm_out_cg(struct __sk_buff *skb)
{
	struct hbm_pkt_info pkti;
	int len = skb->len;
	unsigned int queue_index = 0;
	unsigned long long curtime;
	int credit;
	signed long long delta = 0, new_credit;
	int max_credit = MAX_CREDIT;
	bool congestion_flag = false;
	bool drop_flag = false;
	bool cwr_flag = false;
	bool ecn_ce_flag = false;
	struct hbm_vqueue *qdp;
	struct hbm_queue_stats *qsp = NULL;
	int rv = ALLOW_PKT;

	qsp = bpf_map_lookup_elem(&queue_stats, &queue_index);
	if (qsp != NULL && !qsp->loopback && (skb->ifindex == 1))
		return ALLOW_PKT;

	hbm_get_pkt_info(skb, &pkti);

	// We may want to account for the length of headers in the len
	// calculation, e.g. the ETH header + overhead, especially if it
	// is a GSO packet. That is not done here.

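	/* Per-cgroup virtual-queue state lives in the queue_state local
	 * storage map. On the first packet (lasttime == 0) the vqueue is
	 * initialized; the 1024 passed to hbm_init_vqueue() is the default
	 * rate (presumably in Mbps, i.e. the ~1Gbps default mentioned in the
	 * header comment).
	 */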
	qdp = bpf_get_local_storage(&queue_state, 0);
	if (!qdp)
		return ALLOW_PKT;
	else if (qdp->lasttime == 0)
		hbm_init_vqueue(qdp, 1024);

	curtime = bpf_ktime_get_ns();

	// Begin critical section
	bpf_spin_lock(&qdp->lock);
	credit = qdp->credit;
	delta = curtime - qdp->lasttime;
	/* delta < 0 implies that another process with a curtime greater
	 * than ours beat us to the critical section and already added
	 * the new credit, so we should not add it ourselves
	 */
	if (delta > 0) {
		qdp->lasttime = curtime;
		new_credit = credit + CREDIT_PER_NS(delta, qdp->rate);
		if (new_credit > MAX_CREDIT)
			credit = MAX_CREDIT;
		else
			credit = new_credit;
	}
	credit -= len;
	qdp->credit = credit;
	bpf_spin_unlock(&qdp->lock);
	// End critical section

	// Check if we should update rate
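	// (Assumption: qsp->rate comes from user space in Mbps; the * 128
	//  factor rescales it into the units consumed by CREDIT_PER_NS(),
	//  which is defined in hbm_kern.h.)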
	if (qsp != NULL && (qsp->rate * 128) != qdp->rate) {
		qdp->rate = qsp->rate * 128;
		bpf_printk("Updating rate: %d (1sec:%llu bits)\n",
			   (int)qdp->rate,
			   CREDIT_PER_NS(1000000000, qdp->rate) * 8);
	}

	// Set flags (drop, congestion, cwr)
	// Dropping => we are congested, so ignore congestion flag
	if (credit < -DROP_THRESH ||
	    (len > LARGE_PKT_THRESH && credit < -LARGE_PKT_DROP_THRESH)) {
		// Very congested, set drop packet
		drop_flag = true;
		if (pkti.ecn)
			congestion_flag = true;
		else if (pkti.is_tcp)
			cwr_flag = true;
	} else if (credit < 0) {
		// Congested, set congestion flag
		if (pkti.ecn || pkti.is_tcp) {
			if (credit < -MARK_THRESH)
				congestion_flag = true;
			else
				congestion_flag = false;
		} else {
			congestion_flag = true;
		}
	}

	if (congestion_flag) {
		if (bpf_skb_ecn_set_ce(skb)) {
			ecn_ce_flag = true;
		} else {
			if (pkti.is_tcp) {
				unsigned int rand = bpf_get_prandom_u32();

				if (-credit >= MARK_THRESH +
				    (rand % MARK_REGION_SIZE)) {
					// Do congestion control
					cwr_flag = true;
				}
			} else if (len > LARGE_PKT_THRESH) {
				// Problem if too many small packets?
				drop_flag = true;
			}
		}
	}

	if (qsp != NULL)
		if (qsp->no_cn)
			cwr_flag = false;

	hbm_update_stats(qsp, len, curtime, congestion_flag, drop_flag,
			 cwr_flag, ecn_ce_flag, &pkti, credit);

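	/* A dropped packet does not consume link bandwidth, so its length is
	 * returned to the credit pool (atomically, since the lock is no
	 * longer held).
	 */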
	if (drop_flag) {
		__sync_add_and_fetch(&(qdp->credit), len);
		rv = DROP_PKT;
	}

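	/* For cgroup_skb egress programs the allowed return values are 0-3:
	 * bit 0 is the usual drop/allow verdict and bit 1 requests congestion
	 * notification (cwr) for the TCP socket that owns the packet.
	 */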
	if (cwr_flag)
		rv |= 2;
	return rv;
}
char _license[] SEC("license") = "GPL";