// SPDX-License-Identifier: GPL-2.0-only
/* net/sched/sch_hhf.c		Heavy-Hitter Filter (HHF)
 *
 * Copyright (C) 2013 Terry Lam <vtlam@google.com>
 * Copyright (C) 2013 Nandita Dukkipati <nanditad@google.com>
 */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/siphash.h>
#include <net/pkt_sched.h>
#include <net/sock.h>

/* Heavy-Hitter Filter (HHF)
 *
 * Principles:
 * Flows are classified into two buckets: non-heavy-hitter and heavy-hitter
 * buckets. Initially, a new flow starts as non-heavy-hitter. Once classified
 * as heavy-hitter, it is immediately switched to the heavy-hitter bucket.
 * The buckets are dequeued by a Weighted Deficit Round Robin (WDRR) scheduler,
 * in which the heavy-hitter bucket is served with less weight.
 * In other words, non-heavy-hitters (e.g., short bursts of critical traffic)
 * are isolated from heavy-hitters (e.g., persistent bulk traffic) and also
 * receive a higher share of bandwidth.
 *
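 * For example, with the default non-HH weight of 2, the WDRR scheduler
 * grants the non-HH bucket two byte quanta for every one quantum granted
 * to the HH bucket.
 *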
 * To capture heavy-hitters, we use the "multi-stage filter" algorithm in the
 * following paper:
 * [EV02] C. Estan and G. Varghese, "New Directions in Traffic Measurement and
 * Accounting", in ACM SIGCOMM, 2002.
 *
 * Conceptually, a multi-stage filter comprises k independent hash functions
 * and k counter arrays. Packets are indexed into k counter arrays by k hash
 * functions, respectively. The counters are then increased by the packet sizes.
 * Therefore,
 * - For a heavy-hitter flow: *all* of its k array counters must be large.
 * - For a non-heavy-hitter flow: some of its k array counters can be large
 *   due to hash collision with other small flows; however, with high
 *   probability, not *all* k counters are large.
 *
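 * As an illustration (hypothetical numbers, not part of the algorithm):
 * with k = 2, a 100-byte packet whose two hashes select slots 5 and 9
 * bumps counter[0][5] and counter[1][9] by 100 each. The flow is flagged
 * as a heavy-hitter only when *both* of those counters exceed the admit
 * threshold; a collision that inflates just one of them is not enough.
 *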
 * By the design of the multi-stage filter algorithm, the false negative rate
 * (heavy-hitters getting away uncaptured) is zero. However, the algorithm is
 * susceptible to false positives (non-heavy-hitters mistakenly classified as
 * heavy-hitters).
 * Therefore, we also implement the following optimizations to reduce false
 * positives by avoiding unnecessary increment of the counter values:
 * - Optimization O1: once a heavy-hitter is identified, its bytes are not
 *   accounted in the array counters. This technique is called "shielding"
 *   in Section 3.3.1 of [EV02].
 * - Optimization O2: conservative update of counters
 *   (Section 3.3.2 of [EV02]),
 *     New counter value = max {old counter value,
 *                              smallest counter value + packet bytes}
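 *   For example (illustrative numbers): if a flow's four counters read
 *   {60, 20, 90, 20} and a 30-byte packet arrives, the smallest counter
 *   is 20, so the candidate value is 20 + 30 = 50; only counters below
 *   50 are raised, giving {60, 50, 90, 50} instead of {90, 50, 120, 50}.
 *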
 * Finally, we refresh the counters periodically since otherwise the counter
 * values will keep accumulating.
 *
 * Once a flow is classified as heavy-hitter, we also save its per-flow state
 * in an exact-matching flow table so that its subsequent packets can be
 * dispatched to the heavy-hitter bucket accordingly.
 *
 * At a high level, this qdisc works as follows:
 * Given a packet p:
 * - If the flow-id of p (e.g., TCP 5-tuple) is already in the exact-matching
 *   heavy-hitter flow table, denoted table T, then send p to the heavy-hitter
 *   bucket.
 * - Otherwise, forward p to the multi-stage filter, denoted filter F
 *   + If F decides that p belongs to a non-heavy-hitter flow, then send p
 *     to the non-heavy-hitter bucket.
 *   + Otherwise, if F decides that p belongs to a new heavy-hitter flow,
 *     then set up a new flow entry for the flow-id of p in the table T and
 *     send p to the heavy-hitter bucket.
 *
 * In this implementation:
 * - T is a fixed-size hash-table with 1024 entries. Hash collision is
 *   resolved by linked-list chaining.
 * - F has four counter arrays, each array containing 1024 32-bit counters.
 *   That means 4 * 1024 * 32 bits = 16KB of memory.
 * - Since each array in F contains 1024 counters, 10 bits are sufficient to
 *   index into each array.
 *   Hence, instead of having four hash functions, we chop the 32-bit
 *   skb-hash into three 10-bit chunks, and the fourth 10-bit index is
 *   computed as the XOR sum of those three chunks, folded with the two
 *   leftover high bits of the hash.
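 *   A sketch of the chopping on a 32-bit hash h (mirroring what
 *   hhf_classify() below does):
 *     pos[0] = h & 0x3FF;
 *     pos[1] = (h >> 10) & 0x3FF;
 *     pos[2] = (h >> 20) & 0x3FF;
 *     pos[3] = pos[0] ^ pos[1] ^ pos[2] ^ (h >> 30);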
 * - We need to clear the counter arrays periodically; however, directly
 *   memsetting 16KB of memory can lead to cache eviction and unwanted delay.
 *   So by representing each counter by a valid bit, we only need to reset
 *   4K of 1 bit (i.e. 512 bytes) instead of 16KB of memory.
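 *   A sketch of the resulting lazy reset on read (cf. hhf_classify()
 *   below): if a slot's valid bit is clear, the counter is treated as 0
 *   and the bit is set:
 *     if (!test_bit(pos, valid[i])) {
 *             array[i][pos] = 0;
 *             __set_bit(pos, valid[i]);
 *     }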
 * - The Deficit Round Robin engine is taken from the fq_codel implementation
 *   (net/sched/sch_fq_codel.c). Note that wdrr_bucket corresponds to
 *   fq_codel_flow in the fq_codel implementation.
 */

/* Non-configurable parameters */
#define HH_FLOWS_CNT	 1024  /* number of entries in exact-matching table T */
#define HHF_ARRAYS_CNT	 4     /* number of arrays in multi-stage filter F */
#define HHF_ARRAYS_LEN	 1024  /* number of counters in each array of F */
#define HHF_BIT_MASK_LEN 10    /* masking 10 bits */
#define HHF_BIT_MASK	 0x3FF /* bitmask of 10 bits */

#define WDRR_BUCKET_CNT  2     /* two buckets for Weighted DRR */
enum wdrr_bucket_idx {
	WDRR_BUCKET_FOR_HH	= 0, /* bucket id for heavy-hitters */
	WDRR_BUCKET_FOR_NON_HH	= 1  /* bucket id for non-heavy-hitters */
};

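/* Wrap-around-safe comparison of two u32 jiffies timestamps, analogous to
 * time_before(): true iff @a is earlier than @b.
 */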
#define hhf_time_before(a, b)	\
	(typecheck(u32, a) && typecheck(u32, b) && ((s32)((a) - (b)) < 0))

/* Heavy-hitter per-flow state */
struct hh_flow_state {
	u32		 hash_id;	/* hash of flow-id (e.g. TCP 5-tuple) */
	u32		 hit_timestamp;	/* last time heavy-hitter was seen */
	struct list_head flowchain;	/* chaining under hash collision */
};

/* Weighted Deficit Round Robin (WDRR) scheduler */
struct wdrr_bucket {
	struct sk_buff	 *head;
	struct sk_buff	 *tail;
	struct list_head bucketchain;
	int		 deficit;
};

struct hhf_sched_data {
	struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
	siphash_key_t	   perturbation;   /* hash perturbation */
	u32		   quantum;	   /* psched_mtu(qdisc_dev(sch)); */
	u32		   drop_overlimit; /* number of times max qdisc packet
					    * limit was hit
					    */
	struct list_head   *hh_flows;	   /* table T (currently active HHs) */
	u32		   hh_flows_limit;	 /* max active HH allocs */
	u32		   hh_flows_overlimit;	 /* num of disallowed HH allocs */
	u32		   hh_flows_total_cnt;	 /* total admitted HHs */
	u32		   hh_flows_current_cnt; /* total current HHs */
	u32		   *hhf_arrays[HHF_ARRAYS_CNT]; /* HH filter F */
	u32		   hhf_arrays_reset_timestamp;	/* last time hhf_arrays
							 * was reset
							 */
	unsigned long	   *hhf_valid_bits[HHF_ARRAYS_CNT]; /* shadow valid bits
							     * of hhf_arrays
							     */
	/* Similar to the "new_flows" vs. "old_flows" concept in fq_codel DRR */
	struct list_head   new_buckets; /* list of new buckets */
	struct list_head   old_buckets; /* list of old buckets */

	/* Configurable HHF parameters */
	u32		   hhf_reset_timeout;	/* interval to reset counter
						 * arrays in filter F
						 * (default 40ms)
						 */
	u32		   hhf_admit_bytes;	/* counter thresh to classify as
						 * HH (default 128KB).
						 * With these default values,
						 * 128KB / 40ms = 25 Mbps
						 * i.e., we expect to capture HHs
						 * sending > 25 Mbps.
						 */
	u32		   hhf_evict_timeout;	/* aging threshold to evict idle
						 * HHs out of table T. This should
						 * be large enough to avoid
						 * reordering during HH eviction.
						 * (default 1s)
						 */
	u32		   hhf_non_hh_weight;	/* WDRR weight for non-HHs
						 * (default 2,
						 *  i.e., non-HH : HH = 2 : 1)
						 */
};

static u32 hhf_time_stamp(void)
{
	return jiffies;
}

/* Looks up a heavy-hitter flow in a chaining list of table T. */
static struct hh_flow_state *seek_list(const u32 hash,
				       struct list_head *head,
				       struct hhf_sched_data *q)
{
	struct hh_flow_state *flow, *next;
	u32 now = hhf_time_stamp();

	if (list_empty(head))
		return NULL;

	list_for_each_entry_safe(flow, next, head, flowchain) {
		u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;

		if (hhf_time_before(prev, now)) {
			/* Delete expired heavy-hitters, but preserve one entry
			 * to avoid kzalloc() when next time this slot is hit.
			 */
			if (list_is_last(&flow->flowchain, head))
				return NULL;
			list_del(&flow->flowchain);
			kfree(flow);
			q->hh_flows_current_cnt--;
		} else if (flow->hash_id == hash) {
			return flow;
		}
	}
	return NULL;
}

/* Returns a flow state entry for a new heavy-hitter. Either reuses an expired
 * entry or dynamically allocates a new entry.
 */
static struct hh_flow_state *alloc_new_hh(struct list_head *head,
					  struct hhf_sched_data *q)
{
	struct hh_flow_state *flow;
	u32 now = hhf_time_stamp();

	if (!list_empty(head)) {
		/* Find an expired heavy-hitter flow entry. */
		list_for_each_entry(flow, head, flowchain) {
			u32 prev = flow->hit_timestamp + q->hhf_evict_timeout;

			if (hhf_time_before(prev, now))
				return flow;
		}
	}

	if (q->hh_flows_current_cnt >= q->hh_flows_limit) {
		q->hh_flows_overlimit++;
		return NULL;
	}
	/* Create new entry. */
	flow = kzalloc(sizeof(struct hh_flow_state), GFP_ATOMIC);
	if (!flow)
		return NULL;

	q->hh_flows_current_cnt++;
	INIT_LIST_HEAD(&flow->flowchain);
	list_add_tail(&flow->flowchain, head);

	return flow;
}

/* Assigns packets to WDRR buckets. Implements a multi-stage filter to
 * classify heavy-hitters.
 */
static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	u32 tmp_hash, hash;
	u32 xorsum, filter_pos[HHF_ARRAYS_CNT], flow_pos;
	struct hh_flow_state *flow;
	u32 pkt_len, min_hhf_val;
	int i;
	u32 prev;
	u32 now = hhf_time_stamp();

	/* Reset the HHF counter arrays if this is the right time. */
	prev = q->hhf_arrays_reset_timestamp + q->hhf_reset_timeout;
	if (hhf_time_before(prev, now)) {
		for (i = 0; i < HHF_ARRAYS_CNT; i++)
			bitmap_zero(q->hhf_valid_bits[i], HHF_ARRAYS_LEN);
		q->hhf_arrays_reset_timestamp = now;
	}

	/* Get hashed flow-id of the skb. */
	hash = skb_get_hash_perturb(skb, &q->perturbation);

	/* Check if this packet belongs to an already established HH flow. */
	flow_pos = hash & HHF_BIT_MASK;
	flow = seek_list(hash, &q->hh_flows[flow_pos], q);
	if (flow) { /* found its HH flow */
		flow->hit_timestamp = now;
		return WDRR_BUCKET_FOR_HH;
	}

	/* Now pass the packet through the multi-stage filter. */
	tmp_hash = hash;
	xorsum = 0;
	for (i = 0; i < HHF_ARRAYS_CNT - 1; i++) {
		/* Split the skb_hash into three 10-bit chunks. */
		filter_pos[i] = tmp_hash & HHF_BIT_MASK;
		xorsum ^= filter_pos[i];
		tmp_hash >>= HHF_BIT_MASK_LEN;
	}
	/* The last chunk is the XOR sum of the other chunks, folded with
	 * the two leftover high bits of the hash.
	 */
	filter_pos[HHF_ARRAYS_CNT - 1] = xorsum ^ tmp_hash;

	pkt_len = qdisc_pkt_len(skb);
	min_hhf_val = ~0U;
	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
		u32 val;

		if (!test_bit(filter_pos[i], q->hhf_valid_bits[i])) {
			q->hhf_arrays[i][filter_pos[i]] = 0;
			__set_bit(filter_pos[i], q->hhf_valid_bits[i]);
		}

		val = q->hhf_arrays[i][filter_pos[i]] + pkt_len;
		if (min_hhf_val > val)
			min_hhf_val = val;
	}

	/* Found a new HH iff all counter values > HH admit threshold. */
	if (min_hhf_val > q->hhf_admit_bytes) {
		/* Just captured a new heavy-hitter. */
		flow = alloc_new_hh(&q->hh_flows[flow_pos], q);
		if (!flow) /* memory alloc problem */
			return WDRR_BUCKET_FOR_NON_HH;
		flow->hash_id = hash;
		flow->hit_timestamp = now;
		q->hh_flows_total_cnt++;

		/* By returning without updating counters in q->hhf_arrays,
		 * we implicitly implement "shielding" (see Optimization O1).
		 */
		return WDRR_BUCKET_FOR_HH;
	}

	/* Conservative update of HHF arrays (see Optimization O2). */
	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
		if (q->hhf_arrays[i][filter_pos[i]] < min_hhf_val)
			q->hhf_arrays[i][filter_pos[i]] = min_hhf_val;
	}
	return WDRR_BUCKET_FOR_NON_HH;
}

/* Removes one skb from head of bucket. */
static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)
{
	struct sk_buff *skb = bucket->head;

	bucket->head = skb->next;
	skb_mark_not_on_list(skb);
	return skb;
}

/* Tail-adds skb to bucket. */
static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb)
{
	if (bucket->head == NULL)
		bucket->head = skb;
	else
		bucket->tail->next = skb;
	bucket->tail = skb;
	skb->next = NULL;
}

static unsigned int hhf_drop(struct Qdisc *sch, struct sk_buff **to_free)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct wdrr_bucket *bucket;

	/* Always try to drop from heavy-hitters first. */
	bucket = &q->buckets[WDRR_BUCKET_FOR_HH];
	if (!bucket->head)
		bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH];

	if (bucket->head) {
		struct sk_buff *skb = dequeue_head(bucket);

		sch->q.qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_drop(skb, sch, to_free);
	}

	/* Return id of the bucket from which the packet was dropped. */
	return bucket - q->buckets;
}

static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	enum wdrr_bucket_idx idx;
	struct wdrr_bucket *bucket;
	unsigned int prev_backlog;

	idx = hhf_classify(skb, sch);

	bucket = &q->buckets[idx];
	bucket_add(bucket, skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&bucket->bucketchain)) {
		unsigned int weight;

		/* The logic of new_buckets vs. old_buckets is the same as
		 * new_flows vs. old_flows in the implementation of fq_codel,
		 * i.e., short bursts of non-HHs should have strict priority.
		 */
		if (idx == WDRR_BUCKET_FOR_HH) {
			/* Always move heavy-hitters to old bucket. */
			weight = 1;
			list_add_tail(&bucket->bucketchain, &q->old_buckets);
		} else {
			weight = q->hhf_non_hh_weight;
			list_add_tail(&bucket->bucketchain, &q->new_buckets);
		}
		bucket->deficit = weight * q->quantum;
	}
	if (++sch->q.qlen <= sch->limit)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	q->drop_overlimit++;
	/* Return Congestion Notification only if we dropped a packet from this
	 * bucket.
	 */
	if (hhf_drop(sch, to_free) == idx)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let upper stack know this. */
	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *hhf_dequeue(struct Qdisc *sch)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	struct wdrr_bucket *bucket;
	struct list_head *head;

begin:
	head = &q->new_buckets;
	if (list_empty(head)) {
		head = &q->old_buckets;
		if (list_empty(head))
			return NULL;
	}
	bucket = list_first_entry(head, struct wdrr_bucket, bucketchain);

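	/* Bucket ran out of deficit: replenish by weight * quantum and
	 * rotate it to the tail of old_buckets, per DRR.
	 */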
	if (bucket->deficit <= 0) {
		int weight = (bucket - q->buckets == WDRR_BUCKET_FOR_HH) ?
			      1 : q->hhf_non_hh_weight;

		bucket->deficit += weight * q->quantum;
		list_move_tail(&bucket->bucketchain, &q->old_buckets);
		goto begin;
	}

	if (bucket->head) {
		skb = dequeue_head(bucket);
		sch->q.qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
	}

	if (!skb) {
		/* Force a pass through old_buckets to prevent starvation. */
		if ((head == &q->new_buckets) && !list_empty(&q->old_buckets))
			list_move_tail(&bucket->bucketchain, &q->old_buckets);
		else
			list_del_init(&bucket->bucketchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	bucket->deficit -= qdisc_pkt_len(skb);

	return skb;
}

static void hhf_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = hhf_dequeue(sch)) != NULL)
		rtnl_kfree_skbs(skb, skb);
}

static void hhf_destroy(struct Qdisc *sch)
{
	int i;
	struct hhf_sched_data *q = qdisc_priv(sch);

	for (i = 0; i < HHF_ARRAYS_CNT; i++) {
		kvfree(q->hhf_arrays[i]);
		kvfree(q->hhf_valid_bits[i]);
	}

	if (!q->hh_flows)
		return;

	for (i = 0; i < HH_FLOWS_CNT; i++) {
		struct hh_flow_state *flow, *next;
		struct list_head *head = &q->hh_flows[i];

		if (list_empty(head))
			continue;
		list_for_each_entry_safe(flow, next, head, flowchain) {
			list_del(&flow->flowchain);
			kfree(flow);
		}
	}
	kvfree(q->hh_flows);
}

static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
	[TCA_HHF_BACKLOG_LIMIT]	 = { .type = NLA_U32 },
	[TCA_HHF_QUANTUM]	 = { .type = NLA_U32 },
	[TCA_HHF_HH_FLOWS_LIMIT] = { .type = NLA_U32 },
	[TCA_HHF_RESET_TIMEOUT]	 = { .type = NLA_U32 },
	[TCA_HHF_ADMIT_BYTES]	 = { .type = NLA_U32 },
	[TCA_HHF_EVICT_TIMEOUT]	 = { .type = NLA_U32 },
	[TCA_HHF_NON_HH_WEIGHT]	 = { .type = NLA_U32 },
};

static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HHF_MAX + 1];
	unsigned int qlen, prev_backlog;
	int err;
	u64 non_hh_quantum;
	u32 new_quantum = q->quantum;
	u32 new_hhf_non_hh_weight = q->hhf_non_hh_weight;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_HHF_MAX, opt, hhf_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_HHF_QUANTUM])
		new_quantum = nla_get_u32(tb[TCA_HHF_QUANTUM]);

	if (tb[TCA_HHF_NON_HH_WEIGHT])
		new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);

	non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
	if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX)
		return -EINVAL;

	sch_tree_lock(sch);

	if (tb[TCA_HHF_BACKLOG_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]);

	q->quantum = new_quantum;
	q->hhf_non_hh_weight = new_hhf_non_hh_weight;

	if (tb[TCA_HHF_HH_FLOWS_LIMIT])
		q->hh_flows_limit = nla_get_u32(tb[TCA_HHF_HH_FLOWS_LIMIT]);

	if (tb[TCA_HHF_RESET_TIMEOUT]) {
		u32 us = nla_get_u32(tb[TCA_HHF_RESET_TIMEOUT]);

		q->hhf_reset_timeout = usecs_to_jiffies(us);
	}

	if (tb[TCA_HHF_ADMIT_BYTES])
		q->hhf_admit_bytes = nla_get_u32(tb[TCA_HHF_ADMIT_BYTES]);

	if (tb[TCA_HHF_EVICT_TIMEOUT]) {
		u32 us = nla_get_u32(tb[TCA_HHF_EVICT_TIMEOUT]);

		q->hhf_evict_timeout = usecs_to_jiffies(us);
	}

	qlen = sch->q.qlen;
	prev_backlog = sch->qstats.backlog;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = hhf_dequeue(sch);

		rtnl_kfree_skbs(skb, skb);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
				  prev_backlog - sch->qstats.backlog);

	sch_tree_unlock(sch);
	return 0;
}

static int hhf_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 1000;
	q->quantum = psched_mtu(qdisc_dev(sch));
	get_random_bytes(&q->perturbation, sizeof(q->perturbation));
	INIT_LIST_HEAD(&q->new_buckets);
	INIT_LIST_HEAD(&q->old_buckets);

	/* Configurable HHF parameters */
	q->hhf_reset_timeout = HZ / 25; /* 40 ms */
	q->hhf_admit_bytes = 131072;	/* 128 KB */
	q->hhf_evict_timeout = HZ;	/* 1 sec */
	q->hhf_non_hh_weight = 2;

	if (opt) {
		int err = hhf_change(sch, opt, extack);

		if (err)
			return err;
	}

	if (!q->hh_flows) {
		/* Initialize heavy-hitter flow table. */
		q->hh_flows = kvcalloc(HH_FLOWS_CNT, sizeof(struct list_head),
				       GFP_KERNEL);
		if (!q->hh_flows)
			return -ENOMEM;
		for (i = 0; i < HH_FLOWS_CNT; i++)
			INIT_LIST_HEAD(&q->hh_flows[i]);

		/* Cap max active HHs at twice len of hh_flows table. */
		q->hh_flows_limit = 2 * HH_FLOWS_CNT;
		q->hh_flows_overlimit = 0;
		q->hh_flows_total_cnt = 0;
		q->hh_flows_current_cnt = 0;

		/* Initialize heavy-hitter filter arrays. */
		for (i = 0; i < HHF_ARRAYS_CNT; i++) {
			q->hhf_arrays[i] = kvcalloc(HHF_ARRAYS_LEN,
						    sizeof(u32),
						    GFP_KERNEL);
			if (!q->hhf_arrays[i]) {
				/* Note: hhf_destroy() will be called
				 * by our caller.
				 */
				return -ENOMEM;
			}
		}
		q->hhf_arrays_reset_timestamp = hhf_time_stamp();

		/* Initialize valid bits of heavy-hitter filter arrays. */
		for (i = 0; i < HHF_ARRAYS_CNT; i++) {
			q->hhf_valid_bits[i] = kvzalloc(HHF_ARRAYS_LEN /
							BITS_PER_BYTE, GFP_KERNEL);
			if (!q->hhf_valid_bits[i]) {
				/* Note: hhf_destroy() will be called
				 * by our caller.
				 */
				return -ENOMEM;
			}
		}

		/* Initialize Weighted DRR buckets. */
		for (i = 0; i < WDRR_BUCKET_CNT; i++) {
			struct wdrr_bucket *bucket = q->buckets + i;

			INIT_LIST_HEAD(&bucket->bucketchain);
		}
	}

	return 0;
}

static int hhf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_HHF_BACKLOG_LIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_HHF_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_HHF_HH_FLOWS_LIMIT, q->hh_flows_limit) ||
	    nla_put_u32(skb, TCA_HHF_RESET_TIMEOUT,
			jiffies_to_usecs(q->hhf_reset_timeout)) ||
	    nla_put_u32(skb, TCA_HHF_ADMIT_BYTES, q->hhf_admit_bytes) ||
	    nla_put_u32(skb, TCA_HHF_EVICT_TIMEOUT,
			jiffies_to_usecs(q->hhf_evict_timeout)) ||
	    nla_put_u32(skb, TCA_HHF_NON_HH_WEIGHT, q->hhf_non_hh_weight))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int hhf_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct hhf_sched_data *q = qdisc_priv(sch);
	struct tc_hhf_xstats st = {
		.drop_overlimit = q->drop_overlimit,
		.hh_overlimit	= q->hh_flows_overlimit,
		.hh_tot_count	= q->hh_flows_total_cnt,
		.hh_cur_count	= q->hh_flows_current_cnt,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
	.id		= "hhf",
	.priv_size	= sizeof(struct hhf_sched_data),

	.enqueue	= hhf_enqueue,
	.dequeue	= hhf_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= hhf_init,
	.reset		= hhf_reset,
	.destroy	= hhf_destroy,
	.change		= hhf_change,
	.dump		= hhf_dump,
	.dump_stats	= hhf_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init hhf_module_init(void)
{
	return register_qdisc(&hhf_qdisc_ops);
}

static void __exit hhf_module_exit(void)
{
	unregister_qdisc(&hhf_qdisc_ops);
}

module_init(hhf_module_init)
module_exit(hhf_module_exit)
MODULE_AUTHOR("Terry Lam");
MODULE_AUTHOR("Nandita Dukkipati");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Heavy-Hitter Filter (HHF)");