Lines matching the full-text query "+full:1 +full:q" in net/sched/sch_cake.c (the CAKE qdisc):
100 * @rec_inv_sqrt: reciprocal value of sqrt(count) >> 1
260 CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
299 static u16 quantum_div[CAKE_QUEUES + 1] = {0};
305 1, 1, 1, 1, 1, 1, 1, 1,
315 2, 0, 1, 2, 4, 2, 2, 2,
316 1, 2, 1, 2, 1, 2, 1, 2,
326 0, 1, 0, 0, 2, 0, 0, 0,
327 1, 0, 0, 0, 0, 0, 0, 0,
337 0, 1, 0, 0, 2, 0, 0, 0,
338 1, 0, 0, 0, 0, 0, 0, 0,
360 static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
361 static const u8 bulk_order[] = {1, 0, 2, 3};
382 val = (val * invsqrt) >> (32 - 2 + 1); in cobalt_newton_step()
397 * count values, particularly when stepping from count 1 to 2 or vice versa.
413 for (v.count = 1; v.count < REC_INV_SQRT_CACHE; v.count++) { in cobalt_cache_init()
464 vars->count = 1; in cobalt_queue_full()
524 * both the '0' and '1' entries in the invsqrt cache are 0xFFFFFFFF, as close in cobalt_should_drop()
545 vars->count = 1; in cobalt_should_drop()
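The cobalt_newton_step() and cobalt_cache_init() hits above maintain rec_inv_sqrt, a 0.32 fixed-point estimate of 1/sqrt(count) that CoDel-style AQMs use to tighten the interval between drops as the drop count rises. Below is a minimal userspace sketch of the same Newton-Raphson iteration, x' = x * (3 - count * x^2) / 2; the cache size and the four-steps-per-count seeding are assumptions modelled on these hits, not the kernel's exact constants.

    #include <stdint.h>
    #include <stdio.h>

    #define INV_SQRT_CACHE 16                         /* assumed cache size */

    static uint32_t rec_inv_sqrt_cache[INV_SQRT_CACHE + 1];

    /* One Newton-Raphson step for x ~= 1/sqrt(count), with x stored as a
     * 0.32 fixed-point fraction:  x' = x * (3 - count * x^2) / 2.
     */
    static uint32_t newton_step(uint32_t count, uint32_t x)
    {
        uint64_t x2  = ((uint64_t)x * x) >> 32;       /* x^2        (0.32) */
        uint64_t val = (3ULL << 32) - count * x2;     /* 3 - c*x^2  (2.32) */

        val >>= 2;                                    /* keep the next multiply in 64 bits */
        return (uint32_t)((val * x) >> (32 - 2 + 1)); /* *x, restore scale, then /2 */
    }

    int main(void)
    {
        uint32_t x = ~0U;                             /* count == 1: 1/sqrt(1) ~= 1.0 */

        rec_inv_sqrt_cache[1] = x;
        for (uint32_t count = 2; count <= INV_SQRT_CACHE; count++) {
            for (int i = 0; i < 4; i++)               /* a few steps, seeded from count - 1 */
                x = newton_step(count, x);
            rec_inv_sqrt_cache[count] = x;
        }
        /* 1/sqrt(16) = 0.25, i.e. 0x40000000 in 0.32 fixed point */
        printf("count=16 -> %#x\n", rec_inv_sqrt_cache[16]);
        return 0;
    }

The (32 - 2 + 1) shift visible at line 382 undoes the earlier pre-shift by 2 and performs the divide-by-two of the Newton update in a single step.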
646 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, in cake_hash() argument
722 flow_hash = flow_override - 1; in cake_hash()
726 dsthost_hash = host_override - 1; in cake_hash()
727 srchost_hash = host_override - 1; in cake_hash()
742 if (likely(q->tags[reduced_hash] == flow_hash && in cake_hash()
743 q->flows[reduced_hash].set)) { in cake_hash()
744 q->way_directs++; in cake_hash()
756 i++, k = (k + 1) % CAKE_SET_WAYS) { in cake_hash()
757 if (q->tags[outer_hash + k] == flow_hash) { in cake_hash()
759 q->way_hits++; in cake_hash()
761 if (!q->flows[outer_hash + k].set) { in cake_hash()
775 i++, k = (k + 1) % CAKE_SET_WAYS) { in cake_hash()
776 if (!q->flows[outer_hash + k].set) { in cake_hash()
777 q->way_misses++; in cake_hash()
787 q->way_collisions++; in cake_hash()
788 if (q->flows[outer_hash + k].set == CAKE_SET_BULK) { in cake_hash()
789 q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--; in cake_hash()
790 q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--; in cake_hash()
797 q->tags[reduced_hash] = flow_hash; in cake_hash()
804 i++, k = (k + 1) % CAKE_SET_WAYS) { in cake_hash()
805 if (q->hosts[outer_hash + k].srchost_tag == in cake_hash()
810 i++, k = (k + 1) % CAKE_SET_WAYS) { in cake_hash()
811 if (!q->hosts[outer_hash + k].srchost_bulk_flow_count) in cake_hash()
814 q->hosts[outer_hash + k].srchost_tag = srchost_hash; in cake_hash()
817 if (q->flows[reduced_hash].set == CAKE_SET_BULK) in cake_hash()
818 q->hosts[srchost_idx].srchost_bulk_flow_count++; in cake_hash()
819 q->flows[reduced_hash].srchost = srchost_idx; in cake_hash()
827 i++, k = (k + 1) % CAKE_SET_WAYS) { in cake_hash()
828 if (q->hosts[outer_hash + k].dsthost_tag == in cake_hash()
833 i++, k = (k + 1) % CAKE_SET_WAYS) { in cake_hash()
834 if (!q->hosts[outer_hash + k].dsthost_bulk_flow_count) in cake_hash()
837 q->hosts[outer_hash + k].dsthost_tag = dsthost_hash; in cake_hash()
840 if (q->flows[reduced_hash].set == CAKE_SET_BULK) in cake_hash()
841 q->hosts[dsthost_idx].dsthost_bulk_flow_count++; in cake_hash()
842 q->flows[reduced_hash].dsthost = dsthost_idx; in cake_hash()
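The cake_hash() hits above outline a set-associative flow table: a direct probe of the preferred slot (way_directs), a scan of the rest of the set (way_hits), allocation of an empty slot for a new flow (way_misses), and finally eviction when the whole set is occupied (way_collisions). Here is a standalone toy version of that lookup pattern; the table sizes, the modulo-based bucket reduction and the counter names are illustrative rather than the kernel's.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define QUEUES 1024
    #define WAYS   8

    static uint32_t tags[QUEUES];
    static bool     used[QUEUES];
    static unsigned way_directs, way_hits, way_misses, way_collisions;

    static uint32_t flow_lookup(uint32_t flow_hash)
    {
        uint32_t reduced = flow_hash % QUEUES;
        uint32_t outer   = reduced - reduced % WAYS;      /* first slot of this set */
        uint32_t k       = reduced % WAYS;

        if (used[reduced] && tags[reduced] == flow_hash) {
            way_directs++;                                /* fast path: preferred slot */
            return reduced;
        }
        for (unsigned i = 0; i < WAYS; i++, k = (k + 1) % WAYS) {
            if (used[outer + k] && tags[outer + k] == flow_hash) {
                way_hits++;                               /* found elsewhere in the set */
                return outer + k;
            }
        }
        for (unsigned i = 0; i < WAYS; i++, k = (k + 1) % WAYS) {
            if (!used[outer + k]) {
                way_misses++;                             /* new flow, free slot */
                goto claim;
            }
        }
        way_collisions++;                                 /* set full: evict the preferred slot */
    claim:
        used[outer + k] = true;
        tags[outer + k] = flow_hash;
        return outer + k;
    }

    int main(void)
    {
        for (uint32_t h = 1; h <= 5000; h++)
            flow_lookup(h * 2654435761u);                 /* arbitrary synthetic hashes */
        printf("directs=%u hits=%u misses=%u collisions=%u\n",
               way_directs, way_hits, way_misses, way_collisions);
        return 0;
    }

On eviction the real code also decrements the displaced flow's per-host bulk-flow counters (lines 789-790) before re-tagging the slot and refreshing the src/dst host entries.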
958 const u8 *ptr = (const u8 *)(tcph + 1); in cake_get_tcpopt()
993 * @return -1, 0 or 1 as normal compare functions
1013 return -1; in cake_tcph_sack_compare()
1016 return 1; in cake_tcph_sack_compare()
1030 return -1; in cake_tcph_sack_compare()
1052 return -1; in cake_tcph_sack_compare()
1062 return bytes_b > bytes_a ? 1 : 0; in cake_tcph_sack_compare()
1084 const u8 *ptr = (const u8 *)(tcph + 1); in cake_tcph_may_drop()
1091 * 0x0FFF0000 = all TCP flags (confirm ACK=1, others zero) in cake_tcph_may_drop()
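The mask comment in the cake_tcph_may_drop() hits encodes the rule that only a "pure" ACK is a candidate for the ACK filter: ACK set, all other flags and the reserved bits clear, with ECE/CWR tolerated and compared separately so ECN signals are never lost. A hedged userspace restatement of that test on the raw flags byte (not the kernel's tcp_flag_word() form):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define TH_FIN 0x01
    #define TH_SYN 0x02
    #define TH_RST 0x04
    #define TH_PSH 0x08
    #define TH_ACK 0x10
    #define TH_URG 0x20
    #define TH_ECE 0x40
    #define TH_CWR 0x80

    /* ACK must be set and FIN/SYN/RST/PSH/URG clear; the ECN bits are masked
     * out here because the qdisc handles them separately.
     */
    static bool is_pure_ack(uint8_t flags)
    {
        return (flags & ~(TH_ECE | TH_CWR)) == TH_ACK;
    }

    int main(void)
    {
        assert(is_pure_ack(TH_ACK));                /* plain ACK: eligible   */
        assert(is_pure_ack(TH_ACK | TH_ECE));       /* ACK + ECE: eligible   */
        assert(!is_pure_ack(TH_ACK | TH_PSH));      /* carries data: keep it */
        assert(!is_pure_ack(TH_SYN | TH_ACK));      /* handshake: keep it    */
        return 0;
    }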
1151 static struct sk_buff *cake_ack_filter(struct cake_sched_data *q, in cake_ack_filter() argument
1154 bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE; in cake_ack_filter()
1224 WARN_ON(1); /* shouldn't happen */ in cake_ack_filter()
1314 static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off) in cake_calc_overhead() argument
1316 if (q->rate_flags & CAKE_FLAG_OVERHEAD) in cake_calc_overhead()
1319 if (q->max_netlen < len) in cake_calc_overhead()
1320 q->max_netlen = len; in cake_calc_overhead()
1321 if (q->min_netlen > len) in cake_calc_overhead()
1322 q->min_netlen = len; in cake_calc_overhead()
1324 len += q->rate_overhead; in cake_calc_overhead()
1326 if (len < q->rate_mpu) in cake_calc_overhead()
1327 len = q->rate_mpu; in cake_calc_overhead()
1329 if (q->atm_mode == CAKE_ATM_ATM) { in cake_calc_overhead()
1333 } else if (q->atm_mode == CAKE_ATM_PTM) { in cake_calc_overhead()
1341 if (q->max_adjlen < len) in cake_calc_overhead()
1342 q->max_adjlen = len; in cake_calc_overhead()
1343 if (q->min_adjlen > len) in cake_calc_overhead()
1344 q->min_adjlen = len; in cake_calc_overhead()
1349 static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb) in cake_overhead() argument
1355 u16 segs = 1; in cake_overhead()
1357 q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8); in cake_overhead()
1360 return cake_calc_overhead(q, len, off); in cake_overhead()
1390 last_len = skb->len - shinfo->gso_size * (segs - 1); in cake_overhead()
1392 return (cake_calc_overhead(q, len, off) * (segs - 1) + in cake_overhead()
1393 cake_calc_overhead(q, last_len, off)); in cake_overhead()
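The cake_calc_overhead()/cake_overhead() hits adjust each packet's length to its on-wire cost: add the configured per-packet overhead, enforce a minimum ("mpu"), round up to whole 53-byte ATM cells (48 payload bytes each) or add PTM 64b/65b framing, and charge a GSO super-packet as if every segment were sent on its own. A standalone sketch of that arithmetic; the example overhead figures and the assumption that lengths are in bytes are mine, not the kernel's.

    #include <stdint.h>
    #include <stdio.h>

    enum link_mode { MODE_RAW, MODE_ATM, MODE_PTM };

    static uint32_t adjust_len(uint32_t len, uint32_t overhead, uint32_t mpu,
                               enum link_mode mode)
    {
        len += overhead;                          /* e.g. Ethernet/PPPoE framing */
        if (len < mpu)
            len = mpu;                            /* minimum per-packet unit */

        if (mode == MODE_ATM)
            len = (len + 47) / 48 * 53;           /* whole 53-byte cells, 48 bytes payload */
        else if (mode == MODE_PTM)
            len += (len + 63) / 64;               /* one extra byte per 64, rounded up */
        return len;
    }

    /* A GSO super-packet is charged as (segs - 1) full segments plus one
     * (possibly shorter) final segment, each with its own overhead.
     */
    static uint32_t gso_adjusted_len(uint32_t skb_len, uint32_t hdr_len,
                                     uint32_t gso_size, uint32_t overhead,
                                     uint32_t mpu, enum link_mode mode)
    {
        uint32_t segs     = (skb_len - hdr_len + gso_size - 1) / gso_size;
        uint32_t seg_len  = hdr_len + gso_size;
        uint32_t last_len = skb_len - gso_size * (segs - 1);

        return adjust_len(seg_len, overhead, mpu, mode) * (segs - 1) +
               adjust_len(last_len, overhead, mpu, mode);
    }

    int main(void)
    {
        /* 1500-byte packet with an assumed 10 bytes of ATM-layer overhead:
         * 1510 bytes -> 32 cells -> 1696 bytes on the wire */
        printf("%u\n", adjust_len(1500, 10, 0, MODE_ATM));
        /* a TSO burst of 1448-byte MSS segments with 66-byte headers */
        printf("%u\n", gso_adjusted_len(65226, 66, 1448, 38, 0, MODE_RAW));
        return 0;
    }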
1396 static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j) in cake_heap_swap() argument
1398 struct cake_heap_entry ii = q->overflow_heap[i]; in cake_heap_swap()
1399 struct cake_heap_entry jj = q->overflow_heap[j]; in cake_heap_swap()
1401 q->overflow_heap[i] = jj; in cake_heap_swap()
1402 q->overflow_heap[j] = ii; in cake_heap_swap()
1404 q->tins[ii.t].overflow_idx[ii.b] = j; in cake_heap_swap()
1405 q->tins[jj.t].overflow_idx[jj.b] = i; in cake_heap_swap()
1408 static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i) in cake_heap_get_backlog() argument
1410 struct cake_heap_entry ii = q->overflow_heap[i]; in cake_heap_get_backlog()
1412 return q->tins[ii.t].backlogs[ii.b]; in cake_heap_get_backlog()
1415 static void cake_heapify(struct cake_sched_data *q, u16 i) in cake_heapify() argument
1418 u32 mb = cake_heap_get_backlog(q, i); in cake_heapify()
1422 u32 l = m + m + 1; in cake_heapify()
1423 u32 r = l + 1; in cake_heapify()
1426 u32 lb = cake_heap_get_backlog(q, l); in cake_heapify()
1435 u32 rb = cake_heap_get_backlog(q, r); in cake_heapify()
1444 cake_heap_swap(q, i, m); in cake_heapify()
1452 static void cake_heapify_up(struct cake_sched_data *q, u16 i) in cake_heapify_up() argument
1455 u16 p = (i - 1) >> 1; in cake_heapify_up()
1456 u32 ib = cake_heap_get_backlog(q, i); in cake_heapify_up()
1457 u32 pb = cake_heap_get_backlog(q, p); in cake_heapify_up()
1460 cake_heap_swap(q, i, p); in cake_heapify_up()
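cake_heap_swap(), cake_heapify() and cake_heapify_up() maintain an array-backed max-heap keyed on per-queue backlog, with overflow_idx[] remembering where each (tin, flow) currently sits so it can be re-sifted after an enqueue; cake_drop() then always finds the fattest queue at slot 0. A toy standalone version of the same structure (sizes and sample backlogs invented):

    #include <stdint.h>
    #include <stdio.h>

    #define NQ 8
    static uint32_t backlog[NQ] = { 10, 700, 42, 3, 900, 17, 256, 64 };
    static uint16_t heap[NQ];                     /* heap of queue indices             */
    static uint16_t heap_pos[NQ];                 /* queue -> slot, cf. overflow_idx[] */

    static void heap_swap(uint16_t i, uint16_t j)
    {
        uint16_t qi = heap[i], qj = heap[j];

        heap[i] = qj;  heap[j] = qi;
        heap_pos[qi] = j;  heap_pos[qj] = i;
    }

    static void heapify_down(uint16_t i)          /* cf. cake_heapify() */
    {
        for (;;) {
            uint16_t m = i, l = 2 * i + 1, r = l + 1;

            if (l < NQ && backlog[heap[l]] > backlog[heap[m]])
                m = l;
            if (r < NQ && backlog[heap[r]] > backlog[heap[m]])
                m = r;
            if (m == i)
                break;
            heap_swap(i, m);
            i = m;
        }
    }

    static void heapify_up(uint16_t i)            /* cf. cake_heapify_up() */
    {
        while (i && backlog[heap[(i - 1) / 2]] < backlog[heap[i]]) {
            heap_swap(i, (i - 1) / 2);
            i = (i - 1) / 2;
        }
    }

    int main(void)
    {
        for (uint16_t i = 0; i < NQ; i++)
            heap[i] = heap_pos[i] = i;
        for (int i = NQ / 2 - 1; i >= 0; i--)     /* build the heap bottom-up */
            heapify_down((uint16_t)i);
        printf("fattest queue: %u (%u bytes)\n", heap[0], backlog[heap[0]]);

        backlog[3] += 5000;                       /* queue 3 just grew on enqueue */
        heapify_up(heap_pos[3]);                  /* restore the heap property    */
        printf("fattest queue: %u (%u bytes)\n", heap[0], backlog[heap[0]]);
        return 0;
    }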
1468 static int cake_advance_shaper(struct cake_sched_data *q, in cake_advance_shaper() argument
1478 if (q->rate_ns) { in cake_advance_shaper()
1480 u64 global_dur = (len * q->rate_ns) >> q->rate_shft; in cake_advance_shaper()
1481 u64 failsafe_dur = global_dur + (global_dur >> 1); in cake_advance_shaper()
1491 q->time_next_packet = ktime_add_ns(q->time_next_packet, in cake_advance_shaper()
1494 q->failsafe_next_packet = \ in cake_advance_shaper()
1495 ktime_add_ns(q->failsafe_next_packet, in cake_advance_shaper()
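cake_advance_shaper() implements a virtual-clock shaper: each packet pushes time_next_packet forward by len * rate_ns >> rate_shft, where rate_ns/rate_shft hold nanoseconds-per-byte in fixed point (the normalisation visible further down in cake_set_rate(), line 2276, keeps the scaled value in range), and a failsafe clock advances by 1.5x that amount as a safety margin. A standalone sketch, under the assumption that the configured rate is in bytes per second:

    #include <stdint.h>
    #include <stdio.h>

    struct shaper {
        uint64_t rate_ns;                         /* ns per byte, scaled by 2^rate_shft */
        uint32_t rate_shft;
        uint64_t time_next_packet;                /* ns */
        uint64_t failsafe_next;                   /* ns */
    };

    static void shaper_init(struct shaper *s, uint64_t rate_bytes_per_sec)
    {
        uint32_t shft = 34;
        uint64_t ns = (1000000000ULL << shft) / rate_bytes_per_sec;

        while (ns >> 34) {                        /* keep the scaled value within 34 bits */
            ns >>= 1;
            shft--;
        }
        s->rate_ns = ns;
        s->rate_shft = shft;
    }

    static void advance(struct shaper *s, uint32_t len)
    {
        uint64_t dur = (len * s->rate_ns) >> s->rate_shft;

        s->time_next_packet += dur;
        s->failsafe_next    += dur + (dur >> 1);  /* 1.5x margin */
    }

    int main(void)
    {
        struct shaper s = { 0 };

        shaper_init(&s, 12500000);                /* ~100 Mbit/s expressed in bytes/s */
        advance(&s, 1514);
        printf("next packet in %llu ns\n",        /* ~121 us for a full-size frame */
               (unsigned long long)s.time_next_packet);
        return 0;
    }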
1503 struct cake_sched_data *q = qdisc_priv(sch); in cake_drop() local
1511 if (!q->overflow_timeout) { in cake_drop()
1515 cake_heapify(q, i); in cake_drop()
1517 q->overflow_timeout = 65535; in cake_drop()
1520 qq = q->overflow_heap[0]; in cake_drop()
1524 b = &q->tins[tin]; in cake_drop()
1529 q->overflow_timeout = 0; in cake_drop()
1537 q->buffer_used -= skb->truesize; in cake_drop()
1541 qdisc_tree_reduce_backlog(sch, 1, len); in cake_drop()
1547 if (q->rate_flags & CAKE_FLAG_INGRESS) in cake_drop()
1548 cake_advance_shaper(q, b, skb, now, true); in cake_drop()
1551 sch->q.qlen--; in cake_drop()
1553 cake_heapify(q, 0); in cake_drop()
1617 struct cake_sched_data *q = qdisc_priv(sch); in cake_select_tin() local
1626 mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft; in cake_select_tin()
1627 wash = !!(q->rate_flags & CAKE_FLAG_WASH); in cake_select_tin()
1631 if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT) in cake_select_tin()
1634 else if (mark && mark <= q->tin_cnt) in cake_select_tin()
1635 tin = q->tin_order[mark - 1]; in cake_select_tin()
1639 TC_H_MIN(skb->priority) <= q->tin_cnt) in cake_select_tin()
1640 tin = q->tin_order[TC_H_MIN(skb->priority) - 1]; in cake_select_tin()
1645 tin = q->tin_index[dscp]; in cake_select_tin()
1647 if (unlikely(tin >= q->tin_cnt)) in cake_select_tin()
1651 return &q->tins[tin]; in cake_select_tin()
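cake_select_tin() resolves the tin in a fixed priority order: besteffort mode forces tin 0; otherwise a firewall mark (masked and shifted via fwmark_mask/fwmark_shft) wins, then a matching skb->priority minor class, and finally the DSCP lookup table, with out-of-range results falling back to tin 0. A hedged restatement of just that precedence; the table contents below are placeholders, not CAKE's real DSCP map.

    #include <stdint.h>
    #include <stdio.h>

    #define TIN_CNT 3

    /* tin_order maps the user-visible, 1-based class number to an internal
     * tin index; tin_index maps a 6-bit DSCP to a tin.  Placeholder values.
     */
    static const uint8_t tin_order[TIN_CNT] = { 1, 0, 2 };
    static const uint8_t tin_index[64]      = { 0 };

    static uint8_t select_tin(int besteffort, uint32_t mark,
                              uint32_t prio_minor, uint8_t dscp)
    {
        uint8_t tin;

        if (besteffort)
            return 0;                             /* single tin, nothing to choose */
        if (mark && mark <= TIN_CNT)
            tin = tin_order[mark - 1];            /* explicit firewall mark wins   */
        else if (prio_minor && prio_minor <= TIN_CNT)
            tin = tin_order[prio_minor - 1];      /* then a matching skb->priority */
        else
            tin = tin_index[dscp & 0x3f];         /* otherwise fall back to DSCP   */
        return tin < TIN_CNT ? tin : 0;           /* out of range -> tin 0         */
    }

    int main(void)
    {
        printf("mark 2  -> tin %u\n", (unsigned)select_tin(0, 2, 0, 0));
        printf("DSCP 46 -> tin %u\n", (unsigned)select_tin(0, 0, 0, 46));
        return 0;
    }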
1657 struct cake_sched_data *q = qdisc_priv(sch); in cake_classify() local
1663 filter = rcu_dereference_bh(q->filter_list); in cake_classify()
1689 return cake_hash(*t, skb, flow_mode, flow, host) + 1; in cake_classify()
1697 struct cake_sched_data *q = qdisc_priv(sch); in cake_enqueue() local
1707 idx = cake_classify(sch, &b, skb, q->flow_mode, &ret); in cake_enqueue()
1722 if (!sch->q.qlen) { in cake_enqueue()
1723 if (ktime_before(q->time_next_packet, now)) { in cake_enqueue()
1724 q->failsafe_next_packet = now; in cake_enqueue()
1725 q->time_next_packet = now; in cake_enqueue()
1726 } else if (ktime_after(q->time_next_packet, now) && in cake_enqueue()
1727 ktime_after(q->failsafe_next_packet, now)) { in cake_enqueue()
1729 min(ktime_to_ns(q->time_next_packet), in cake_enqueue()
1731 q->failsafe_next_packet)); in cake_enqueue()
1733 qdisc_watchdog_schedule_ns(&q->watchdog, next); in cake_enqueue()
1741 if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) { in cake_enqueue()
1754 get_cobalt_cb(segs)->adjusted_len = cake_overhead(q, in cake_enqueue()
1758 sch->q.qlen++; in cake_enqueue()
1761 q->buffer_used += segs->truesize; in cake_enqueue()
1770 q->avg_window_bytes += slen; in cake_enqueue()
1772 qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen); in cake_enqueue()
1777 get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb); in cake_enqueue()
1780 if (q->ack_filter) in cake_enqueue()
1781 ack = cake_ack_filter(q, flow); in cake_enqueue()
1788 q->buffer_used += skb->truesize - ack->truesize; in cake_enqueue()
1789 if (q->rate_flags & CAKE_FLAG_INGRESS) in cake_enqueue()
1790 cake_advance_shaper(q, b, ack, now, true); in cake_enqueue()
1792 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack)); in cake_enqueue()
1795 sch->q.qlen++; in cake_enqueue()
1796 q->buffer_used += skb->truesize; in cake_enqueue()
1805 q->avg_window_bytes += len; in cake_enqueue()
1808 if (q->overflow_timeout) in cake_enqueue()
1809 cake_heapify_up(q, b->overflow_idx[idx]); in cake_enqueue()
1812 if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) { in cake_enqueue()
1814 ktime_to_ns(ktime_sub(now, q->last_packet_time)); in cake_enqueue()
1820 q->avg_packet_interval = \ in cake_enqueue()
1821 cake_ewma(q->avg_packet_interval, in cake_enqueue()
1823 (packet_interval > q->avg_packet_interval ? in cake_enqueue()
1826 q->last_packet_time = now; in cake_enqueue()
1828 if (packet_interval > q->avg_packet_interval) { in cake_enqueue()
1831 q->avg_window_begin)); in cake_enqueue()
1832 u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC; in cake_enqueue()
1835 q->avg_peak_bandwidth = in cake_enqueue()
1836 cake_ewma(q->avg_peak_bandwidth, b, in cake_enqueue()
1837 b > q->avg_peak_bandwidth ? 2 : 8); in cake_enqueue()
1838 q->avg_window_bytes = 0; in cake_enqueue()
1839 q->avg_window_begin = now; in cake_enqueue()
1842 ktime_add_ms(q->last_reconfig_time, in cake_enqueue()
1844 q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4; in cake_enqueue()
1849 q->avg_window_bytes = 0; in cake_enqueue()
1850 q->last_packet_time = now; in cake_enqueue()
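The autorate-ingress hits above lean on a power-of-two EWMA helper (cake_ewma(avg, sample, shift) keeps all but a 2^-shift fraction of the history): packet inter-arrival times and per-window throughput are averaged, the peak-bandwidth estimate reacts quickly when throughput rises (shift 2) and slowly when it falls (shift 8), and the shaper is then set to 15/16 of that estimate (line 1844). A standalone illustration with made-up window samples:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t cake_ewma(uint64_t avg, uint64_t sample, uint32_t shift)
    {
        avg -= avg >> shift;                      /* forget a 2^-shift slice of history   */
        avg += sample >> shift;                   /* blend in the same slice of the sample */
        return avg;
    }

    int main(void)
    {
        /* achieved throughput per measurement window, bytes/s (invented) */
        const uint64_t samples[] = { 11500000, 12400000, 12100000 };
        uint64_t peak = 0;

        for (int w = 0; w < 20; w++) {
            uint64_t b = samples[w % 3];

            /* react faster when the estimate rises, slower when it falls */
            peak = cake_ewma(peak, b, b > peak ? 2 : 8);
        }
        /* converges towards roughly the ~12 MB/s the samples describe */
        printf("shaping to %llu bytes/s\n",
               (unsigned long long)((peak * 15) >> 4));   /* 15/16 of the peak */
        return 0;
    }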
1857 u16 host_load = 1; in cake_enqueue()
1868 if (cake_dsrc(q->flow_mode)) in cake_enqueue()
1871 if (cake_ddst(q->flow_mode)) in cake_enqueue()
1887 if (cake_dsrc(q->flow_mode)) in cake_enqueue()
1890 if (cake_ddst(q->flow_mode)) in cake_enqueue()
1895 if (q->buffer_used > q->buffer_max_used) in cake_enqueue()
1896 q->buffer_max_used = q->buffer_used; in cake_enqueue()
1898 if (q->buffer_used > q->buffer_limit) { in cake_enqueue()
1901 while (q->buffer_used > q->buffer_limit) { in cake_enqueue()
1912 struct cake_sched_data *q = qdisc_priv(sch); in cake_dequeue_one() local
1913 struct cake_tin_data *b = &q->tins[q->cur_tin]; in cake_dequeue_one()
1914 struct cake_flow *flow = &b->flows[q->cur_flow]; in cake_dequeue_one()
1921 b->backlogs[q->cur_flow] -= len; in cake_dequeue_one()
1924 q->buffer_used -= skb->truesize; in cake_dequeue_one()
1925 sch->q.qlen--; in cake_dequeue_one()
1927 if (q->overflow_timeout) in cake_dequeue_one()
1928 cake_heapify(q, b->overflow_idx[q->cur_flow]); in cake_dequeue_one()
1936 struct cake_sched_data *q = qdisc_priv(sch); in cake_clear_tin() local
1939 q->cur_tin = tin; in cake_clear_tin()
1940 for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++) in cake_clear_tin()
1947 struct cake_sched_data *q = qdisc_priv(sch); in cake_dequeue() local
1948 struct cake_tin_data *b = &q->tins[q->cur_tin]; in cake_dequeue()
1960 if (!sch->q.qlen) in cake_dequeue()
1964 if (ktime_after(q->time_next_packet, now) && in cake_dequeue()
1965 ktime_after(q->failsafe_next_packet, now)) { in cake_dequeue()
1966 u64 next = min(ktime_to_ns(q->time_next_packet), in cake_dequeue()
1967 ktime_to_ns(q->failsafe_next_packet)); in cake_dequeue()
1970 qdisc_watchdog_schedule_ns(&q->watchdog, next); in cake_dequeue()
1975 if (!q->rate_ns) { in cake_dequeue()
1988 q->cur_tin++; in cake_dequeue()
1990 if (q->cur_tin >= q->tin_cnt) { in cake_dequeue()
1991 q->cur_tin = 0; in cake_dequeue()
1992 b = q->tins; in cake_dequeue()
1995 /* It's possible for q->qlen to be in cake_dequeue()
2014 for (tin = 0; tin < q->tin_cnt; tin++) { in cake_dequeue()
2015 b = q->tins + tin; in cake_dequeue()
2029 q->cur_tin = best_tin; in cake_dequeue()
2030 b = q->tins + best_tin; in cake_dequeue()
2052 q->cur_flow = flow - b->flows; in cake_dequeue()
2058 host_load = 1; in cake_dequeue()
2071 if (cake_dsrc(q->flow_mode)) in cake_dequeue()
2074 if (cake_ddst(q->flow_mode)) in cake_dequeue()
2087 if (cake_dsrc(q->flow_mode)) in cake_dequeue()
2090 if (cake_ddst(q->flow_mode)) in cake_dequeue()
2106 while (1) { in cake_dequeue()
2123 if (cake_dsrc(q->flow_mode)) in cake_dequeue()
2126 if (cake_ddst(q->flow_mode)) in cake_dequeue()
2145 if (cake_dsrc(q->flow_mode)) in cake_dequeue()
2148 if (cake_ddst(q->flow_mode)) in cake_dequeue()
2162 !!(q->rate_flags & in cake_dequeue()
2168 if (q->rate_flags & CAKE_FLAG_INGRESS) { in cake_dequeue()
2169 len = cake_advance_shaper(q, b, skb, in cake_dequeue()
2176 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb)); in cake_dequeue()
2179 if (q->rate_flags & CAKE_FLAG_INGRESS) in cake_dequeue()
2194 len = cake_advance_shaper(q, b, skb, now, false); in cake_dequeue()
2198 if (ktime_after(q->time_next_packet, now) && sch->q.qlen) { in cake_dequeue()
2199 u64 next = min(ktime_to_ns(q->time_next_packet), in cake_dequeue()
2200 ktime_to_ns(q->failsafe_next_packet)); in cake_dequeue()
2202 qdisc_watchdog_schedule_ns(&q->watchdog, next); in cake_dequeue()
2203 } else if (!sch->q.qlen) { in cake_dequeue()
2206 for (i = 0; i < q->tin_cnt; i++) { in cake_dequeue()
2207 if (q->tins[i].decaying_flow_count) { in cake_dequeue()
2210 q->tins[i].cparams.target); in cake_dequeue()
2212 qdisc_watchdog_schedule_ns(&q->watchdog, in cake_dequeue()
2219 if (q->overflow_timeout) in cake_dequeue()
2220 q->overflow_timeout--; in cake_dequeue()
2227 struct cake_sched_data *q = qdisc_priv(sch); in cake_reset() local
2230 if (!q->tins) in cake_reset()
2237 static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
2276 rate_ns >>= 1; in cake_set_rate()
2292 b->cparams.p_inc = 1 << 24; /* 1/256 */ in cake_set_rate()
2293 b->cparams.p_dec = 1 << 20; /* 1/4096 */ in cake_set_rate()
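The constants at lines 2292-2293 read naturally as BLUE-style probabilities in 0.32 fixed point, where 2^32 represents 1.0: p_inc = 1 << 24 is 1/256 and p_dec = 1 << 20 is 1/4096. The drop probability is then raised on queue-full events, lowered when the queue drains, and applying it reduces to comparing a random 32-bit value against p_drop. A hedged userspace sketch of that mechanism (the saturation details and random source are stand-ins):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define P_INC (1U << 24)                      /* += 1/256 of full scale  */
    #define P_DEC (1U << 20)                      /* -= 1/4096 of full scale */

    static uint32_t p_drop;                       /* 0.32 fixed point: 2^32 == 1.0 */

    static void queue_full(void)
    {
        p_drop = p_drop > ~P_INC ? ~0U : p_drop + P_INC;   /* saturate near 100% */
    }

    static void queue_empty(void)
    {
        p_drop = p_drop < P_DEC ? 0 : p_drop - P_DEC;
    }

    static bool blue_drop(void)
    {
        /* rand() stands in for the kernel's random source; drop when a
         * uniform 32-bit value falls below p_drop */
        uint32_t r = ((uint32_t)rand() << 17) ^ (uint32_t)rand();

        return r < p_drop;
    }

    int main(void)
    {
        int drops = 0;

        queue_full();                             /* two overflow events: p_drop ~= 2/256 */
        queue_full();
        for (int i = 0; i < 1000000; i++)
            drops += blue_drop();
        printf("~%d drops per million packets\n", drops);
        queue_empty();
        return 0;
    }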
2298 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_besteffort() local
2299 struct cake_tin_data *b = &q->tins[0]; in cake_config_besteffort()
2301 u64 rate = q->rate_bps; in cake_config_besteffort()
2303 q->tin_cnt = 1; in cake_config_besteffort()
2305 q->tin_index = besteffort; in cake_config_besteffort()
2306 q->tin_order = normal_order; in cake_config_besteffort()
2309 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_besteffort()
2318 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_precedence() local
2320 u64 rate = q->rate_bps; in cake_config_precedence()
2324 q->tin_cnt = 8; in cake_config_precedence()
2325 q->tin_index = precedence; in cake_config_precedence()
2326 q->tin_order = normal_order; in cake_config_precedence()
2328 for (i = 0; i < q->tin_cnt; i++) { in cake_config_precedence()
2329 struct cake_tin_data *b = &q->tins[i]; in cake_config_precedence()
2331 cake_set_rate(b, rate, mtu, us_to_ns(q->target), in cake_config_precedence()
2332 us_to_ns(q->interval)); in cake_config_precedence()
2334 b->tin_quantum = max_t(u16, 1U, quantum); in cake_config_precedence()
2355 * Assured Forwarding 1 (AF1x) - x3
2407 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_diffserv8() local
2409 u64 rate = q->rate_bps; in cake_config_diffserv8()
2413 q->tin_cnt = 8; in cake_config_diffserv8()
2416 q->tin_index = diffserv8; in cake_config_diffserv8()
2417 q->tin_order = normal_order; in cake_config_diffserv8()
2420 for (i = 0; i < q->tin_cnt; i++) { in cake_config_diffserv8()
2421 struct cake_tin_data *b = &q->tins[i]; in cake_config_diffserv8()
2423 cake_set_rate(b, rate, mtu, us_to_ns(q->target), in cake_config_diffserv8()
2424 us_to_ns(q->interval)); in cake_config_diffserv8()
2426 b->tin_quantum = max_t(u16, 1U, quantum); in cake_config_diffserv8()
2451 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_diffserv4() local
2453 u64 rate = q->rate_bps; in cake_config_diffserv4()
2456 q->tin_cnt = 4; in cake_config_diffserv4()
2459 q->tin_index = diffserv4; in cake_config_diffserv4()
2460 q->tin_order = bulk_order; in cake_config_diffserv4()
2463 cake_set_rate(&q->tins[0], rate, mtu, in cake_config_diffserv4()
2464 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2465 cake_set_rate(&q->tins[1], rate >> 4, mtu, in cake_config_diffserv4()
2466 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2467 cake_set_rate(&q->tins[2], rate >> 1, mtu, in cake_config_diffserv4()
2468 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2469 cake_set_rate(&q->tins[3], rate >> 2, mtu, in cake_config_diffserv4()
2470 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2473 q->tins[0].tin_quantum = quantum; in cake_config_diffserv4()
2474 q->tins[1].tin_quantum = quantum >> 4; in cake_config_diffserv4()
2475 q->tins[2].tin_quantum = quantum >> 1; in cake_config_diffserv4()
2476 q->tins[3].tin_quantum = quantum >> 2; in cake_config_diffserv4()
2488 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_diffserv3() local
2490 u64 rate = q->rate_bps; in cake_config_diffserv3()
2493 q->tin_cnt = 3; in cake_config_diffserv3()
2496 q->tin_index = diffserv3; in cake_config_diffserv3()
2497 q->tin_order = bulk_order; in cake_config_diffserv3()
2500 cake_set_rate(&q->tins[0], rate, mtu, in cake_config_diffserv3()
2501 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv3()
2502 cake_set_rate(&q->tins[1], rate >> 4, mtu, in cake_config_diffserv3()
2503 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv3()
2504 cake_set_rate(&q->tins[2], rate >> 2, mtu, in cake_config_diffserv3()
2505 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv3()
2508 q->tins[0].tin_quantum = quantum; in cake_config_diffserv3()
2509 q->tins[1].tin_quantum = quantum >> 4; in cake_config_diffserv3()
2510 q->tins[2].tin_quantum = quantum >> 2; in cake_config_diffserv3()
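The diffserv4/diffserv3 hits derive each tin's rate threshold and quantum from the base rate by shifts: tins[0] keeps the full rate, tins[1] gets 1/16, and the remaining tins 1/2 and 1/4 (diffserv3 drops the 1/2 tier), with bulk_order = {1, 0, 2, 3} putting the 1/16 tin first in the user-visible order. A quick illustration for an assumed 12.5 MB/s (~100 Mbit/s) base rate, using CAKE's conventional tin labels (the names themselves do not appear in these hits):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t rate = 12500000;           /* assumed base rate, bytes/s */
        const struct { const char *name; unsigned shift; } tin[] = {
            { "Bulk",        4 },                 /* rate >> 4, i.e. tins[1] above */
            { "Best Effort", 0 },                 /* rate,      i.e. tins[0]       */
            { "Video",       1 },                 /* rate >> 1, i.e. tins[2]       */
            { "Voice",       2 },                 /* rate >> 2, i.e. tins[3]       */
        };

        for (int i = 0; i < 4; i++)
            printf("%-12s %9llu bytes/s\n", tin[i].name,
                   (unsigned long long)(rate >> tin[i].shift));
        return 0;
    }

These per-tin rates act as priority-sharing thresholds rather than hard caps: a tin may exceed its threshold when the others are idle.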
2517 struct cake_sched_data *q = qdisc_priv(sch); in cake_reconfigure() local
2520 switch (q->tin_mode) { in cake_reconfigure()
2543 for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) { in cake_reconfigure()
2545 q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time; in cake_reconfigure()
2548 q->rate_ns = q->tins[ft].tin_rate_ns; in cake_reconfigure()
2549 q->rate_shft = q->tins[ft].tin_rate_shft; in cake_reconfigure()
2551 if (q->buffer_config_limit) { in cake_reconfigure()
2552 q->buffer_limit = q->buffer_config_limit; in cake_reconfigure()
2553 } else if (q->rate_bps) { in cake_reconfigure()
2554 u64 t = q->rate_bps * q->interval; in cake_reconfigure()
2557 q->buffer_limit = max_t(u32, t, 4U << 20); in cake_reconfigure()
2559 q->buffer_limit = ~0; in cake_reconfigure()
2564 q->buffer_limit = min(q->buffer_limit, in cake_reconfigure()
2566 q->buffer_config_limit)); in cake_reconfigure()
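The cake_reconfigure() hits size the packet buffer: an explicit memory limit wins, otherwise the limit is derived from the shaped rate and the configured interval (a bandwidth-delay product) with a 4 MiB floor (4U << 20), and the result is finally clamped against the configured and qdisc limits. A hedged sketch of that decision; the exact scale factor applied to rate * interval is not visible in these hits, so the one below is illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t buffer_limit(uint32_t config_limit, uint64_t rate_bytes_per_sec,
                                 uint64_t interval_us)
    {
        uint64_t limit;

        if (config_limit)
            return config_limit;                  /* explicit "memory" setting wins  */
        if (!rate_bytes_per_sec)
            return ~0U;                           /* unshaped: effectively unlimited */

        /* roughly one interval's worth of bytes at the shaped rate */
        limit = rate_bytes_per_sec * interval_us / 1000000;
        if (limit < (4U << 20))
            limit = 4U << 20;                     /* never below 4 MiB */
        if (limit > 0xFFFFFFFFULL)
            limit = 0xFFFFFFFFULL;
        return (uint32_t)limit;
    }

    int main(void)
    {
        /* 12.5 MB/s (~100 Mbit/s) with a 100 ms interval */
        printf("%u bytes\n", buffer_limit(0, 12500000, 100000));
        return 0;
    }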
2572 struct cake_sched_data *q = qdisc_priv(sch); in cake_change() local
2573 struct nlattr *tb[TCA_CAKE_MAX + 1]; in cake_change()
2586 q->flow_mode &= ~CAKE_FLOW_NAT_FLAG; in cake_change()
2587 q->flow_mode |= CAKE_FLOW_NAT_FLAG * in cake_change()
2597 q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]); in cake_change()
2600 q->tin_mode = nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]); in cake_change()
2604 q->rate_flags |= CAKE_FLAG_WASH; in cake_change()
2606 q->rate_flags &= ~CAKE_FLAG_WASH; in cake_change()
2610 q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) | in cake_change()
2615 q->atm_mode = nla_get_u32(tb[TCA_CAKE_ATM]); in cake_change()
2618 q->rate_overhead = nla_get_s32(tb[TCA_CAKE_OVERHEAD]); in cake_change()
2619 q->rate_flags |= CAKE_FLAG_OVERHEAD; in cake_change()
2621 q->max_netlen = 0; in cake_change()
2622 q->max_adjlen = 0; in cake_change()
2623 q->min_netlen = ~0; in cake_change()
2624 q->min_adjlen = ~0; in cake_change()
2628 q->rate_flags &= ~CAKE_FLAG_OVERHEAD; in cake_change()
2630 q->max_netlen = 0; in cake_change()
2631 q->max_adjlen = 0; in cake_change()
2632 q->min_netlen = ~0; in cake_change()
2633 q->min_adjlen = ~0; in cake_change()
2637 q->rate_mpu = nla_get_u32(tb[TCA_CAKE_MPU]); in cake_change()
2640 q->interval = nla_get_u32(tb[TCA_CAKE_RTT]); in cake_change()
2642 if (!q->interval) in cake_change()
2643 q->interval = 1; in cake_change()
2647 q->target = nla_get_u32(tb[TCA_CAKE_TARGET]); in cake_change()
2649 if (!q->target) in cake_change()
2650 q->target = 1; in cake_change()
2655 q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS; in cake_change()
2657 q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS; in cake_change()
2662 q->rate_flags |= CAKE_FLAG_INGRESS; in cake_change()
2664 q->rate_flags &= ~CAKE_FLAG_INGRESS; in cake_change()
2668 q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]); in cake_change()
2671 q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]); in cake_change()
2675 q->rate_flags |= CAKE_FLAG_SPLIT_GSO; in cake_change()
2677 q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO; in cake_change()
2681 q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]); in cake_change()
2682 q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0; in cake_change()
2685 if (q->tins) { in cake_change()
2696 struct cake_sched_data *q = qdisc_priv(sch); in cake_destroy() local
2698 qdisc_watchdog_cancel(&q->watchdog); in cake_destroy()
2699 tcf_block_put(q->block); in cake_destroy()
2700 kvfree(q->tins); in cake_destroy()
2706 struct cake_sched_data *q = qdisc_priv(sch); in cake_init() local
2710 q->tin_mode = CAKE_DIFFSERV_DIFFSERV3; in cake_init()
2711 q->flow_mode = CAKE_FLOW_TRIPLE; in cake_init()
2713 q->rate_bps = 0; /* unlimited by default */ in cake_init()
2715 q->interval = 100000; /* 100ms default */ in cake_init()
2716 q->target = 5000; /* 5ms: codel RFC argues in cake_init()
2719 q->rate_flags |= CAKE_FLAG_SPLIT_GSO; in cake_init()
2720 q->cur_tin = 0; in cake_init()
2721 q->cur_flow = 0; in cake_init()
2723 qdisc_watchdog_init(&q->watchdog, sch); in cake_init()
2732 err = tcf_block_get(&q->block, &q->filter_list, sch, extack); in cake_init()
2737 for (i = 1; i <= CAKE_QUEUES; i++) in cake_init()
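quantum_div[] (line 299), together with the i = 1..CAKE_QUEUES loop here, looks like a classic reciprocal table: dividing the per-flow DRR quantum by a host's active bulk-flow count (for the host-fairness modes) becomes a multiply and a shift in the hot path. A standalone sketch of that trick; the exact table values, and the dithering the kernel adds on top, are assumptions/omitted.

    #include <stdint.h>
    #include <stdio.h>

    #define QUEUES 1024
    static uint16_t quantum_div[QUEUES + 1];

    int main(void)
    {
        uint32_t quantum = 1514;                  /* per-flow DRR quantum, bytes */

        for (uint32_t i = 1; i <= QUEUES; i++)
            quantum_div[i] = 65535 / i;           /* ~2^16 / i, assumed table contents */

        for (uint32_t host_load = 1; host_load <= 4; host_load++) {
            uint32_t share = (quantum * quantum_div[host_load]) >> 16;

            printf("%u bulk flows from this host -> ~%u bytes each\n",
                   host_load, share);             /* ~1514, 757, 504, 378 */
        }
        return 0;
    }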
2740 q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data), in cake_init()
2742 if (!q->tins) in cake_init()
2746 struct cake_tin_data *b = q->tins + i; in cake_init()
2762 q->overflow_heap[k].t = i; in cake_init()
2763 q->overflow_heap[k].b = j; in cake_init()
2769 q->avg_peak_bandwidth = q->rate_bps; in cake_init()
2770 q->min_netlen = ~0; in cake_init()
2771 q->min_adjlen = ~0; in cake_init()
2777 struct cake_sched_data *q = qdisc_priv(sch); in cake_dump() local
2784 if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps, in cake_dump()
2789 q->flow_mode & CAKE_FLOW_MASK)) in cake_dump()
2792 if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval)) in cake_dump()
2795 if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target)) in cake_dump()
2798 if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit)) in cake_dump()
2802 !!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS))) in cake_dump()
2806 !!(q->rate_flags & CAKE_FLAG_INGRESS))) in cake_dump()
2809 if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter)) in cake_dump()
2813 !!(q->flow_mode & CAKE_FLOW_NAT_FLAG))) in cake_dump()
2816 if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, q->tin_mode)) in cake_dump()
2820 !!(q->rate_flags & CAKE_FLAG_WASH))) in cake_dump()
2823 if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, q->rate_overhead)) in cake_dump()
2826 if (!(q->rate_flags & CAKE_FLAG_OVERHEAD)) in cake_dump()
2830 if (nla_put_u32(skb, TCA_CAKE_ATM, q->atm_mode)) in cake_dump()
2833 if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu)) in cake_dump()
2837 !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO))) in cake_dump()
2840 if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask)) in cake_dump()
2846 return -1; in cake_dump()
2852 struct cake_sched_data *q = qdisc_priv(sch); in cake_dump_stats() local
2857 return -1; in cake_dump_stats()
2869 PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth); in cake_dump_stats()
2870 PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit); in cake_dump_stats()
2871 PUT_STAT_U32(MEMORY_USED, q->buffer_max_used); in cake_dump_stats()
2872 PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16)); in cake_dump_stats()
2873 PUT_STAT_U32(MAX_NETLEN, q->max_netlen); in cake_dump_stats()
2874 PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen); in cake_dump_stats()
2875 PUT_STAT_U32(MIN_NETLEN, q->min_netlen); in cake_dump_stats()
2876 PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen); in cake_dump_stats()
2895 for (i = 0; i < q->tin_cnt; i++) { in cake_dump_stats()
2896 struct cake_tin_data *b = &q->tins[q->tin_order[i]]; in cake_dump_stats()
2898 ts = nla_nest_start_noflag(d->skb, i + 1); in cake_dump_stats()
2945 return -1; in cake_dump_stats()
2964 static void cake_unbind(struct Qdisc *q, unsigned long cl) in cake_unbind() argument
2971 struct cake_sched_data *q = qdisc_priv(sch); in cake_tcf_block() local
2975 return q->block; in cake_tcf_block()
2988 struct cake_sched_data *q = qdisc_priv(sch); in cake_dump_class_stats() local
2992 u32 idx = cl - 1; in cake_dump_class_stats()
2994 if (idx < CAKE_QUEUES * q->tin_cnt) { in cake_dump_class_stats()
2996 &q->tins[q->tin_order[idx / CAKE_QUEUES]]; in cake_dump_class_stats()
3014 return -1; in cake_dump_class_stats()
3020 return -1; in cake_dump_class_stats()
3049 return -1; in cake_dump_class_stats()
3056 return -1; in cake_dump_class_stats()
3061 struct cake_sched_data *q = qdisc_priv(sch); in cake_walk() local
3067 for (i = 0; i < q->tin_cnt; i++) { in cake_walk()
3068 struct cake_tin_data *b = &q->tins[q->tin_order[i]]; in cake_walk()
3076 if (arg->fn(sch, i * CAKE_QUEUES + j + 1, arg) < 0) { in cake_walk()
3077 arg->stop = 1; in cake_walk()
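The cake_dump_class_stats() and cake_walk() hits imply a simple class-handle encoding: classid minors are 1-based and pack tin and flow as tin * CAKE_QUEUES + flow + 1 (with the kernel additionally remapping the tin through tin_order[] for display). A hedged sketch of that round trip:

    #include <assert.h>
    #include <stdint.h>

    #define CAKE_QUEUES 1024

    static uint32_t class_of(uint32_t tin, uint32_t flow)
    {
        return tin * CAKE_QUEUES + flow + 1;      /* minor 0 stays reserved */
    }

    static void tin_flow_of(uint32_t cl, uint32_t *tin, uint32_t *flow)
    {
        uint32_t idx = cl - 1;

        *tin  = idx / CAKE_QUEUES;
        *flow = idx % CAKE_QUEUES;
    }

    int main(void)
    {
        uint32_t tin, flow;

        tin_flow_of(class_of(2, 37), &tin, &flow);
        assert(tin == 2 && flow == 37);
        return 0;
    }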