// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/tcp.h>

static LIST_HEAD(taprio_list);
static DEFINE_SPINLOCK(taprio_list_lock);

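/* A gate_mask with all bits set opens the gates for every traffic class;
 * it is used as the default whenever no schedule entry is currently active.
 */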
#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX

struct sched_entry {
	struct list_head list;

	/* The instant that this entry "closes" and the next one
	 * should open; the qdisc will make some effort so that no
	 * packet leaves after this time.
	 */
	ktime_t close_time;
	ktime_t next_txtime;
	atomic_t budget;
	int index;
	u32 gate_mask;
	u32 interval;
	u8 command;
};

struct sched_gate_list {
	struct rcu_head rcu;
	struct list_head entries;
	size_t num_entries;
	ktime_t cycle_close_time;
	s64 cycle_time;
	s64 cycle_time_extension;
	s64 base_time;
};

struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	u32 flags;
	enum tk_offsets tk_offset;
	int clockid;
	bool offloaded;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds it's sub-nanoseconds per byte
				    */

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct sched_gate_list __rcu *oper_sched;
	struct sched_gate_list __rcu *admin_sched;
	struct hrtimer advance_timer;
	struct list_head taprio_list;
	struct sk_buff *(*dequeue)(struct Qdisc *sch);
	struct sk_buff *(*peek)(struct Qdisc *sch);
	u32 txtime_delay;
};

struct __tc_taprio_qopt_offload {
	refcount_t users;
	struct tc_taprio_qopt_offload offload;
};

static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
	if (!sched)
		return KTIME_MAX;

	return ns_to_ktime(sched->base_time);
}

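/* Convert a CLOCK_MONOTONIC timestamp to the clock domain this qdisc was
 * configured with. TK_OFFS_MAX is used as a sentinel meaning the schedule
 * clock is CLOCK_MONOTONIC itself, so no conversion is needed.
 */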
static ktime_t taprio_mono_to_any(const struct taprio_sched *q, ktime_t mono)
{
	/* This pairs with WRITE_ONCE() in taprio_parse_clockid() */
	enum tk_offsets tk_offset = READ_ONCE(q->tk_offset);

	switch (tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, tk_offset);
	}
}

static ktime_t taprio_get_time(const struct taprio_sched *q)
{
	return taprio_mono_to_any(q, ktime_get());
}

static void taprio_free_sched_cb(struct rcu_head *head)
{
	struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
	struct sched_entry *entry, *n;

	if (!sched)
		return;

	list_for_each_entry_safe(entry, n, &sched->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	kfree(sched);
}

static void switch_schedules(struct taprio_sched *q,
			     struct sched_gate_list **admin,
			     struct sched_gate_list **oper)
{
	rcu_assign_pointer(q->oper_sched, *admin);
	rcu_assign_pointer(q->admin_sched, NULL);

	if (*oper)
		call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

	*oper = *admin;
	*admin = NULL;
}

/* Get how much time has already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
	ktime_t time_since_sched_start;
	s32 time_elapsed;

	time_since_sched_start = ktime_sub(time, sched->base_time);
	div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

	return time_elapsed;
}

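/* The end of an entry's interval is the earliest of: the interval start plus
 * the entry's own length, or the end of the current cycle. If an admin
 * schedule is pending and its base_time falls within the
 * cycle_time_extension window after the cycle end, the interval may be
 * stretched up to that base_time instead.
 */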
static ktime_t get_interval_end_time(struct sched_gate_list *sched,
				     struct sched_gate_list *admin,
				     struct sched_entry *entry,
				     ktime_t intv_start)
{
	s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
	ktime_t intv_end, cycle_ext_end, cycle_end;

	cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
	intv_end = ktime_add_ns(intv_start, entry->interval);
	cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

	if (ktime_before(intv_end, cycle_end))
		return intv_end;
	else if (admin && admin != sched &&
		 ktime_after(admin->base_time, cycle_end) &&
		 ktime_before(admin->base_time, cycle_ext_end))
		return admin->base_time;
	else
		return cycle_end;
}

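/* Convert a frame length in bytes to its transmission duration in
 * nanoseconds at the current link speed. For example, at 1Gbps one byte
 * takes 8000 picoseconds, so a 1500 byte frame takes
 * 1500 * 8000 / 1000 = 12000 ns (12 us) on the wire.
 */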
static int length_to_duration(struct taprio_sched *q, int len)
{
	return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
}

/* Returns the entry corresponding to the next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
						  struct Qdisc *sch,
						  struct sched_gate_list *sched,
						  struct sched_gate_list *admin,
						  ktime_t time,
						  ktime_t *interval_start,
						  ktime_t *interval_end,
						  bool validate_interval)
{
	ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
	ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
	struct sched_entry *entry = NULL, *entry_found = NULL;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	bool entry_available = false;
	s32 cycle_elapsed;
	int tc, n;

	tc = netdev_get_prio_tc_map(dev, skb->priority);
	packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

	*interval_start = 0;
	*interval_end = 0;

	if (!sched)
		return NULL;

	cycle = sched->cycle_time;
	cycle_elapsed = get_cycle_time_elapsed(sched, time);
	curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
	cycle_end = ktime_add_ns(curr_intv_end, cycle);

	list_for_each_entry(entry, &sched->entries, list) {
		curr_intv_start = curr_intv_end;
		curr_intv_end = get_interval_end_time(sched, admin, entry,
						      curr_intv_start);

		if (ktime_after(curr_intv_start, cycle_end))
			break;

		if (!(entry->gate_mask & BIT(tc)) ||
		    packet_transmit_time > entry->interval)
			continue;

		txtime = entry->next_txtime;

		if (ktime_before(txtime, time) || validate_interval) {
			transmit_end_time = ktime_add_ns(time, packet_transmit_time);
			if ((ktime_before(curr_intv_start, time) &&
			     ktime_before(transmit_end_time, curr_intv_end)) ||
			    (ktime_after(curr_intv_start, time) && !validate_interval)) {
				entry_found = entry;
				*interval_start = curr_intv_start;
				*interval_end = curr_intv_end;
				break;
			} else if (!entry_available && !validate_interval) {
				/* Here, we are just trying to find out the
				 * first available interval in the next cycle.
				 */
				entry_available = true;
				entry_found = entry;
				*interval_start = ktime_add_ns(curr_intv_start, cycle);
				*interval_end = ktime_add_ns(curr_intv_end, cycle);
			}
		} else if (ktime_before(txtime, earliest_txtime) &&
			   !entry_available) {
			earliest_txtime = txtime;
			entry_found = entry;
			n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
			*interval_start = ktime_add(curr_intv_start, n * cycle);
			*interval_end = ktime_add(curr_intv_end, n * cycle);
		}
	}

	return entry_found;
}

static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t interval_start, interval_end;
	struct sched_entry *entry;

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
				       &interval_start, &interval_end, true);
	rcu_read_unlock();

	return entry;
}

static bool taprio_flags_valid(u32 flags)
{
	/* Make sure no other flag bits are set. */
	if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
		      TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	/* txtime-assist and full offload are mutually exclusive */
	if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
	    (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	return true;
}

/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
	unsigned int offset = skb_network_offset(skb);
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	struct ipv6hdr _ipv6h;

	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
	if (!ipv6h)
		return 0;

	if (ipv6h->version == 4) {
		iph = (struct iphdr *)ipv6h;
		offset += iph->ihl * 4;

		/* special-case 6in4 tunnelling, as that is a common way to get
		 * v6 connectivity in the home
		 */
		if (iph->protocol == IPPROTO_IPV6) {
			ipv6h = skb_header_pointer(skb, offset,
						   sizeof(_ipv6h), &_ipv6h);

			if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
				return 0;
		} else if (iph->protocol != IPPROTO_TCP) {
			return 0;
		}
	} else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
		return 0;
	}

	return taprio_mono_to_any(q, skb->skb_mstamp_ns);
}

/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and the packet can
 *       be transmitted before it closes: schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open later
 *       in the cycle, set the txtime of the packet to the interval start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
	ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t minimum_time, now, txtime;
	int len, packet_transmit_time;
	struct sched_entry *entry;
	bool sched_changed;

	now = taprio_get_time(q);
	minimum_time = ktime_add_ns(now, q->txtime_delay);

	tcp_tstamp = get_tcp_tstamp(q, skb);
	minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

	rcu_read_lock();
	admin = rcu_dereference(q->admin_sched);
	sched = rcu_dereference(q->oper_sched);
	if (admin && ktime_after(minimum_time, admin->base_time))
		switch_schedules(q, &admin, &sched);

	/* Until the schedule starts, all the queues are open */
	if (!sched || ktime_before(minimum_time, sched->base_time)) {
		txtime = minimum_time;
		goto done;
	}

	len = qdisc_pkt_len(skb);
	packet_transmit_time = length_to_duration(q, len);

	do {
		sched_changed = false;

		entry = find_entry_to_transmit(skb, sch, sched, admin,
					       minimum_time,
					       &interval_start, &interval_end,
					       false);
		if (!entry) {
			txtime = 0;
			goto done;
		}

		txtime = entry->next_txtime;
		txtime = max_t(ktime_t, txtime, minimum_time);
		txtime = max_t(ktime_t, txtime, interval_start);

		if (admin && admin != sched &&
		    ktime_after(txtime, admin->base_time)) {
			sched = admin;
			sched_changed = true;
			continue;
		}

		transmit_end_time = ktime_add(txtime, packet_transmit_time);
		minimum_time = transmit_end_time;

		/* Update the txtime of current entry to the next time its
		 * interval starts.
		 */
		if (ktime_after(transmit_end_time, interval_end))
			entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
	} while (sched_changed || ktime_after(transmit_end_time, interval_end));

	entry->next_txtime = transmit_end_time;

done:
	rcu_read_unlock();
	return txtime;
}

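/* Enqueue path: for sockets using SO_TXTIME the user-provided launch time is
 * validated against the gate schedule; in txtime-assist mode a launch time
 * is computed here and stored in skb->tstamp so a lower layer (typically an
 * etf child qdisc) can honor it.
 */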
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct Qdisc *child;
	int queue;

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	/* sk_flags are only safe to use on full sockets. */
	if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
		if (!is_valid_interval(skb, sch))
			return qdisc_drop(skb, sch, to_free);
	} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		skb->tstamp = get_packet_txtime(skb, sch);
		if (!skb->tstamp)
			return qdisc_drop(skb, sch, to_free);
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return qdisc_enqueue(skb, child, to_free);
}

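/* Peek at the first skb whose traffic class gate is open in the currently
 * active schedule entry. Before the schedule starts there is no current
 * entry, so all gates are treated as open.
 */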
static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_entry *entry;
	struct sk_buff *skb;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	rcu_read_unlock();

	if (!gate_mask)
		return NULL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		int prio;
		u8 tc;

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags))
			return skb;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;

		return skb;
	}

	return NULL;
}

static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		return skb;
	}

	return NULL;
}

static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->peek(sch);
}

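/* The budget is the number of bytes that can still be transmitted within
 * the current entry's interval: interval (ns) * 1000 / picos_per_byte.
 */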
static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
	atomic_set(&entry->budget,
		   div64_u64((u64)entry->interval * 1000,
			     atomic64_read(&q->picos_per_byte)));
}

static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb = NULL;
	struct sched_entry *entry;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	/* if there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open, this is in
	 * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 * "AdminGateStates"
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;

	if (!gate_mask)
		goto done;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		ktime_t guard;
		int prio;
		int len;
		u8 tc;

		if (unlikely(!child))
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			skb = child->ops->dequeue(child);
			if (!skb)
				continue;
			goto skb_found;
		}

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc))) {
			skb = NULL;
			continue;
		}

		len = qdisc_pkt_len(skb);
		guard = ktime_add_ns(taprio_get_time(q),
				     length_to_duration(q, len));

		/* In the case that there's no gate entry, there's no
		 * guard band ...
		 */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    ktime_after(guard, entry->close_time)) {
			skb = NULL;
			continue;
		}

		/* ... and no budget. */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    atomic_sub_return(len, &entry->budget) < 0) {
			skb = NULL;
			continue;
		}

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			goto done;

skb_found:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		goto done;
	}

done:
	rcu_read_unlock();

	return skb;
}

static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			continue;

		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		return skb;
	}

	return NULL;
}

static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->dequeue(sch);
}

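/* The cycle restarts either when the last entry in the schedule has run, or
 * when the current entry closes exactly at the cycle's close time (i.e. the
 * cycle was truncated).
 */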
static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{
	if (list_is_last(&entry->list, &oper->entries))
		return true;

	if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
		return true;

	return false;
}

static bool should_change_schedules(const struct sched_gate_list *admin,
				    const struct sched_gate_list *oper,
				    ktime_t close_time)
{
	ktime_t next_base_time, extension_time;

	if (!admin)
		return false;

	next_base_time = sched_base_time(admin);

	/* This is the simple case, the close_time would fall after
	 * the next schedule base_time.
	 */
	if (ktime_compare(next_base_time, close_time) <= 0)
		return true;

	/* This is the cycle_time_extension case, if the close_time
	 * plus the amount that can be extended would fall after the
	 * next schedule base_time, we can extend the current schedule
	 * for that amount.
	 */
	extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);

	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
	 * how precisely the extension should be made. So after
	 * conformance testing, this logic may change.
	 */
	if (ktime_compare(next_base_time, extension_time) <= 0)
		return true;

	return false;
}

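/* hrtimer callback that advances the schedule: it picks the next entry (or
 * restarts the cycle), switches from the operational to the admin schedule
 * when due, publishes the new current_entry via RCU, re-arms itself for the
 * entry's close time and kicks the qdisc so pending packets are dequeued.
 */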
static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct sched_gate_list *oper, *admin;
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;
	ktime_t close_time;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));
	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	if (!oper)
		switch_schedules(q, &admin, &oper);

	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. The previous schedule just ended. The first
	 * entry of all schedules is pre-calculated during the
	 * schedule initialization.
	 */
	if (unlikely(!entry || entry->close_time == oper->base_time)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		close_time = next->close_time;
		goto first_run;
	}

	if (should_restart_cycle(oper, entry)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
						      oper->cycle_time);
	} else {
		next = list_next_entry(entry, list);
	}

	close_time = ktime_add_ns(entry->close_time, next->interval);
	close_time = min_t(ktime_t, close_time, oper->cycle_close_time);

	if (should_change_schedules(admin, oper, close_time)) {
		/* Set things so the next time this runs, the new
		 * schedule runs.
		 */
		close_time = sched_base_time(admin);
		switch_schedules(q, &admin, &oper);
	}

	next->close_time = close_time;
	taprio_set_budget(q, next);

first_run:
	rcu_assign_pointer(q->current_entry, next);
	spin_unlock(&q->current_entry_lock);

	hrtimer_set_expires(&q->advance_timer, close_time);

	rcu_read_lock();
	__netif_schedule(sch);
	rcu_read_unlock();

	return HRTIMER_RESTART;
}

static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY_INDEX] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_CMD] = { .type = NLA_U8 },
	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL] = { .type = NLA_U32 },
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
	[TCA_TAPRIO_ATTR_PRIOMAP] = {
		.len = sizeof(struct tc_mqprio_qopt)
	},
	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST] = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME] = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 },
	[TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 },
};

static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
			    struct sched_entry *entry,
			    struct netlink_ext_ack *extack)
{
	int min_duration = length_to_duration(q, ETH_ZLEN);
	u32 interval = 0;

	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
		entry->command = nla_get_u8(
			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
		entry->gate_mask = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
		interval = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

	/* The interval should allow at least the minimum Ethernet
	 * frame to go out.
	 */
	if (interval < min_duration) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	return 0;
}

static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
			     struct sched_entry *entry, int index,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
					  entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_sched_entry(q, tb, entry, extack);
}

static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
			    struct sched_gate_list *sched,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list)
		return -EINVAL;

	nla_for_each_nested(n, list, rem) {
		struct sched_entry *entry;

		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			return -ENOMEM;
		}

		err = parse_sched_entry(q, n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			return err;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;
}

static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
				 struct sched_gate_list *new,
				 struct netlink_ext_ack *extack)
{
	int err = 0;

	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
		NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
		return -ENOTSUPP;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
		err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
				       new, extack);
	if (err < 0)
		return err;

	if (!new->cycle_time) {
		struct sched_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &new->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);

		if (!cycle) {
			NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
			return -EINVAL;
		}

		new->cycle_time = cycle;
	}

	return 0;
}

static int taprio_parse_mqprio_opt(struct net_device *dev,
				   struct tc_mqprio_qopt *qopt,
				   struct netlink_ext_ack *extack,
				   u32 taprio_flags)
{
	int i, j;

	if (!qopt && !dev->num_tc) {
		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
		return -EINVAL;
	}

	/* If num_tc is already set, it means that the user already
	 * configured the mqprio part
	 */
	if (dev->num_tc)
		return 0;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
		return -EINVAL;
	}

	/* taprio imposes that traffic classes map 1:n to tx queues */
	if (qopt->num_tc > dev->num_tx_queues) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
		return -EINVAL;
	}

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i <= TC_BITMASK; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
			NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
			return -EINVAL;
		}
	}

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is within the tx queue range;
		 * "last" being equal to real_num_tx_queues indicates the
		 * last queue is in use.
		 */
		if (qopt->offset[i] >= dev->num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues) {
			NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
			return -EINVAL;
		}

		if (TXTIME_ASSIST_IS_ENABLED(taprio_flags))
			continue;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j]) {
				NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int taprio_get_start_time(struct Qdisc *sch,
				 struct sched_gate_list *sched,
				 ktime_t *start)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t now, base, cycle;
	s64 n;

	base = sched_base_time(sched);
	now = taprio_get_time(q);

	if (ktime_after(base, now)) {
		*start = base;
		return 0;
	}

	cycle = sched->cycle_time;

	/* The qdisc is expected to have at least one sched_entry. Moreover,
	 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
	 * something went really wrong. In that case, we should warn about this
	 * inconsistent state and return error.
	 */
	if (WARN_ON(!cycle))
		return -EFAULT;

	/* Schedule the start time for the beginning of the next
	 * cycle.
	 */
	n = div64_s64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
	return 0;
}

static void setup_first_close_time(struct taprio_sched *q,
				   struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *first;
	ktime_t cycle;

	first = list_first_entry(&sched->entries,
				 struct sched_entry, list);

	cycle = sched->cycle_time;

	/* FIXME: find a better place to do this */
	sched->cycle_close_time = ktime_add_ns(base, cycle);

	first->close_time = ktime_add_ns(base, first->interval);
	taprio_set_budget(q, first);
	rcu_assign_pointer(q->current_entry, NULL);
}

static void taprio_start_sched(struct Qdisc *sch,
			       ktime_t start, struct sched_gate_list *new)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t expires;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		return;

	expires = hrtimer_get_expires(&q->advance_timer);
	if (expires == 0)
		expires = KTIME_MAX;

	/* If the new schedule starts before the next expiration, we
	 * reprogram it to the earliest one, so we change the admin
	 * schedule to the operational one at the right time.
	 */
	start = min_t(ktime_t, start, expires);

	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}

static void taprio_set_picos_per_byte(struct net_device *dev,
				      struct taprio_sched *q)
{
	struct ethtool_link_ksettings ecmd;
	int speed = SPEED_10;
	int picos_per_byte;
	int err;

	err = __ethtool_get_link_ksettings(dev, &ecmd);
	if (err < 0)
		goto skip;

	if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
		speed = ecmd.base.speed;

skip:
	picos_per_byte = (USEC_PER_SEC * 8) / speed;

	atomic64_set(&q->picos_per_byte, picos_per_byte);
	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
		   dev->name, (long long)atomic64_read(&q->picos_per_byte),
		   ecmd.base.speed);
}

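/* netdev event handler: when a link comes up or changes, recompute the
 * picos_per_byte value for any taprio instance attached to that device.
 */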
static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net_device *qdev;
	struct taprio_sched *q;
	bool found = false;

	ASSERT_RTNL();

	if (event != NETDEV_UP && event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	spin_lock(&taprio_list_lock);
	list_for_each_entry(q, &taprio_list, taprio_list) {
		qdev = qdisc_dev(q->root);
		if (qdev == dev) {
			found = true;
			break;
		}
	}
	spin_unlock(&taprio_list_lock);

	if (found)
		taprio_set_picos_per_byte(dev, q);

	return NOTIFY_DONE;
}

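/* Initialize each entry's next_txtime as its cumulative offset from the
 * schedule's base time: entry N starts after the intervals of entries
 * 0..N-1 have elapsed.
 */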
static void setup_txtime(struct taprio_sched *q,
			 struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *entry;
	u32 interval = 0;

	list_for_each_entry(entry, &sched->entries, list) {
		entry->next_txtime = ktime_add_ns(base, interval);
		interval += entry->interval;
	}
}

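/* The offload structure handed to drivers is reference counted, since a
 * driver may need to keep it around after taprio itself is done with it:
 * taprio_offload_get() takes a reference, taprio_offload_free() drops it
 * and frees the structure once the last user is gone.
 */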
taprio_offload_alloc(int num_entries)1127*4882a593Smuzhiyun static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
1128*4882a593Smuzhiyun {
1129*4882a593Smuzhiyun struct __tc_taprio_qopt_offload *__offload;
1130*4882a593Smuzhiyun
1131*4882a593Smuzhiyun __offload = kzalloc(struct_size(__offload, offload.entries, num_entries),
1132*4882a593Smuzhiyun GFP_KERNEL);
1133*4882a593Smuzhiyun if (!__offload)
1134*4882a593Smuzhiyun return NULL;
1135*4882a593Smuzhiyun
1136*4882a593Smuzhiyun refcount_set(&__offload->users, 1);
1137*4882a593Smuzhiyun
1138*4882a593Smuzhiyun return &__offload->offload;
1139*4882a593Smuzhiyun }
1140*4882a593Smuzhiyun
taprio_offload_get(struct tc_taprio_qopt_offload * offload)1141*4882a593Smuzhiyun struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
1142*4882a593Smuzhiyun *offload)
1143*4882a593Smuzhiyun {
1144*4882a593Smuzhiyun struct __tc_taprio_qopt_offload *__offload;
1145*4882a593Smuzhiyun
1146*4882a593Smuzhiyun __offload = container_of(offload, struct __tc_taprio_qopt_offload,
1147*4882a593Smuzhiyun offload);
1148*4882a593Smuzhiyun
1149*4882a593Smuzhiyun refcount_inc(&__offload->users);
1150*4882a593Smuzhiyun
1151*4882a593Smuzhiyun return offload;
1152*4882a593Smuzhiyun }
1153*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(taprio_offload_get);
1154*4882a593Smuzhiyun
taprio_offload_free(struct tc_taprio_qopt_offload * offload)1155*4882a593Smuzhiyun void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
1156*4882a593Smuzhiyun {
1157*4882a593Smuzhiyun struct __tc_taprio_qopt_offload *__offload;
1158*4882a593Smuzhiyun
1159*4882a593Smuzhiyun __offload = container_of(offload, struct __tc_taprio_qopt_offload,
1160*4882a593Smuzhiyun offload);
1161*4882a593Smuzhiyun
1162*4882a593Smuzhiyun if (!refcount_dec_and_test(&__offload->users))
1163*4882a593Smuzhiyun return;
1164*4882a593Smuzhiyun
1165*4882a593Smuzhiyun kfree(__offload);
1166*4882a593Smuzhiyun }
1167*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(taprio_offload_free);
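
/* Drivers that need to hold on to the offload configuration beyond the
 * ndo_setup_tc() call are expected to take their own reference via the
 * helpers above. A minimal sketch (error handling omitted; 'priv' is a
 * hypothetical driver-private struct used only for illustration):
 *
 *	priv->taprio = taprio_offload_get(offload);
 *	...
 *	taprio_offload_free(priv->taprio);
 */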

/* This function only serves to keep the pointers to the "oper" and "admin"
 * schedules valid in relation to their base times, so that when dump() is
 * called the user sees the right schedules.
 * When using full offload, the admin configuration is promoted to oper at the
 * base_time in the PHC time domain. But because the system time is not
 * necessarily in sync with that, we can't just trigger a hrtimer to call
 * switch_schedules at the right hardware time.
 * At the moment we call this by hand right away from taprio, but in the future
 * it will be useful to create a mechanism for drivers to notify taprio of the
 * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
 * This is left as TODO.
 */
static void taprio_offload_config_changed(struct taprio_sched *q)
{
	struct sched_gate_list *oper, *admin;

	spin_lock(&q->current_entry_lock);

	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	switch_schedules(q, &admin, &oper);

	spin_unlock(&q->current_entry_lock);
}

static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
{
	u32 i, queue_mask = 0;

	for (i = 0; i < dev->num_tc; i++) {
		u32 offset, count;

		if (!(tc_mask & BIT(i)))
			continue;

		offset = dev->tc_to_txq[i].offset;
		count = dev->tc_to_txq[i].count;

		queue_mask |= GENMASK(offset + count - 1, offset);
	}

	return queue_mask;
}
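
/* Worked example (illustrative numbers): with TC 0 mapped to queues 0-1
 * (offset 0, count 2) and TC 1 mapped to queues 2-3 (offset 2, count 2),
 * a tc_mask of 0x2 (only TC 1 set) yields GENMASK(3, 2) = 0xc, i.e. a
 * queue_mask with bits 2 and 3 set.
 */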

static void taprio_sched_to_offload(struct net_device *dev,
				    struct sched_gate_list *sched,
				    struct tc_taprio_qopt_offload *offload)
{
	struct sched_entry *entry;
	int i = 0;

	offload->base_time = sched->base_time;
	offload->cycle_time = sched->cycle_time;
	offload->cycle_time_extension = sched->cycle_time_extension;

	list_for_each_entry(entry, &sched->entries, list) {
		struct tc_taprio_sched_entry *e = &offload->entries[i];

		e->command = entry->command;
		e->interval = entry->interval;
		e->gate_mask = tc_map_to_queue_mask(dev, entry->gate_mask);

		i++;
	}

	offload->num_entries = i;
}

static int taprio_enable_offload(struct net_device *dev,
				 struct taprio_sched *q,
				 struct sched_gate_list *sched,
				 struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err = 0;

	if (!ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack,
			       "Device does not support taprio offload");
		return -EOPNOTSUPP;
	}

	offload = taprio_offload_alloc(sched->num_entries);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory for enabling offload mode");
		return -ENOMEM;
	}
	offload->enable = 1;
	taprio_sched_to_offload(dev, sched, offload);

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to setup taprio offload");
		goto done;
	}

	q->offloaded = true;

done:
	taprio_offload_free(offload);

	return err;
}

static int taprio_disable_offload(struct net_device *dev,
				  struct taprio_sched *q,
				  struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err;

	if (!q->offloaded)
		return 0;

	offload = taprio_offload_alloc(0);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory to disable offload mode");
		return -ENOMEM;
	}
	offload->enable = 0;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to disable offload");
		goto out;
	}

	q->offloaded = false;

out:
	taprio_offload_free(offload);

	return err;
}

/* If full offload is enabled, the only possible clockid is the net device's
 * PHC. For that reason, specifying a clockid through netlink is incorrect.
 * For txtime-assist, it is implicitly assumed that the device's PHC is kept
 * in sync with the specified clockid via a user space daemon such as phc2sys.
 * For both software taprio and txtime-assist, the clockid is used for the
 * hrtimer that advances the schedule, and is hence mandatory.
 */
static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err = -EINVAL;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		const struct ethtool_ops *ops = dev->ethtool_ops;
		struct ethtool_ts_info info = {
			.cmd = ETHTOOL_GET_TS_INFO,
			.phc_index = -1,
		};

		if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
			NL_SET_ERR_MSG(extack,
				       "The 'clockid' cannot be specified for full offload");
			goto out;
		}

		if (ops && ops->get_ts_info)
			err = ops->get_ts_info(dev, &info);

		if (err || info.phc_index < 0) {
			NL_SET_ERR_MSG(extack,
				       "Device does not have a PTP clock");
			err = -ENOTSUPP;
			goto out;
		}
	} else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);
		enum tk_offsets tk_offset;

		/* We only support static clockids and we don't allow
		 * for it to be modified after the first init.
		 */
		if (clockid < 0 ||
		    (q->clockid != -1 && q->clockid != clockid)) {
			NL_SET_ERR_MSG(extack,
				       "Changing the 'clockid' of a running schedule is not supported");
			err = -ENOTSUPP;
			goto out;
		}

		switch (clockid) {
		case CLOCK_REALTIME:
			tk_offset = TK_OFFS_REAL;
			break;
		case CLOCK_MONOTONIC:
			tk_offset = TK_OFFS_MAX;
			break;
		case CLOCK_BOOTTIME:
			tk_offset = TK_OFFS_BOOT;
			break;
		case CLOCK_TAI:
			tk_offset = TK_OFFS_TAI;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
			err = -EINVAL;
			goto out;
		}
		/* This pairs with READ_ONCE() in taprio_mono_to_any */
		WRITE_ONCE(q->tk_offset, tk_offset);

		q->clockid = clockid;
	} else {
		NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
		goto out;
	}

	/* Everything went ok, return success. */
	err = 0;

out:
	return err;
}
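
/* For reference, a software-mode schedule is typically installed from user
 * space along these lines (illustrative command; interface name and values
 * are arbitrary):
 *
 *	tc qdisc replace dev eth0 parent root taprio \
 *		num_tc 2 map 0 1 queues 1@0 1@1 \
 *		base-time 1000000000 \
 *		sched-entry S 01 300000 sched-entry S 02 300000 \
 *		clockid CLOCK_TAI
 *
 * With full offload (flags 0x2) the clockid argument must be omitted, per
 * the rules enforced above.
 */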

static int taprio_mqprio_cmp(const struct net_device *dev,
			     const struct tc_mqprio_qopt *mqprio)
{
	int i;

	if (!mqprio || mqprio->num_tc != dev->num_tc)
		return -1;

	for (i = 0; i < mqprio->num_tc; i++)
		if (dev->tc_to_txq[i].count != mqprio->count[i] ||
		    dev->tc_to_txq[i].offset != mqprio->offset[i])
			return -1;

	for (i = 0; i <= TC_BITMASK; i++)
		if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
			return -1;

	return 0;
}

/* The semantics of the 'flags' argument in relation to 'change()'
 * requests are interpreted following two rules (which are applied in
 * this order): (1) an omitted 'flags' argument is interpreted as
 * zero; (2) the 'flags' of a "running" taprio instance cannot be
 * changed.
 */
static int taprio_new_flags(const struct nlattr *attr, u32 old,
			    struct netlink_ext_ack *extack)
{
	u32 new = 0;

	if (attr)
		new = nla_get_u32(attr);

	if (old != TAPRIO_FLAGS_INVALID && old != new) {
		NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
		return -EOPNOTSUPP;
	}

	if (!taprio_flags_valid(new)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
		return -EINVAL;
	}

	return new;
}
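
/* Assuming the UAPI flag values from include/uapi/linux/pkt_sched.h
 * (BIT(0) for txtime-assist, BIT(1) for full offload), the combinations
 * accepted by taprio_flags_valid() are 0x0 (pure software), 0x1
 * (txtime-assist) and 0x2 (full offload); setting both bits at once is
 * rejected because the two modes are mutually exclusive.
 */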

static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
	struct sched_gate_list *oper, *admin, *new_admin;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mqprio_qopt *mqprio = NULL;
	unsigned long flags;
	ktime_t start;
	int i, err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
					  taprio_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
		mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);

	err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
			       q->flags, extack);
	if (err < 0)
		return err;

	q->flags = err;

	err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
	if (err < 0)
		return err;

	new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
	if (!new_admin) {
		NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&new_admin->entries);

	rcu_read_lock();
	oper = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);
	rcu_read_unlock();

	/* no changes - no new mqprio settings */
	if (!taprio_mqprio_cmp(dev, mqprio))
		mqprio = NULL;

	if (mqprio && (oper || admin)) {
		NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
		err = -ENOTSUPP;
		goto free_sched;
	}

	err = parse_taprio_schedule(q, tb, new_admin, extack);
	if (err < 0)
		goto free_sched;

	if (new_admin->num_entries == 0) {
		NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
		err = -EINVAL;
		goto free_sched;
	}

	err = taprio_parse_clockid(sch, tb, extack);
	if (err < 0)
		goto free_sched;

	taprio_set_picos_per_byte(dev, q);

	if (mqprio) {
		err = netdev_set_num_tc(dev, mqprio->num_tc);
		if (err)
			goto free_sched;
		for (i = 0; i < mqprio->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    mqprio->count[i],
					    mqprio->offset[i]);

		/* Always use supplied priority mappings */
		for (i = 0; i <= TC_BITMASK; i++)
			netdev_set_prio_tc_map(dev, i,
					       mqprio->prio_tc_map[i]);
	}

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		err = taprio_enable_offload(dev, q, new_admin, extack);
	else
		err = taprio_disable_offload(dev, q, extack);
	if (err)
		goto free_sched;

	/* Protects against enqueue()/dequeue() */
	spin_lock_bh(qdisc_lock(sch));

	if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
		if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
			err = -EINVAL;
			goto unlock;
		}

		q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
	}

	if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
	    !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    !hrtimer_active(&q->advance_timer)) {
		hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
		q->advance_timer.function = advance_sched;
	}

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		q->dequeue = taprio_dequeue_offload;
		q->peek = taprio_peek_offload;
	} else {
		/* Be sure to always keep the function pointers
		 * in a consistent state.
		 */
		q->dequeue = taprio_dequeue_soft;
		q->peek = taprio_peek_soft;
	}

	err = taprio_get_start_time(sch, new_admin, &start);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Internal error: failed to get start time");
		goto unlock;
	}

	setup_txtime(q, new_admin, start);

	if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		if (!oper) {
			rcu_assign_pointer(q->oper_sched, new_admin);
			err = 0;
			new_admin = NULL;
			goto unlock;
		}

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);
	} else {
		setup_first_close_time(q, new_admin, start);

		/* Protects against advance_sched() */
		spin_lock_irqsave(&q->current_entry_lock, flags);

		taprio_start_sched(sch, start, new_admin);

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);

		spin_unlock_irqrestore(&q->current_entry_lock, flags);

		if (FULL_OFFLOAD_IS_ENABLED(q->flags))
			taprio_offload_config_changed(q);
	}

	new_admin = NULL;
	err = 0;

unlock:
	spin_unlock_bh(qdisc_lock(sch));

free_sched:
	if (new_admin)
		call_rcu(&new_admin->rcu, taprio_free_sched_cb);

	return err;
}

static void taprio_reset(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i;

	hrtimer_cancel(&q->advance_timer);
	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues; i++)
			if (q->qdiscs[i])
				qdisc_reset(q->qdiscs[i]);
	}
}

static void taprio_destroy(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	unsigned int i;

	spin_lock(&taprio_list_lock);
	list_del(&q->taprio_list);
	spin_unlock(&taprio_list_lock);

	/* Note that taprio_reset() might not be called if an error
	 * happens in qdisc_create(), after taprio_init() has been called.
	 */
	hrtimer_cancel(&q->advance_timer);

	taprio_disable_offload(dev, q, NULL);

	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues; i++)
			qdisc_put(q->qdiscs[i]);

		kfree(q->qdiscs);
	}
	q->qdiscs = NULL;

	netdev_reset_tc(dev);

	if (q->oper_sched)
		call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);

	if (q->admin_sched)
		call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb);
}

static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i;

	spin_lock_init(&q->current_entry_lock);

	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
	q->advance_timer.function = advance_sched;

	q->dequeue = taprio_dequeue_soft;
	q->peek = taprio_peek_soft;

	q->root = sch;

	/* We only support static clockids. Use an invalid value as default
	 * and get the valid one on taprio_change().
	 */
	q->clockid = -1;
	q->flags = TAPRIO_FLAGS_INVALID;

	spin_lock(&taprio_list_lock);
	list_add(&q->taprio_list, &taprio_list);
	spin_unlock(&taprio_list_lock);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs; attachment can't fail */
	q->qdiscs = kcalloc(dev->num_tx_queues,
			    sizeof(q->qdiscs[0]),
			    GFP_KERNEL);

	if (!q->qdiscs)
		return -ENOMEM;

	if (!opt)
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		struct Qdisc *qdisc;

		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;

		if (i < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);

		q->qdiscs[i] = qdisc;
	}

	return taprio_change(sch, opt, extack);
}

static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return netdev_get_tx_queue(dev, ntx);
}

static int taprio_graft(struct Qdisc *sch, unsigned long cl,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = q->qdiscs[cl - 1];
	q->qdiscs[cl - 1] = new;

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

static int dump_entry(struct sk_buff *msg,
		      const struct sched_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
			entry->gate_mask))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
			entry->interval))
		goto nla_put_failure;

	return nla_nest_end(msg, item);

nla_put_failure:
	nla_nest_cancel(msg, item);
	return -1;
}

static int dump_schedule(struct sk_buff *msg,
			 const struct sched_gate_list *root)
{
	struct nlattr *entry_list;
	struct sched_entry *entry;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
			root->base_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
			root->cycle_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
			root->cycle_time_extension, TCA_TAPRIO_PAD))
		return -1;

	entry_list = nla_nest_start_noflag(msg,
					   TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
	if (!entry_list)
		goto error_nest;

	list_for_each_entry(entry, &root->entries, list) {
		if (dump_entry(msg, entry) < 0)
			goto error_nest;
	}

	nla_nest_end(msg, entry_list);
	return 0;

error_nest:
	nla_nest_cancel(msg, entry_list);
	return -1;
}

static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *oper, *admin;
	struct tc_mqprio_qopt opt = { 0 };
	struct nlattr *nest, *sched_nest;
	unsigned int i;

	rcu_read_lock();
	oper = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto start_error;

	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
		goto options_error;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
		goto options_error;

	if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
		goto options_error;

	if (q->txtime_delay &&
	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
		goto options_error;

	if (oper && dump_schedule(skb, oper))
		goto options_error;

	if (!admin)
		goto done;

	sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
	if (!sched_nest)
		goto options_error;

	if (dump_schedule(skb, admin))
		goto admin_error;

	nla_nest_end(skb, sched_nest);

done:
	rcu_read_unlock();

	return nla_nest_end(skb, nest);

admin_error:
	nla_nest_cancel(skb, sched_nest);

options_error:
	nla_nest_cancel(skb, nest);

start_error:
	rcu_read_unlock();
	return -ENOSPC;
}
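
/* Sketch of the netlink layout produced by taprio_dump() above (attribute
 * payloads omitted):
 *
 *	TCA_OPTIONS
 *	  TCA_TAPRIO_ATTR_PRIOMAP
 *	  TCA_TAPRIO_ATTR_SCHED_CLOCKID	(software/txtime-assist only)
 *	  TCA_TAPRIO_ATTR_FLAGS		(if non-zero)
 *	  TCA_TAPRIO_ATTR_TXTIME_DELAY	(if non-zero)
 *	  <oper schedule>		(base-time, cycle-time, entry list)
 *	  TCA_TAPRIO_ATTR_ADMIN_SCHED	(only if an admin schedule exists)
 *	    <admin schedule>
 */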

static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return q->qdiscs[ntx];
}

static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!taprio_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;

	return 0;
}

static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    qdisc_qstats_copy(d, sch) < 0)
		return -1;
	return 0;
}

static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops taprio_class_ops = {
	.graft		= taprio_graft,
	.leaf		= taprio_leaf,
	.find		= taprio_find,
	.walk		= taprio_walk,
	.dump		= taprio_dump_class,
	.dump_stats	= taprio_dump_class_stats,
	.select_queue	= taprio_select_queue,
};

static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
	.cl_ops		= &taprio_class_ops,
	.id		= "taprio",
	.priv_size	= sizeof(struct taprio_sched),
	.init		= taprio_init,
	.change		= taprio_change,
	.destroy	= taprio_destroy,
	.reset		= taprio_reset,
	.peek		= taprio_peek,
	.dequeue	= taprio_dequeue,
	.enqueue	= taprio_enqueue,
	.dump		= taprio_dump,
	.owner		= THIS_MODULE,
};

static struct notifier_block taprio_device_notifier = {
	.notifier_call = taprio_dev_notifier,
};

static int __init taprio_module_init(void)
{
	int err = register_netdevice_notifier(&taprio_device_notifier);

	if (err)
		return err;

	return register_qdisc(&taprio_qdisc_ops);
}

static void __exit taprio_module_exit(void)
{
	unregister_qdisc(&taprio_qdisc_ops);
	unregister_netdevice_notifier(&taprio_device_notifier);
}

module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");