// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	Time Aware Priority Scheduler */
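/* Illustrative configuration, assuming the iproute2 'tc' front end (the
 * interface name, base-time, gate masks and intervals below are examples
 * only):
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *             num_tc 3 \
 *             map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *             queues 1@0 1@1 2@2 \
 *             base-time 1528743495910289987 \
 *             sched-entry S 01 300000 \
 *             sched-entry S 02 300000 \
 *             sched-entry S 04 400000 \
 *             clockid CLOCK_TAI
 *
 * Each "sched-entry S <gate_mask> <interval_ns>" becomes one struct
 * sched_entry below; the gate mask is expressed in traffic classes.
 */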
#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/tcp.h>

static LIST_HEAD(taprio_list);
static DEFINE_SPINLOCK(taprio_list_lock);

#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX
36
struct sched_entry {
	struct list_head list;

	/* The instant that this entry "closes" and the next one
	 * should open; the qdisc will make some effort so that no
	 * packet leaves after this time.
	 */
	ktime_t close_time;
	ktime_t next_txtime;
	atomic_t budget;
	int index;
	u32 gate_mask;
	u32 interval;
	u8 command;
};
52
struct sched_gate_list {
	struct rcu_head rcu;
	struct list_head entries;
	size_t num_entries;
	ktime_t cycle_close_time;
	s64 cycle_time;
	s64 cycle_time_extension;
	s64 base_time;
};

struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	u32 flags;
	enum tk_offsets tk_offset;
	int clockid;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds it's sub-nanoseconds per byte
				    */

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct sched_gate_list __rcu *oper_sched;
	struct sched_gate_list __rcu *admin_sched;
	struct hrtimer advance_timer;
	struct list_head taprio_list;
	struct sk_buff *(*dequeue)(struct Qdisc *sch);
	struct sk_buff *(*peek)(struct Qdisc *sch);
	u32 txtime_delay;
};

struct __tc_taprio_qopt_offload {
	refcount_t users;
	struct tc_taprio_qopt_offload offload;
};
89
90static ktime_t sched_base_time(const struct sched_gate_list *sched)
91{
92 if (!sched)
93 return KTIME_MAX;
94
95 return ns_to_ktime(sched->base_time);
96}
97
static ktime_t taprio_get_time(struct taprio_sched *q)
{
	ktime_t mono = ktime_get();

	/* TK_OFFS_MAX is used as a sentinel for CLOCK_MONOTONIC, which
	 * needs no conversion from the monotonic clock.
	 */
	switch (q->tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, q->tk_offset);
	}

	return KTIME_MAX;
}
111
112static void taprio_free_sched_cb(struct rcu_head *head)
113{
114 struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
115 struct sched_entry *entry, *n;
116
117 if (!sched)
118 return;
119
120 list_for_each_entry_safe(entry, n, &sched->entries, list) {
121 list_del(&entry->list);
122 kfree(entry);
123 }
124
125 kfree(sched);
126}
127
128static void switch_schedules(struct taprio_sched *q,
129 struct sched_gate_list **admin,
130 struct sched_gate_list **oper)
131{
132 rcu_assign_pointer(q->oper_sched, *admin);
133 rcu_assign_pointer(q->admin_sched, NULL);
134
135 if (*oper)
136 call_rcu(&(*oper)->rcu, taprio_free_sched_cb);
137
138 *oper = *admin;
139 *admin = NULL;
140}
141
142
143static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
144{
145 ktime_t time_since_sched_start;
146 s32 time_elapsed;
147
148 time_since_sched_start = ktime_sub(time, sched->base_time);
149 div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);
150
151 return time_elapsed;
152}
153
154static ktime_t get_interval_end_time(struct sched_gate_list *sched,
155 struct sched_gate_list *admin,
156 struct sched_entry *entry,
157 ktime_t intv_start)
158{
159 s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
160 ktime_t intv_end, cycle_ext_end, cycle_end;
161
162 cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
163 intv_end = ktime_add_ns(intv_start, entry->interval);
164 cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);
165
166 if (ktime_before(intv_end, cycle_end))
167 return intv_end;
168 else if (admin && admin != sched &&
169 ktime_after(admin->base_time, cycle_end) &&
170 ktime_before(admin->base_time, cycle_ext_end))
171 return admin->base_time;
172 else
173 return cycle_end;
174}
175
static int length_to_duration(struct taprio_sched *q, int len)
{
	/* Transmission duration of a @len byte frame, in nanoseconds */
	return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
}

/* Returns the entry corresponding to the next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
185static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
186 struct Qdisc *sch,
187 struct sched_gate_list *sched,
188 struct sched_gate_list *admin,
189 ktime_t time,
190 ktime_t *interval_start,
191 ktime_t *interval_end,
192 bool validate_interval)
193{
194 ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
195 ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
196 struct sched_entry *entry = NULL, *entry_found = NULL;
197 struct taprio_sched *q = qdisc_priv(sch);
198 struct net_device *dev = qdisc_dev(sch);
199 bool entry_available = false;
200 s32 cycle_elapsed;
201 int tc, n;
202
203 tc = netdev_get_prio_tc_map(dev, skb->priority);
204 packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));
205
206 *interval_start = 0;
207 *interval_end = 0;
208
209 if (!sched)
210 return NULL;
211
212 cycle = sched->cycle_time;
213 cycle_elapsed = get_cycle_time_elapsed(sched, time);
214 curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
215 cycle_end = ktime_add_ns(curr_intv_end, cycle);
216
217 list_for_each_entry(entry, &sched->entries, list) {
218 curr_intv_start = curr_intv_end;
219 curr_intv_end = get_interval_end_time(sched, admin, entry,
220 curr_intv_start);
221
222 if (ktime_after(curr_intv_start, cycle_end))
223 break;
224
225 if (!(entry->gate_mask & BIT(tc)) ||
226 packet_transmit_time > entry->interval)
227 continue;
228
229 txtime = entry->next_txtime;
230
231 if (ktime_before(txtime, time) || validate_interval) {
232 transmit_end_time = ktime_add_ns(time, packet_transmit_time);
233 if ((ktime_before(curr_intv_start, time) &&
234 ktime_before(transmit_end_time, curr_intv_end)) ||
235 (ktime_after(curr_intv_start, time) && !validate_interval)) {
236 entry_found = entry;
237 *interval_start = curr_intv_start;
238 *interval_end = curr_intv_end;
239 break;
			} else if (!entry_available && !validate_interval) {
				/* Here, we are just trying to find out the
				 * first available interval in the next cycle.
				 */
				entry_available = true;
245 entry_found = entry;
246 *interval_start = ktime_add_ns(curr_intv_start, cycle);
247 *interval_end = ktime_add_ns(curr_intv_end, cycle);
248 }
249 } else if (ktime_before(txtime, earliest_txtime) &&
250 !entry_available) {
251 earliest_txtime = txtime;
252 entry_found = entry;
253 n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
254 *interval_start = ktime_add(curr_intv_start, n * cycle);
255 *interval_end = ktime_add(curr_intv_end, n * cycle);
256 }
257 }
258
259 return entry_found;
260}
261
262static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
263{
264 struct taprio_sched *q = qdisc_priv(sch);
265 struct sched_gate_list *sched, *admin;
266 ktime_t interval_start, interval_end;
267 struct sched_entry *entry;
268
269 rcu_read_lock();
270 sched = rcu_dereference(q->oper_sched);
271 admin = rcu_dereference(q->admin_sched);
272
273 entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
274 &interval_start, &interval_end, true);
275 rcu_read_unlock();
276
277 return entry;
278}
279
static bool taprio_flags_valid(u32 flags)
{
	/* Make sure no other flag bits are set. */
	if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
		      TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	/* txtime-assist and full offload are mutually exclusive */
	if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
	    (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	return true;
}

/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
	unsigned int offset = skb_network_offset(skb);
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	struct ipv6hdr _ipv6h;

	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
	if (!ipv6h)
		return 0;

	if (ipv6h->version == 4) {
		iph = (struct iphdr *)ipv6h;
		offset += iph->ihl * 4;

		/* special-case 6in4 tunnelling, as that is a common way to get
		 * v6 connectivity in the home
		 */
		if (iph->protocol == IPPROTO_IPV6) {
			ipv6h = skb_header_pointer(skb, offset,
						   sizeof(_ipv6h), &_ipv6h);

			if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
				return 0;
		} else if (iph->protocol != IPPROTO_TCP) {
			return 0;
		}
	} else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
		return 0;
	}

	/* TK_OFFS_MAX stands for CLOCK_MONOTONIC here and is not a valid
	 * offset for ktime_mono_to_any(), so skip the conversion.
	 */
	if (q->tk_offset == TK_OFFS_MAX)
		return skb->skb_mstamp_ns;

	return ktime_mono_to_any(skb->skb_mstamp_ns, q->tk_offset);
}

/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and the packet can
 *       be transmitted before it closes: schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open
 *       later in the cycle, set the txtime of the packet to the interval
 *       start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
343static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
344{
345 ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
346 struct taprio_sched *q = qdisc_priv(sch);
347 struct sched_gate_list *sched, *admin;
348 ktime_t minimum_time, now, txtime;
349 int len, packet_transmit_time;
350 struct sched_entry *entry;
351 bool sched_changed;
352
353 now = taprio_get_time(q);
354 minimum_time = ktime_add_ns(now, q->txtime_delay);
355
356 tcp_tstamp = get_tcp_tstamp(q, skb);
357 minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);
358
359 rcu_read_lock();
360 admin = rcu_dereference(q->admin_sched);
361 sched = rcu_dereference(q->oper_sched);
362 if (admin && ktime_after(minimum_time, admin->base_time))
363 switch_schedules(q, &admin, &sched);

	/* Until the schedule starts, all the queues are open */
	if (!sched || ktime_before(minimum_time, sched->base_time)) {
367 txtime = minimum_time;
368 goto done;
369 }
370
371 len = qdisc_pkt_len(skb);
372 packet_transmit_time = length_to_duration(q, len);
373
374 do {
375 sched_changed = false;
376
377 entry = find_entry_to_transmit(skb, sch, sched, admin,
378 minimum_time,
379 &interval_start, &interval_end,
380 false);
381 if (!entry) {
382 txtime = 0;
383 goto done;
384 }
385
386 txtime = entry->next_txtime;
387 txtime = max_t(ktime_t, txtime, minimum_time);
388 txtime = max_t(ktime_t, txtime, interval_start);
389
390 if (admin && admin != sched &&
391 ktime_after(txtime, admin->base_time)) {
392 sched = admin;
393 sched_changed = true;
394 continue;
395 }
396
397 transmit_end_time = ktime_add(txtime, packet_transmit_time);
398 minimum_time = transmit_end_time;

		/* Update the txtime of the current entry to the next time its
		 * interval starts.
		 */
403 if (ktime_after(transmit_end_time, interval_end))
404 entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
405 } while (sched_changed || ktime_after(transmit_end_time, interval_end));
406
407 entry->next_txtime = transmit_end_time;
408
409done:
410 rcu_read_unlock();
411 return txtime;
412}
413
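/* Enqueue one skb into the per-queue child qdisc. If the socket asked for
 * SO_TXTIME, the user-supplied launch time is only validated against the
 * gate schedule; otherwise, in txtime-assist mode, a launch time is computed
 * and stamped here. A zero txtime means no usable transmission window was
 * found and the packet is dropped.
 */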
414static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
415 struct Qdisc *child, struct sk_buff **to_free)
416{
417 struct taprio_sched *q = qdisc_priv(sch);
418
419 if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
420 if (!is_valid_interval(skb, sch))
421 return qdisc_drop(skb, sch, to_free);
422 } else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
423 skb->tstamp = get_packet_txtime(skb, sch);
424 if (!skb->tstamp)
425 return qdisc_drop(skb, sch, to_free);
426 }
427
428 qdisc_qstats_backlog_inc(sch, skb);
429 sch->q.qlen++;
430
431 return qdisc_enqueue(skb, child, to_free);
432}
433
434static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
435 struct sk_buff **to_free)
436{
437 struct taprio_sched *q = qdisc_priv(sch);
438 struct Qdisc *child;
439 int queue;
440
441 queue = skb_get_queue_mapping(skb);
442
443 child = q->qdiscs[queue];
444 if (unlikely(!child))
445 return qdisc_drop(skb, sch, to_free);

	/* Large packets might not be transmitted when the transmission duration
	 * exceeds any configured interval. Therefore, segment the skb into
	 * smaller chunks. Drivers with full offload are expected to handle
	 * this in hardware.
	 */
452 if (skb_is_gso(skb) && !FULL_OFFLOAD_IS_ENABLED(q->flags)) {
453 unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
454 netdev_features_t features = netif_skb_features(skb);
455 struct sk_buff *segs, *nskb;
456 int ret;
457
458 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
459 if (IS_ERR_OR_NULL(segs))
460 return qdisc_drop(skb, sch, to_free);
461
462 skb_list_walk_safe(segs, segs, nskb) {
463 skb_mark_not_on_list(segs);
464 qdisc_skb_cb(segs)->pkt_len = segs->len;
465 slen += segs->len;
466
467 ret = taprio_enqueue_one(segs, sch, child, to_free);
468 if (ret != NET_XMIT_SUCCESS) {
469 if (net_xmit_drop_count(ret))
470 qdisc_qstats_drop(sch);
471 } else {
472 numsegs++;
473 }
474 }
475
476 if (numsegs > 1)
477 qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
478 consume_skb(skb);
479
480 return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
481 }
482
483 return taprio_enqueue_one(skb, sch, child, to_free);
484}
485
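/* Software peek: only consider packets whose traffic class gate is open in
 * the current schedule entry (txtime-assist mode trusts skb->tstamp instead
 * and returns the first queued packet).
 */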
486static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
487{
488 struct taprio_sched *q = qdisc_priv(sch);
489 struct net_device *dev = qdisc_dev(sch);
490 struct sched_entry *entry;
491 struct sk_buff *skb;
492 u32 gate_mask;
493 int i;
494
495 rcu_read_lock();
496 entry = rcu_dereference(q->current_entry);
497 gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
498 rcu_read_unlock();
499
500 if (!gate_mask)
501 return NULL;
502
503 for (i = 0; i < dev->num_tx_queues; i++) {
504 struct Qdisc *child = q->qdiscs[i];
505 int prio;
506 u8 tc;
507
508 if (unlikely(!child))
509 continue;
510
511 skb = child->ops->peek(child);
512 if (!skb)
513 continue;
514
515 if (TXTIME_ASSIST_IS_ENABLED(q->flags))
516 return skb;
517
518 prio = skb->priority;
519 tc = netdev_get_prio_tc_map(dev, prio);
520
521 if (!(gate_mask & BIT(tc)))
522 continue;
523
524 return skb;
525 }
526
527 return NULL;
528}
529
530static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
531{
532 struct taprio_sched *q = qdisc_priv(sch);
533 struct net_device *dev = qdisc_dev(sch);
534 struct sk_buff *skb;
535 int i;
536
537 for (i = 0; i < dev->num_tx_queues; i++) {
538 struct Qdisc *child = q->qdiscs[i];
539
540 if (unlikely(!child))
541 continue;
542
543 skb = child->ops->peek(child);
544 if (!skb)
545 continue;
546
547 return skb;
548 }
549
550 return NULL;
551}
552
553static struct sk_buff *taprio_peek(struct Qdisc *sch)
554{
555 struct taprio_sched *q = qdisc_priv(sch);
556
557 return q->peek(sch);
558}
559
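/* The budget is the number of bytes that fit into the entry's interval at
 * the current link speed: interval is in ns, picos_per_byte in ps/byte, so
 * bytes = interval * 1000 / picos_per_byte. For example, a 300000 ns entry
 * at 1 Gb/s (8000 ps/byte) yields a budget of 37500 bytes.
 */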
560static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
561{
562 atomic_set(&entry->budget,
563 div64_u64((u64)entry->interval * 1000,
564 atomic64_read(&q->picos_per_byte)));
565}
566
567static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
568{
569 struct taprio_sched *q = qdisc_priv(sch);
570 struct net_device *dev = qdisc_dev(sch);
571 struct sk_buff *skb = NULL;
572 struct sched_entry *entry;
573 u32 gate_mask;
574 int i;
575
576 rcu_read_lock();
577 entry = rcu_dereference(q->current_entry);
	/* If there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open; this is in
	 * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 * "AdminGateStates".
	 */
583 gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
584
585 if (!gate_mask)
586 goto done;
587
588 for (i = 0; i < dev->num_tx_queues; i++) {
589 struct Qdisc *child = q->qdiscs[i];
590 ktime_t guard;
591 int prio;
592 int len;
593 u8 tc;
594
595 if (unlikely(!child))
596 continue;
597
598 if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
599 skb = child->ops->dequeue(child);
600 if (!skb)
601 continue;
602 goto skb_found;
603 }
604
605 skb = child->ops->peek(child);
606 if (!skb)
607 continue;
608
609 prio = skb->priority;
610 tc = netdev_get_prio_tc_map(dev, prio);
611
612 if (!(gate_mask & BIT(tc))) {
613 skb = NULL;
614 continue;
615 }
616
617 len = qdisc_pkt_len(skb);
618 guard = ktime_add_ns(taprio_get_time(q),
619 length_to_duration(q, len));

		/* In the case that there's no gate entry, there's no
		 * guard band...
		 */
624 if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
625 ktime_after(guard, entry->close_time)) {
626 skb = NULL;
627 continue;
628 }

		/* ...and no budget. */
631 if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
632 atomic_sub_return(len, &entry->budget) < 0) {
633 skb = NULL;
634 continue;
635 }
636
637 skb = child->ops->dequeue(child);
638 if (unlikely(!skb))
639 goto done;
640
641skb_found:
642 qdisc_bstats_update(sch, skb);
643 qdisc_qstats_backlog_dec(sch, skb);
644 sch->q.qlen--;
645
646 goto done;
647 }
648
649done:
650 rcu_read_unlock();
651
652 return skb;
653}
654
655static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
656{
657 struct taprio_sched *q = qdisc_priv(sch);
658 struct net_device *dev = qdisc_dev(sch);
659 struct sk_buff *skb;
660 int i;
661
662 for (i = 0; i < dev->num_tx_queues; i++) {
663 struct Qdisc *child = q->qdiscs[i];
664
665 if (unlikely(!child))
666 continue;
667
668 skb = child->ops->dequeue(child);
669 if (unlikely(!skb))
670 continue;
671
672 qdisc_bstats_update(sch, skb);
673 qdisc_qstats_backlog_dec(sch, skb);
674 sch->q.qlen--;
675
676 return skb;
677 }
678
679 return NULL;
680}
681
682static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
683{
684 struct taprio_sched *q = qdisc_priv(sch);
685
686 return q->dequeue(sch);
687}
688
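/* The cycle restarts either when the last entry of the schedule has run or
 * when the current entry closes exactly at the cycle close time (which can
 * happen when the cycle is truncated).
 */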
689static bool should_restart_cycle(const struct sched_gate_list *oper,
690 const struct sched_entry *entry)
691{
692 if (list_is_last(&entry->list, &oper->entries))
693 return true;
694
695 if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
696 return true;
697
698 return false;
699}
700
701static bool should_change_schedules(const struct sched_gate_list *admin,
702 const struct sched_gate_list *oper,
703 ktime_t close_time)
704{
705 ktime_t next_base_time, extension_time;
706
707 if (!admin)
708 return false;
709
710 next_base_time = sched_base_time(admin);

	/* This is the simple case: the admin schedule will start after the
	 * current schedule ends, so it should be safe to switch.
	 */
715 if (ktime_compare(next_base_time, close_time) <= 0)
716 return true;

	/* This is the cycle_time_extension case: if close_time plus the
	 * amount the cycle can be extended would fall after the next
	 * schedule's base_time, the current schedule may be extended by
	 * that amount instead of switching right at close_time.
	 */
723 extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);

	/* The IEEE 802.1Q-2018 specification isn't entirely clear about how
	 * precisely the extension should be applied, so be permissive and
	 * switch whenever the admin base_time falls within the extension
	 * window.
	 */
729 if (ktime_compare(next_base_time, extension_time) <= 0)
730 return true;
731
732 return false;
733}
734
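/* advance_sched() is the hrtimer callback that drives the software
 * schedule: it picks the next gate entry (restarting the cycle or switching
 * to the admin schedule when needed), computes its close time and budget,
 * publishes it as current_entry and rearms the timer before rescheduling
 * the qdisc.
 */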
735static enum hrtimer_restart advance_sched(struct hrtimer *timer)
736{
737 struct taprio_sched *q = container_of(timer, struct taprio_sched,
738 advance_timer);
739 struct sched_gate_list *oper, *admin;
740 struct sched_entry *entry, *next;
741 struct Qdisc *sch = q->root;
742 ktime_t close_time;
743
744 spin_lock(&q->current_entry_lock);
745 entry = rcu_dereference_protected(q->current_entry,
746 lockdep_is_held(&q->current_entry_lock));
747 oper = rcu_dereference_protected(q->oper_sched,
748 lockdep_is_held(&q->current_entry_lock));
749 admin = rcu_dereference_protected(q->admin_sched,
750 lockdep_is_held(&q->current_entry_lock));
751
752 if (!oper)
753 switch_schedules(q, &admin, &oper);

	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. the previous schedule just ended. The first
	 * entry of all schedules is pre-calculated during schedule
	 * initialization.
	 */
761 if (unlikely(!entry || entry->close_time == oper->base_time)) {
762 next = list_first_entry(&oper->entries, struct sched_entry,
763 list);
764 close_time = next->close_time;
765 goto first_run;
766 }
767
768 if (should_restart_cycle(oper, entry)) {
769 next = list_first_entry(&oper->entries, struct sched_entry,
770 list);
771 oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
772 oper->cycle_time);
773 } else {
774 next = list_next_entry(entry, list);
775 }
776
777 close_time = ktime_add_ns(entry->close_time, next->interval);
778 close_time = min_t(ktime_t, close_time, oper->cycle_close_time);
779
780 if (should_change_schedules(admin, oper, close_time)) {
		/* Set things up so the next time this runs, the new
		 * schedule runs.
		 */
784 close_time = sched_base_time(admin);
785 switch_schedules(q, &admin, &oper);
786 }
787
788 next->close_time = close_time;
789 taprio_set_budget(q, next);
790
791first_run:
792 rcu_assign_pointer(q->current_entry, next);
793 spin_unlock(&q->current_entry_lock);
794
795 hrtimer_set_expires(&q->advance_timer, close_time);
796
797 rcu_read_lock();
798 __netif_schedule(sch);
799 rcu_read_unlock();
800
801 return HRTIMER_RESTART;
802}
803
804static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
805 [TCA_TAPRIO_SCHED_ENTRY_INDEX] = { .type = NLA_U32 },
806 [TCA_TAPRIO_SCHED_ENTRY_CMD] = { .type = NLA_U8 },
807 [TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
808 [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] = { .type = NLA_U32 },
809};
810
811static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
812 [TCA_TAPRIO_ATTR_PRIOMAP] = {
813 .len = sizeof(struct tc_mqprio_qopt)
814 },
815 [TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST] = { .type = NLA_NESTED },
816 [TCA_TAPRIO_ATTR_SCHED_BASE_TIME] = { .type = NLA_S64 },
817 [TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] = { .type = NLA_NESTED },
818 [TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 },
819 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 },
820 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
821 [TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 },
822 [TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 },
823};
824
825static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
826 struct sched_entry *entry,
827 struct netlink_ext_ack *extack)
828{
829 int min_duration = length_to_duration(q, ETH_ZLEN);
830 u32 interval = 0;
831
832 if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
833 entry->command = nla_get_u8(
834 tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);
835
836 if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
837 entry->gate_mask = nla_get_u32(
838 tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);
839
840 if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
841 interval = nla_get_u32(
842 tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

	/* The interval should allow at least the minimum ethernet
	 * frame to go out.
	 */
847 if (interval < min_duration) {
848 NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
849 return -EINVAL;
850 }
851
852 entry->interval = interval;
853
854 return 0;
855}
856
857static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
858 struct sched_entry *entry, int index,
859 struct netlink_ext_ack *extack)
860{
861 struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
862 int err;
863
864 err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
865 entry_policy, NULL);
866 if (err < 0) {
867 NL_SET_ERR_MSG(extack, "Could not parse nested entry");
868 return -EINVAL;
869 }
870
871 entry->index = index;
872
873 return fill_sched_entry(q, tb, entry, extack);
874}
875
876static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
877 struct sched_gate_list *sched,
878 struct netlink_ext_ack *extack)
879{
880 struct nlattr *n;
881 int err, rem;
882 int i = 0;
883
884 if (!list)
885 return -EINVAL;
886
887 nla_for_each_nested(n, list, rem) {
888 struct sched_entry *entry;
889
890 if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
891 NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
892 continue;
893 }
894
895 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
896 if (!entry) {
897 NL_SET_ERR_MSG(extack, "Not enough memory for entry");
898 return -ENOMEM;
899 }
900
901 err = parse_sched_entry(q, n, entry, i, extack);
902 if (err < 0) {
903 kfree(entry);
904 return err;
905 }
906
907 list_add_tail(&entry->list, &sched->entries);
908 i++;
909 }
910
911 sched->num_entries = i;
912
913 return i;
914}
915
916static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
917 struct sched_gate_list *new,
918 struct netlink_ext_ack *extack)
919{
920 int err = 0;
921
922 if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
923 NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
924 return -ENOTSUPP;
925 }
926
927 if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
928 new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);
929
930 if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
931 new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);
932
933 if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
934 new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);
935
936 if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
937 err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
938 new, extack);
939 if (err < 0)
940 return err;
941
942 if (!new->cycle_time) {
943 struct sched_entry *entry;
944 ktime_t cycle = 0;
945
946 list_for_each_entry(entry, &new->entries, list)
947 cycle = ktime_add_ns(cycle, entry->interval);
948
949 if (!cycle) {
950 NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
951 return -EINVAL;
952 }
953
954 new->cycle_time = cycle;
955 }
956
957 return 0;
958}
959
960static int taprio_parse_mqprio_opt(struct net_device *dev,
961 struct tc_mqprio_qopt *qopt,
962 struct netlink_ext_ack *extack,
963 u32 taprio_flags)
964{
965 int i, j;
966
967 if (!qopt && !dev->num_tc) {
968 NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
969 return -EINVAL;
970 }

	/* If num_tc is already set, it means that the user already
	 * configured the mqprio part.
	 */
975 if (dev->num_tc)
976 return 0;

	/* Verify num_tc is not out of max range */
979 if (qopt->num_tc > TC_MAX_QUEUE) {
980 NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
981 return -EINVAL;
982 }

	/* taprio imposes that traffic classes map 1:n to tx queues */
985 if (qopt->num_tc > dev->num_tx_queues) {
986 NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
987 return -EINVAL;
988 }

	/* Verify priority mapping uses valid tcs */
991 for (i = 0; i <= TC_BITMASK; i++) {
992 if (qopt->prio_tc_map[i] >= qopt->num_tc) {
993 NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
994 return -EINVAL;
995 }
996 }
997
998 for (i = 0; i < qopt->num_tc; i++) {
999 unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue range is valid: 'last' being equal to
		 * real_num_tx_queues means the last queue is in use.
		 */
1004 if (qopt->offset[i] >= dev->num_tx_queues ||
1005 !qopt->count[i] ||
1006 last > dev->real_num_tx_queues) {
1007 NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
1008 return -EINVAL;
1009 }
1010
1011 if (TXTIME_ASSIST_IS_ENABLED(taprio_flags))
1012 continue;

		/* Verify that the offsets and counts do not overlap */
1015 for (j = i + 1; j < qopt->num_tc; j++) {
1016 if (last > qopt->offset[j]) {
1017 NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
1018 return -EINVAL;
1019 }
1020 }
1021 }
1022
1023 return 0;
1024}
1025
1026static int taprio_get_start_time(struct Qdisc *sch,
1027 struct sched_gate_list *sched,
1028 ktime_t *start)
1029{
1030 struct taprio_sched *q = qdisc_priv(sch);
1031 ktime_t now, base, cycle;
1032 s64 n;
1033
1034 base = sched_base_time(sched);
1035 now = taprio_get_time(q);
1036
1037 if (ktime_after(base, now)) {
1038 *start = base;
1039 return 0;
1040 }
1041
1042 cycle = sched->cycle_time;

	/* The qdisc is expected to have at least one sched_entry. Moreover,
	 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
	 * something went really wrong. In that case, we should warn about this
	 * inconsistent state and return an error.
	 */
1049 if (WARN_ON(!cycle))
1050 return -EFAULT;

	/* Schedule the start time for the beginning of the next
	 * cycle.
	 */
1055 n = div64_s64(ktime_sub_ns(now, base), cycle);
1056 *start = ktime_add_ns(base, (n + 1) * cycle);
1057 return 0;
1058}
1059
1060static void setup_first_close_time(struct taprio_sched *q,
1061 struct sched_gate_list *sched, ktime_t base)
1062{
1063 struct sched_entry *first;
1064 ktime_t cycle;
1065
1066 first = list_first_entry(&sched->entries,
1067 struct sched_entry, list);
1068
1069 cycle = sched->cycle_time;
1070
1071
1072 sched->cycle_close_time = ktime_add_ns(base, cycle);
1073
1074 first->close_time = ktime_add_ns(base, first->interval);
1075 taprio_set_budget(q, first);
1076 rcu_assign_pointer(q->current_entry, NULL);
1077}
1078
1079static void taprio_start_sched(struct Qdisc *sch,
1080 ktime_t start, struct sched_gate_list *new)
1081{
1082 struct taprio_sched *q = qdisc_priv(sch);
1083 ktime_t expires;
1084
1085 if (FULL_OFFLOAD_IS_ENABLED(q->flags))
1086 return;
1087
1088 expires = hrtimer_get_expires(&q->advance_timer);
1089 if (expires == 0)
1090 expires = KTIME_MAX;

	/* If the new schedule starts before the next expiration, we
	 * reschedule it here, so we change the expiration time.
	 */
1096 start = min_t(ktime_t, start, expires);
1097
1098 hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
1099}
1100
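/* At a link speed of S Mbit/s one byte takes 8 * 10^6 / S picoseconds on
 * the wire (e.g. 8000 ps/byte at 1 Gb/s). When the link speed cannot be
 * read, fall back to SPEED_10 so the math stays well defined.
 */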
1101static void taprio_set_picos_per_byte(struct net_device *dev,
1102 struct taprio_sched *q)
1103{
1104 struct ethtool_link_ksettings ecmd;
1105 int speed = SPEED_10;
1106 int picos_per_byte;
1107 int err;
1108
1109 err = __ethtool_get_link_ksettings(dev, &ecmd);
1110 if (err < 0)
1111 goto skip;
1112
1113 if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
1114 speed = ecmd.base.speed;
1115
1116skip:
1117 picos_per_byte = (USEC_PER_SEC * 8) / speed;
1118
1119 atomic64_set(&q->picos_per_byte, picos_per_byte);
1120 netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
1121 dev->name, (long long)atomic64_read(&q->picos_per_byte),
1122 ecmd.base.speed);
1123}
1124
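/* Link state changes may change the link speed, and the guard band and
 * budget calculations depend on picos_per_byte, so recompute it whenever
 * one of our devices comes up or changes.
 */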
1125static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
1126 void *ptr)
1127{
1128 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1129 struct net_device *qdev;
1130 struct taprio_sched *q;
1131 bool found = false;
1132
1133 ASSERT_RTNL();
1134
1135 if (event != NETDEV_UP && event != NETDEV_CHANGE)
1136 return NOTIFY_DONE;
1137
1138 spin_lock(&taprio_list_lock);
1139 list_for_each_entry(q, &taprio_list, taprio_list) {
1140 qdev = qdisc_dev(q->root);
1141 if (qdev == dev) {
1142 found = true;
1143 break;
1144 }
1145 }
1146 spin_unlock(&taprio_list_lock);
1147
1148 if (found)
1149 taprio_set_picos_per_byte(dev, q);
1150
1151 return NOTIFY_DONE;
1152}
1153
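/* Seed each entry's next_txtime with the first time its interval occurs
 * after 'base', so txtime-assist mode can hand out launch times right away.
 */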
1154static void setup_txtime(struct taprio_sched *q,
1155 struct sched_gate_list *sched, ktime_t base)
1156{
1157 struct sched_entry *entry;
1158 u32 interval = 0;
1159
1160 list_for_each_entry(entry, &sched->entries, list) {
1161 entry->next_txtime = ktime_add_ns(base, interval);
1162 interval += entry->interval;
1163 }
1164}
1165
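/* The offload structure handed to drivers is refcounted: a driver that
 * needs to keep it beyond ndo_setup_tc() takes a reference with
 * taprio_offload_get() and drops it with taprio_offload_free().
 */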
1166static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
1167{
1168 struct __tc_taprio_qopt_offload *__offload;
1169
1170 __offload = kzalloc(struct_size(__offload, offload.entries, num_entries),
1171 GFP_KERNEL);
1172 if (!__offload)
1173 return NULL;
1174
1175 refcount_set(&__offload->users, 1);
1176
1177 return &__offload->offload;
1178}
1179
1180struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
1181 *offload)
1182{
1183 struct __tc_taprio_qopt_offload *__offload;
1184
1185 __offload = container_of(offload, struct __tc_taprio_qopt_offload,
1186 offload);
1187
1188 refcount_inc(&__offload->users);
1189
1190 return offload;
1191}
1192EXPORT_SYMBOL_GPL(taprio_offload_get);
1193
1194void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
1195{
1196 struct __tc_taprio_qopt_offload *__offload;
1197
1198 __offload = container_of(offload, struct __tc_taprio_qopt_offload,
1199 offload);
1200
1201 if (!refcount_dec_and_test(&__offload->users))
1202 return;
1203
1204 kfree(__offload);
1205}
1206EXPORT_SYMBOL_GPL(taprio_offload_free);

/* This function only serves to keep the pointers to the "oper" and "admin"
 * schedules valid in relation to their base times, so that dump() reports
 * the right schedule: with full offload the actual gate switching is done
 * by the hardware.
 */
1220static void taprio_offload_config_changed(struct taprio_sched *q)
1221{
1222 struct sched_gate_list *oper, *admin;
1223
1224 spin_lock(&q->current_entry_lock);
1225
1226 oper = rcu_dereference_protected(q->oper_sched,
1227 lockdep_is_held(&q->current_entry_lock));
1228 admin = rcu_dereference_protected(q->admin_sched,
1229 lockdep_is_held(&q->current_entry_lock));
1230
1231 switch_schedules(q, &admin, &oper);
1232
1233 spin_unlock(&q->current_entry_lock);
1234}
1235
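/* For the offload interface the gate mask is expressed in terms of TX
 * queues rather than traffic classes, so translate it using the device's
 * tc_to_txq mapping.
 */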
1236static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
1237{
1238 u32 i, queue_mask = 0;
1239
1240 for (i = 0; i < dev->num_tc; i++) {
1241 u32 offset, count;
1242
1243 if (!(tc_mask & BIT(i)))
1244 continue;
1245
1246 offset = dev->tc_to_txq[i].offset;
1247 count = dev->tc_to_txq[i].count;
1248
1249 queue_mask |= GENMASK(offset + count - 1, offset);
1250 }
1251
1252 return queue_mask;
1253}
1254
1255static void taprio_sched_to_offload(struct net_device *dev,
1256 struct sched_gate_list *sched,
1257 struct tc_taprio_qopt_offload *offload)
1258{
1259 struct sched_entry *entry;
1260 int i = 0;
1261
1262 offload->base_time = sched->base_time;
1263 offload->cycle_time = sched->cycle_time;
1264 offload->cycle_time_extension = sched->cycle_time_extension;
1265
1266 list_for_each_entry(entry, &sched->entries, list) {
1267 struct tc_taprio_sched_entry *e = &offload->entries[i];
1268
1269 e->command = entry->command;
1270 e->interval = entry->interval;
1271 e->gate_mask = tc_map_to_queue_mask(dev, entry->gate_mask);
1272
1273 i++;
1274 }
1275
1276 offload->num_entries = i;
1277}
1278
1279static int taprio_enable_offload(struct net_device *dev,
1280 struct taprio_sched *q,
1281 struct sched_gate_list *sched,
1282 struct netlink_ext_ack *extack)
1283{
1284 const struct net_device_ops *ops = dev->netdev_ops;
1285 struct tc_taprio_qopt_offload *offload;
1286 int err = 0;
1287
1288 if (!ops->ndo_setup_tc) {
1289 NL_SET_ERR_MSG(extack,
1290 "Device does not support taprio offload");
1291 return -EOPNOTSUPP;
1292 }
1293
1294 offload = taprio_offload_alloc(sched->num_entries);
1295 if (!offload) {
1296 NL_SET_ERR_MSG(extack,
1297 "Not enough memory for enabling offload mode");
1298 return -ENOMEM;
1299 }
1300 offload->enable = 1;
1301 taprio_sched_to_offload(dev, sched, offload);
1302
1303 err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
1304 if (err < 0) {
1305 NL_SET_ERR_MSG(extack,
1306 "Device failed to setup taprio offload");
1307 goto done;
1308 }
1309
1310done:
1311 taprio_offload_free(offload);
1312
1313 return err;
1314}
1315
1316static int taprio_disable_offload(struct net_device *dev,
1317 struct taprio_sched *q,
1318 struct netlink_ext_ack *extack)
1319{
1320 const struct net_device_ops *ops = dev->netdev_ops;
1321 struct tc_taprio_qopt_offload *offload;
1322 int err;
1323
1324 if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
1325 return 0;
1326
1327 if (!ops->ndo_setup_tc)
1328 return -EOPNOTSUPP;
1329
1330 offload = taprio_offload_alloc(0);
1331 if (!offload) {
1332 NL_SET_ERR_MSG(extack,
1333 "Not enough memory to disable offload mode");
1334 return -ENOMEM;
1335 }
1336 offload->enable = 0;
1337
1338 err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
1339 if (err < 0) {
1340 NL_SET_ERR_MSG(extack,
1341 "Device failed to disable offload");
1342 goto out;
1343 }
1344
1345out:
1346 taprio_offload_free(offload);
1347
1348 return err;
1349}

/* Validate the 'clockid' configuration: with full offload the schedule runs
 * on the device's PTP clock and 'clockid' must not be specified; otherwise a
 * static 'clockid' is mandatory and cannot be changed once the qdisc has
 * been configured.
 */
1358static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
1359 struct netlink_ext_ack *extack)
1360{
1361 struct taprio_sched *q = qdisc_priv(sch);
1362 struct net_device *dev = qdisc_dev(sch);
1363 int err = -EINVAL;
1364
1365 if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
1366 const struct ethtool_ops *ops = dev->ethtool_ops;
1367 struct ethtool_ts_info info = {
1368 .cmd = ETHTOOL_GET_TS_INFO,
1369 .phc_index = -1,
1370 };
1371
1372 if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
1373 NL_SET_ERR_MSG(extack,
1374 "The 'clockid' cannot be specified for full offload");
1375 goto out;
1376 }
1377
1378 if (ops && ops->get_ts_info)
1379 err = ops->get_ts_info(dev, &info);
1380
1381 if (err || info.phc_index < 0) {
1382 NL_SET_ERR_MSG(extack,
1383 "Device does not have a PTP clock");
1384 err = -ENOTSUPP;
1385 goto out;
1386 }
1387 } else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
1388 int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);

		/* We only support static clockids and we don't allow
		 * them to be modified after the first init.
		 */
1393 if (clockid < 0 ||
1394 (q->clockid != -1 && q->clockid != clockid)) {
1395 NL_SET_ERR_MSG(extack,
1396 "Changing the 'clockid' of a running schedule is not supported");
1397 err = -ENOTSUPP;
1398 goto out;
1399 }
1400
1401 switch (clockid) {
1402 case CLOCK_REALTIME:
1403 q->tk_offset = TK_OFFS_REAL;
1404 break;
1405 case CLOCK_MONOTONIC:
1406 q->tk_offset = TK_OFFS_MAX;
1407 break;
1408 case CLOCK_BOOTTIME:
1409 q->tk_offset = TK_OFFS_BOOT;
1410 break;
1411 case CLOCK_TAI:
1412 q->tk_offset = TK_OFFS_TAI;
1413 break;
1414 default:
1415 NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
1416 err = -EINVAL;
1417 goto out;
1418 }
1419
1420 q->clockid = clockid;
1421 } else {
1422 NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
1423 goto out;
1424 }
1425
	/* Everything went ok, return success. */
1427 err = 0;
1428
1429out:
1430 return err;
1431}
1432
1433static int taprio_mqprio_cmp(const struct net_device *dev,
1434 const struct tc_mqprio_qopt *mqprio)
1435{
1436 int i;
1437
1438 if (!mqprio || mqprio->num_tc != dev->num_tc)
1439 return -1;
1440
1441 for (i = 0; i < mqprio->num_tc; i++)
1442 if (dev->tc_to_txq[i].count != mqprio->count[i] ||
1443 dev->tc_to_txq[i].offset != mqprio->offset[i])
1444 return -1;
1445
1446 for (i = 0; i <= TC_BITMASK; i++)
1447 if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
1448 return -1;
1449
1450 return 0;
1451}

/* The semantics of the 'flags' argument in relation to 'change()'
 * requests are interpreted following two rules (which are applied in
 * this order): (1) an omitted 'flags' argument is interpreted as
 * zero; (2) the 'flags' of a "running" taprio instance cannot be
 * changed.
 */
1459static int taprio_new_flags(const struct nlattr *attr, u32 old,
1460 struct netlink_ext_ack *extack)
1461{
1462 u32 new = 0;
1463
1464 if (attr)
1465 new = nla_get_u32(attr);
1466
1467 if (old != TAPRIO_FLAGS_INVALID && old != new) {
1468 NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
1469 return -EOPNOTSUPP;
1470 }
1471
1472 if (!taprio_flags_valid(new)) {
1473 NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
1474 return -EINVAL;
1475 }
1476
1477 return new;
1478}
1479
1480static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
1481 struct netlink_ext_ack *extack)
1482{
1483 struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
1484 struct sched_gate_list *oper, *admin, *new_admin;
1485 struct taprio_sched *q = qdisc_priv(sch);
1486 struct net_device *dev = qdisc_dev(sch);
1487 struct tc_mqprio_qopt *mqprio = NULL;
1488 unsigned long flags;
1489 ktime_t start;
1490 int i, err;
1491
1492 err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
1493 taprio_policy, extack);
1494 if (err < 0)
1495 return err;
1496
1497 if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
1498 mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
1499
1500 err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
1501 q->flags, extack);
1502 if (err < 0)
1503 return err;
1504
1505 q->flags = err;
1506
1507 err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
1508 if (err < 0)
1509 return err;
1510
1511 new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
1512 if (!new_admin) {
1513 NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
1514 return -ENOMEM;
1515 }
1516 INIT_LIST_HEAD(&new_admin->entries);
1517
1518 rcu_read_lock();
1519 oper = rcu_dereference(q->oper_sched);
1520 admin = rcu_dereference(q->admin_sched);
1521 rcu_read_unlock();

	/* no changes - no new mqprio settings */
1524 if (!taprio_mqprio_cmp(dev, mqprio))
1525 mqprio = NULL;
1526
1527 if (mqprio && (oper || admin)) {
1528 NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
1529 err = -ENOTSUPP;
1530 goto free_sched;
1531 }
1532
1533 err = parse_taprio_schedule(q, tb, new_admin, extack);
1534 if (err < 0)
1535 goto free_sched;
1536
1537 if (new_admin->num_entries == 0) {
1538 NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
1539 err = -EINVAL;
1540 goto free_sched;
1541 }
1542
1543 err = taprio_parse_clockid(sch, tb, extack);
1544 if (err < 0)
1545 goto free_sched;
1546
1547 taprio_set_picos_per_byte(dev, q);
1548
1549 if (mqprio) {
1550 netdev_set_num_tc(dev, mqprio->num_tc);
1551 for (i = 0; i < mqprio->num_tc; i++)
1552 netdev_set_tc_queue(dev, i,
1553 mqprio->count[i],
1554 mqprio->offset[i]);

		/* Always use supplied priority mappings */
1557 for (i = 0; i <= TC_BITMASK; i++)
1558 netdev_set_prio_tc_map(dev, i,
1559 mqprio->prio_tc_map[i]);
1560 }
1561
1562 if (FULL_OFFLOAD_IS_ENABLED(q->flags))
1563 err = taprio_enable_offload(dev, q, new_admin, extack);
1564 else
1565 err = taprio_disable_offload(dev, q, extack);
1566 if (err)
1567 goto free_sched;

	/* Protects against enqueue()/dequeue() */
1570 spin_lock_bh(qdisc_lock(sch));
1571
1572 if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
1573 if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
1574 NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
1575 err = -EINVAL;
1576 goto unlock;
1577 }
1578
1579 q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
1580 }
1581
1582 if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
1583 !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
1584 !hrtimer_active(&q->advance_timer)) {
1585 hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
1586 q->advance_timer.function = advance_sched;
1587 }
1588
1589 if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
1590 q->dequeue = taprio_dequeue_offload;
1591 q->peek = taprio_peek_offload;
1592 } else {
1593
1594
1595
1596 q->dequeue = taprio_dequeue_soft;
1597 q->peek = taprio_peek_soft;
1598 }
1599
1600 err = taprio_get_start_time(sch, new_admin, &start);
1601 if (err < 0) {
		NL_SET_ERR_MSG(extack, "Internal error: failed to get start time");
1603 goto unlock;
1604 }
1605
1606 setup_txtime(q, new_admin, start);
1607
1608 if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
1609 if (!oper) {
1610 rcu_assign_pointer(q->oper_sched, new_admin);
1611 err = 0;
1612 new_admin = NULL;
1613 goto unlock;
1614 }
1615
1616 rcu_assign_pointer(q->admin_sched, new_admin);
1617 if (admin)
1618 call_rcu(&admin->rcu, taprio_free_sched_cb);
1619 } else {
1620 setup_first_close_time(q, new_admin, start);

		/* Protects against advance_sched() */
1623 spin_lock_irqsave(&q->current_entry_lock, flags);
1624
1625 taprio_start_sched(sch, start, new_admin);
1626
1627 rcu_assign_pointer(q->admin_sched, new_admin);
1628 if (admin)
1629 call_rcu(&admin->rcu, taprio_free_sched_cb);
1630
1631 spin_unlock_irqrestore(&q->current_entry_lock, flags);
1632
1633 if (FULL_OFFLOAD_IS_ENABLED(q->flags))
1634 taprio_offload_config_changed(q);
1635 }
1636
1637 new_admin = NULL;
1638 err = 0;
1639
1640unlock:
1641 spin_unlock_bh(qdisc_lock(sch));
1642
1643free_sched:
1644 if (new_admin)
1645 call_rcu(&new_admin->rcu, taprio_free_sched_cb);
1646
1647 return err;
1648}
1649
1650static void taprio_reset(struct Qdisc *sch)
1651{
1652 struct taprio_sched *q = qdisc_priv(sch);
1653 struct net_device *dev = qdisc_dev(sch);
1654 int i;
1655
1656 hrtimer_cancel(&q->advance_timer);
1657 if (q->qdiscs) {
1658 for (i = 0; i < dev->num_tx_queues; i++)
1659 if (q->qdiscs[i])
1660 qdisc_reset(q->qdiscs[i]);
1661 }
1662 sch->qstats.backlog = 0;
1663 sch->q.qlen = 0;
1664}
1665
1666static void taprio_destroy(struct Qdisc *sch)
1667{
1668 struct taprio_sched *q = qdisc_priv(sch);
1669 struct net_device *dev = qdisc_dev(sch);
1670 unsigned int i;
1671
1672 spin_lock(&taprio_list_lock);
1673 list_del(&q->taprio_list);
1674 spin_unlock(&taprio_list_lock);
1675
1676
1677 taprio_disable_offload(dev, q, NULL);
1678
1679 if (q->qdiscs) {
1680 for (i = 0; i < dev->num_tx_queues; i++)
1681 qdisc_put(q->qdiscs[i]);
1682
1683 kfree(q->qdiscs);
1684 }
1685 q->qdiscs = NULL;
1686
1687 netdev_reset_tc(dev);
1688
1689 if (q->oper_sched)
1690 call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);
1691
1692 if (q->admin_sched)
1693 call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb);
1694}
1695
1696static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
1697 struct netlink_ext_ack *extack)
1698{
1699 struct taprio_sched *q = qdisc_priv(sch);
1700 struct net_device *dev = qdisc_dev(sch);
1701 int i;
1702
1703 spin_lock_init(&q->current_entry_lock);
1704
1705 hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
1706 q->advance_timer.function = advance_sched;
1707
1708 q->dequeue = taprio_dequeue_soft;
1709 q->peek = taprio_peek_soft;
1710
1711 q->root = sch;

	/* We only support static clockids. Use an invalid value as default
	 * and get the valid one on taprio_change().
	 */
1716 q->clockid = -1;
1717 q->flags = TAPRIO_FLAGS_INVALID;
1718
1719 spin_lock(&taprio_list_lock);
1720 list_add(&q->taprio_list, &taprio_list);
1721 spin_unlock(&taprio_list_lock);
1722
1723 if (sch->parent != TC_H_ROOT)
1724 return -EOPNOTSUPP;
1725
1726 if (!netif_is_multiqueue(dev))
1727 return -EOPNOTSUPP;

	/* Pre-allocate the per-TX-queue child qdisc array */
1730 q->qdiscs = kcalloc(dev->num_tx_queues,
1731 sizeof(q->qdiscs[0]),
1732 GFP_KERNEL);
1733
1734 if (!q->qdiscs)
1735 return -ENOMEM;
1736
1737 if (!opt)
1738 return -EINVAL;
1739
1740 for (i = 0; i < dev->num_tx_queues; i++) {
1741 struct netdev_queue *dev_queue;
1742 struct Qdisc *qdisc;
1743
1744 dev_queue = netdev_get_tx_queue(dev, i);
1745 qdisc = qdisc_create_dflt(dev_queue,
1746 &pfifo_qdisc_ops,
1747 TC_H_MAKE(TC_H_MAJ(sch->handle),
1748 TC_H_MIN(i + 1)),
1749 extack);
1750 if (!qdisc)
1751 return -ENOMEM;
1752
1753 if (i < dev->real_num_tx_queues)
1754 qdisc_hash_add(qdisc, false);
1755
1756 q->qdiscs[i] = qdisc;
1757 }
1758
1759 return taprio_change(sch, opt, extack);
1760}
1761
1762static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
1763 unsigned long cl)
1764{
1765 struct net_device *dev = qdisc_dev(sch);
1766 unsigned long ntx = cl - 1;
1767
1768 if (ntx >= dev->num_tx_queues)
1769 return NULL;
1770
1771 return netdev_get_tx_queue(dev, ntx);
1772}
1773
1774static int taprio_graft(struct Qdisc *sch, unsigned long cl,
1775 struct Qdisc *new, struct Qdisc **old,
1776 struct netlink_ext_ack *extack)
1777{
1778 struct taprio_sched *q = qdisc_priv(sch);
1779 struct net_device *dev = qdisc_dev(sch);
1780 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
1781
1782 if (!dev_queue)
1783 return -EINVAL;
1784
1785 if (dev->flags & IFF_UP)
1786 dev_deactivate(dev);
1787
1788 *old = q->qdiscs[cl - 1];
1789 q->qdiscs[cl - 1] = new;
1790
1791 if (new)
1792 new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1793
1794 if (dev->flags & IFF_UP)
1795 dev_activate(dev);
1796
1797 return 0;
1798}
1799
1800static int dump_entry(struct sk_buff *msg,
1801 const struct sched_entry *entry)
1802{
1803 struct nlattr *item;
1804
1805 item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
1806 if (!item)
1807 return -ENOSPC;
1808
1809 if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
1810 goto nla_put_failure;
1811
1812 if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
1813 goto nla_put_failure;
1814
1815 if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
1816 entry->gate_mask))
1817 goto nla_put_failure;
1818
1819 if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
1820 entry->interval))
1821 goto nla_put_failure;
1822
1823 return nla_nest_end(msg, item);
1824
1825nla_put_failure:
1826 nla_nest_cancel(msg, item);
1827 return -1;
1828}
1829
1830static int dump_schedule(struct sk_buff *msg,
1831 const struct sched_gate_list *root)
1832{
1833 struct nlattr *entry_list;
1834 struct sched_entry *entry;
1835
1836 if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
1837 root->base_time, TCA_TAPRIO_PAD))
1838 return -1;
1839
1840 if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
1841 root->cycle_time, TCA_TAPRIO_PAD))
1842 return -1;
1843
1844 if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
1845 root->cycle_time_extension, TCA_TAPRIO_PAD))
1846 return -1;
1847
1848 entry_list = nla_nest_start_noflag(msg,
1849 TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
1850 if (!entry_list)
1851 goto error_nest;
1852
1853 list_for_each_entry(entry, &root->entries, list) {
1854 if (dump_entry(msg, entry) < 0)
1855 goto error_nest;
1856 }
1857
1858 nla_nest_end(msg, entry_list);
1859 return 0;
1860
1861error_nest:
1862 nla_nest_cancel(msg, entry_list);
1863 return -1;
1864}
1865
1866static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
1867{
1868 struct taprio_sched *q = qdisc_priv(sch);
1869 struct net_device *dev = qdisc_dev(sch);
1870 struct sched_gate_list *oper, *admin;
1871 struct tc_mqprio_qopt opt = { 0 };
1872 struct nlattr *nest, *sched_nest;
1873 unsigned int i;
1874
1875 rcu_read_lock();
1876 oper = rcu_dereference(q->oper_sched);
1877 admin = rcu_dereference(q->admin_sched);
1878
1879 opt.num_tc = netdev_get_num_tc(dev);
1880 memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
1881
1882 for (i = 0; i < netdev_get_num_tc(dev); i++) {
1883 opt.count[i] = dev->tc_to_txq[i].count;
1884 opt.offset[i] = dev->tc_to_txq[i].offset;
1885 }
1886
1887 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
1888 if (!nest)
1889 goto start_error;
1890
1891 if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
1892 goto options_error;
1893
1894 if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
1895 nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
1896 goto options_error;
1897
1898 if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
1899 goto options_error;
1900
1901 if (q->txtime_delay &&
1902 nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
1903 goto options_error;
1904
1905 if (oper && dump_schedule(skb, oper))
1906 goto options_error;
1907
1908 if (!admin)
1909 goto done;
1910
1911 sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
1912 if (!sched_nest)
1913 goto options_error;
1914
1915 if (dump_schedule(skb, admin))
1916 goto admin_error;
1917
1918 nla_nest_end(skb, sched_nest);
1919
1920done:
1921 rcu_read_unlock();
1922
1923 return nla_nest_end(skb, nest);
1924
1925admin_error:
1926 nla_nest_cancel(skb, sched_nest);
1927
1928options_error:
1929 nla_nest_cancel(skb, nest);
1930
1931start_error:
1932 rcu_read_unlock();
1933 return -ENOSPC;
1934}
1935
1936static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
1937{
1938 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
1939
1940 if (!dev_queue)
1941 return NULL;
1942
1943 return dev_queue->qdisc_sleeping;
1944}
1945
1946static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
1947{
1948 unsigned int ntx = TC_H_MIN(classid);
1949
1950 if (!taprio_queue_get(sch, ntx))
1951 return 0;
1952 return ntx;
1953}
1954
1955static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
1956 struct sk_buff *skb, struct tcmsg *tcm)
1957{
1958 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
1959
1960 tcm->tcm_parent = TC_H_ROOT;
1961 tcm->tcm_handle |= TC_H_MIN(cl);
1962 tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
1963
1964 return 0;
1965}
1966
1967static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
1968 struct gnet_dump *d)
1969 __releases(d->lock)
1970 __acquires(d->lock)
1971{
1972 struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
1973
1974 sch = dev_queue->qdisc_sleeping;
1975 if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
1976 qdisc_qstats_copy(d, sch) < 0)
1977 return -1;
1978 return 0;
1979}
1980
1981static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1982{
1983 struct net_device *dev = qdisc_dev(sch);
1984 unsigned long ntx;
1985
1986 if (arg->stop)
1987 return;
1988
1989 arg->count = arg->skip;
1990 for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
1991 if (arg->fn(sch, ntx + 1, arg) < 0) {
1992 arg->stop = 1;
1993 break;
1994 }
1995 arg->count++;
1996 }
1997}
1998
1999static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
2000 struct tcmsg *tcm)
2001{
2002 return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
2003}
2004
2005static const struct Qdisc_class_ops taprio_class_ops = {
2006 .graft = taprio_graft,
2007 .leaf = taprio_leaf,
2008 .find = taprio_find,
2009 .walk = taprio_walk,
2010 .dump = taprio_dump_class,
2011 .dump_stats = taprio_dump_class_stats,
2012 .select_queue = taprio_select_queue,
2013};
2014
2015static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
2016 .cl_ops = &taprio_class_ops,
2017 .id = "taprio",
2018 .priv_size = sizeof(struct taprio_sched),
2019 .init = taprio_init,
2020 .change = taprio_change,
2021 .destroy = taprio_destroy,
2022 .reset = taprio_reset,
2023 .peek = taprio_peek,
2024 .dequeue = taprio_dequeue,
2025 .enqueue = taprio_enqueue,
2026 .dump = taprio_dump,
2027 .owner = THIS_MODULE,
2028};
2029
2030static struct notifier_block taprio_device_notifier = {
2031 .notifier_call = taprio_dev_notifier,
2032};
2033
2034static int __init taprio_module_init(void)
2035{
2036 int err = register_netdevice_notifier(&taprio_device_notifier);
2037
2038 if (err)
2039 return err;
2040
2041 return register_qdisc(&taprio_qdisc_ops);
2042}
2043
2044static void __exit taprio_module_exit(void)
2045{
2046 unregister_qdisc(&taprio_qdisc_ops);
2047 unregister_netdevice_notifier(&taprio_device_notifier);
2048}
2049
2050module_init(taprio_module_init);
2051module_exit(taprio_module_exit);
2052MODULE_LICENSE("GPL");
2053