// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 *
 */

#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
#include <net/sock.h>
#include <net/tcp.h>

static LIST_HEAD(taprio_list);
static DEFINE_SPINLOCK(taprio_list_lock);

#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX

struct sched_entry {
	struct list_head list;

	/* The instant that this entry "closes" and the next one
	 * should open, the qdisc will make some effort so that no
	 * packet leaves after this time.
	 */
	ktime_t close_time;
	ktime_t next_txtime;
	atomic_t budget;
	int index;
	u32 gate_mask;
	u32 interval;
	u8 command;
};

struct sched_gate_list {
	struct rcu_head rcu;
	struct list_head entries;
	size_t num_entries;
	ktime_t cycle_close_time;
	s64 cycle_time;
	s64 cycle_time_extension;
	s64 base_time;
};

struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	u32 flags;
	enum tk_offsets tk_offset;
	int clockid;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds it's sub-nanoseconds per byte
				    */

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct sched_gate_list __rcu *oper_sched;
	struct sched_gate_list __rcu *admin_sched;
	struct hrtimer advance_timer;
	struct list_head taprio_list;
	struct sk_buff *(*dequeue)(struct Qdisc *sch);
	struct sk_buff *(*peek)(struct Qdisc *sch);
	u32 txtime_delay;
};

struct __tc_taprio_qopt_offload {
	refcount_t users;
	struct tc_taprio_qopt_offload offload;
};

static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
	if (!sched)
		return KTIME_MAX;

	return ns_to_ktime(sched->base_time);
}

static ktime_t taprio_get_time(struct taprio_sched *q)
{
	ktime_t mono = ktime_get();

	switch (q->tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, q->tk_offset);
	}

	return KTIME_MAX;
}

static void taprio_free_sched_cb(struct rcu_head *head)
{
	struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
	struct sched_entry *entry, *n;

	list_for_each_entry_safe(entry, n, &sched->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	kfree(sched);
}

static void switch_schedules(struct taprio_sched *q,
			     struct sched_gate_list **admin,
			     struct sched_gate_list **oper)
{
	rcu_assign_pointer(q->oper_sched, *admin);
	rcu_assign_pointer(q->admin_sched, NULL);

	if (*oper)
		call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

	*oper = *admin;
	*admin = NULL;
}

/* Get how much time has been already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
	ktime_t time_since_sched_start;
	s32 time_elapsed;

	time_since_sched_start = ktime_sub(time, sched->base_time);
	div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

	return time_elapsed;
}

static ktime_t get_interval_end_time(struct sched_gate_list *sched,
				     struct sched_gate_list *admin,
				     struct sched_entry *entry,
				     ktime_t intv_start)
{
	s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
	ktime_t intv_end, cycle_ext_end, cycle_end;

	cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
	intv_end = ktime_add_ns(intv_start, entry->interval);
	cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

	if (ktime_before(intv_end, cycle_end))
		return intv_end;
	else if (admin && admin != sched &&
		 ktime_after(admin->base_time, cycle_end) &&
		 ktime_before(admin->base_time, cycle_ext_end))
		return admin->base_time;
	else
		return cycle_end;
}

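/* Duration, in nanoseconds, needed to transmit 'len' bytes at the current
 * link speed: len * picos_per_byte / 1000. As a worked example, at 1Gbps
 * picos_per_byte is 8000, so a 1500 byte frame takes
 * 1500 * 8000 / 1000 = 12000 ns (12 us) on the wire.
 */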
static int length_to_duration(struct taprio_sched *q, int len)
{
	return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
}

/* Returns the entry corresponding to next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
						  struct Qdisc *sch,
						  struct sched_gate_list *sched,
						  struct sched_gate_list *admin,
						  ktime_t time,
						  ktime_t *interval_start,
						  ktime_t *interval_end,
						  bool validate_interval)
{
	ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
	ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
	struct sched_entry *entry = NULL, *entry_found = NULL;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	bool entry_available = false;
	s32 cycle_elapsed;
	int tc, n;

	tc = netdev_get_prio_tc_map(dev, skb->priority);
	packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

	*interval_start = 0;
	*interval_end = 0;

	if (!sched)
		return NULL;

	cycle = sched->cycle_time;
	cycle_elapsed = get_cycle_time_elapsed(sched, time);
	curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
	cycle_end = ktime_add_ns(curr_intv_end, cycle);

	list_for_each_entry(entry, &sched->entries, list) {
		curr_intv_start = curr_intv_end;
		curr_intv_end = get_interval_end_time(sched, admin, entry,
						      curr_intv_start);

		if (ktime_after(curr_intv_start, cycle_end))
			break;

		if (!(entry->gate_mask & BIT(tc)) ||
		    packet_transmit_time > entry->interval)
			continue;

		txtime = entry->next_txtime;

		if (ktime_before(txtime, time) || validate_interval) {
			transmit_end_time = ktime_add_ns(time, packet_transmit_time);
			if ((ktime_before(curr_intv_start, time) &&
			     ktime_before(transmit_end_time, curr_intv_end)) ||
			    (ktime_after(curr_intv_start, time) && !validate_interval)) {
				entry_found = entry;
				*interval_start = curr_intv_start;
				*interval_end = curr_intv_end;
				break;
			} else if (!entry_available && !validate_interval) {
				/* Here, we are just trying to find out the
				 * smallest available interval in the
				 * relevant cycle.
				 */
				entry_available = true;
				entry_found = entry;
				*interval_start = ktime_add_ns(curr_intv_start, cycle);
				*interval_end = ktime_add_ns(curr_intv_end, cycle);
			}
		} else if (ktime_before(txtime, earliest_txtime) &&
			   !entry_available) {
			earliest_txtime = txtime;
			entry_found = entry;
			n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
			*interval_start = ktime_add(curr_intv_start, n * cycle);
			*interval_end = ktime_add(curr_intv_end, n * cycle);
		}
	}

	return entry_found;
}

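/* For sockets that requested SO_TXTIME based transmission, the user supplied
 * skb->tstamp must fall inside an interval where the gate for the packet's
 * traffic class is open, and the transmission must finish before that
 * interval closes; otherwise the packet is dropped on enqueue.
 */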
static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t interval_start, interval_end;
	struct sched_entry *entry;

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
				       &interval_start, &interval_end, true);
	rcu_read_unlock();

	return entry;
}

static bool taprio_flags_valid(u32 flags)
{
	/* Make sure no other flag bits are set. */
	if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
		      TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	/* txtime-assist and full offload are mutually exclusive */
	if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
	    (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	return true;
}

/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
	unsigned int offset = skb_network_offset(skb);
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	struct ipv6hdr _ipv6h;

	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
	if (!ipv6h)
		return 0;

	if (ipv6h->version == 4) {
		iph = (struct iphdr *)ipv6h;
		offset += iph->ihl * 4;

		/* special-case 6in4 tunnelling, as that is a common way to get
		 * v6 connectivity in the home
		 */
		if (iph->protocol == IPPROTO_IPV6) {
			ipv6h = skb_header_pointer(skb, offset,
						   sizeof(_ipv6h), &_ipv6h);

			if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
				return 0;
		} else if (iph->protocol != IPPROTO_TCP) {
			return 0;
		}
	} else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
		return 0;
	}

	return ktime_mono_to_any(skb->skb_mstamp_ns, q->tk_offset);
}

/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and packet can be
 *       transmitted before it closes, schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open later
 *       in the cycle, set the txtime of packet to the interval start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static ktime_t get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
	ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t minimum_time, now, txtime;
	int len, packet_transmit_time;
	struct sched_entry *entry;
	bool sched_changed;

	now = taprio_get_time(q);
	minimum_time = ktime_add_ns(now, q->txtime_delay);

	tcp_tstamp = get_tcp_tstamp(q, skb);
	minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

	rcu_read_lock();
	admin = rcu_dereference(q->admin_sched);
	sched = rcu_dereference(q->oper_sched);
	if (admin && ktime_after(minimum_time, admin->base_time))
		switch_schedules(q, &admin, &sched);

	/* Until the schedule starts, all the queues are open */
	if (!sched || ktime_before(minimum_time, sched->base_time)) {
		txtime = minimum_time;
		goto done;
	}

	len = qdisc_pkt_len(skb);
	packet_transmit_time = length_to_duration(q, len);

	do {
		sched_changed = false;

		entry = find_entry_to_transmit(skb, sch, sched, admin,
					       minimum_time,
					       &interval_start, &interval_end,
					       false);
		if (!entry) {
			txtime = 0;
			goto done;
		}

		txtime = entry->next_txtime;
		txtime = max_t(ktime_t, txtime, minimum_time);
		txtime = max_t(ktime_t, txtime, interval_start);

		if (admin && admin != sched &&
		    ktime_after(txtime, admin->base_time)) {
			sched = admin;
			sched_changed = true;
			continue;
		}

		transmit_end_time = ktime_add(txtime, packet_transmit_time);
		minimum_time = transmit_end_time;

		/* Update the txtime of current entry to the next time its
		 * interval starts.
		 */
		if (ktime_after(transmit_end_time, interval_end))
			entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
	} while (sched_changed || ktime_after(transmit_end_time, interval_end));

	entry->next_txtime = transmit_end_time;

done:
	rcu_read_unlock();
	return txtime;
}

static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
			      struct Qdisc *child, struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);

	if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
		if (!is_valid_interval(skb, sch))
			return qdisc_drop(skb, sch, to_free);
	} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		skb->tstamp = get_packet_txtime(skb, sch);
		if (!skb->tstamp)
			return qdisc_drop(skb, sch, to_free);
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return qdisc_enqueue(skb, child, to_free);
}

static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct Qdisc *child;
	int queue;

	if (unlikely(FULL_OFFLOAD_IS_ENABLED(q->flags))) {
		WARN_ONCE(1, "Trying to enqueue skb into the root of a taprio qdisc configured with full offload\n");
		return qdisc_drop(skb, sch, to_free);
	}

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	/* Large packets might not be transmitted when the transmission duration
	 * exceeds any configured interval. Therefore, segment the skb into
	 * smaller chunks. Skip it for the full offload case, as the driver
	 * and/or the hardware is expected to handle this.
	 */
	if (skb_is_gso(skb) && !FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
		netdev_features_t features = netif_skb_features(skb);
		struct sk_buff *segs, *nskb;
		int ret;

		segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
		if (IS_ERR_OR_NULL(segs))
			return qdisc_drop(skb, sch, to_free);

		skb_list_walk_safe(segs, segs, nskb) {
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			slen += segs->len;

			ret = taprio_enqueue_one(segs, sch, child, to_free);
			if (ret != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(ret))
					qdisc_qstats_drop(sch);
			} else {
				numsegs++;
			}
		}

		if (numsegs > 1)
			qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
		consume_skb(skb);

		return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
	}

	return taprio_enqueue_one(skb, sch, child, to_free);
}

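/* In software mode, peek scans all TX queues and returns the first skb whose
 * traffic class has its gate currently open (or any skb when no schedule has
 * started yet, since then all gates are treated as open). With txtime-assist,
 * gating is enforced via skb->tstamp instead, so the first available skb is
 * returned as-is.
 */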
static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_entry *entry;
	struct sk_buff *skb;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	rcu_read_unlock();

	if (!gate_mask)
		return NULL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		int prio;
		u8 tc;

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags))
			return skb;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;

		return skb;
	}

	return NULL;
}

static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
{
	WARN_ONCE(1, "Trying to peek into the root of a taprio qdisc configured with full offload\n");

	return NULL;
}

static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->peek(sch);
}

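/* The budget is how many bytes fit in this entry's interval at the current
 * link speed: interval [ns] * 1000 / picos_per_byte. For example, a 100 us
 * interval at 1Gbps (picos_per_byte == 8000) allows
 * 100000 * 1000 / 8000 = 12500 bytes before the gate closes.
 */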
static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
	atomic_set(&entry->budget,
		   div64_u64((u64)entry->interval * 1000,
			     atomic64_read(&q->picos_per_byte)));
}

static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb = NULL;
	struct sched_entry *entry;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	/* if there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open, this is in
	 * accordance to IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 * "AdminGateStates"
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;

	if (!gate_mask)
		goto done;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		ktime_t guard;
		int prio;
		int len;
		u8 tc;

		if (unlikely(!child))
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			skb = child->ops->dequeue(child);
			if (!skb)
				continue;
			goto skb_found;
		}

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc))) {
			skb = NULL;
			continue;
		}

		len = qdisc_pkt_len(skb);
		guard = ktime_add_ns(taprio_get_time(q),
				     length_to_duration(q, len));

		/* In the case that there's no gate entry, there's no
		 * guard band ...
		 */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    ktime_after(guard, entry->close_time)) {
			skb = NULL;
			continue;
		}

		/* ... and no budget. */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    atomic_sub_return(len, &entry->budget) < 0) {
			skb = NULL;
			continue;
		}

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			goto done;

skb_found:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		goto done;
	}

done:
	rcu_read_unlock();

	return skb;
}

static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
{
	WARN_ONCE(1, "Trying to dequeue from the root of a taprio qdisc configured with full offload\n");

	return NULL;
}

static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->dequeue(sch);
}

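/* The cycle wraps when the current entry is the last one in the list, or
 * earlier, when cycle_time truncates the schedule and the entry's close_time
 * already coincides with the end of the cycle.
 */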
static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{
	if (list_is_last(&entry->list, &oper->entries))
		return true;

	if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
		return true;

	return false;
}

static bool should_change_schedules(const struct sched_gate_list *admin,
				    const struct sched_gate_list *oper,
				    ktime_t close_time)
{
	ktime_t next_base_time, extension_time;

	if (!admin)
		return false;

	next_base_time = sched_base_time(admin);

	/* This is the simple case, the close_time would fall after
	 * the next schedule base_time.
	 */
	if (ktime_compare(next_base_time, close_time) <= 0)
		return true;

	/* This is the cycle_time_extension case, if the close_time
	 * plus the amount that can be extended would fall after the
	 * next schedule base_time, we can extend the current schedule
	 * for that amount.
	 */
	extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);

	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
	 * how precisely the extension should be made. So after
	 * conformance testing, this logic may change.
	 */
	if (ktime_compare(next_base_time, extension_time) <= 0)
		return true;

	return false;
}

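/* hrtimer callback that advances the schedule: it computes the close_time of
 * the next entry, switches from the operational to the admin schedule when
 * due, publishes the new current_entry under RCU, and rearms itself for the
 * next gate change before kicking the qdisc to transmit.
 */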
static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct sched_gate_list *oper, *admin;
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;
	ktime_t close_time;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));
	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	if (!oper)
		switch_schedules(q, &admin, &oper);

	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. The previous schedule just ended. The first
	 * entry of all schedules are pre-calculated during the
	 * schedule initialization.
	 */
	if (unlikely(!entry || entry->close_time == oper->base_time)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		close_time = next->close_time;
		goto first_run;
	}

	if (should_restart_cycle(oper, entry)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
						      oper->cycle_time);
	} else {
		next = list_next_entry(entry, list);
	}

	close_time = ktime_add_ns(entry->close_time, next->interval);
	close_time = min_t(ktime_t, close_time, oper->cycle_close_time);

	if (should_change_schedules(admin, oper, close_time)) {
		/* Set things so the next time this runs, the new
		 * schedule runs.
		 */
		close_time = sched_base_time(admin);
		switch_schedules(q, &admin, &oper);
	}

	next->close_time = close_time;
	taprio_set_budget(q, next);

first_run:
	rcu_assign_pointer(q->current_entry, next);
	spin_unlock(&q->current_entry_lock);

	hrtimer_set_expires(&q->advance_timer, close_time);

	rcu_read_lock();
	__netif_schedule(sch);
	rcu_read_unlock();

	return HRTIMER_RESTART;
}

static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY_INDEX] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_CMD] = { .type = NLA_U8 },
	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL] = { .type = NLA_U32 },
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
	[TCA_TAPRIO_ATTR_PRIOMAP] = {
		.len = sizeof(struct tc_mqprio_qopt)
	},
	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST] = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME] = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 },
	[TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 },
};

static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
			    struct sched_entry *entry,
			    struct netlink_ext_ack *extack)
{
	int min_duration = length_to_duration(q, ETH_ZLEN);
	u32 interval = 0;

	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
		entry->command = nla_get_u8(
			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
		entry->gate_mask = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
		interval = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

	/* The interval should allow at least the minimum ethernet
	 * frame to go out.
	 */
	if (interval < min_duration) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	return 0;
}

static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
			     struct sched_entry *entry, int index,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
					  entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_sched_entry(q, tb, entry, extack);
}

static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
			    struct sched_gate_list *sched,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list)
		return -EINVAL;

	nla_for_each_nested(n, list, rem) {
		struct sched_entry *entry;

		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			return -ENOMEM;
		}

		err = parse_sched_entry(q, n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			return err;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;
}

static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
				 struct sched_gate_list *new,
				 struct netlink_ext_ack *extack)
{
	int err = 0;

	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
		NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
		return -ENOTSUPP;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
		err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
				       new, extack);
	if (err < 0)
		return err;

	if (!new->cycle_time) {
		struct sched_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &new->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);

		if (!cycle) {
			NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
			return -EINVAL;
		}

		new->cycle_time = cycle;
	}

	return 0;
}

static int taprio_parse_mqprio_opt(struct net_device *dev,
				   struct tc_mqprio_qopt *qopt,
				   struct netlink_ext_ack *extack,
				   u32 taprio_flags)
{
	int i, j;

	if (!qopt && !dev->num_tc) {
		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
		return -EINVAL;
	}

	/* If num_tc is already set, it means that the user already
	 * configured the mqprio part
	 */
	if (dev->num_tc)
		return 0;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
		return -EINVAL;
	}

	/* taprio imposes that traffic classes map 1:n to tx queues */
	if (qopt->num_tc > dev->num_tx_queues) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
		return -EINVAL;
	}

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i <= TC_BITMASK; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
			NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
			return -EINVAL;
		}
	}

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in tx range being equal to the
		 * real_num_tx_queues indicates the last queue is in use.
		 */
		if (qopt->offset[i] >= dev->num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues) {
			NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
			return -EINVAL;
		}

		if (TXTIME_ASSIST_IS_ENABLED(taprio_flags))
			continue;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j]) {
				NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
				return -EINVAL;
			}
		}
	}

	return 0;
}

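/* Computes when the new schedule should start: the base_time itself when it
 * is still in the future, otherwise the beginning of the next full cycle,
 * i.e. base_time + (n + 1) * cycle_time, where n is the index of the cycle
 * that 'now' falls into.
 */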
static int taprio_get_start_time(struct Qdisc *sch,
				 struct sched_gate_list *sched,
				 ktime_t *start)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t now, base, cycle;
	s64 n;

	base = sched_base_time(sched);
	now = taprio_get_time(q);

	if (ktime_after(base, now)) {
		*start = base;
		return 0;
	}

	cycle = sched->cycle_time;

	/* The qdisc is expected to have at least one sched_entry.  Moreover,
	 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
	 * something went really wrong. In that case, we should warn about this
	 * inconsistent state and return error.
	 */
	if (WARN_ON(!cycle))
		return -EFAULT;

	/* Schedule the start time for the beginning of the next
	 * cycle.
	 */
	n = div64_s64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
	return 0;
}

static void setup_first_close_time(struct taprio_sched *q,
				   struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *first;
	ktime_t cycle;

	first = list_first_entry(&sched->entries,
				 struct sched_entry, list);

	cycle = sched->cycle_time;

	/* FIXME: find a better place to do this */
	sched->cycle_close_time = ktime_add_ns(base, cycle);

	first->close_time = ktime_add_ns(base, first->interval);
	taprio_set_budget(q, first);
	rcu_assign_pointer(q->current_entry, NULL);
}

static void taprio_start_sched(struct Qdisc *sch,
			       ktime_t start, struct sched_gate_list *new)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t expires;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		return;

	expires = hrtimer_get_expires(&q->advance_timer);
	if (expires == 0)
		expires = KTIME_MAX;

	/* If the new schedule starts before the next expiration, we
	 * reprogram it to the earliest one, so we change the admin
	 * schedule to the operational one at the right time.
	 */
	start = min_t(ktime_t, start, expires);

	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}

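/* Transmission time bookkeeping is done in picoseconds per byte so that
 * multi-gigabit speeds (sub-nanosecond per byte) don't round to zero. With
 * 'speed' in Mbps, picos_per_byte = 8 * 10^6 / speed; e.g. 1000 Mbps gives
 * 8000 ps/byte and 10000 Mbps gives 800 ps/byte.
 */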
static void taprio_set_picos_per_byte(struct net_device *dev,
				      struct taprio_sched *q)
{
	struct ethtool_link_ksettings ecmd;
	int speed = SPEED_10;
	int picos_per_byte;
	int err;

	err = __ethtool_get_link_ksettings(dev, &ecmd);
	if (err < 0)
		goto skip;

	if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
		speed = ecmd.base.speed;

skip:
	picos_per_byte = (USEC_PER_SEC * 8) / speed;

	atomic64_set(&q->picos_per_byte, picos_per_byte);
	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
		   dev->name, (long long)atomic64_read(&q->picos_per_byte),
		   ecmd.base.speed);
}

static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net_device *qdev;
	struct taprio_sched *q;
	bool found = false;

	ASSERT_RTNL();

	if (event != NETDEV_UP && event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	spin_lock(&taprio_list_lock);
	list_for_each_entry(q, &taprio_list, taprio_list) {
		qdev = qdisc_dev(q->root);
		if (qdev == dev) {
			found = true;
			break;
		}
	}
	spin_unlock(&taprio_list_lock);

	if (found)
		taprio_set_picos_per_byte(dev, q);

	return NOTIFY_DONE;
}

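/* Seed each entry's next_txtime with its first occurrence relative to 'base':
 * the cumulative sum of the intervals of all preceding entries. The
 * txtime-assist path then keeps rolling these values forward cycle by cycle.
 */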
static void setup_txtime(struct taprio_sched *q,
			 struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *entry;
	u32 interval = 0;

	list_for_each_entry(entry, &sched->entries, list) {
		entry->next_txtime = ktime_add_ns(base, interval);
		interval += entry->interval;
	}
}

static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = kzalloc(struct_size(__offload, offload.entries, num_entries),
			    GFP_KERNEL);
	if (!__offload)
		return NULL;

	refcount_set(&__offload->users, 1);

	return &__offload->offload;
}

struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	refcount_inc(&__offload->users);

	return offload;
}
EXPORT_SYMBOL_GPL(taprio_offload_get);

void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	if (!refcount_dec_and_test(&__offload->users))
		return;

	kfree(__offload);
}
EXPORT_SYMBOL_GPL(taprio_offload_free);

/* The function will only serve to keep the pointers to the "oper" and "admin"
 * schedules valid in relation to their base times, so when calling dump() the
 * users looks at the right schedules. In full offload mode the hardware
 * advances the schedules on its own, so there is no hrtimer doing the switch;
 * this is called after a configuration change to promote the new admin
 * schedule to operational.
 */
static void taprio_offload_config_changed(struct taprio_sched *q)
{
	struct sched_gate_list *oper, *admin;

	spin_lock(&q->current_entry_lock);

	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	switch_schedules(q, &admin, &oper);

	spin_unlock(&q->current_entry_lock);
}

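/* Convert a bitmask of traffic classes into a bitmask of TX queues, using the
 * device's tc_to_txq map. E.g. with TC0 -> queue {0} and TC1 -> queues {1,2},
 * a tc_mask of 0x2 (TC1 only) becomes a queue_mask of 0x6. This is the form
 * that the offload interface expects in place of the netlink gate_mask.
 */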
static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
{
	u32 i, queue_mask = 0;

	for (i = 0; i < dev->num_tc; i++) {
		u32 offset, count;

		if (!(tc_mask & BIT(i)))
			continue;

		offset = dev->tc_to_txq[i].offset;
		count = dev->tc_to_txq[i].count;

		queue_mask |= GENMASK(offset + count - 1, offset);
	}

	return queue_mask;
}

static void taprio_sched_to_offload(struct net_device *dev,
				    struct sched_gate_list *sched,
				    struct tc_taprio_qopt_offload *offload)
{
	struct sched_entry *entry;
	int i = 0;

	offload->base_time = sched->base_time;
	offload->cycle_time = sched->cycle_time;
	offload->cycle_time_extension = sched->cycle_time_extension;

	list_for_each_entry(entry, &sched->entries, list) {
		struct tc_taprio_sched_entry *e = &offload->entries[i];

		e->command = entry->command;
		e->interval = entry->interval;
		e->gate_mask = tc_map_to_queue_mask(dev, entry->gate_mask);

		i++;
	}

	offload->num_entries = i;
}

static int taprio_enable_offload(struct net_device *dev,
				 struct taprio_sched *q,
				 struct sched_gate_list *sched,
				 struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err = 0;

	if (!ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack,
			       "Device does not support taprio offload");
		return -EOPNOTSUPP;
	}

	offload = taprio_offload_alloc(sched->num_entries);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory for enabling offload mode");
		return -ENOMEM;
	}
	offload->enable = 1;
	taprio_sched_to_offload(dev, sched, offload);

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to setup taprio offload");
		goto done;
	}

done:
	taprio_offload_free(offload);

	return err;
}

static int taprio_disable_offload(struct net_device *dev,
				  struct taprio_sched *q,
				  struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
		return 0;

	if (!ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	offload = taprio_offload_alloc(0);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory to disable offload mode");
		return -ENOMEM;
	}
	offload->enable = 0;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to disable offload");
		goto out;
	}

out:
	taprio_offload_free(offload);

	return err;
}

/* Validate and apply the clockid configuration. Full offload uses the
 * device's PTP clock, so an explicit 'clockid' is rejected and the device
 * must expose a PHC. Otherwise a 'clockid' is mandatory, must be one of the
 * static clock bases, and cannot be changed once the qdisc is running.
 */
static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err = -EINVAL;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		const struct ethtool_ops *ops = dev->ethtool_ops;
		struct ethtool_ts_info info = {
			.cmd = ETHTOOL_GET_TS_INFO,
			.phc_index = -1,
		};

		if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
			NL_SET_ERR_MSG(extack,
				       "The 'clockid' cannot be specified for full offload");
			goto out;
		}

		if (ops && ops->get_ts_info)
			err = ops->get_ts_info(dev, &info);

		if (err || info.phc_index < 0) {
			NL_SET_ERR_MSG(extack,
				       "Device does not have a PTP clock");
			err = -ENOTSUPP;
			goto out;
		}
	} else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);

		/* We only support static clockids and we don't allow
		 * for it to be modified after the first init.
		 */
		if (clockid < 0 ||
		    (q->clockid != -1 && q->clockid != clockid)) {
			NL_SET_ERR_MSG(extack,
				       "Changing the 'clockid' of a running schedule is not supported");
			err = -ENOTSUPP;
			goto out;
		}

		switch (clockid) {
		case CLOCK_REALTIME:
			q->tk_offset = TK_OFFS_REAL;
			break;
		case CLOCK_MONOTONIC:
			q->tk_offset = TK_OFFS_MAX;
			break;
		case CLOCK_BOOTTIME:
			q->tk_offset = TK_OFFS_BOOT;
			break;
		case CLOCK_TAI:
			q->tk_offset = TK_OFFS_TAI;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
			err = -EINVAL;
			goto out;
		}

		q->clockid = clockid;
	} else {
		NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
		goto out;
	}

	/* Everything went ok, return success. */
	err = 0;

out:
	return err;
}

static int taprio_mqprio_cmp(const struct net_device *dev,
			     const struct tc_mqprio_qopt *mqprio)
{
	int i;

	if (!mqprio || mqprio->num_tc != dev->num_tc)
		return -1;

	for (i = 0; i < mqprio->num_tc; i++)
		if (dev->tc_to_txq[i].count != mqprio->count[i] ||
		    dev->tc_to_txq[i].offset != mqprio->offset[i])
			return -1;

	for (i = 0; i <= TC_BITMASK; i++)
		if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
			return -1;

	return 0;
}

/* The semantics of the 'flags' argument in relation to 'change()'
 * requests, are interpreted following two rules (which are applied in
 * this order): (1) an omitted 'flags' argument is interpreted as
 * zero; (2) the 'flags' of a "running" taprio instance cannot be
 * changed.
 */
static int taprio_new_flags(const struct nlattr *attr, u32 old,
			    struct netlink_ext_ack *extack)
{
	u32 new = 0;

	if (attr)
		new = nla_get_u32(attr);

	if (old != TAPRIO_FLAGS_INVALID && old != new) {
		NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
		return -EOPNOTSUPP;
	}

	if (!taprio_flags_valid(new)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
		return -EINVAL;
	}

	return new;
}

static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
	struct sched_gate_list *oper, *admin, *new_admin;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mqprio_qopt *mqprio = NULL;
	unsigned long flags;
	ktime_t start;
	int i, err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
					  taprio_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
		mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);

	err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
			       q->flags, extack);
	if (err < 0)
		return err;

	q->flags = err;

	err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
	if (err < 0)
		return err;

	new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
	if (!new_admin) {
		NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&new_admin->entries);

	rcu_read_lock();
	oper = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);
	rcu_read_unlock();

	/* no changes - no new mqprio settings */
	if (!taprio_mqprio_cmp(dev, mqprio))
		mqprio = NULL;

	if (mqprio && (oper || admin)) {
		NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
		err = -ENOTSUPP;
		goto free_sched;
	}

	err = parse_taprio_schedule(q, tb, new_admin, extack);
	if (err < 0)
		goto free_sched;

	if (new_admin->num_entries == 0) {
		NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
		err = -EINVAL;
		goto free_sched;
	}

	err = taprio_parse_clockid(sch, tb, extack);
	if (err < 0)
		goto free_sched;

	taprio_set_picos_per_byte(dev, q);

	if (mqprio) {
		err = netdev_set_num_tc(dev, mqprio->num_tc);
		if (err)
			goto free_sched;
		for (i = 0; i < mqprio->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    mqprio->count[i],
					    mqprio->offset[i]);

		/* Always use supplied priority mappings */
		for (i = 0; i <= TC_BITMASK; i++)
			netdev_set_prio_tc_map(dev, i,
					       mqprio->prio_tc_map[i]);
	}

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		err = taprio_enable_offload(dev, q, new_admin, extack);
	else
		err = taprio_disable_offload(dev, q, extack);
	if (err)
		goto free_sched;

	/* Protects against enqueue()/dequeue() */
	spin_lock_bh(qdisc_lock(sch));

	if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
		if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
			err = -EINVAL;
			goto unlock;
		}

		q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
	}

	if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
	    !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    !hrtimer_active(&q->advance_timer)) {
		hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
		q->advance_timer.function = advance_sched;
	}

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		q->dequeue = taprio_dequeue_offload;
		q->peek = taprio_peek_offload;
	} else {
		/* Be sure to always keep the function pointers
		 * in a consistent state.
		 */
		q->dequeue = taprio_dequeue_soft;
		q->peek = taprio_peek_soft;
	}

	err = taprio_get_start_time(sch, new_admin, &start);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
		goto unlock;
	}

	setup_txtime(q, new_admin, start);

	if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		if (!oper) {
			rcu_assign_pointer(q->oper_sched, new_admin);
			err = 0;
			new_admin = NULL;
			goto unlock;
		}

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);
	} else {
		setup_first_close_time(q, new_admin, start);

		/* Protects against advance_sched() */
		spin_lock_irqsave(&q->current_entry_lock, flags);

		taprio_start_sched(sch, start, new_admin);

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);

		spin_unlock_irqrestore(&q->current_entry_lock, flags);

		if (FULL_OFFLOAD_IS_ENABLED(q->flags))
			taprio_offload_config_changed(q);
	}

	new_admin = NULL;
	err = 0;

unlock:
	spin_unlock_bh(qdisc_lock(sch));

free_sched:
	if (new_admin)
		call_rcu(&new_admin->rcu, taprio_free_sched_cb);

	return err;
}

static void taprio_reset(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i;

	hrtimer_cancel(&q->advance_timer);
	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues; i++)
			if (q->qdiscs[i])
				qdisc_reset(q->qdiscs[i]);
	}
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void taprio_destroy(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	unsigned int i;

	spin_lock(&taprio_list_lock);
	list_del(&q->taprio_list);
	spin_unlock(&taprio_list_lock);

	/* Note that taprio_reset() might not be called if an error
	 * happens in qdisc_create(), after taprio_init() has been called.
	 */
	hrtimer_cancel(&q->advance_timer);

	taprio_disable_offload(dev, q, NULL);

	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues; i++)
			qdisc_put(q->qdiscs[i]);

		kfree(q->qdiscs);
	}
	q->qdiscs = NULL;

	netdev_reset_tc(dev);

	if (q->oper_sched)
		call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);

	if (q->admin_sched)
		call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb);
}

static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i;

	spin_lock_init(&q->current_entry_lock);

	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
	q->advance_timer.function = advance_sched;

	q->dequeue = taprio_dequeue_soft;
	q->peek = taprio_peek_soft;

	q->root = sch;

	/* We only support static clockids. Use an invalid value as default
	 * and get the valid one on taprio_change().
	 */
	q->clockid = -1;
	q->flags = TAPRIO_FLAGS_INVALID;

	spin_lock(&taprio_list_lock);
	list_add(&q->taprio_list, &taprio_list);
	spin_unlock(&taprio_list_lock);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdisc, attachment can't fail */
	q->qdiscs = kcalloc(dev->num_tx_queues,
			    sizeof(q->qdiscs[0]),
			    GFP_KERNEL);

	if (!q->qdiscs)
		return -ENOMEM;

	if (!opt)
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		struct Qdisc *qdisc;

		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;

		if (i < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);

		q->qdiscs[i] = qdisc;
	}

	return taprio_change(sch, opt, extack);
}

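/* In full offload mode each pre-allocated child qdisc is grafted directly to
 * its TX queue and the hardware does the scheduling, so the root
 * enqueue/dequeue must never run; in software mode the root qdisc itself is
 * grafted to every queue so all packets pass through taprio's gate logic.
 */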
static void taprio_attach(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		struct Qdisc *qdisc = q->qdiscs[ntx];
		struct Qdisc *old;

		if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
			qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
			old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		} else {
			old = dev_graft_qdisc(qdisc->dev_queue, sch);
			qdisc_refcount_inc(sch);
		}
		if (old)
			qdisc_put(old);
	}

	/* access to the child qdiscs is not needed in offload mode */
	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		kfree(q->qdiscs);
		q->qdiscs = NULL;
	}
}

static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return netdev_get_tx_queue(dev, ntx);
}

static int taprio_graft(struct Qdisc *sch, unsigned long cl,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		*old = dev_graft_qdisc(dev_queue, new);
	} else {
		*old = q->qdiscs[cl - 1];
		q->qdiscs[cl - 1] = new;
	}

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

static int dump_entry(struct sk_buff *msg,
		      const struct sched_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
			entry->gate_mask))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
			entry->interval))
		goto nla_put_failure;

	return nla_nest_end(msg, item);

nla_put_failure:
	nla_nest_cancel(msg, item);
	return -1;
}

static int dump_schedule(struct sk_buff *msg,
			 const struct sched_gate_list *root)
{
	struct nlattr *entry_list;
	struct sched_entry *entry;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
			root->base_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
			root->cycle_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
			root->cycle_time_extension, TCA_TAPRIO_PAD))
		return -1;

	entry_list = nla_nest_start_noflag(msg,
					   TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
	if (!entry_list)
		goto error_nest;

	list_for_each_entry(entry, &root->entries, list) {
		if (dump_entry(msg, entry) < 0)
			goto error_nest;
	}

	nla_nest_end(msg, entry_list);
	return 0;

error_nest:
	nla_nest_cancel(msg, entry_list);
	return -1;
}

static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *oper, *admin;
	struct tc_mqprio_qopt opt = { 0 };
	struct nlattr *nest, *sched_nest;
	unsigned int i;

	rcu_read_lock();
	oper = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto start_error;

	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
		goto options_error;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
		goto options_error;

	if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
		goto options_error;

	if (q->txtime_delay &&
	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
		goto options_error;

	if (oper && dump_schedule(skb, oper))
		goto options_error;

	if (!admin)
		goto done;

	sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
	if (!sched_nest)
		goto options_error;

	if (dump_schedule(skb, admin))
		goto admin_error;

	nla_nest_end(skb, sched_nest);

done:
	rcu_read_unlock();

	return nla_nest_end(skb, nest);

admin_error:
	nla_nest_cancel(skb, sched_nest);

options_error:
	nla_nest_cancel(skb, nest);

start_error:
	rcu_read_unlock();
	return -ENOSPC;
}

static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!taprio_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;

	return 0;
}

static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    qdisc_qstats_copy(d, sch) < 0)
		return -1;
	return 0;
}

static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops taprio_class_ops = {
	.graft = taprio_graft,
	.leaf = taprio_leaf,
	.find = taprio_find,
	.walk = taprio_walk,
	.dump = taprio_dump_class,
	.dump_stats = taprio_dump_class_stats,
	.select_queue = taprio_select_queue,
};

static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
	.cl_ops = &taprio_class_ops,
	.id = "taprio",
	.priv_size = sizeof(struct taprio_sched),
	.init = taprio_init,
	.change = taprio_change,
	.destroy = taprio_destroy,
	.reset = taprio_reset,
	.attach = taprio_attach,
	.peek = taprio_peek,
	.dequeue = taprio_dequeue,
	.enqueue = taprio_enqueue,
	.dump = taprio_dump,
	.owner = THIS_MODULE,
};

static struct notifier_block taprio_device_notifier = {
	.notifier_call = taprio_dev_notifier,
};

static int __init taprio_module_init(void)
{
	int err = register_netdevice_notifier(&taprio_device_notifier);

	if (err)
		return err;

	return register_qdisc(&taprio_qdisc_ops);
}

static void __exit taprio_module_exit(void)
{
	unregister_qdisc(&taprio_qdisc_ops);
	unregister_netdevice_notifier(&taprio_device_notifier);
}

module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");