/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/if_macvlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <trace/events/qdisc.h>
#include <net/xfrm.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */
static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
{
	const struct netdev_queue *txq = q->dev_queue;
	spinlock_t *lock = NULL;
	struct sk_buff *skb;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	skb = skb_peek(&q->skb_bad_txq);
	if (skb) {
		/* check the reason of requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->skb_bad_txq);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = NULL;
		}
	}

	if (lock)
		spin_unlock(lock);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
{
	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);

	if (unlikely(skb))
		skb = __skb_dequeue_bad_txq(q);

	return skb;
}

static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
					     struct sk_buff *skb)
{
	spinlock_t *lock = NULL;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	__skb_queue_tail(&q->skb_bad_txq, skb);

	if (qdisc_is_percpu_stats(q)) {
		qdisc_qstats_cpu_backlog_inc(q, skb);
		qdisc_qstats_cpu_qlen_inc(q);
	} else {
		qdisc_qstats_backlog_inc(q, skb);
		q->q.qlen++;
	}

	if (lock)
		spin_unlock(lock);
}

static inline int __dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	while (skb) {
		struct sk_buff *next = skb->next;

		__skb_queue_tail(&q->gso_skb, skb);
		q->qstats.requeues++;
		qdisc_qstats_backlog_inc(q, skb);
		q->q.qlen++;	/* it's still part of the queue */

		skb = next;
	}
	__netif_schedule(q);

	return 0;
}

static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
{
	spinlock_t *lock = qdisc_lock(q);

	spin_lock(lock);
	while (skb) {
		struct sk_buff *next = skb->next;

		__skb_queue_tail(&q->gso_skb, skb);

		qdisc_qstats_cpu_requeues_inc(q);
		qdisc_qstats_cpu_backlog_inc(q, skb);
		qdisc_qstats_cpu_qlen_inc(q);

		skb = next;
	}
	spin_unlock(lock);

	__netif_schedule(q);

	return 0;
}

static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	if (q->flags & TCQ_F_NOLOCK)
		return dev_requeue_skb_locked(skb, q);
	else
		return __dev_requeue_skb(skb, q);
}

static void try_bulk_dequeue_skb(struct Qdisc *q,
				 struct sk_buff *skb,
				 const struct netdev_queue *txq,
				 int *packets)
{
	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

	while (bytelimit > 0) {
		struct sk_buff *nskb = q->dequeue(q);

		if (!nskb)
			break;

		bytelimit -= nskb->len; /* covers GSO len */
		skb->next = nskb;
		skb = nskb;
		(*packets)++; /* GSO counts as one pkt */
	}
	skb->next = NULL;
}
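
/* Illustration: with BQL reporting, say, 64KB of remaining budget on the
 * txq, qdisc_avail_bulklimit() lets the loop above chain several packets
 * on skb->next, so one qdisc_restart() pass hands the driver a burst and
 * HARD_TX_LOCK is taken once per burst rather than once per packet.
 */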

/* This variant of try_bulk_dequeue_skb() makes sure
 * all skbs in the chain are for the same txq
 */
static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
				      struct sk_buff *skb,
				      int *packets)
{
	int mapping = skb_get_queue_mapping(skb);
	struct sk_buff *nskb;
	int cnt = 0;

	do {
		nskb = q->dequeue(q);
		if (!nskb)
			break;
		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
			qdisc_enqueue_skb_bad_txq(q, nskb);
			break;
		}
		skb->next = nskb;
		skb = nskb;
	} while (++cnt < 8);
	(*packets) += cnt;
	skb->next = NULL;
}

/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
				   int *packets)
{
	const struct netdev_queue *txq = q->dev_queue;
	struct sk_buff *skb = NULL;

	*packets = 1;
	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
		spinlock_t *lock = NULL;

		if (q->flags & TCQ_F_NOLOCK) {
			lock = qdisc_lock(q);
			spin_lock(lock);
		}

		skb = skb_peek(&q->gso_skb);

		/* skb may be null if another cpu pulls gso_skb off in between
		 * empty check and lock.
		 */
		if (!skb) {
			if (lock)
				spin_unlock(lock);
			goto validate;
		}

		/* skb in gso_skb were already validated */
		*validate = false;
		if (xfrm_offload(skb))
			*validate = true;
		/* check the reason of requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->gso_skb);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = NULL;
		}
		if (lock)
			spin_unlock(lock);
		goto trace;
	}
validate:
	*validate = true;

	if ((q->flags & TCQ_F_ONETXQUEUE) &&
	    netif_xmit_frozen_or_stopped(txq))
		return skb;

	skb = qdisc_dequeue_skb_bad_txq(q);
	if (unlikely(skb))
		goto bulk;
	skb = q->dequeue(q);
	if (skb) {
bulk:
		if (qdisc_may_bulk(q))
			try_bulk_dequeue_skb(q, skb, txq, packets);
		else
			try_bulk_dequeue_skb_slow(q, skb, packets);
	}
trace:
	trace_qdisc_dequeue(q, txq, *packets, skb);
	return skb;
}
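
/* Dequeue order, for reference: previously requeued skbs (q->gso_skb)
 * are retried first, then anything parked on q->skb_bad_txq whose txq
 * has since reopened, and only then the qdisc's own ->dequeue().
 */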

/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Owning running seqcount bit guarantees that
 * only one CPU can execute this function.
 *
 * Returns to the caller:
 *				false  - hardware queue frozen backoff
 *				true   - feel free to send more pkts
 */
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate)
{
	int ret = NETDEV_TX_BUSY;
	bool again = false;

	/* And release qdisc */
	if (root_lock)
		spin_unlock(root_lock);

	/* Note that we validate skb (GSO segmentation) in qdisc layer */
	if (validate)
		skb = validate_xmit_skb_list(skb, dev, &again);

#ifdef CONFIG_XFRM_OFFLOAD
	if (unlikely(again)) {
		if (root_lock)
			spin_lock(root_lock);

		dev_requeue_skb(skb, q);
		return false;
	}
#endif

	if (likely(skb)) {
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (!netif_xmit_frozen_or_stopped(txq))
			skb = dev_hard_start_xmit(skb, dev, txq, &ret);

		HARD_TX_UNLOCK(dev, txq);
	} else {
		if (root_lock)
			spin_lock(root_lock);
		return true;
	}

	if (root_lock)
		spin_lock(root_lock);

	if (!dev_xmit_complete(ret)) {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY))
			net_warn_ratelimited("BUG %s code %d qlen %d\n",
					     dev->name, ret, q->q.qlen);

		dev_requeue_skb(skb, q);
		return false;
	}

	return true;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * running seqcount guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 * if one is grabbed, another must be free.
 *
 * Note, that this procedure can be called by a watchdog timer
 *
 * Returns to the caller:
 *				false  - queue is empty or throttled.
 *				true   - queue is not empty.
 */
static inline bool qdisc_restart(struct Qdisc *q, int *packets)
{
	spinlock_t *root_lock = NULL;
	struct netdev_queue *txq;
	struct net_device *dev;
	struct sk_buff *skb;
	bool validate;

	/* Dequeue packet */
	skb = dequeue_skb(q, &validate, packets);
	if (unlikely(!skb))
		return false;

	if (!(q->flags & TCQ_F_NOLOCK))
		root_lock = qdisc_lock(q);

	dev = qdisc_dev(q);
	txq = skb_get_tx_queue(dev, skb);

	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}

void __qdisc_run(struct Qdisc *q)
{
	int quota = dev_tx_weight;
	int packets;

	while (qdisc_restart(q, &packets)) {
		/*
		 * Ordered by possible occurrence: Postpone processing if
		 * 1. we've exceeded packet quota
		 * 2. another process needs the CPU;
		 */
		quota -= packets;
		if (quota <= 0 || need_resched()) {
			__netif_schedule(q);
			break;
		}
	}
}
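
/* dev_tx_weight (sysctl net.core.dev_weight, scaled by
 * net.core.dev_weight_tx_bias) is typically 64, so one pass here sends
 * at most ~64 packets (a bulk-dequeued chain counts via *packets)
 * before rescheduling itself with __netif_schedule().
 */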

unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res;
	unsigned int i;

	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);
	else if (netif_is_macvlan(dev))
		dev = macvlan_dev_real_dev(dev);
	res = netdev_get_tx_queue(dev, 0)->trans_start;
	for (i = 1; i < dev->num_tx_queues; i++) {
		val = netdev_get_tx_queue(dev, i)->trans_start;
		if (val && time_after(val, res))
			res = val;
	}

	return res;
}
EXPORT_SYMBOL(dev_trans_start);

static void dev_watchdog(struct timer_list *t)
{
	struct net_device *dev = from_timer(dev, t, watchdog_timer);

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				trans_start = txq->trans_start;
				if (netif_xmit_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					txq->trans_timeout++;
					break;
				}
			}

			if (some_queue_timedout) {
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
					  dev->name, netdev_drivername(dev), i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->netdev_ops->ndo_tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 * Device has detected that carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_up_count);
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 * Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_down_count);
		linkwatch_fire_event(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_off);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
 * under all circumstances. It is difficult to invent anything faster or
 * cheaper.
 */
static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	.qdisc		=	&noop_qdisc,
	.qdisc_sleeping	=	&noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
	.running	=	SEQCNT_ZERO(noop_qdisc.running),
	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);

static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	/* register_qdisc() assigns a default of noop_enqueue if unset,
	 * but __dev_queue_xmit() treats noqueue only as such
	 * if this is NULL - so clear it here.
	 */
	qdisc->enqueue = NULL;
	return 0;
}

struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.init		=	noqueue_init,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static const u8 prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};
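
/* Band selection example: pfifo_fast_enqueue() indexes this table with
 * skb->priority & TC_PRIO_MAX, and pfifo_fast_dequeue() drains band 0
 * first.  So TC_PRIO_INTERACTIVE (6) maps to band 0 (highest priority),
 * TC_PRIO_BESTEFFORT (0) to band 1, and TC_PRIO_BULK (2) to band 2.
 */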

/* 3-band FIFO queue: old style, but should be a bit faster than
 * generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/* Private data for a pfifo_fast scheduler containing:
 *	- rings for priority bands
 */
struct pfifo_fast_priv {
	struct skb_array q[PFIFO_FAST_BANDS];
};

static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
					  int band)
{
	return &priv->q[band];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			      struct sk_buff **to_free)
{
	int band = prio2band[skb->priority & TC_PRIO_MAX];
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct skb_array *q = band2list(priv, band);
	unsigned int pkt_len = qdisc_pkt_len(skb);
	int err;

	err = skb_array_produce(q, skb);

	if (unlikely(err))
		return qdisc_drop_cpu(skb, qdisc, to_free);

	qdisc_qstats_cpu_qlen_inc(qdisc);
	/* Note: skb can not be used after skb_array_produce(),
	 * so we better not use qdisc_qstats_cpu_backlog_inc()
	 */
	this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len);
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct sk_buff *skb = NULL;
	int band;

	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
		struct skb_array *q = band2list(priv, band);

		if (__skb_array_empty(q))
			continue;

		skb = __skb_array_consume(q);
	}
	if (likely(skb)) {
		qdisc_qstats_cpu_backlog_dec(qdisc, skb);
		qdisc_bstats_cpu_update(qdisc, skb);
		qdisc_qstats_cpu_qlen_dec(qdisc);
	}

	return skb;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct sk_buff *skb = NULL;
	int band;

	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
		struct skb_array *q = band2list(priv, band);

		skb = __skb_array_peek(q);
	}

	return skb;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int i, band;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
		struct skb_array *q = band2list(priv, band);
		struct sk_buff *skb;

		/* NULL ring is possible if destroy path is due to a failed
		 * skb_array_init() in pfifo_fast_init() case.
		 */
		if (!q->ring.queue)
			continue;

		while ((skb = __skb_array_consume(q)) != NULL)
			kfree_skb(skb);
	}

	for_each_possible_cpu(i) {
		struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);

		q->backlog = 0;
		q->qlen = 0;
	}
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int prio;

	/* guard against zero length rings */
	if (!qlen)
		return -EINVAL;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);
		int err;

		err = skb_array_init(q, qlen, GFP_KERNEL);
		if (err)
			return -ENOMEM;
	}

	/* Can by-pass the queue discipline */
	qdisc->flags |= TCQ_F_CAN_BYPASS;
	return 0;
}

static void pfifo_fast_destroy(struct Qdisc *sch)
{
	struct pfifo_fast_priv *priv = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);

		/* NULL ring is possible if destroy path is due to a failed
		 * skb_array_init() in pfifo_fast_init() case.
		 */
		if (!q->ring.queue)
			continue;
		/* Destroy ring but no need to kfree_skb because a call to
		 * pfifo_fast_reset() has already done that work.
		 */
		ptr_ring_cleanup(&q->ring, NULL);
	}
}

static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
					  unsigned int new_len)
{
	struct pfifo_fast_priv *priv = qdisc_priv(sch);
	struct skb_array *bands[PFIFO_FAST_BANDS];
	int prio;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);

		bands[prio] = q;
	}

	return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
					 GFP_KERNEL);
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		=	"pfifo_fast",
	.priv_size	=	sizeof(struct pfifo_fast_priv),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.peek		=	pfifo_fast_peek,
	.init		=	pfifo_fast_init,
	.destroy	=	pfifo_fast_destroy,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.change_tx_queue_len =	pfifo_fast_change_tx_queue_len,
	.owner		=	THIS_MODULE,
	.static_flags	=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
};
EXPORT_SYMBOL(pfifo_fast_ops);
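
/* The static_flags above are what make this qdisc lockless:
 * TCQ_F_NOLOCK means enqueue/dequeue rely on the per-band skb_array
 * (a ptr_ring) instead of qdisc_lock(), and TCQ_F_CPUSTATS makes
 * qdisc_alloc() below allocate the per-cpu bstats/qstats those paths
 * update.
 */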

static struct lock_class_key qdisc_tx_busylock;
static struct lock_class_key qdisc_running_key;

struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
	int err = -ENOBUFS;
	struct net_device *dev;

	if (!dev_queue) {
		NL_SET_ERR_MSG(extack, "No device queue given");
		err = -EINVAL;
		goto errout;
	}

	dev = dev_queue->dev;
	p = kzalloc_node(size, GFP_KERNEL,
			 netdev_queue_numa_node_read(dev_queue));

	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	/* if we got non aligned memory, ask more and do alignment ourself */
	if (sch != p) {
		kfree(p);
		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
				 netdev_queue_numa_node_read(dev_queue));
		if (!p)
			goto errout;
		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
		sch->padded = (char *) sch - (char *) p;
	}
	__skb_queue_head_init(&sch->gso_skb);
	__skb_queue_head_init(&sch->skb_bad_txq);
	qdisc_skb_head_init(&sch->q);
	spin_lock_init(&sch->q.lock);

	if (ops->static_flags & TCQ_F_CPUSTATS) {
		sch->cpu_bstats =
			netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!sch->cpu_bstats)
			goto errout1;

		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!sch->cpu_qstats) {
			free_percpu(sch->cpu_bstats);
			goto errout1;
		}
	}

	spin_lock_init(&sch->busylock);
	lockdep_set_class(&sch->busylock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	/* seqlock has the same scope of busylock, for NOLOCK qdisc */
	spin_lock_init(&sch->seqlock);
	lockdep_set_class(&sch->seqlock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	seqcount_init(&sch->running);
	lockdep_set_class(&sch->running,
			  dev->qdisc_running_key ?: &qdisc_running_key);

	sch->ops = ops;
	sch->flags = ops->static_flags;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	dev_hold(dev);
	refcount_set(&sch->refcnt, 1);

	return sch;
errout1:
	kfree(p);
errout:
	return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops,
				unsigned int parentid,
				struct netlink_ext_ack *extack)
{
	struct Qdisc *sch;

	if (!try_module_get(ops->owner)) {
		NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
		return NULL;
	}

	sch = qdisc_alloc(dev_queue, ops, extack);
	if (IS_ERR(sch)) {
		module_put(ops->owner);
		return NULL;
	}
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL, extack) == 0)
		return sch;

	qdisc_destroy(sch);
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);
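
/* Typical use (sketch, not taken from this file): a classful qdisc
 * creating a default child for one of its classes, e.g.
 *
 *	child = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
 *				  TC_H_MAKE(TC_H_MAJ(sch->handle), 1),
 *				  extack);
 *	if (!child)
 *		return -ENOMEM;
 *
 * The caller owns the returned reference and is responsible for
 * grafting or destroying it.
 */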

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;
	struct sk_buff *skb, *tmp;

	if (ops->reset)
		ops->reset(qdisc);

	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
		__skb_unlink(skb, &qdisc->gso_skb);
		kfree_skb_list(skb);
	}

	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
		__skb_unlink(skb, &qdisc->skb_bad_txq);
		kfree_skb_list(skb);
	}

	qdisc->q.qlen = 0;
	qdisc->qstats.backlog = 0;
}
EXPORT_SYMBOL(qdisc_reset);

void qdisc_free(struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc)) {
		free_percpu(qdisc->cpu_bstats);
		free_percpu(qdisc->cpu_qstats);
	}

	kfree((char *) qdisc - qdisc->padded);
}

void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;
	struct sk_buff *skb, *tmp;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !refcount_dec_and_test(&qdisc->refcnt))
		return;

#ifdef CONFIG_NET_SCHED
	qdisc_hash_del(qdisc);

	qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
	gen_kill_estimator(&qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
		__skb_unlink(skb, &qdisc->gso_skb);
		kfree_skb_list(skb);
	}

	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
		__skb_unlink(skb, &qdisc->skb_bad_txq);
		kfree_skb_list(skb);
	}

	qdisc_free(qdisc);
}
EXPORT_SYMBOL(qdisc_destroy);

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;
	const struct Qdisc_ops *ops = default_qdisc_ops;

	if (dev->priv_flags & IFF_NO_QUEUE)
		ops = &noqueue_qdisc_ops;

	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
	if (!qdisc) {
		netdev_info(dev, "activation failed\n");
		return;
	}
	if (!netif_is_multiqueue(dev))
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) ||
	    dev->priv_flags & IFF_NO_QUEUE) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		qdisc_refcount_inc(dev->qdisc);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
		if (qdisc) {
			dev->qdisc = qdisc;
			qdisc->ops->attach(qdisc);
		}
	}
#ifdef CONFIG_NET_SCHED
	if (dev->qdisc != &noop_qdisc)
		qdisc_hash_add(dev->qdisc, false);
#endif
}
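
/* E.g. a 4-queue NIC gets an "mq" root here whose ->attach() grafts a
 * default_qdisc_ops instance (pfifo_fast unless overridden via the
 * net.core.default_qdisc sysctl) onto each tx queue, while single-queue
 * and IFF_NO_QUEUE devices get their per-queue default directly as root.
 */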

static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p) {
		dev_queue->trans_start = 0;
		*need_watchdog_p = 1;
	}
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* No queueing discipline is attached to device;
	 * create default one for devices, which need queueing
	 * and noqueue_qdisc for virtual interfaces
	 */

	if (dev->qdisc == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	if (dev_ingress_queue(dev))
		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		netif_trans_update(dev);
		dev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(dev_activate);

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct Qdisc *qdisc;

	qdisc = rtnl_dereference(dev_queue->qdisc);
	if (qdisc) {
		bool nolock = qdisc->flags & TCQ_F_NOLOCK;

		if (nolock)
			spin_lock_bh(&qdisc->seqlock);
		spin_lock_bh(qdisc_lock(qdisc));

		if (!(qdisc->flags & TCQ_F_BUILTIN))
			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		qdisc_reset(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
		if (nolock)
			spin_unlock_bh(&qdisc->seqlock);
	}
}

static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc_sleeping;

		root_lock = qdisc_lock(q);
		spin_lock_bh(root_lock);

		val = (qdisc_is_running(q) ||
		       test_bit(__QDISC_STATE_SCHED, &q->state));

		spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}

static void dev_qdisc_reset(struct net_device *dev,
			    struct netdev_queue *dev_queue,
			    void *none)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;

	if (qdisc)
		qdisc_reset(qdisc);
}

/**
 *	dev_deactivate_many - deactivate transmissions on several devices
 *	@head: list of devices to deactivate
 *
 *	This function returns only when all outstanding transmissions
 *	have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
	struct net_device *dev;

	list_for_each_entry(dev, head, close_list) {
		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
					 &noop_qdisc);
		if (dev_ingress_queue(dev))
			dev_deactivate_queue(dev, dev_ingress_queue(dev),
					     &noop_qdisc);

		dev_watchdog_down(dev);
	}

	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
	 * This is avoided if all devices are in dismantle phase :
	 * caller of dev_deactivate_many() already checked them
	 */
	synchronize_net();

	/* Wait for outstanding qdisc_run calls. */
	list_for_each_entry(dev, head, close_list) {
		while (some_qdisc_is_busy(dev))
			yield();

		/* The new qdisc is assigned at this point so we can safely
		 * unwind stale skb lists and qdisc statistics
		 */
		netdev_for_each_tx_queue(dev, dev_qdisc_reset, NULL);
		if (dev_ingress_queue(dev))
			dev_qdisc_reset(dev, dev_ingress_queue(dev), NULL);
	}
}

void dev_deactivate(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	dev_deactivate_many(&single);
	list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);

static int qdisc_change_tx_queue_len(struct net_device *dev,
				     struct netdev_queue *dev_queue)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->change_tx_queue_len)
		return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
	return 0;
}

int dev_qdisc_change_tx_queue_len(struct net_device *dev)
{
	bool up = dev->flags & IFF_UP;
	unsigned int i;
	int ret = 0;

	if (up)
		dev_deactivate(dev);

	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);

		/* TODO: revert changes on a partial failure */
		if (ret)
			break;
	}

	if (up)
		dev_activate(dev);
	return ret;
}

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	rcu_assign_pointer(dev_queue->qdisc, qdisc);
	dev_queue->qdisc_sleeping = qdisc;
	__skb_queue_head_init(&qdisc->gso_skb);
	__skb_queue_head_init(&qdisc->skb_bad_txq);
}

void dev_init_scheduler(struct net_device *dev)
{
	dev->qdisc = &noop_qdisc;
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		dev_init_scheduler_queue(dev, dev_ingress_queue(dev),
					 &noop_qdisc);

	timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_destroy(qdisc);
	}
}

void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		shutdown_scheduler_queue(dev, dev_ingress_queue(dev),
					 &noop_qdisc);
	qdisc_destroy(dev->qdisc);
	dev->qdisc = &noop_qdisc;

	WARN_ON(timer_pending(&dev->watchdog_timer));
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64)
{
	memset(r, 0, sizeof(*r));
	r->overhead = conf->overhead;
	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
	r->mult = 1;
	/* Replace the per-packet divide needed to turn a length into a
	 * transmission time by a multiply and a shift:
	 *
	 *	time_in_ns = (len * mult) >> shift
	 *
	 * The loop below keeps doubling the scale factor until mult no
	 * longer fits below 2^31 (or the factor itself would overflow),
	 * which yields the most precise mult/shift pair available.
	 */
	if (r->rate_bytes_ps > 0) {
		u64 factor = NSEC_PER_SEC;

		for (;;) {
			r->mult = div64_u64(factor, r->rate_bytes_ps);
			if (r->mult & (1U << 31) || factor & (1ULL << 63))
				break;
			factor <<= 1;
			r->shift++;
		}
	}
}
EXPORT_SYMBOL(psched_ratecfg_precompute);
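
/* Worked example: at 1 Gbit/s, rate_bytes_ps = 125000000, so the loop
 * starts from mult = 10^9 / 125000000 = 8 and doubles factor 28 times
 * until mult reaches 2^31, giving shift = 28.  psched_l2t_ns() then
 * computes (len * mult) >> shift, i.e. 8 ns per byte, with no divide
 * on the fast path.
 */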

static void mini_qdisc_rcu_func(struct rcu_head *head)
{
}

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head)
{
	struct mini_Qdisc *miniq_old = rtnl_dereference(*miniqp->p_miniq);
	struct mini_Qdisc *miniq;

	if (!tp_head) {
		RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
		/* Wait for flying RCU callback before it is freed. */
		rcu_barrier_bh();
		return;
	}

	miniq = !miniq_old || miniq_old == &miniqp->miniq2 ?
		&miniqp->miniq1 : &miniqp->miniq2;

	/* We need to make sure that readers won't see the miniq
	 * we are about to modify. So wait until previous call_rcu_bh callback
	 * is done.
	 */
	rcu_barrier_bh();
	miniq->filter_list = tp_head;
	rcu_assign_pointer(*miniqp->p_miniq, miniq);

	if (miniq_old)
		/* This is counterpart of the rcu barriers above. We need to
		 * block potential new user of miniq_old until all readers
		 * are not seeing it.
		 */
		call_rcu_bh(&miniq_old->rcu, mini_qdisc_rcu_func);
}
EXPORT_SYMBOL(mini_qdisc_pair_swap);
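
/* The pair acts as a double buffer: ingress/clsact publish a new filter
 * chain by writing the currently inactive miniq (miniq1 or miniq2) and
 * swinging p_miniq over to it, so fast-path readers need only RCU, never
 * a lock.
 */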

void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq)
{
	miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
	miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
	miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
	miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
	miniqp->p_miniq = p_miniq;
}
EXPORT_SYMBOL(mini_qdisc_pair_init);