/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 */
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <net/pkt_sched.h>
#include <net/dst.h>

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue and dequeue are serialized via the qdisc root lock
 * - ingress filtering is also serialized via the qdisc root lock
 * - updates to the tree and tree walking are only done under the rtnl mutex.
 */
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
        skb_dst_force(skb);
        q->gso_skb = skb;
        q->qstats.requeues++;
        q->q.qlen++;    /* it's still part of the queue */
        __netif_schedule(q);

        return 0;
}

static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
        struct sk_buff *skb = q->gso_skb;

        if (unlikely(skb)) {
                struct net_device *dev = qdisc_dev(q);
                struct netdev_queue *txq;

                /* check the reason for requeuing without the tx lock first */
                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
                if (!netif_xmit_frozen_or_stopped(txq)) {
                        q->gso_skb = NULL;
                        q->q.qlen--;
                } else
                        skb = NULL;
        } else {
                skb = q->dequeue(q);
        }

        return skb;
}

static inline int handle_dev_cpu_collision(struct sk_buff *skb,
                                           struct netdev_queue *dev_queue,
                                           struct Qdisc *q)
{
        int ret;

        if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
                /*
                 * Same CPU holding the lock. It may be a transient
                 * configuration error, when hard_start_xmit() recurses. We
                 * detect it by checking xmit owner and drop the packet when
                 * a dead loop is detected. Return OK to try the next skb.
                 */
                kfree_skb(skb);
                net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n",
                                     dev_queue->dev->name);
                ret = qdisc_qlen(q);
        } else {
                /*
                 * Another cpu is holding the lock; requeue and delay xmits
                 * for some time.
                 */
                __this_cpu_inc(softnet_data.cpu_collision);
                ret = dev_requeue_skb(skb, q);
        }

        return ret;
}

/*
 * Transmit one skb, and handle the return status as required. Holding the
 * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
 * function.
 *
 * Returns to the caller:
 *				0  - queue is empty or throttled.
 *				>0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                    struct net_device *dev, struct netdev_queue *txq,
                    spinlock_t *root_lock)
{
        int ret = NETDEV_TX_BUSY;

        /* And release qdisc */
        spin_unlock(root_lock);

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_stopped(txq))
                ret = dev_hard_start_xmit(skb, dev, txq);

        HARD_TX_UNLOCK(dev, txq);

        spin_lock(root_lock);

        if (dev_xmit_complete(ret)) {
                /* Driver sent out skb successfully or skb was consumed */
                ret = qdisc_qlen(q);
        } else if (ret == NETDEV_TX_LOCKED) {
                /* Driver try lock failed */
                ret = handle_dev_cpu_collision(skb, txq, q);
        } else {
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely(ret != NETDEV_TX_BUSY))
                        net_warn_ratelimited("BUG %s code %d qlen %d\n",
                                             dev->name, ret, q->q.qlen);

                ret = dev_requeue_skb(skb, q);
        }

        if (ret && netif_xmit_frozen_or_stopped(txq))
                ret = 0;

        return ret;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * __QDISC_STATE_RUNNING guarantees that only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to the device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive:
 * if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *				0  - queue is empty or throttled.
 *				>0 - queue is not empty.
 */
static inline int qdisc_restart(struct Qdisc *q)
{
        struct netdev_queue *txq;
        struct net_device *dev;
        spinlock_t *root_lock;
        struct sk_buff *skb;

        /* Dequeue packet */
        skb = dequeue_skb(q);
        if (unlikely(!skb))
                return 0;
        WARN_ON_ONCE(skb_dst_is_noref(skb));
        root_lock = qdisc_lock(q);
        dev = qdisc_dev(q);
        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

        return sch_direct_xmit(skb, q, dev, txq, root_lock);
}

void __qdisc_run(struct Qdisc *q)
{
        int quota = weight_p;

        while (qdisc_restart(q)) {
                /*
                 * Ordered by likely occurrence: postpone processing if
                 * 1. we've exceeded the packet quota
                 * 2. another process needs the CPU
                 */
                if (--quota <= 0 || need_resched()) {
                        __netif_schedule(q);
                        break;
                }
        }

        qdisc_run_end(q);
}

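/* Return the most recent transmit start time across all TX queues of the
 * device, and cache it in dev->trans_start.
 */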
unsigned long dev_trans_start(struct net_device *dev)
{
        unsigned long val, res = dev->trans_start;
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                val = netdev_get_tx_queue(dev, i)->trans_start;
                if (val && time_after(val, res))
                        res = val;
        }
        dev->trans_start = res;
        return res;
}
EXPORT_SYMBOL(dev_trans_start);

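/* Watchdog timer callback: if any stopped TX queue has not seen a transmit
 * within watchdog_timeo, report a timeout to the driver and re-arm the timer.
 */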
static void dev_watchdog(unsigned long arg)
{
        struct net_device *dev = (struct net_device *)arg;

        netif_tx_lock(dev);
        if (!qdisc_tx_is_noop(dev)) {
                if (netif_device_present(dev) &&
                    netif_running(dev) &&
                    netif_carrier_ok(dev)) {
                        int some_queue_timedout = 0;
                        unsigned int i;
                        unsigned long trans_start;

                        for (i = 0; i < dev->num_tx_queues; i++) {
                                struct netdev_queue *txq;

                                txq = netdev_get_tx_queue(dev, i);
                                /*
                                 * old device drivers set dev->trans_start
                                 */
                                trans_start = txq->trans_start ? : dev->trans_start;
                                if (netif_xmit_stopped(txq) &&
                                    time_after(jiffies, (trans_start +
                                                         dev->watchdog_timeo))) {
                                        some_queue_timedout = 1;
                                        txq->trans_timeout++;
                                        break;
                                }
                        }

                        if (some_queue_timedout) {
                                WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
                                          dev->name, netdev_drivername(dev), i);
                                dev->netdev_ops->ndo_tx_timeout(dev);
                        }
                        if (!mod_timer(&dev->watchdog_timer,
                                       round_jiffies(jiffies +
                                                     dev->watchdog_timeo)))
                                dev_hold(dev);
                }
        }
        netif_tx_unlock(dev);

        dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
        if (dev->netdev_ops->ndo_tx_timeout) {
                if (dev->watchdog_timeo <= 0)
                        dev->watchdog_timeo = 5*HZ;
                if (!mod_timer(&dev->watchdog_timer,
                               round_jiffies(jiffies + dev->watchdog_timeo)))
                        dev_hold(dev);
        }
}

static void dev_watchdog_up(struct net_device *dev)
{
        __netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
        netif_tx_lock_bh(dev);
        if (del_timer(&dev->watchdog_timer))
                dev_put(dev);
        netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 * Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
        if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
                linkwatch_fire_event(dev);
                if (netif_running(dev))
                        __netdev_watchdog_up(dev);
        }
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 * Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
                linkwatch_fire_event(dev);
        }
}
EXPORT_SYMBOL(netif_carrier_off);

/**
 *	netif_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netif_notify_peers(struct net_device *dev)
{
        rtnl_lock();
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
        rtnl_unlock();
}
EXPORT_SYMBOL(netif_notify_peers);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
 * under all circumstances. It is difficult to invent anything faster or
 * cheaper.
 */
static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
        kfree_skb(skb);
        return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
        return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
        .id = "noop",
        .priv_size = 0,
        .enqueue = noop_enqueue,
        .dequeue = noop_dequeue,
        .peek = noop_dequeue,
        .owner = THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
        .qdisc = &noop_qdisc,
        .qdisc_sleeping = &noop_qdisc,
};

struct Qdisc noop_qdisc = {
        .enqueue = noop_enqueue,
        .dequeue = noop_dequeue,
        .flags = TCQ_F_BUILTIN,
        .ops = &noop_qdisc_ops,
        .list = LIST_HEAD_INIT(noop_qdisc.list),
        .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
        .dev_queue = &noop_netdev_queue,
        .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);

static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
        .id = "noqueue",
        .priv_size = 0,
        .enqueue = noop_enqueue,
        .dequeue = noop_dequeue,
        .peek = noop_dequeue,
        .owner = THIS_MODULE,
};

static struct Qdisc noqueue_qdisc;
static struct netdev_queue noqueue_netdev_queue = {
        .qdisc = &noqueue_qdisc,
        .qdisc_sleeping = &noqueue_qdisc,
};

static struct Qdisc noqueue_qdisc = {
        .enqueue = NULL,
        .dequeue = noop_dequeue,
        .flags = TCQ_F_BUILTIN,
        .ops = &noqueue_qdisc_ops,
        .list = LIST_HEAD_INIT(noqueue_qdisc.list),
        .q.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
        .dev_queue = &noqueue_netdev_queue,
        .busylock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.busylock),
};

static const u8 prio2band[TC_PRIO_MAX + 1] = {
        1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

/* 3-band FIFO queue: old style, but should be a bit faster than
 * the generic prio+fifo combination.
 */
#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *	- queues for the three bands
 *	- bitmap indicating which of the bands contain skbs
 */
struct pfifo_fast_priv {
        u32 bitmap;
        struct sk_buff_head q[PFIFO_FAST_BANDS];
};

/*
 * Convert a bitmap to the first band number where an skb is queued, where:
 *	bitmap=0 means there are no skbs on any band.
 *	bitmap=1 means there is an skb on band 0.
 *	bitmap=7 means there are skbs on all 3 bands.
 */
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};

static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
                                             int band)
{
        return priv->q + band;
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
        if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
                int band = prio2band[skb->priority & TC_PRIO_MAX];
                struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
                struct sk_buff_head *list = band2list(priv, band);

                priv->bitmap |= (1 << band);
                qdisc->q.qlen++;
                return __qdisc_enqueue_tail(skb, qdisc, list);
        }

        return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        int band = bitmap2band[priv->bitmap];

        if (likely(band >= 0)) {
                struct sk_buff_head *list = band2list(priv, band);
                struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);

                qdisc->q.qlen--;
                if (skb_queue_empty(list))
                        priv->bitmap &= ~(1 << band);

                return skb;
        }

        return NULL;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        int band = bitmap2band[priv->bitmap];

        if (band >= 0) {
                struct sk_buff_head *list = band2list(priv, band);

                return skb_peek(list);
        }

        return NULL;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
        int prio;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
                __qdisc_reset_queue(qdisc, band2list(priv, prio));

        priv->bitmap = 0;
        qdisc->qstats.backlog = 0;
        qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
        struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

        memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
        if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
        int prio;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
                skb_queue_head_init(band2list(priv, prio));

        /* Can by-pass the queue discipline */
        qdisc->flags |= TCQ_F_CAN_BYPASS;
        return 0;
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
        .id = "pfifo_fast",
        .priv_size = sizeof(struct pfifo_fast_priv),
        .enqueue = pfifo_fast_enqueue,
        .dequeue = pfifo_fast_dequeue,
        .peek = pfifo_fast_peek,
        .init = pfifo_fast_init,
        .reset = pfifo_fast_reset,
        .dump = pfifo_fast_dump,
        .owner = THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_fast_ops);

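/* Allocate a Qdisc plus its private data, aligned to QDISC_ALIGNTO, on the
 * NUMA node of the given TX queue.
 */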
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          struct Qdisc_ops *ops)
{
        void *p;
        struct Qdisc *sch;
        unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
        int err = -ENOBUFS;

        p = kzalloc_node(size, GFP_KERNEL,
                         netdev_queue_numa_node_read(dev_queue));

        if (!p)
                goto errout;
        sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
        /* if we got non-aligned memory, ask again */
        if (sch != p) {
                kfree(p);
                p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
                                 netdev_queue_numa_node_read(dev_queue));
                if (!p)
                        goto errout;
                sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
                sch->padded = (char *) sch - (char *) p;
        }
        INIT_LIST_HEAD(&sch->list);
        skb_queue_head_init(&sch->q);
        spin_lock_init(&sch->busylock);
        sch->ops = ops;
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev_queue = dev_queue;
        dev_hold(qdisc_dev(sch));
        atomic_set(&sch->refcnt, 1);

        return sch;
errout:
        return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                struct Qdisc_ops *ops, unsigned int parentid)
{
        struct Qdisc *sch;

        sch = qdisc_alloc(dev_queue, ops);
        if (IS_ERR(sch))
                goto errout;
        sch->parent = parentid;

        if (!ops->init || ops->init(sch, NULL) == 0)
                return sch;

        qdisc_destroy(sch);
errout:
        return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
        const struct Qdisc_ops *ops = qdisc->ops;

        if (ops->reset)
                ops->reset(qdisc);

        if (qdisc->gso_skb) {
                kfree_skb(qdisc->gso_skb);
                qdisc->gso_skb = NULL;
                qdisc->q.qlen = 0;
        }
}
EXPORT_SYMBOL(qdisc_reset);

static void qdisc_rcu_free(struct rcu_head *head)
{
        struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);

        kfree((char *) qdisc - qdisc->padded);
}

void qdisc_destroy(struct Qdisc *qdisc)
{
        const struct Qdisc_ops *ops = qdisc->ops;

        if (qdisc->flags & TCQ_F_BUILTIN ||
            !atomic_dec_and_test(&qdisc->refcnt))
                return;

#ifdef CONFIG_NET_SCHED
        qdisc_list_del(qdisc);

        qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
        gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
        if (ops->reset)
                ops->reset(qdisc);
        if (ops->destroy)
                ops->destroy(qdisc);

        module_put(ops->owner);
        dev_put(qdisc_dev(qdisc));

        kfree_skb(qdisc->gso_skb);
        /*
         * gen_estimator est_timer() might access qdisc->q.lock,
         * so wait an RCU grace period before freeing the qdisc.
         */
        call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
EXPORT_SYMBOL(qdisc_destroy);

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                              struct Qdisc *qdisc)
{
        struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
        spinlock_t *root_lock;

        root_lock = qdisc_lock(oqdisc);
        spin_lock_bh(root_lock);

        /* Prune old scheduler */
        if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
                qdisc_reset(oqdisc);

        /* ... and graft new one */
        if (qdisc == NULL)
                qdisc = &noop_qdisc;
        dev_queue->qdisc_sleeping = qdisc;
        rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

        spin_unlock_bh(root_lock);

        return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);

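/* Give one TX queue a default sleeping qdisc: pfifo_fast for devices that
 * queue packets, or the builtin noqueue_qdisc when tx_queue_len is zero.
 */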
static void attach_one_default_qdisc(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_unused)
{
        struct Qdisc *qdisc = &noqueue_qdisc;

        if (dev->tx_queue_len) {
                qdisc = qdisc_create_dflt(dev_queue,
                                          &pfifo_fast_ops, TC_H_ROOT);
                if (!qdisc) {
                        netdev_info(dev, "activation failed\n");
                        return;
                }
        }
        dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
        struct netdev_queue *txq;
        struct Qdisc *qdisc;

        txq = netdev_get_tx_queue(dev, 0);

        if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
                netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
                dev->qdisc = txq->qdisc_sleeping;
                atomic_inc(&dev->qdisc->refcnt);
        } else {
                qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
                if (qdisc) {
                        qdisc->ops->attach(qdisc);
                        dev->qdisc = qdisc;
                }
        }
}

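/* Promote a queue's sleeping qdisc to the active qdisc and flag that the
 * transmit watchdog is needed, unless the queue uses noqueue_qdisc.
 */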
static void transition_one_qdisc(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
                                 void *_need_watchdog)
{
        struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
        int *need_watchdog_p = _need_watchdog;

        if (!(new_qdisc->flags & TCQ_F_BUILTIN))
                clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

        rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
        if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
                dev_queue->trans_start = 0;
                *need_watchdog_p = 1;
        }
}

void dev_activate(struct net_device *dev)
{
        int need_watchdog;

        /* No queueing discipline is attached to the device;
         * create a default one, i.e. pfifo_fast for devices
         * which need queueing and noqueue_qdisc for
         * virtual interfaces.
         */
        if (dev->qdisc == &noop_qdisc)
                attach_default_qdiscs(dev);

        if (!netif_carrier_ok(dev))
                /* Delay activation until next carrier-on event */
                return;

        need_watchdog = 0;
        netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
        if (dev_ingress_queue(dev))
                transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

        if (need_watchdog) {
                dev->trans_start = jiffies;
                dev_watchdog_up(dev);
        }
}
EXPORT_SYMBOL(dev_activate);

static void dev_deactivate_queue(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
                                 void *_qdisc_default)
{
        struct Qdisc *qdisc_default = _qdisc_default;
        struct Qdisc *qdisc;

        qdisc = dev_queue->qdisc;
        if (qdisc) {
                spin_lock_bh(qdisc_lock(qdisc));

                if (!(qdisc->flags & TCQ_F_BUILTIN))
                        set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

                rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
                qdisc_reset(qdisc);

                spin_unlock_bh(qdisc_lock(qdisc));
        }
}

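/* Returns true if any qdisc of the device is still running or scheduled
 * for transmission.
 */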
static bool some_qdisc_is_busy(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *dev_queue;
                spinlock_t *root_lock;
                struct Qdisc *q;
                int val;

                dev_queue = netdev_get_tx_queue(dev, i);
                q = dev_queue->qdisc_sleeping;
                root_lock = qdisc_lock(q);

                spin_lock_bh(root_lock);

                val = (qdisc_is_running(q) ||
                       test_bit(__QDISC_STATE_SCHED, &q->state));

                spin_unlock_bh(root_lock);

                if (val)
                        return true;
        }
        return false;
}

/**
 *	dev_deactivate_many - deactivate transmissions on several devices
 *	@head: list of devices to deactivate
 *
 *	This function returns only when all outstanding transmissions
 *	have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
        struct net_device *dev;
        bool sync_needed = false;

        list_for_each_entry(dev, head, unreg_list) {
                netdev_for_each_tx_queue(dev, dev_deactivate_queue,
                                         &noop_qdisc);
                if (dev_ingress_queue(dev))
                        dev_deactivate_queue(dev, dev_ingress_queue(dev),
                                             &noop_qdisc);

                dev_watchdog_down(dev);
                sync_needed |= !dev->dismantle;
        }

        /* Wait for outstanding qdisc-less dev_queue_xmit calls.
         * This is avoided if all devices are in dismantle phase:
         * the caller will call synchronize_net() for us.
         */
        if (sync_needed)
                synchronize_net();

        /* Wait for outstanding qdisc_run calls. */
        list_for_each_entry(dev, head, unreg_list)
                while (some_qdisc_is_busy(dev))
                        yield();
}

void dev_deactivate(struct net_device *dev)
{
        LIST_HEAD(single);

        list_add(&dev->unreg_list, &single);
        dev_deactivate_many(&single);
        list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);

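/* Point both the active and sleeping qdisc of a queue at the given qdisc
 * (the builtin noop_qdisc during scheduler initialization).
 */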
static void dev_init_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_qdisc)
{
        struct Qdisc *qdisc = _qdisc;

        dev_queue->qdisc = qdisc;
        dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
        dev->qdisc = &noop_qdisc;
        netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
        if (dev_ingress_queue(dev))
                dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

        setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

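/* Replace a queue's qdisc with the default (noop) qdisc and destroy the
 * old one.
 */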
static void shutdown_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_qdisc_default)
{
        struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
        struct Qdisc *qdisc_default = _qdisc_default;

        if (qdisc) {
                rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
                dev_queue->qdisc_sleeping = qdisc_default;

                qdisc_destroy(qdisc);
        }
}

void dev_shutdown(struct net_device *dev)
{
        netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
        if (dev_ingress_queue(dev))
                shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
        qdisc_destroy(dev->qdisc);
        dev->qdisc = &noop_qdisc;

        WARN_ON(timer_pending(&dev->watchdog_timer));
}