/*
 * net/sched/sch_generic.c      Generic packet scheduler routines.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *              - Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
        q->gso_skb = skb;
        q->qstats.requeues++;
        qdisc_qstats_backlog_inc(q, skb);
        q->q.qlen++;    /* it's still part of the queue */
        __netif_schedule(q);

        return 0;
}

static void try_bulk_dequeue_skb(struct Qdisc *q,
                                 struct sk_buff *skb,
                                 const struct netdev_queue *txq,
                                 int *packets)
{
        int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

        while (bytelimit > 0) {
                struct sk_buff *nskb = q->dequeue(q);

                if (!nskb)
                        break;

                bytelimit -= nskb->len; /* covers GSO len */
                skb->next = nskb;
                skb = nskb;
                (*packets)++; /* GSO counts as one pkt */
        }
        skb->next = NULL;
}

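/* Worked example (illustrative figures, not from a real trace): with a
 * BQL budget of 6000 bytes reported by qdisc_avail_bulklimit() and a
 * first skb of 1500 bytes, bytelimit starts at 4500, so up to three
 * further 1500-byte skbs can be chained before the budget is spent:
 *
 *      bytelimit = 6000 - 1500 = 4500  -> dequeue #2, 4500 - 1500 = 3000
 *                                      -> dequeue #3, 3000 - 1500 = 1500
 *                                      -> dequeue #4, 1500 - 1500 = 0
 *
 * The loop then stops and the chain of four skbs is handed to the
 * driver in a single sch_direct_xmit() call.
 */
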
/* This variant of try_bulk_dequeue_skb() makes sure
 * all skbs in the chain are for the same txq
 */
static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
                                      struct sk_buff *skb,
                                      int *packets)
{
        int mapping = skb_get_queue_mapping(skb);
        struct sk_buff *nskb;
        int cnt = 0;

        do {
                nskb = q->dequeue(q);
                if (!nskb)
                        break;
                if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
                        q->skb_bad_txq = nskb;
                        qdisc_qstats_backlog_inc(q, nskb);
                        q->q.qlen++;
                        break;
                }
                skb->next = nskb;
                skb = nskb;
        } while (++cnt < 8);
        (*packets) += cnt;
        skb->next = NULL;
}

/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
                                   int *packets)
{
        struct sk_buff *skb = q->gso_skb;
        const struct netdev_queue *txq = q->dev_queue;

        *packets = 1;
        if (unlikely(skb)) {
                /* skbs in gso_skb were already validated */
                *validate = false;
                /* check the reason for the requeue without the tx lock first */
                txq = skb_get_tx_queue(txq->dev, skb);
                if (!netif_xmit_frozen_or_stopped(txq)) {
                        q->gso_skb = NULL;
                        qdisc_qstats_backlog_dec(q, skb);
                        q->q.qlen--;
                } else
                        skb = NULL;
                return skb;
        }
        *validate = true;
        skb = q->skb_bad_txq;
        if (unlikely(skb)) {
                /* check the reason for the requeue without the tx lock first */
                txq = skb_get_tx_queue(txq->dev, skb);
                if (!netif_xmit_frozen_or_stopped(txq)) {
                        q->skb_bad_txq = NULL;
                        qdisc_qstats_backlog_dec(q, skb);
                        q->q.qlen--;
                        goto bulk;
                }
                return NULL;
        }
        if (!(q->flags & TCQ_F_ONETXQUEUE) ||
            !netif_xmit_frozen_or_stopped(txq))
                skb = q->dequeue(q);
        if (skb) {
bulk:
                if (qdisc_may_bulk(q))
                        try_bulk_dequeue_skb(q, skb, txq, packets);
                else
                        try_bulk_dequeue_skb_slow(q, skb, packets);
        }
        return skb;
}

/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Owning the running seqcount guarantees that only one CPU
 * can execute this function.
 *
 * Returns to the caller:
 *                              0  - queue is empty or throttled.
 *                              >0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                    struct net_device *dev, struct netdev_queue *txq,
                    spinlock_t *root_lock, bool validate)
{
        int ret = NETDEV_TX_BUSY;

        /* And release qdisc */
        spin_unlock(root_lock);

        /* Note that we validate skb (GSO, checksum, ...) outside of locks */
        if (validate)
                skb = validate_xmit_skb_list(skb, dev);

        if (likely(skb)) {
                HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (!netif_xmit_frozen_or_stopped(txq))
                        skb = dev_hard_start_xmit(skb, dev, txq, &ret);

                HARD_TX_UNLOCK(dev, txq);
        } else {
                spin_lock(root_lock);
                return qdisc_qlen(q);
        }
        spin_lock(root_lock);

        if (dev_xmit_complete(ret)) {
                /* Driver sent out skb successfully or skb was consumed */
                ret = qdisc_qlen(q);
        } else {
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely(ret != NETDEV_TX_BUSY))
                        net_warn_ratelimited("BUG %s code %d qlen %d\n",
                                             dev->name, ret, q->q.qlen);

                ret = dev_requeue_skb(skb, q);
        }

        if (ret && netif_xmit_frozen_or_stopped(txq))
                ret = 0;

        return ret;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * The running seqcount guarantees that only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to the device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive:
 * if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *                              0  - queue is empty or throttled.
 *                              >0 - queue is not empty.
 */
static inline int qdisc_restart(struct Qdisc *q, int *packets)
{
        struct netdev_queue *txq;
        struct net_device *dev;
        spinlock_t *root_lock;
        struct sk_buff *skb;
        bool validate;

        /* Dequeue packet */
        skb = dequeue_skb(q, &validate, packets);
        if (unlikely(!skb))
                return 0;

        root_lock = qdisc_lock(q);
        dev = qdisc_dev(q);
        txq = skb_get_tx_queue(dev, skb);

        return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}

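/* A sketch of the lock hand-off on the hot path (summarizing the rules
 * above, not adding new ones): qdisc_restart() is entered under
 * qdisc_lock(q); sch_direct_xmit() drops that lock before taking
 * HARD_TX_LOCK() for the actual driver transmit, then re-acquires it to
 * inspect the queue length. At no point are both locks held at once.
 */
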
void __qdisc_run(struct Qdisc *q)
{
        int quota = weight_p;
        int packets;

        while (qdisc_restart(q, &packets)) {
                /*
                 * Ordered by possible occurrence: postpone processing if
                 * 1. we've exceeded the packet quota;
                 * 2. another process needs the CPU.
                 */
                quota -= packets;
                if (quota <= 0 || need_resched()) {
                        __netif_schedule(q);
                        break;
                }
        }

        qdisc_run_end(q);
}

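/* Quota example: weight_p defaults to 64 (tunable via the
 * net.core.dev_weight sysctl). If each qdisc_restart() bulk-dequeues,
 * say, 8 packets, the loop above runs at most 8 times before the
 * remaining work is deferred via __netif_schedule(). (The per-restart
 * figure of 8 here is purely illustrative.)
 */
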
/* dev_trans_start - return the most recent trans_start (transmit start
 * time, in jiffies) across all tx queues of @dev.
 */
unsigned long dev_trans_start(struct net_device *dev)
{
        unsigned long val, res;
        unsigned int i;

        if (is_vlan_dev(dev))
                dev = vlan_dev_real_dev(dev);
        res = netdev_get_tx_queue(dev, 0)->trans_start;
        for (i = 1; i < dev->num_tx_queues; i++) {
                val = netdev_get_tx_queue(dev, i)->trans_start;
                if (val && time_after(val, res))
                        res = val;
        }

        return res;
}
EXPORT_SYMBOL(dev_trans_start);

static void dev_watchdog(unsigned long arg)
{
        struct net_device *dev = (struct net_device *)arg;

        netif_tx_lock(dev);
        if (!qdisc_tx_is_noop(dev)) {
                if (netif_device_present(dev) &&
                    netif_running(dev) &&
                    netif_carrier_ok(dev)) {
                        int some_queue_timedout = 0;
                        unsigned int i;
                        unsigned long trans_start;

                        for (i = 0; i < dev->num_tx_queues; i++) {
                                struct netdev_queue *txq;

                                txq = netdev_get_tx_queue(dev, i);
                                trans_start = txq->trans_start;
                                if (netif_xmit_stopped(txq) &&
                                    time_after(jiffies, (trans_start +
                                                         dev->watchdog_timeo))) {
                                        some_queue_timedout = 1;
                                        txq->trans_timeout++;
                                        break;
                                }
                        }

                        if (some_queue_timedout) {
                                WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
                                       dev->name, netdev_drivername(dev), i);
                                dev->netdev_ops->ndo_tx_timeout(dev);
                        }
                        if (!mod_timer(&dev->watchdog_timer,
                                       round_jiffies(jiffies +
                                                     dev->watchdog_timeo)))
                                dev_hold(dev);
                }
        }
        netif_tx_unlock(dev);

        dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
        if (dev->netdev_ops->ndo_tx_timeout) {
                if (dev->watchdog_timeo <= 0)
                        dev->watchdog_timeo = 5*HZ;
                if (!mod_timer(&dev->watchdog_timer,
                               round_jiffies(jiffies + dev->watchdog_timeo)))
                        dev_hold(dev);
        }
}

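/* Driver-side sketch (hypothetical "foo" driver, assuming the 4.x ndo
 * signature): a driver opts into the watchdog by providing
 * ndo_tx_timeout; __netdev_watchdog_up() supplies the 5*HZ default when
 * the driver leaves watchdog_timeo at zero.
 *
 *      static void foo_tx_timeout(struct net_device *dev)
 *      {
 *              // reset the hardware TX path, then wake the queues
 *              netif_tx_wake_all_queues(dev);
 *      }
 *
 *      static const struct net_device_ops foo_netdev_ops = {
 *              .ndo_tx_timeout = foo_tx_timeout,
 *              // ...
 *      };
 *
 *      dev->watchdog_timeo = 5 * HZ;
 */
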
static void dev_watchdog_up(struct net_device *dev)
{
        __netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
        netif_tx_lock_bh(dev);
        if (del_timer(&dev->watchdog_timer))
                dev_put(dev);
        netif_tx_unlock_bh(dev);
}

/**
 *      netif_carrier_on - set carrier
 *      @dev: network device
 *
 * Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
        if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
                atomic_inc(&dev->carrier_changes);
                linkwatch_fire_event(dev);
                if (netif_running(dev))
                        __netdev_watchdog_up(dev);
        }
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *      netif_carrier_off - clear carrier
 *      @dev: network device
 *
 * Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
                atomic_inc(&dev->carrier_changes);
                linkwatch_fire_event(dev);
        }
}
EXPORT_SYMBOL(netif_carrier_off);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
                        struct sk_buff **to_free)
{
        __qdisc_drop(skb, to_free);
        return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
        return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
        .id             =       "noop",
        .priv_size      =       0,
        .enqueue        =       noop_enqueue,
        .dequeue        =       noop_dequeue,
        .peek           =       noop_dequeue,
        .owner          =       THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
        .qdisc          =       &noop_qdisc,
        .qdisc_sleeping =       &noop_qdisc,
};

struct Qdisc noop_qdisc = {
        .enqueue        =       noop_enqueue,
        .dequeue        =       noop_dequeue,
        .flags          =       TCQ_F_BUILTIN,
        .ops            =       &noop_qdisc_ops,
        .list           =       LIST_HEAD_INIT(noop_qdisc.list),
        .q.lock         =       __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
        .dev_queue      =       &noop_netdev_queue,
        .running        =       SEQCNT_ZERO(noop_qdisc.running),
        .busylock       =       __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);

static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt)
{
        /* register_qdisc() assigns a default of noop_enqueue if unset,
         * but __dev_queue_xmit() treats noqueue only as such
         * if this is NULL - so clear it here.
         */
        qdisc->enqueue = NULL;
        return 0;
}

struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
        .id             =       "noqueue",
        .priv_size      =       0,
        .init           =       noqueue_init,
        .enqueue        =       noop_enqueue,
        .dequeue        =       noop_dequeue,
        .peek           =       noop_dequeue,
        .owner          =       THIS_MODULE,
};

static const u8 prio2band[TC_PRIO_MAX + 1] = {
        1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

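/* Mapping example: the table is indexed by skb->priority & TC_PRIO_MAX.
 * With the standard TC_PRIO_* values this gives, e.g.:
 *
 *      TC_PRIO_BESTEFFORT  (0) -> band 1 (normal)
 *      TC_PRIO_BULK        (2) -> band 2 (lowest)
 *      TC_PRIO_INTERACTIVE (6) -> band 0 (highest)
 */
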
/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *      - queues for the three bands
 *      - bitmap indicating which of the bands contain skbs
 */
struct pfifo_fast_priv {
        u32 bitmap;
        struct sk_buff_head q[PFIFO_FAST_BANDS];
};

/*
 * Convert a bitmap to the first band number where an skb is queued, where:
 *      bitmap=0 means there are no skbs on any band.
 *      bitmap=1 means there is an skb on band 0.
 *      bitmap=7 means there are skbs on all 3 bands, etc.
 */
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};

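/* Lookup example: if bands 1 and 2 hold skbs, bitmap = 0b110 = 6 and
 * bitmap2band[6] = 1, i.e. the highest-priority (lowest-numbered)
 * non-empty band. With only band 2 occupied, bitmap = 4 and
 * bitmap2band[4] = 2.
 */
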
static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
                                             int band)
{
        return priv->q + band;
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
                              struct sk_buff **to_free)
{
        if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
                int band = prio2band[skb->priority & TC_PRIO_MAX];
                struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
                struct sk_buff_head *list = band2list(priv, band);

                priv->bitmap |= (1 << band);
                qdisc->q.qlen++;
                return __qdisc_enqueue_tail(skb, qdisc, list);
        }

        return qdisc_drop(skb, qdisc, to_free);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        int band = bitmap2band[priv->bitmap];

        if (likely(band >= 0)) {
                struct sk_buff_head *list = band2list(priv, band);
                struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);

                qdisc->q.qlen--;
                if (skb_queue_empty(list))
                        priv->bitmap &= ~(1 << band);

                return skb;
        }

        return NULL;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        int band = bitmap2band[priv->bitmap];

        if (band >= 0) {
                struct sk_buff_head *list = band2list(priv, band);

                return skb_peek(list);
        }

        return NULL;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
        int prio;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
                __qdisc_reset_queue(band2list(priv, prio));

        priv->bitmap = 0;
        qdisc->qstats.backlog = 0;
        qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
        struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

        memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
        if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
        int prio;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
                __skb_queue_head_init(band2list(priv, prio));

        /* Can bypass the queue discipline */
        qdisc->flags |= TCQ_F_CAN_BYPASS;
        return 0;
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
        .id             =       "pfifo_fast",
        .priv_size      =       sizeof(struct pfifo_fast_priv),
        .enqueue        =       pfifo_fast_enqueue,
        .dequeue        =       pfifo_fast_dequeue,
        .peek           =       pfifo_fast_peek,
        .init           =       pfifo_fast_init,
        .reset          =       pfifo_fast_reset,
        .dump           =       pfifo_fast_dump,
        .owner          =       THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_fast_ops);

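/* Userspace usage note: pfifo_fast is the default root qdisc, so it is
 * rarely configured by hand, but it can be attached explicitly (it
 * accepts no parameters):
 *
 *      # tc qdisc add dev eth0 root pfifo_fast
 *      # tc -s qdisc show dev eth0
 *
 * "eth0" is of course just a placeholder device name.
 */
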
static struct lock_class_key qdisc_tx_busylock;
static struct lock_class_key qdisc_running_key;

struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          const struct Qdisc_ops *ops)
{
        void *p;
        struct Qdisc *sch;
        unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
        int err = -ENOBUFS;
        struct net_device *dev = dev_queue->dev;

        p = kzalloc_node(size, GFP_KERNEL,
                         netdev_queue_numa_node_read(dev_queue));

        if (!p)
                goto errout;
        sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
        /* if we got non-aligned memory, ask for more and do the
         * alignment ourselves.
         */
        if (sch != p) {
                kfree(p);
                p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
                                 netdev_queue_numa_node_read(dev_queue));
                if (!p)
                        goto errout;
                sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
                sch->padded = (char *) sch - (char *) p;
        }
        INIT_LIST_HEAD(&sch->list);
        skb_queue_head_init(&sch->q);

        spin_lock_init(&sch->busylock);
        lockdep_set_class(&sch->busylock,
                          dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

        seqcount_init(&sch->running);
        lockdep_set_class(&sch->running,
                          dev->qdisc_running_key ?: &qdisc_running_key);

        sch->ops = ops;
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev_queue = dev_queue;
        dev_hold(dev);
        atomic_set(&sch->refcnt, 1);

        return sch;
errout:
        return ERR_PTR(err);
}

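/* Alignment example (QDISC_ALIGNTO is 64 in <net/sch_generic.h>; the
 * addresses below are hypothetical): if kzalloc_node() returns a
 * pointer p ending in ...0x08, QDISC_ALIGN() rounds the Qdisc pointer
 * up to ...0x40, and sch->padded records the 0x38-byte offset so that
 * qdisc_rcu_free() can recover the original allocation with
 * (char *)qdisc - qdisc->padded.
 */
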
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                const struct Qdisc_ops *ops,
                                unsigned int parentid)
{
        struct Qdisc *sch;

        if (!try_module_get(ops->owner))
                return NULL;

        sch = qdisc_alloc(dev_queue, ops);
        if (IS_ERR(sch)) {
                module_put(ops->owner);
                return NULL;
        }
        sch->parent = parentid;

        if (!ops->init || ops->init(sch, NULL) == 0)
                return sch;

        qdisc_destroy(sch);
        return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
        const struct Qdisc_ops *ops = qdisc->ops;

        if (ops->reset)
                ops->reset(qdisc);

        kfree_skb(qdisc->skb_bad_txq);
        qdisc->skb_bad_txq = NULL;

        if (qdisc->gso_skb) {
                kfree_skb_list(qdisc->gso_skb);
                qdisc->gso_skb = NULL;
        }
        qdisc->q.qlen = 0;
}
EXPORT_SYMBOL(qdisc_reset);

static void qdisc_rcu_free(struct rcu_head *head)
{
        struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);

        if (qdisc_is_percpu_stats(qdisc)) {
                free_percpu(qdisc->cpu_bstats);
                free_percpu(qdisc->cpu_qstats);
        }

        kfree((char *) qdisc - qdisc->padded);
}

void qdisc_destroy(struct Qdisc *qdisc)
{
        const struct Qdisc_ops *ops = qdisc->ops;

        if (qdisc->flags & TCQ_F_BUILTIN ||
            !atomic_dec_and_test(&qdisc->refcnt))
                return;

#ifdef CONFIG_NET_SCHED
        qdisc_list_del(qdisc);

        qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
        gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
        if (ops->reset)
                ops->reset(qdisc);
        if (ops->destroy)
                ops->destroy(qdisc);

        module_put(ops->owner);
        dev_put(qdisc_dev(qdisc));

        kfree_skb_list(qdisc->gso_skb);
        kfree_skb(qdisc->skb_bad_txq);
        /*
         * gen_estimator's est_timer() might access qdisc->q.lock,
         * so wait an RCU grace period before freeing qdisc.
         */
        call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
EXPORT_SYMBOL(qdisc_destroy);

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                              struct Qdisc *qdisc)
{
        struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
        spinlock_t *root_lock;

        root_lock = qdisc_lock(oqdisc);
        spin_lock_bh(root_lock);

        /* Prune old scheduler */
        if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
                qdisc_reset(oqdisc);

        /* ... and graft new one */
        if (qdisc == NULL)
                qdisc = &noop_qdisc;
        dev_queue->qdisc_sleeping = qdisc;
        rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

        spin_unlock_bh(root_lock);

        return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);

static void attach_one_default_qdisc(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_unused)
{
        struct Qdisc *qdisc;
        const struct Qdisc_ops *ops = default_qdisc_ops;

        if (dev->priv_flags & IFF_NO_QUEUE)
                ops = &noqueue_qdisc_ops;

        qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT);
        if (!qdisc) {
                netdev_info(dev, "activation failed\n");
                return;
        }
        if (!netif_is_multiqueue(dev))
                qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
        struct netdev_queue *txq;
        struct Qdisc *qdisc;

        txq = netdev_get_tx_queue(dev, 0);

        if (!netif_is_multiqueue(dev) ||
            dev->priv_flags & IFF_NO_QUEUE) {
                netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
                dev->qdisc = txq->qdisc_sleeping;
                atomic_inc(&dev->qdisc->refcnt);
        } else {
                qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
                if (qdisc) {
                        dev->qdisc = qdisc;
                        qdisc->ops->attach(qdisc);
                }
        }
}

static void transition_one_qdisc(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
                                 void *_need_watchdog)
{
        struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
        int *need_watchdog_p = _need_watchdog;

        if (!(new_qdisc->flags & TCQ_F_BUILTIN))
                clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

        rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
        if (need_watchdog_p) {
                dev_queue->trans_start = 0;
                *need_watchdog_p = 1;
        }
}

void dev_activate(struct net_device *dev)
{
        int need_watchdog;

        /* If no queueing discipline is attached to the device yet,
         * create a default one for devices that need queueing,
         * and noqueue_qdisc for virtual interfaces.
         */

        if (dev->qdisc == &noop_qdisc)
                attach_default_qdiscs(dev);

        if (!netif_carrier_ok(dev))
                /* Delay activation until next carrier-on event */
                return;

        need_watchdog = 0;
        netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
        if (dev_ingress_queue(dev))
                transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

        if (need_watchdog) {
                netif_trans_update(dev);
                dev_watchdog_up(dev);
        }
}
EXPORT_SYMBOL(dev_activate);

static void dev_deactivate_queue(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
                                 void *_qdisc_default)
{
        struct Qdisc *qdisc_default = _qdisc_default;
        struct Qdisc *qdisc;

        qdisc = rtnl_dereference(dev_queue->qdisc);
        if (qdisc) {
                spin_lock_bh(qdisc_lock(qdisc));

                if (!(qdisc->flags & TCQ_F_BUILTIN))
                        set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

                rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
                qdisc_reset(qdisc);

                spin_unlock_bh(qdisc_lock(qdisc));
        }
}

static bool some_qdisc_is_busy(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *dev_queue;
                spinlock_t *root_lock;
                struct Qdisc *q;
                int val;

                dev_queue = netdev_get_tx_queue(dev, i);
                q = dev_queue->qdisc_sleeping;
                root_lock = qdisc_lock(q);

                spin_lock_bh(root_lock);

                val = (qdisc_is_running(q) ||
                       test_bit(__QDISC_STATE_SCHED, &q->state));

                spin_unlock_bh(root_lock);

                if (val)
                        return true;
        }
        return false;
}

/**
 *      dev_deactivate_many - deactivate transmissions on several devices
 *      @head: list of devices to deactivate
 *
 *      This function returns only when all outstanding transmissions
 *      have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
        struct net_device *dev;
        bool sync_needed = false;

        list_for_each_entry(dev, head, close_list) {
                netdev_for_each_tx_queue(dev, dev_deactivate_queue,
                                         &noop_qdisc);
                if (dev_ingress_queue(dev))
                        dev_deactivate_queue(dev, dev_ingress_queue(dev),
                                             &noop_qdisc);

                dev_watchdog_down(dev);
                sync_needed |= !dev->dismantle;
        }

        /* Wait for outstanding qdisc-less dev_queue_xmit calls.
         * This is avoided if all devices are in dismantle phase:
         * the caller will call synchronize_net() for us.
         */
        if (sync_needed)
                synchronize_net();

        /* Wait for outstanding qdisc_run calls. */
        list_for_each_entry(dev, head, close_list)
                while (some_qdisc_is_busy(dev))
                        yield();
}

void dev_deactivate(struct net_device *dev)
{
        LIST_HEAD(single);

        list_add(&dev->close_list, &single);
        dev_deactivate_many(&single);
        list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);

static void dev_init_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_qdisc)
{
        struct Qdisc *qdisc = _qdisc;

        rcu_assign_pointer(dev_queue->qdisc, qdisc);
        dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
        dev->qdisc = &noop_qdisc;
        netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
        if (dev_ingress_queue(dev))
                dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

        setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

static void shutdown_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_qdisc_default)
{
        struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
        struct Qdisc *qdisc_default = _qdisc_default;

        if (qdisc) {
                rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
                dev_queue->qdisc_sleeping = qdisc_default;

                qdisc_destroy(qdisc);
        }
}

void dev_shutdown(struct net_device *dev)
{
        netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
        if (dev_ingress_queue(dev))
                shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
        qdisc_destroy(dev->qdisc);
        dev->qdisc = &noop_qdisc;

        WARN_ON(timer_pending(&dev->watchdog_timer));
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
                               const struct tc_ratespec *conf,
                               u64 rate64)
{
        memset(r, 0, sizeof(*r));
        r->overhead = conf->overhead;
        r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
        r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
        r->mult = 1;
        /*
         * The idea here is to replace the divide on the fast path with a
         * reciprocal one (a reciprocal divide is a multiply and a shift).
         *
         * The normal formula would be:
         *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
         *
         * We compute mult/shift to use instead:
         *  time_in_ns = (len * mult) >> shift;
         *
         * We try to get the highest possible mult value for accuracy,
         * but have to make sure no overflows will ever happen.
         */
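        /*
         * Worked example (illustrative numbers): for a rate of 1 Mbit/s,
         * rate_bytes_ps = 125000. The loop below keeps doubling factor,
         * stopping with shift = 19 and
         * mult = (NSEC_PER_SEC << 19) / 125000 = 4194304000, since that
         * mult has bit 31 set. A 1500-byte packet then costs
         * (1500 * 4194304000) >> 19 = 12000000 ns, matching the exact
         * 1500 / 125000 = 12 ms.
         */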
        if (r->rate_bytes_ps > 0) {
                u64 factor = NSEC_PER_SEC;

                for (;;) {
                        r->mult = div64_u64(factor, r->rate_bytes_ps);
                        if (r->mult & (1U << 31) || factor & (1ULL << 63))
                                break;
                        factor <<= 1;
                        r->shift++;
                }
        }
}
EXPORT_SYMBOL(psched_ratecfg_precompute);
