linux/net/sched/sch_generic.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * net/sched/sch_generic.c      Generic packet scheduler routines.
   4 *
   5 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
   6 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
   7 *              - Ingress support
   8 */
   9
  10#include <linux/bitops.h>
  11#include <linux/module.h>
  12#include <linux/types.h>
  13#include <linux/kernel.h>
  14#include <linux/sched.h>
  15#include <linux/string.h>
  16#include <linux/errno.h>
  17#include <linux/netdevice.h>
  18#include <linux/skbuff.h>
  19#include <linux/rtnetlink.h>
  20#include <linux/init.h>
  21#include <linux/rcupdate.h>
  22#include <linux/list.h>
  23#include <linux/slab.h>
  24#include <linux/if_vlan.h>
  25#include <linux/skb_array.h>
  26#include <linux/if_macvlan.h>
  27#include <net/sch_generic.h>
  28#include <net/pkt_sched.h>
  29#include <net/dst.h>
  30#include <trace/events/qdisc.h>
  31#include <trace/events/net.h>
  32#include <net/xfrm.h>
  33
  34/* Qdisc to use by default */
  35const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
  36EXPORT_SYMBOL(default_qdisc_ops);
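/* The compiled-in default can typically be overridden at runtime through the
 * net.core.default_qdisc sysctl (wired up to qdisc_set_default() in
 * net/sched/sch_api.c), e.g.:
 *
 *        sysctl -w net.core.default_qdisc=fq_codel
 */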
  37
  38/* Main transmission queue. */
  39
  40/* Modifications to data participating in scheduling must be protected with
  41 * the qdisc_lock(qdisc) spinlock.
  42 *
  43 * The idea is the following:
  44 * - enqueue, dequeue are serialized via qdisc root lock
  45 * - ingress filtering is also serialized via qdisc root lock
  46 * - updates to tree and tree walking are only done under the rtnl mutex.
  47 */
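/* For a qdisc without TCQ_F_NOLOCK set, the transmit path therefore looks
 * roughly like this (simplified sketch of __dev_xmit_skb() in net/core/dev.c):
 *
 *        spin_lock(qdisc_lock(q));
 *        rc = q->enqueue(skb, q, &to_free);
 *        qdisc_run(q);
 *        spin_unlock(qdisc_lock(q));
 */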
  48
  49#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
  50
  51static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
  52{
  53        const struct netdev_queue *txq = q->dev_queue;
  54        spinlock_t *lock = NULL;
  55        struct sk_buff *skb;
  56
  57        if (q->flags & TCQ_F_NOLOCK) {
  58                lock = qdisc_lock(q);
  59                spin_lock(lock);
  60        }
  61
  62        skb = skb_peek(&q->skb_bad_txq);
  63        if (skb) {
  64                /* check the reason for requeuing without taking the tx lock first */
  65                txq = skb_get_tx_queue(txq->dev, skb);
  66                if (!netif_xmit_frozen_or_stopped(txq)) {
  67                        skb = __skb_dequeue(&q->skb_bad_txq);
  68                        if (qdisc_is_percpu_stats(q)) {
  69                                qdisc_qstats_cpu_backlog_dec(q, skb);
  70                                qdisc_qstats_cpu_qlen_dec(q);
  71                        } else {
  72                                qdisc_qstats_backlog_dec(q, skb);
  73                                q->q.qlen--;
  74                        }
  75                } else {
  76                        skb = SKB_XOFF_MAGIC;
  77                }
  78        }
  79
  80        if (lock)
  81                spin_unlock(lock);
  82
  83        return skb;
  84}
  85
  86static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
  87{
  88        struct sk_buff *skb = skb_peek(&q->skb_bad_txq);
  89
  90        if (unlikely(skb))
  91                skb = __skb_dequeue_bad_txq(q);
  92
  93        return skb;
  94}
  95
  96static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
  97                                             struct sk_buff *skb)
  98{
  99        spinlock_t *lock = NULL;
 100
 101        if (q->flags & TCQ_F_NOLOCK) {
 102                lock = qdisc_lock(q);
 103                spin_lock(lock);
 104        }
 105
 106        __skb_queue_tail(&q->skb_bad_txq, skb);
 107
 108        if (qdisc_is_percpu_stats(q)) {
 109                qdisc_qstats_cpu_backlog_inc(q, skb);
 110                qdisc_qstats_cpu_qlen_inc(q);
 111        } else {
 112                qdisc_qstats_backlog_inc(q, skb);
 113                q->q.qlen++;
 114        }
 115
 116        if (lock)
 117                spin_unlock(lock);
 118}
 119
 120static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 121{
 122        spinlock_t *lock = NULL;
 123
 124        if (q->flags & TCQ_F_NOLOCK) {
 125                lock = qdisc_lock(q);
 126                spin_lock(lock);
 127        }
 128
 129        while (skb) {
 130                struct sk_buff *next = skb->next;
 131
 132                __skb_queue_tail(&q->gso_skb, skb);
 133
 134                /* it's still part of the queue */
 135                if (qdisc_is_percpu_stats(q)) {
 136                        qdisc_qstats_cpu_requeues_inc(q);
 137                        qdisc_qstats_cpu_backlog_inc(q, skb);
 138                        qdisc_qstats_cpu_qlen_inc(q);
 139                } else {
 140                        q->qstats.requeues++;
 141                        qdisc_qstats_backlog_inc(q, skb);
 142                        q->q.qlen++;
 143                }
 144
 145                skb = next;
 146        }
 147        if (lock)
 148                spin_unlock(lock);
 149        __netif_schedule(q);
 150}
 151
 152static void try_bulk_dequeue_skb(struct Qdisc *q,
 153                                 struct sk_buff *skb,
 154                                 const struct netdev_queue *txq,
 155                                 int *packets)
 156{
 157        int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
 158
 159        while (bytelimit > 0) {
 160                struct sk_buff *nskb = q->dequeue(q);
 161
 162                if (!nskb)
 163                        break;
 164
 165                bytelimit -= nskb->len; /* covers GSO len */
 166                skb->next = nskb;
 167                skb = nskb;
 168                (*packets)++; /* GSO counts as one pkt */
 169        }
 170        skb_mark_not_on_list(skb);
 171}
 172
 173/* This variant of try_bulk_dequeue_skb() makes sure
 174 * all skbs in the chain are for the same txq
 175 */
 176static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
 177                                      struct sk_buff *skb,
 178                                      int *packets)
 179{
 180        int mapping = skb_get_queue_mapping(skb);
 181        struct sk_buff *nskb;
 182        int cnt = 0;
 183
 184        do {
 185                nskb = q->dequeue(q);
 186                if (!nskb)
 187                        break;
 188                if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
 189                        qdisc_enqueue_skb_bad_txq(q, nskb);
 190                        break;
 191                }
 192                skb->next = nskb;
 193                skb = nskb;
 194        } while (++cnt < 8);
 195        (*packets) += cnt;
 196        skb_mark_not_on_list(skb);
 197}
 198
 199/* Note that dequeue_skb() can return an skb list, chained via skb->next.
 200 * A requeued skb (via q->gso_skb) can also be an skb list.
 201 */
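/* Consumers walk such a list themselves, roughly the way
 * dev_hard_start_xmit() does:
 *
 *        while (skb) {
 *                struct sk_buff *next = skb->next;
 *
 *                skb_mark_not_on_list(skb);
 *                ... transmit skb ...
 *                skb = next;
 *        }
 */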
 202static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 203                                   int *packets)
 204{
 205        const struct netdev_queue *txq = q->dev_queue;
 206        struct sk_buff *skb = NULL;
 207
 208        *packets = 1;
 209        if (unlikely(!skb_queue_empty(&q->gso_skb))) {
 210                spinlock_t *lock = NULL;
 211
 212                if (q->flags & TCQ_F_NOLOCK) {
 213                        lock = qdisc_lock(q);
 214                        spin_lock(lock);
 215                }
 216
 217                skb = skb_peek(&q->gso_skb);
 218
 219                /* skb may be NULL if another cpu pulled gso_skb off
 220                 * between the empty check and taking the lock.
 221                 */
 222                if (!skb) {
 223                        if (lock)
 224                                spin_unlock(lock);
 225                        goto validate;
 226                }
 227
 228                /* skbs in gso_skb were already validated */
 229                *validate = false;
 230                if (xfrm_offload(skb))
 231                        *validate = true;
 232                /* check the reason for requeuing without taking the tx lock first */
 233                txq = skb_get_tx_queue(txq->dev, skb);
 234                if (!netif_xmit_frozen_or_stopped(txq)) {
 235                        skb = __skb_dequeue(&q->gso_skb);
 236                        if (qdisc_is_percpu_stats(q)) {
 237                                qdisc_qstats_cpu_backlog_dec(q, skb);
 238                                qdisc_qstats_cpu_qlen_dec(q);
 239                        } else {
 240                                qdisc_qstats_backlog_dec(q, skb);
 241                                q->q.qlen--;
 242                        }
 243                } else {
 244                        skb = NULL;
 245                }
 246                if (lock)
 247                        spin_unlock(lock);
 248                goto trace;
 249        }
 250validate:
 251        *validate = true;
 252
 253        if ((q->flags & TCQ_F_ONETXQUEUE) &&
 254            netif_xmit_frozen_or_stopped(txq))
 255                return skb;
 256
 257        skb = qdisc_dequeue_skb_bad_txq(q);
 258        if (unlikely(skb)) {
 259                if (skb == SKB_XOFF_MAGIC)
 260                        return NULL;
 261                goto bulk;
 262        }
 263        skb = q->dequeue(q);
 264        if (skb) {
 265bulk:
 266                if (qdisc_may_bulk(q))
 267                        try_bulk_dequeue_skb(q, skb, txq, packets);
 268                else
 269                        try_bulk_dequeue_skb_slow(q, skb, packets);
 270        }
 271trace:
 272        trace_qdisc_dequeue(q, txq, *packets, skb);
 273        return skb;
 274}
 275
 276/*
 277 * Transmit possibly several skbs, and handle the return status as
 278 * required. Owning the qdisc running seqcount guarantees that
 279 * only one CPU can execute this function.
 280 *
 281 * Returns to the caller:
 282 *                              false  - hardware queue frozen, back off
 283 *                              true   - feel free to send more pkts
 284 */
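/* Callers keep pulling packets while this returns true; for example the
 * TCQ_F_CAN_BYPASS fast path in __dev_xmit_skb() does, roughly:
 *
 *        if (sch_direct_xmit(skb, q, dev, txq, root_lock, true))
 *                __qdisc_run(q);
 */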
 285bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 286                     struct net_device *dev, struct netdev_queue *txq,
 287                     spinlock_t *root_lock, bool validate)
 288{
 289        int ret = NETDEV_TX_BUSY;
 290        bool again = false;
 291
 292        /* And release the qdisc root lock */
 293        if (root_lock)
 294                spin_unlock(root_lock);
 295
 296        /* Note that we validate skb (GSO, checksum, ...) outside of locks */
 297        if (validate)
 298                skb = validate_xmit_skb_list(skb, dev, &again);
 299
 300#ifdef CONFIG_XFRM_OFFLOAD
 301        if (unlikely(again)) {
 302                if (root_lock)
 303                        spin_lock(root_lock);
 304
 305                dev_requeue_skb(skb, q);
 306                return false;
 307        }
 308#endif
 309
 310        if (likely(skb)) {
 311                HARD_TX_LOCK(dev, txq, smp_processor_id());
 312                if (!netif_xmit_frozen_or_stopped(txq))
 313                        skb = dev_hard_start_xmit(skb, dev, txq, &ret);
 314
 315                HARD_TX_UNLOCK(dev, txq);
 316        } else {
 317                if (root_lock)
 318                        spin_lock(root_lock);
 319                return true;
 320        }
 321
 322        if (root_lock)
 323                spin_lock(root_lock);
 324
 325        if (!dev_xmit_complete(ret)) {
 326                /* Driver returned NETDEV_TX_BUSY - requeue skb */
 327                if (unlikely(ret != NETDEV_TX_BUSY))
 328                        net_warn_ratelimited("BUG %s code %d qlen %d\n",
 329                                             dev->name, ret, q->q.qlen);
 330
 331                dev_requeue_skb(skb, q);
 332                return false;
 333        }
 334
 335        return true;
 336}
 337
 338/*
 339 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 340 *
 341 * running seqcount guarantees only one CPU can process
 342 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 343 * this queue.
 344 *
 345 *  netif_tx_lock serializes accesses to device driver.
 346 *
 347 *  qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 348 *  if one is grabbed, the other must be free.
 349 *
 350 * Note that this procedure can be called by a watchdog timer
 351 *
 352 * Returns to the caller:
 353 *                              false - queue is empty or throttled.
 354 *                              true  - queue is not empty.
 355 *
 356 */
 357static inline bool qdisc_restart(struct Qdisc *q, int *packets)
 358{
 359        spinlock_t *root_lock = NULL;
 360        struct netdev_queue *txq;
 361        struct net_device *dev;
 362        struct sk_buff *skb;
 363        bool validate;
 364
 365        /* Dequeue packet */
 366        skb = dequeue_skb(q, &validate, packets);
 367        if (unlikely(!skb))
 368                return false;
 369
 370        if (!(q->flags & TCQ_F_NOLOCK))
 371                root_lock = qdisc_lock(q);
 372
 373        dev = qdisc_dev(q);
 374        txq = skb_get_tx_queue(dev, skb);
 375
 376        return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
 377}
 378
 379void __qdisc_run(struct Qdisc *q)
 380{
 381        int quota = dev_tx_weight;
 382        int packets;
 383
 384        while (qdisc_restart(q, &packets)) {
 385                quota -= packets;
 386                if (quota <= 0) {
 387                        __netif_schedule(q);
 388                        break;
 389                }
 390        }
 391}
 392
 393unsigned long dev_trans_start(struct net_device *dev)
 394{
 395        unsigned long val, res;
 396        unsigned int i;
 397
 398        if (is_vlan_dev(dev))
 399                dev = vlan_dev_real_dev(dev);
 400        else if (netif_is_macvlan(dev))
 401                dev = macvlan_dev_real_dev(dev);
 402        res = netdev_get_tx_queue(dev, 0)->trans_start;
 403        for (i = 1; i < dev->num_tx_queues; i++) {
 404                val = netdev_get_tx_queue(dev, i)->trans_start;
 405                if (val && time_after(val, res))
 406                        res = val;
 407        }
 408
 409        return res;
 410}
 411EXPORT_SYMBOL(dev_trans_start);
 412
 413static void dev_watchdog(struct timer_list *t)
 414{
 415        struct net_device *dev = from_timer(dev, t, watchdog_timer);
 416
 417        netif_tx_lock(dev);
 418        if (!qdisc_tx_is_noop(dev)) {
 419                if (netif_device_present(dev) &&
 420                    netif_running(dev) &&
 421                    netif_carrier_ok(dev)) {
 422                        int some_queue_timedout = 0;
 423                        unsigned int i;
 424                        unsigned long trans_start;
 425
 426                        for (i = 0; i < dev->num_tx_queues; i++) {
 427                                struct netdev_queue *txq;
 428
 429                                txq = netdev_get_tx_queue(dev, i);
 430                                trans_start = txq->trans_start;
 431                                if (netif_xmit_stopped(txq) &&
 432                                    time_after(jiffies, (trans_start +
 433                                                         dev->watchdog_timeo))) {
 434                                        some_queue_timedout = 1;
 435                                        txq->trans_timeout++;
 436                                        break;
 437                                }
 438                        }
 439
 440                        if (some_queue_timedout) {
 441                                trace_net_dev_xmit_timeout(dev, i);
 442                                WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
 443                                       dev->name, netdev_drivername(dev), i);
 444                                dev->netdev_ops->ndo_tx_timeout(dev, i);
 445                        }
 446                        if (!mod_timer(&dev->watchdog_timer,
 447                                       round_jiffies(jiffies +
 448                                                     dev->watchdog_timeo)))
 449                                dev_hold(dev);
 450                }
 451        }
 452        netif_tx_unlock(dev);
 453
 454        dev_put(dev);
 455}
 456
 457void __netdev_watchdog_up(struct net_device *dev)
 458{
 459        if (dev->netdev_ops->ndo_tx_timeout) {
 460                if (dev->watchdog_timeo <= 0)
 461                        dev->watchdog_timeo = 5*HZ;
 462                if (!mod_timer(&dev->watchdog_timer,
 463                               round_jiffies(jiffies + dev->watchdog_timeo)))
 464                        dev_hold(dev);
 465        }
 466}
 467EXPORT_SYMBOL_GPL(__netdev_watchdog_up);
 468
 469static void dev_watchdog_up(struct net_device *dev)
 470{
 471        __netdev_watchdog_up(dev);
 472}
 473
 474static void dev_watchdog_down(struct net_device *dev)
 475{
 476        netif_tx_lock_bh(dev);
 477        if (del_timer(&dev->watchdog_timer))
 478                dev_put(dev);
 479        netif_tx_unlock_bh(dev);
 480}
 481
 482/**
 483 *      netif_carrier_on - set carrier
 484 *      @dev: network device
 485 *
 486 * Device has detected acquisition of carrier.
 487 */
 488void netif_carrier_on(struct net_device *dev)
 489{
 490        if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
 491                if (dev->reg_state == NETREG_UNINITIALIZED)
 492                        return;
 493                atomic_inc(&dev->carrier_up_count);
 494                linkwatch_fire_event(dev);
 495                if (netif_running(dev))
 496                        __netdev_watchdog_up(dev);
 497        }
 498}
 499EXPORT_SYMBOL(netif_carrier_on);
 500
 501/**
 502 *      netif_carrier_off - clear carrier
 503 *      @dev: network device
 504 *
 505 * Device has detected loss of carrier.
 506 */
 507void netif_carrier_off(struct net_device *dev)
 508{
 509        if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
 510                if (dev->reg_state == NETREG_UNINITIALIZED)
 511                        return;
 512                atomic_inc(&dev->carrier_down_count);
 513                linkwatch_fire_event(dev);
 514        }
 515}
 516EXPORT_SYMBOL(netif_carrier_off);
 517
 518/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
 519   under all circumstances. It is difficult to invent anything faster or
 520   cheaper.
 521 */
 522
 523static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 524                        struct sk_buff **to_free)
 525{
 526        __qdisc_drop(skb, to_free);
 527        return NET_XMIT_CN;
 528}
 529
 530static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
 531{
 532        return NULL;
 533}
 534
 535struct Qdisc_ops noop_qdisc_ops __read_mostly = {
 536        .id             =       "noop",
 537        .priv_size      =       0,
 538        .enqueue        =       noop_enqueue,
 539        .dequeue        =       noop_dequeue,
 540        .peek           =       noop_dequeue,
 541        .owner          =       THIS_MODULE,
 542};
 543
 544static struct netdev_queue noop_netdev_queue = {
 545        RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
 546        .qdisc_sleeping =       &noop_qdisc,
 547};
 548
 549struct Qdisc noop_qdisc = {
 550        .enqueue        =       noop_enqueue,
 551        .dequeue        =       noop_dequeue,
 552        .flags          =       TCQ_F_BUILTIN,
 553        .ops            =       &noop_qdisc_ops,
 554        .q.lock         =       __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 555        .dev_queue      =       &noop_netdev_queue,
 556        .running        =       SEQCNT_ZERO(noop_qdisc.running),
 557        .busylock       =       __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
 558        .gso_skb = {
 559                .next = (struct sk_buff *)&noop_qdisc.gso_skb,
 560                .prev = (struct sk_buff *)&noop_qdisc.gso_skb,
 561                .qlen = 0,
 562                .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
 563        },
 564        .skb_bad_txq = {
 565                .next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
 566                .prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
 567                .qlen = 0,
 568                .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
 569        },
 570};
 571EXPORT_SYMBOL(noop_qdisc);
 572
 573static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
 574                        struct netlink_ext_ack *extack)
 575{
 576        /* register_qdisc() assigns a default of noop_enqueue if unset,
 577         * but __dev_queue_xmit() treats noqueue only as such
 578         * if this is NULL - so clear it here. */
 579        qdisc->enqueue = NULL;
 580        return 0;
 581}
 582
 583struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
 584        .id             =       "noqueue",
 585        .priv_size      =       0,
 586        .init           =       noqueue_init,
 587        .enqueue        =       noop_enqueue,
 588        .dequeue        =       noop_dequeue,
 589        .peek           =       noop_dequeue,
 590        .owner          =       THIS_MODULE,
 591};
 592
 593static const u8 prio2band[TC_PRIO_MAX + 1] = {
 594        1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
 595};
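/* For example, with the table above:
 *        TC_PRIO_BESTEFFORT  (0) -> band 1
 *        TC_PRIO_BULK        (2) -> band 2
 *        TC_PRIO_INTERACTIVE (6) -> band 0
 * so interactive traffic is dequeued ahead of best-effort, which in turn is
 * dequeued ahead of bulk traffic.
 */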
 596
 597/* 3-band FIFO queue: old style, but should be a bit faster than
 598   generic prio+fifo combination.
 599 */
 600
 601#define PFIFO_FAST_BANDS 3
 602
 603/*
 604 * Private data for a pfifo_fast scheduler containing:
 605 *      - rings for priority bands
 606 */
 607struct pfifo_fast_priv {
 608        struct skb_array q[PFIFO_FAST_BANDS];
 609};
 610
 611static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
 612                                          int band)
 613{
 614        return &priv->q[band];
 615}
 616
 617static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 618                              struct sk_buff **to_free)
 619{
 620        int band = prio2band[skb->priority & TC_PRIO_MAX];
 621        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 622        struct skb_array *q = band2list(priv, band);
 623        unsigned int pkt_len = qdisc_pkt_len(skb);
 624        int err;
 625
 626        err = skb_array_produce(q, skb);
 627
 628        if (unlikely(err)) {
 629                if (qdisc_is_percpu_stats(qdisc))
 630                        return qdisc_drop_cpu(skb, qdisc, to_free);
 631                else
 632                        return qdisc_drop(skb, qdisc, to_free);
 633        }
 634
 635        qdisc_update_stats_at_enqueue(qdisc, pkt_len);
 636        return NET_XMIT_SUCCESS;
 637}
 638
 639static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 640{
 641        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 642        struct sk_buff *skb = NULL;
 643        int band;
 644
 645        for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
 646                struct skb_array *q = band2list(priv, band);
 647
 648                if (__skb_array_empty(q))
 649                        continue;
 650
 651                skb = __skb_array_consume(q);
 652        }
 653        if (likely(skb)) {
 654                qdisc_update_stats_at_dequeue(qdisc, skb);
 655        } else {
 656                WRITE_ONCE(qdisc->empty, true);
 657        }
 658
 659        return skb;
 660}
 661
 662static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
 663{
 664        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 665        struct sk_buff *skb = NULL;
 666        int band;
 667
 668        for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
 669                struct skb_array *q = band2list(priv, band);
 670
 671                skb = __skb_array_peek(q);
 672        }
 673
 674        return skb;
 675}
 676
 677static void pfifo_fast_reset(struct Qdisc *qdisc)
 678{
 679        int i, band;
 680        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 681
 682        for (band = 0; band < PFIFO_FAST_BANDS; band++) {
 683                struct skb_array *q = band2list(priv, band);
 684                struct sk_buff *skb;
 685
 686                /* The ring can be NULL if we are on the destroy path
 687                 * because skb_array_init() failed in pfifo_fast_init().
 688                 */
 689                if (!q->ring.queue)
 690                        continue;
 691
 692                while ((skb = __skb_array_consume(q)) != NULL)
 693                        kfree_skb(skb);
 694        }
 695
 696        if (qdisc_is_percpu_stats(qdisc)) {
 697                for_each_possible_cpu(i) {
 698                        struct gnet_stats_queue *q;
 699
 700                        q = per_cpu_ptr(qdisc->cpu_qstats, i);
 701                        q->backlog = 0;
 702                        q->qlen = 0;
 703                }
 704        }
 705}
 706
 707static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
 708{
 709        struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
 710
 711        memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
 712        if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
 713                goto nla_put_failure;
 714        return skb->len;
 715
 716nla_put_failure:
 717        return -1;
 718}
 719
 720static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
 721                           struct netlink_ext_ack *extack)
 722{
 723        unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
 724        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 725        int prio;
 726
 727        /* guard against zero length rings */
 728        if (!qlen)
 729                return -EINVAL;
 730
 731        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
 732                struct skb_array *q = band2list(priv, prio);
 733                int err;
 734
 735                err = skb_array_init(q, qlen, GFP_KERNEL);
 736                if (err)
 737                        return -ENOMEM;
 738        }
 739
 740        /* Can by-pass the queue discipline */
 741        qdisc->flags |= TCQ_F_CAN_BYPASS;
 742        return 0;
 743}
 744
 745static void pfifo_fast_destroy(struct Qdisc *sch)
 746{
 747        struct pfifo_fast_priv *priv = qdisc_priv(sch);
 748        int prio;
 749
 750        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
 751                struct skb_array *q = band2list(priv, prio);
 752
 753                /* The ring can be NULL if we are on the destroy path
 754                 * because skb_array_init() failed in pfifo_fast_init().
 755                 */
 756                if (!q->ring.queue)
 757                        continue;
 758                /* Destroy ring but no need to kfree_skb because a call to
 759                 * pfifo_fast_reset() has already done that work.
 760                 */
 761                ptr_ring_cleanup(&q->ring, NULL);
 762        }
 763}
 764
 765static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
 766                                          unsigned int new_len)
 767{
 768        struct pfifo_fast_priv *priv = qdisc_priv(sch);
 769        struct skb_array *bands[PFIFO_FAST_BANDS];
 770        int prio;
 771
 772        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
 773                struct skb_array *q = band2list(priv, prio);
 774
 775                bands[prio] = q;
 776        }
 777
 778        return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
 779                                         GFP_KERNEL);
 780}
 781
 782struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 783        .id             =       "pfifo_fast",
 784        .priv_size      =       sizeof(struct pfifo_fast_priv),
 785        .enqueue        =       pfifo_fast_enqueue,
 786        .dequeue        =       pfifo_fast_dequeue,
 787        .peek           =       pfifo_fast_peek,
 788        .init           =       pfifo_fast_init,
 789        .destroy        =       pfifo_fast_destroy,
 790        .reset          =       pfifo_fast_reset,
 791        .dump           =       pfifo_fast_dump,
 792        .change_tx_queue_len =  pfifo_fast_change_tx_queue_len,
 793        .owner          =       THIS_MODULE,
 794        .static_flags   =       TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
 795};
 796EXPORT_SYMBOL(pfifo_fast_ops);
 797
 798static struct lock_class_key qdisc_tx_busylock;
 799static struct lock_class_key qdisc_running_key;
 800
 801struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 802                          const struct Qdisc_ops *ops,
 803                          struct netlink_ext_ack *extack)
 804{
 805        void *p;
 806        struct Qdisc *sch;
 807        unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
 808        int err = -ENOBUFS;
 809        struct net_device *dev;
 810
 811        if (!dev_queue) {
 812                NL_SET_ERR_MSG(extack, "No device queue given");
 813                err = -EINVAL;
 814                goto errout;
 815        }
 816
 817        dev = dev_queue->dev;
 818        p = kzalloc_node(size, GFP_KERNEL,
 819                         netdev_queue_numa_node_read(dev_queue));
 820
 821        if (!p)
 822                goto errout;
 823        sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
 824        /* if we got non-aligned memory, ask for more and do the alignment ourselves */
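        /* For example, assuming QDISC_ALIGNTO is 64: if the second
         * kzalloc_node() below returns p == 0x...1018, sch is rounded up to
         * 0x...1040 and sch->padded = 0x28, which is what lets qdisc_free()
         * recover the original allocation as (char *)qdisc - qdisc->padded.
         */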
 825        if (sch != p) {
 826                kfree(p);
 827                p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
 828                                 netdev_queue_numa_node_read(dev_queue));
 829                if (!p)
 830                        goto errout;
 831                sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
 832                sch->padded = (char *) sch - (char *) p;
 833        }
 834        __skb_queue_head_init(&sch->gso_skb);
 835        __skb_queue_head_init(&sch->skb_bad_txq);
 836        qdisc_skb_head_init(&sch->q);
 837        spin_lock_init(&sch->q.lock);
 838
 839        if (ops->static_flags & TCQ_F_CPUSTATS) {
 840                sch->cpu_bstats =
 841                        netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
 842                if (!sch->cpu_bstats)
 843                        goto errout1;
 844
 845                sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
 846                if (!sch->cpu_qstats) {
 847                        free_percpu(sch->cpu_bstats);
 848                        goto errout1;
 849                }
 850        }
 851
 852        spin_lock_init(&sch->busylock);
 853        lockdep_set_class(&sch->busylock,
 854                          dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
 855
 856        /* seqlock has the same scope as busylock, for NOLOCK qdiscs */
 857        spin_lock_init(&sch->seqlock);
 858        lockdep_set_class(&sch->seqlock,
 859                          dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
 860
 861        seqcount_init(&sch->running);
 862        lockdep_set_class(&sch->running,
 863                          dev->qdisc_running_key ?: &qdisc_running_key);
 864
 865        sch->ops = ops;
 866        sch->flags = ops->static_flags;
 867        sch->enqueue = ops->enqueue;
 868        sch->dequeue = ops->dequeue;
 869        sch->dev_queue = dev_queue;
 870        sch->empty = true;
 871        dev_hold(dev);
 872        refcount_set(&sch->refcnt, 1);
 873
 874        return sch;
 875errout1:
 876        kfree(p);
 877errout:
 878        return ERR_PTR(err);
 879}
 880
 881struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
 882                                const struct Qdisc_ops *ops,
 883                                unsigned int parentid,
 884                                struct netlink_ext_ack *extack)
 885{
 886        struct Qdisc *sch;
 887
 888        if (!try_module_get(ops->owner)) {
 889                NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
 890                return NULL;
 891        }
 892
 893        sch = qdisc_alloc(dev_queue, ops, extack);
 894        if (IS_ERR(sch)) {
 895                module_put(ops->owner);
 896                return NULL;
 897        }
 898        sch->parent = parentid;
 899
 900        if (!ops->init || ops->init(sch, NULL, extack) == 0) {
 901                trace_qdisc_create(ops, dev_queue->dev, parentid);
 902                return sch;
 903        }
 904
 905        qdisc_put(sch);
 906        return NULL;
 907}
 908EXPORT_SYMBOL(qdisc_create_dflt);
 909
 910/* Under qdisc_lock(qdisc) and BH! */
 911
 912void qdisc_reset(struct Qdisc *qdisc)
 913{
 914        const struct Qdisc_ops *ops = qdisc->ops;
 915        struct sk_buff *skb, *tmp;
 916
 917        trace_qdisc_reset(qdisc);
 918
 919        if (ops->reset)
 920                ops->reset(qdisc);
 921
 922        skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
 923                __skb_unlink(skb, &qdisc->gso_skb);
 924                kfree_skb_list(skb);
 925        }
 926
 927        skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
 928                __skb_unlink(skb, &qdisc->skb_bad_txq);
 929                kfree_skb_list(skb);
 930        }
 931
 932        qdisc->q.qlen = 0;
 933        qdisc->qstats.backlog = 0;
 934}
 935EXPORT_SYMBOL(qdisc_reset);
 936
 937void qdisc_free(struct Qdisc *qdisc)
 938{
 939        if (qdisc_is_percpu_stats(qdisc)) {
 940                free_percpu(qdisc->cpu_bstats);
 941                free_percpu(qdisc->cpu_qstats);
 942        }
 943
 944        kfree((char *) qdisc - qdisc->padded);
 945}
 946
 947static void qdisc_free_cb(struct rcu_head *head)
 948{
 949        struct Qdisc *q = container_of(head, struct Qdisc, rcu);
 950
 951        qdisc_free(q);
 952}
 953
 954static void qdisc_destroy(struct Qdisc *qdisc)
 955{
 956        const struct Qdisc_ops  *ops = qdisc->ops;
 957
 958#ifdef CONFIG_NET_SCHED
 959        qdisc_hash_del(qdisc);
 960
 961        qdisc_put_stab(rtnl_dereference(qdisc->stab));
 962#endif
 963        gen_kill_estimator(&qdisc->rate_est);
 964
 965        qdisc_reset(qdisc);
 966
 967        if (ops->destroy)
 968                ops->destroy(qdisc);
 969
 970        module_put(ops->owner);
 971        dev_put(qdisc_dev(qdisc));
 972
 973        trace_qdisc_destroy(qdisc);
 974
 975        call_rcu(&qdisc->rcu, qdisc_free_cb);
 976}
 977
 978void qdisc_put(struct Qdisc *qdisc)
 979{
 980        if (!qdisc)
 981                return;
 982
 983        if (qdisc->flags & TCQ_F_BUILTIN ||
 984            !refcount_dec_and_test(&qdisc->refcnt))
 985                return;
 986
 987        qdisc_destroy(qdisc);
 988}
 989EXPORT_SYMBOL(qdisc_put);
 990
 991/* Version of qdisc_put() that is called with the rtnl mutex unlocked.
 992 * Intended as an optimization: this function only takes the rtnl lock if the
 993 * qdisc reference counter reaches zero.
 994 */
 995
 996void qdisc_put_unlocked(struct Qdisc *qdisc)
 997{
 998        if (qdisc->flags & TCQ_F_BUILTIN ||
 999            !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
1000                return;
1001
1002        qdisc_destroy(qdisc);
1003        rtnl_unlock();
1004}
1005EXPORT_SYMBOL(qdisc_put_unlocked);
1006
1007/* Attach toplevel qdisc to device queue. */
1008struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
1009                              struct Qdisc *qdisc)
1010{
1011        struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
1012        spinlock_t *root_lock;
1013
1014        root_lock = qdisc_lock(oqdisc);
1015        spin_lock_bh(root_lock);
1016
1017        /* ... and graft new one */
1018        if (qdisc == NULL)
1019                qdisc = &noop_qdisc;
1020        dev_queue->qdisc_sleeping = qdisc;
1021        rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
1022
1023        spin_unlock_bh(root_lock);
1024
1025        return oqdisc;
1026}
1027EXPORT_SYMBOL(dev_graft_qdisc);
1028
1029static void attach_one_default_qdisc(struct net_device *dev,
1030                                     struct netdev_queue *dev_queue,
1031                                     void *_unused)
1032{
1033        struct Qdisc *qdisc;
1034        const struct Qdisc_ops *ops = default_qdisc_ops;
1035
1036        if (dev->priv_flags & IFF_NO_QUEUE)
1037                ops = &noqueue_qdisc_ops;
1038        else if (dev->type == ARPHRD_CAN)
1039                ops = &pfifo_fast_ops;
1040
1041        qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
1042        if (!qdisc)
1043                return;
1044
1045        if (!netif_is_multiqueue(dev))
1046                qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1047        dev_queue->qdisc_sleeping = qdisc;
1048}
1049
1050static void attach_default_qdiscs(struct net_device *dev)
1051{
1052        struct netdev_queue *txq;
1053        struct Qdisc *qdisc;
1054
1055        txq = netdev_get_tx_queue(dev, 0);
1056
1057        if (!netif_is_multiqueue(dev) ||
1058            dev->priv_flags & IFF_NO_QUEUE) {
1059                netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
1060                dev->qdisc = txq->qdisc_sleeping;
1061                qdisc_refcount_inc(dev->qdisc);
1062        } else {
1063                qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
1064                if (qdisc) {
1065                        dev->qdisc = qdisc;
1066                        qdisc->ops->attach(qdisc);
1067                }
1068        }
1069
1070        /* Detect default qdisc setup/init failure and fall back to "noqueue" */
1071        if (dev->qdisc == &noop_qdisc) {
1072                netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
1073                            default_qdisc_ops->id, noqueue_qdisc_ops.id);
1074                dev->priv_flags |= IFF_NO_QUEUE;
1075                netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
1076                dev->qdisc = txq->qdisc_sleeping;
1077                qdisc_refcount_inc(dev->qdisc);
1078                dev->priv_flags ^= IFF_NO_QUEUE;
1079        }
1080
1081#ifdef CONFIG_NET_SCHED
1082        if (dev->qdisc != &noop_qdisc)
1083                qdisc_hash_add(dev->qdisc, false);
1084#endif
1085}
1086
1087static void transition_one_qdisc(struct net_device *dev,
1088                                 struct netdev_queue *dev_queue,
1089                                 void *_need_watchdog)
1090{
1091        struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
1092        int *need_watchdog_p = _need_watchdog;
1093
1094        if (!(new_qdisc->flags & TCQ_F_BUILTIN))
1095                clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
1096
1097        rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
1098        if (need_watchdog_p) {
1099                dev_queue->trans_start = 0;
1100                *need_watchdog_p = 1;
1101        }
1102}
1103
1104void dev_activate(struct net_device *dev)
1105{
1106        int need_watchdog;
1107
1108        /* No queueing discipline is attached to the device;
1109         * create a default one for devices that need queueing,
1110         * and noqueue_qdisc for virtual interfaces.
1111         */
1112
1113        if (dev->qdisc == &noop_qdisc)
1114                attach_default_qdiscs(dev);
1115
1116        if (!netif_carrier_ok(dev))
1117                /* Delay activation until next carrier-on event */
1118                return;
1119
1120        need_watchdog = 0;
1121        netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
1122        if (dev_ingress_queue(dev))
1123                transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);
1124
1125        if (need_watchdog) {
1126                netif_trans_update(dev);
1127                dev_watchdog_up(dev);
1128        }
1129}
1130EXPORT_SYMBOL(dev_activate);
1131
1132static void qdisc_deactivate(struct Qdisc *qdisc)
1133{
1134        if (qdisc->flags & TCQ_F_BUILTIN)
1135                return;
1136
1137        set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
1138}
1139
1140static void dev_deactivate_queue(struct net_device *dev,
1141                                 struct netdev_queue *dev_queue,
1142                                 void *_qdisc_default)
1143{
1144        struct Qdisc *qdisc_default = _qdisc_default;
1145        struct Qdisc *qdisc;
1146
1147        qdisc = rtnl_dereference(dev_queue->qdisc);
1148        if (qdisc) {
1149                qdisc_deactivate(qdisc);
1150                rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
1151        }
1152}
1153
1154static void dev_reset_queue(struct net_device *dev,
1155                            struct netdev_queue *dev_queue,
1156                            void *_unused)
1157{
1158        struct Qdisc *qdisc;
1159        bool nolock;
1160
1161        qdisc = dev_queue->qdisc_sleeping;
1162        if (!qdisc)
1163                return;
1164
1165        nolock = qdisc->flags & TCQ_F_NOLOCK;
1166
1167        if (nolock)
1168                spin_lock_bh(&qdisc->seqlock);
1169        spin_lock_bh(qdisc_lock(qdisc));
1170
1171        qdisc_reset(qdisc);
1172
1173        spin_unlock_bh(qdisc_lock(qdisc));
1174        if (nolock)
1175                spin_unlock_bh(&qdisc->seqlock);
1176}
1177
1178static bool some_qdisc_is_busy(struct net_device *dev)
1179{
1180        unsigned int i;
1181
1182        for (i = 0; i < dev->num_tx_queues; i++) {
1183                struct netdev_queue *dev_queue;
1184                spinlock_t *root_lock;
1185                struct Qdisc *q;
1186                int val;
1187
1188                dev_queue = netdev_get_tx_queue(dev, i);
1189                q = dev_queue->qdisc_sleeping;
1190
1191                root_lock = qdisc_lock(q);
1192                spin_lock_bh(root_lock);
1193
1194                val = (qdisc_is_running(q) ||
1195                       test_bit(__QDISC_STATE_SCHED, &q->state));
1196
1197                spin_unlock_bh(root_lock);
1198
1199                if (val)
1200                        return true;
1201        }
1202        return false;
1203}
1204
1205/**
1206 *      dev_deactivate_many - deactivate transmissions on several devices
1207 *      @head: list of devices to deactivate
1208 *
1209 *      This function returns only when all outstanding transmissions
1210 *      have completed, unless all devices are in dismantle phase.
1211 */
1212void dev_deactivate_many(struct list_head *head)
1213{
1214        struct net_device *dev;
1215
1216        list_for_each_entry(dev, head, close_list) {
1217                netdev_for_each_tx_queue(dev, dev_deactivate_queue,
1218                                         &noop_qdisc);
1219                if (dev_ingress_queue(dev))
1220                        dev_deactivate_queue(dev, dev_ingress_queue(dev),
1221                                             &noop_qdisc);
1222
1223                dev_watchdog_down(dev);
1224        }
1225
1226        /* Wait for outstanding qdisc-less dev_queue_xmit calls or
1227         * outstanding qdisc enqueuing calls.
1228         * This is avoided if all devices are in the dismantle phase:
1229         * the caller will call synchronize_net() for us.
1230         */
1231        synchronize_net();
1232
1233        list_for_each_entry(dev, head, close_list) {
1234                netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
1235
1236                if (dev_ingress_queue(dev))
1237                        dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
1238        }
1239
1240        /* Wait for outstanding qdisc_run calls. */
1241        list_for_each_entry(dev, head, close_list) {
1242                while (some_qdisc_is_busy(dev)) {
1243                        /* wait_event() would avoid this sleep-loop but would
1244                         * require expensive checks in the fast paths of packet
1245                         * processing which isn't worth it.
1246                         */
1247                        schedule_timeout_uninterruptible(1);
1248                }
1249        }
1250}
1251
1252void dev_deactivate(struct net_device *dev)
1253{
1254        LIST_HEAD(single);
1255
1256        list_add(&dev->close_list, &single);
1257        dev_deactivate_many(&single);
1258        list_del(&single);
1259}
1260EXPORT_SYMBOL(dev_deactivate);
1261
1262static int qdisc_change_tx_queue_len(struct net_device *dev,
1263                                     struct netdev_queue *dev_queue)
1264{
1265        struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
1266        const struct Qdisc_ops *ops = qdisc->ops;
1267
1268        if (ops->change_tx_queue_len)
1269                return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
1270        return 0;
1271}
1272
1273int dev_qdisc_change_tx_queue_len(struct net_device *dev)
1274{
1275        bool up = dev->flags & IFF_UP;
1276        unsigned int i;
1277        int ret = 0;
1278
1279        if (up)
1280                dev_deactivate(dev);
1281
1282        for (i = 0; i < dev->num_tx_queues; i++) {
1283                ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);
1284
1285                /* TODO: revert changes on a partial failure */
1286                if (ret)
1287                        break;
1288        }
1289
1290        if (up)
1291                dev_activate(dev);
1292        return ret;
1293}
1294
1295static void dev_init_scheduler_queue(struct net_device *dev,
1296                                     struct netdev_queue *dev_queue,
1297                                     void *_qdisc)
1298{
1299        struct Qdisc *qdisc = _qdisc;
1300
1301        rcu_assign_pointer(dev_queue->qdisc, qdisc);
1302        dev_queue->qdisc_sleeping = qdisc;
1303}
1304
1305void dev_init_scheduler(struct net_device *dev)
1306{
1307        dev->qdisc = &noop_qdisc;
1308        netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
1309        if (dev_ingress_queue(dev))
1310                dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
1311
1312        timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
1313}
1314
1315static void shutdown_scheduler_queue(struct net_device *dev,
1316                                     struct netdev_queue *dev_queue,
1317                                     void *_qdisc_default)
1318{
1319        struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
1320        struct Qdisc *qdisc_default = _qdisc_default;
1321
1322        if (qdisc) {
1323                rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
1324                dev_queue->qdisc_sleeping = qdisc_default;
1325
1326                qdisc_put(qdisc);
1327        }
1328}
1329
1330void dev_shutdown(struct net_device *dev)
1331{
1332        netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
1333        if (dev_ingress_queue(dev))
1334                shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
1335        qdisc_put(dev->qdisc);
1336        dev->qdisc = &noop_qdisc;
1337
1338        WARN_ON(timer_pending(&dev->watchdog_timer));
1339}
1340
1341void psched_ratecfg_precompute(struct psched_ratecfg *r,
1342                               const struct tc_ratespec *conf,
1343                               u64 rate64)
1344{
1345        memset(r, 0, sizeof(*r));
1346        r->overhead = conf->overhead;
1347        r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
1348        r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
1349        r->mult = 1;
1350        /*
1351         * The idea here is to replace a divide with a reciprocal divide
1352         * in the fast path (a reciprocal divide is a multiply and a shift).
1353         *
1354         * The normal formula would be:
1355         *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
1356         *
1357         * We compute mult/shift to use instead:
1358         *  time_in_ns = (len * mult) >> shift;
1359         *
1360         * We try to get the highest possible mult value for accuracy,
1361         * but have to make sure no overflows will ever happen.
1362         */
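        /* Worked example: for rate64 = 125000 bytes/s (1 Mbit/s) the loop
         * below ends with mult = 4194304000 and shift = 19, so a 1500 byte
         * packet costs (1500 * 4194304000) >> 19 = 12000000 ns, i.e. the
         * expected 12 ms for 12000 bits at 1 Mbit/s.
         */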
1363        if (r->rate_bytes_ps > 0) {
1364                u64 factor = NSEC_PER_SEC;
1365
1366                for (;;) {
1367                        r->mult = div64_u64(factor, r->rate_bytes_ps);
1368                        if (r->mult & (1U << 31) || factor & (1ULL << 63))
1369                                break;
1370                        factor <<= 1;
1371                        r->shift++;
1372                }
1373        }
1374}
1375EXPORT_SYMBOL(psched_ratecfg_precompute);
1376
1377static void mini_qdisc_rcu_func(struct rcu_head *head)
1378{
1379}
1380
1381void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
1382                          struct tcf_proto *tp_head)
1383{
1384        /* Protected with chain0->filter_chain_lock.
1385         * Can't access chain directly because tp_head can be NULL.
1386         */
1387        struct mini_Qdisc *miniq_old =
1388                rcu_dereference_protected(*miniqp->p_miniq, 1);
1389        struct mini_Qdisc *miniq;
1390
1391        if (!tp_head) {
1392                RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
1393                /* Wait for any in-flight RCU callback before it is freed. */
1394                rcu_barrier();
1395                return;
1396        }
1397
1398        miniq = !miniq_old || miniq_old == &miniqp->miniq2 ?
1399                &miniqp->miniq1 : &miniqp->miniq2;
1400
1401        /* We need to make sure that readers won't see the miniq
1402         * we are about to modify. So wait until previous call_rcu callback
1403         * is done.
1404         */
1405        rcu_barrier();
1406        miniq->filter_list = tp_head;
1407        rcu_assign_pointer(*miniqp->p_miniq, miniq);
1408
1409        if (miniq_old)
1410                /* This is the counterpart of the rcu barriers above. We need to
1411                 * block any potential new user of miniq_old until all readers
1412                 * have stopped seeing it.
1413                 */
1414                call_rcu(&miniq_old->rcu, mini_qdisc_rcu_func);
1415}
1416EXPORT_SYMBOL(mini_qdisc_pair_swap);
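/* Typical usage, sketched from the ingress/clsact qdiscs: the pair is set up
 * once at init time and then swapped whenever the filter chain head changes:
 *
 *        mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
 *        mini_qdisc_pair_block_init(&q->miniqp, q->block);
 *        ...
 *        mini_qdisc_pair_swap(&q->miniqp, tp_head);
 */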
1417
1418void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
1419                                struct tcf_block *block)
1420{
1421        miniqp->miniq1.block = block;
1422        miniqp->miniq2.block = block;
1423}
1424EXPORT_SYMBOL(mini_qdisc_pair_block_init);
1425
1426void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
1427                          struct mini_Qdisc __rcu **p_miniq)
1428{
1429        miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
1430        miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
1431        miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
1432        miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
1433        miniqp->p_miniq = p_miniq;
1434}
1435EXPORT_SYMBOL(mini_qdisc_pair_init);
1436