linux/include/net/sch_generic.h
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
        struct tc_ratespec rate;
        u32             data[256];
        struct qdisc_rate_table *next;
        int             refcnt;
};

enum qdisc_state_t {
        __QDISC_STATE_SCHED,
        __QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
        struct rcu_head         rcu;
        struct list_head        list;
        struct tc_sizespec      szopts;
        int                     refcnt;
        u16                     data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
        struct sk_buff  *head;
        struct sk_buff  *tail;
        __u32           qlen;
        spinlock_t      lock;
};

struct Qdisc {
        int                     (*enqueue)(struct sk_buff *skb,
                                           struct Qdisc *sch,
                                           struct sk_buff **to_free);
        struct sk_buff *        (*dequeue)(struct Qdisc *sch);
        unsigned int            flags;
#define TCQ_F_BUILTIN           1
#define TCQ_F_INGRESS           2
#define TCQ_F_CAN_BYPASS        4
#define TCQ_F_MQROOT            8
#define TCQ_F_ONETXQUEUE        0x10 /* dequeue_skb() can assume all skbs are for
                                      * q->dev_queue: it can test
                                      * netif_xmit_frozen_or_stopped() before
                                      * dequeueing the next packet.
                                      * It is true for MQ/MQPRIO slaves or for a
                                      * non-multiqueue device.
                                      */
#define TCQ_F_WARN_NONWC        (1 << 16)
#define TCQ_F_CPUSTATS          0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT          0x40 /* root of its hierarchy:
                                      * qdisc_tree_reduce_backlog() should stop.
                                      */
#define TCQ_F_INVISIBLE         0x80 /* invisible by default in dump */
        u32                     limit;
        const struct Qdisc_ops  *ops;
        struct qdisc_size_table __rcu *stab;
        struct hlist_node       hash;
        u32                     handle;
        u32                     parent;
        void                    *u32_node;

        struct netdev_queue     *dev_queue;

        struct net_rate_estimator __rcu *rate_est;
        struct gnet_stats_basic_cpu __percpu *cpu_bstats;
        struct gnet_stats_queue __percpu *cpu_qstats;

        /*
         * For performance's sake on SMP, we put the most frequently modified
         * fields at the end.
         */
        struct sk_buff          *gso_skb ____cacheline_aligned_in_smp;
        struct qdisc_skb_head   q;
        struct gnet_stats_basic_packed bstats;
        seqcount_t              running;
        struct gnet_stats_queue qstats;
        unsigned long           state;
        struct Qdisc            *next_sched;
        struct sk_buff          *skb_bad_txq;
        struct rcu_head         rcu_head;
        int                     padded;
        atomic_t                refcnt;

        spinlock_t              busylock ____cacheline_aligned_in_smp;
};

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
        return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
        if (qdisc_is_running(qdisc))
                return false;
        /* Variant of write_seqcount_begin() telling lockdep a trylock
         * was attempted.
         */
        raw_write_seqcount_begin(&qdisc->running);
        seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
        return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
        write_seqcount_end(&qdisc->running);
}
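
/* Example (illustrative sketch, not part of the upstream header): the
 * ->running seqcount behaves like a per-qdisc trylock.  A transmit path
 * claims the qdisc with qdisc_run_begin(); if another CPU already owns
 * it, the caller simply backs off instead of spinning.  A real caller
 * hands each dequeued skb to the driver; this sketch just frees it so
 * the example stays self-contained.
 */
static inline void example_qdisc_run(struct Qdisc *q)
{
        struct sk_buff *skb;

        if (!qdisc_run_begin(q))
                return;         /* another CPU is already dequeueing */

        while ((skb = q->dequeue(q)) != NULL)
                kfree_skb(skb);

        qdisc_run_end(q);
}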

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
        return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
        /* Non-BQL migrated drivers will return 0, too. */
        return dql_avail(&txq->dql);
#else
        return 0;
#endif
}
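
/* Example (illustrative sketch): bulk dequeue is only worthwhile when the
 * qdisc feeds a single TX queue (TCQ_F_ONETXQUEUE) and BQL still has byte
 * budget left.  Loosely modelled on the bulk dequeue logic in
 * net/sched/sch_generic.c; the byte accounting uses skb->len here, whereas
 * the real code uses qdisc_pkt_len(), which is defined further down in
 * this file.
 */
static inline struct sk_buff *example_bulk_dequeue(struct Qdisc *q,
                                                   struct netdev_queue *txq)
{
        struct sk_buff *head, *skb;
        int budget;

        head = q->dequeue(q);
        if (!head || !qdisc_may_bulk(q))
                return head;

        skb = head;
        skb->next = NULL;
        budget = qdisc_avail_bulklimit(txq) - (int)head->len;

        while (budget > 0) {
                struct sk_buff *nskb = q->dequeue(q);

                if (!nskb)
                        break;
                budget -= (int)nskb->len;
                skb->next = nskb;
                skb = nskb;
                nskb->next = NULL;
        }
        return head;            /* the caller transmits the whole chain */
}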

struct Qdisc_class_ops {
        /* Child qdisc manipulation */
        struct netdev_queue *   (*select_queue)(struct Qdisc *, struct tcmsg *);
        int                     (*graft)(struct Qdisc *, unsigned long cl,
                                        struct Qdisc *, struct Qdisc **);
        struct Qdisc *          (*leaf)(struct Qdisc *, unsigned long cl);
        void                    (*qlen_notify)(struct Qdisc *, unsigned long);

        /* Class manipulation routines */
        unsigned long           (*get)(struct Qdisc *, u32 classid);
        void                    (*put)(struct Qdisc *, unsigned long);
        int                     (*change)(struct Qdisc *, u32, u32,
                                        struct nlattr **, unsigned long *);
        int                     (*delete)(struct Qdisc *, unsigned long);
        void                    (*walk)(struct Qdisc *, struct qdisc_walker * arg);

        /* Filter manipulation */
        struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
        bool                    (*tcf_cl_offload)(u32 classid);
        unsigned long           (*bind_tcf)(struct Qdisc *, unsigned long,
                                        u32 classid);
        void                    (*unbind_tcf)(struct Qdisc *, unsigned long);

        /* rtnetlink specific */
        int                     (*dump)(struct Qdisc *, unsigned long,
                                        struct sk_buff *skb, struct tcmsg*);
        int                     (*dump_stats)(struct Qdisc *, unsigned long,
                                        struct gnet_dump *);
};

struct Qdisc_ops {
        struct Qdisc_ops        *next;
        const struct Qdisc_class_ops    *cl_ops;
        char                    id[IFNAMSIZ];
        int                     priv_size;

        int                     (*enqueue)(struct sk_buff *skb,
                                           struct Qdisc *sch,
                                           struct sk_buff **to_free);
        struct sk_buff *        (*dequeue)(struct Qdisc *);
        struct sk_buff *        (*peek)(struct Qdisc *);

        int                     (*init)(struct Qdisc *, struct nlattr *arg);
        void                    (*reset)(struct Qdisc *);
        void                    (*destroy)(struct Qdisc *);
        int                     (*change)(struct Qdisc *, struct nlattr *arg);
        void                    (*attach)(struct Qdisc *);

        int                     (*dump)(struct Qdisc *, struct sk_buff *);
        int                     (*dump_stats)(struct Qdisc *, struct gnet_dump *);

        struct module           *owner;
};


struct tcf_result {
        unsigned long   class;
        u32             classid;
};

struct tcf_proto_ops {
        struct list_head        head;
        char                    kind[IFNAMSIZ];

        int                     (*classify)(struct sk_buff *,
                                            const struct tcf_proto *,
                                            struct tcf_result *);
        int                     (*init)(struct tcf_proto*);
        void                    (*destroy)(struct tcf_proto*);

        unsigned long           (*get)(struct tcf_proto*, u32 handle);
        int                     (*change)(struct net *net, struct sk_buff *,
                                        struct tcf_proto*, unsigned long,
                                        u32 handle, struct nlattr **,
                                        unsigned long *, bool);
        int                     (*delete)(struct tcf_proto*, unsigned long, bool*);
        void                    (*walk)(struct tcf_proto*, struct tcf_walker *arg);

        /* rtnetlink specific */
        int                     (*dump)(struct net*, struct tcf_proto*, unsigned long,
                                        struct sk_buff *skb, struct tcmsg*);

        struct module           *owner;
};

struct tcf_proto {
        /* Fast access part */
        struct tcf_proto __rcu  *next;
        void __rcu              *root;
        int                     (*classify)(struct sk_buff *,
                                            const struct tcf_proto *,
                                            struct tcf_result *);
        __be16                  protocol;

        /* All the rest */
        u32                     prio;
        u32                     classid;
        struct Qdisc            *q;
        void                    *data;
        const struct tcf_proto_ops      *ops;
        struct rcu_head         rcu;
};

struct qdisc_skb_cb {
        unsigned int            pkt_len;
        u16                     slave_dev_queue_mapping;
        u16                     tc_classid;
#define QDISC_CB_PRIV_LEN 20
        unsigned char           data[QDISC_CB_PRIV_LEN];
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
        struct qdisc_skb_cb *qcb;

        BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
        BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
        return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
        return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
        return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
        struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

        return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
        return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root_sleeping(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root_sleeping(qdisc);

        ASSERT_RTNL();
        return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
        return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
        spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
        spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
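
/* Example (illustrative sketch): a configuration update relies on RTNL so
 * that the root qdisc cannot change underneath us, then takes the sleeping
 * root lock around the actual modification, the way most qdiscs' ->change()
 * callbacks do.  The new_limit parameter and the function itself are
 * hypothetical.
 */
static inline void example_set_limit(struct Qdisc *sch, u32 new_limit)
{
        ASSERT_RTNL();

        sch_tree_lock(sch);
        sch->limit = new_limit;
        /* a real qdisc would also drop packets queued above the new limit */
        sch_tree_unlock(sch);
}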

#define tcf_tree_lock(tp)       sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)     sch_tree_unlock((tp)->q)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
        return ntx < dev->real_num_tx_queues ?
                        default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
        u32                     classid;
        struct hlist_node       hnode;
};

struct Qdisc_class_hash {
        struct hlist_head       *hash;
        unsigned int            hashsize;
        unsigned int            hashmask;
        unsigned int            hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
        id ^= id >> 8;
        id ^= id >> 4;
        return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
        struct Qdisc_class_common *cl;
        unsigned int h;

        h = qdisc_class_hash(id, hash->hashmask);
        hlist_for_each_entry(cl, &hash->hash[h], hnode) {
                if (cl->classid == id)
                        return cl;
        }
        return NULL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
                             struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
                             struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
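
/* Example (illustrative sketch): a classful qdisc embeds
 * struct Qdisc_class_common at the start of its per-class structure and
 * resolves classids through qdisc_class_find().  Both struct example_class
 * and example_find_class() are hypothetical and only illustrate the
 * container_of() pattern.
 */
struct example_class {
        struct Qdisc_class_common common;
        u32                     quantum;        /* hypothetical per-class data */
};

static inline struct example_class *
example_find_class(struct Qdisc_class_hash *hash, u32 classid)
{
        struct Qdisc_class_common *cl = qdisc_class_find(hash, classid);

        return cl ? container_of(cl, struct example_class, common) : NULL;
}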

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                              struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
                               unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                               const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline void skb_reset_tc(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
        skb->tc_redirected = 0;
#endif
}

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
        return skb->tc_at_ingress;
#else
        return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
        if (skb->tc_skip_classify) {
                skb->tc_skip_classify = 0;
                return true;
        }
#endif
        return false;
}

/* Reset all TX qdiscs at or above a given queue index of a device.  */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
        struct Qdisc *qdisc;

        for (; i < dev->num_tx_queues; i++) {
                qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
                if (qdisc) {
                        spin_lock_bh(qdisc_lock(qdisc));
                        qdisc_reset(qdisc);
                        spin_unlock_bh(qdisc_lock(qdisc));
                }
        }
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
        qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
        unsigned int i;

        rcu_read_lock();
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                const struct Qdisc *q = rcu_dereference(txq->qdisc);

                if (q->q.qlen) {
                        rcu_read_unlock();
                        return false;
                }
        }
        rcu_read_unlock();
        return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
                        return true;
        }
        return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
                        return false;
        }
        return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
        return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
        __NET_XMIT_STOLEN = 0x00010000,
        __NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)  ((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)  (1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
                                           const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
        struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

        if (stab)
                __qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                                struct sk_buff **to_free)
{
        qdisc_calculate_pkt_len(skb, sch);
        return sch->enqueue(skb, sch, to_free);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
        return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
                                  __u64 bytes, __u32 packets)
{
        bstats->bytes += bytes;
        bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
                                 const struct sk_buff *skb)
{
        _bstats_update(bstats,
                       qdisc_pkt_len(skb),
                       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
                                      __u64 bytes, __u32 packets)
{
        u64_stats_update_begin(&bstats->syncp);
        _bstats_update(&bstats->bstats, bytes, packets);
        u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
                                     const struct sk_buff *skb)
{
        u64_stats_update_begin(&bstats->syncp);
        bstats_update(&bstats->bstats, skb);
        u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
                                           const struct sk_buff *skb)
{
        bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
                                       const struct sk_buff *skb)
{
        bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
                                            const struct sk_buff *skb)
{
        sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
                                            const struct sk_buff *skb)
{
        sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
        sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
        qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
        qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
        qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
        this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
        sch->qstats.overlimits++;
}
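
/* Example (illustrative sketch): how a classful parent typically enqueues
 * into a child qdisc.  On failure, __NET_XMIT_STOLEN (tested through
 * net_xmit_drop_count()) tells the parent whether the packet should be
 * charged to its own drop counter.  The child argument and the function
 * itself are hypothetical.
 */
static inline int example_parent_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                                         struct Qdisc *child,
                                         struct sk_buff **to_free)
{
        int ret = qdisc_enqueue(skb, child, to_free);

        if (ret != NET_XMIT_SUCCESS) {
                if (net_xmit_drop_count(ret))
                        qdisc_qstats_drop(sch);
                return ret;
        }

        qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;
        return NET_XMIT_SUCCESS;
}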

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
        qh->head = NULL;
        qh->tail = NULL;
        qh->qlen = 0;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
                                       struct qdisc_skb_head *qh)
{
        struct sk_buff *last = qh->tail;

        if (last) {
                skb->next = NULL;
                last->next = skb;
                qh->tail = skb;
        } else {
                qh->tail = skb;
                qh->head = skb;
        }
        qh->qlen++;
        qdisc_qstats_backlog_inc(sch, skb);

        return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
        return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
        struct sk_buff *skb = qh->head;

        if (likely(skb != NULL)) {
                qh->head = skb->next;
                qh->qlen--;
                if (qh->head == NULL)
                        qh->tail = NULL;
                skb->next = NULL;
        }

        return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
        struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

        if (likely(skb != NULL)) {
                qdisc_qstats_backlog_dec(sch, skb);
                qdisc_bstats_update(sch, skb);
        }

        return skb;
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
        skb->next = *to_free;
        *to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
                                                   struct qdisc_skb_head *qh,
                                                   struct sk_buff **to_free)
{
        struct sk_buff *skb = __qdisc_dequeue_head(qh);

        if (likely(skb != NULL)) {
                unsigned int len = qdisc_pkt_len(skb);

                qdisc_qstats_backlog_dec(sch, skb);
                __qdisc_drop(skb, to_free);
                return len;
        }

        return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
                                                 struct sk_buff **to_free)
{
        return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}
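
/* Example (illustrative sketch): a pfifo_head_drop-style enqueue built from
 * the helpers above.  When the configured limit is reached, the oldest
 * packet is dropped from the head (via the to_free list) to make room for
 * the new one, and NET_XMIT_CN signals congestion to the caller.  The
 * function is hypothetical.
 */
static inline int example_head_drop_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                                            struct sk_buff **to_free)
{
        if (likely(qdisc_qlen(sch) < sch->limit))
                return qdisc_enqueue_tail(skb, sch);

        /* queue is full: drop the oldest packet to make room */
        qdisc_queue_drop_head(sch, to_free);
        qdisc_qstats_drop(sch);
        qdisc_enqueue_tail(skb, sch);
        return NET_XMIT_CN;
}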

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
        const struct qdisc_skb_head *qh = &sch->q;

        return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
        /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
        if (!sch->gso_skb) {
                sch->gso_skb = sch->dequeue(sch);
                if (sch->gso_skb) {
                        /* it's still part of the queue */
                        qdisc_qstats_backlog_inc(sch, sch->gso_skb);
                        sch->q.qlen++;
                }
        }

        return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
        struct sk_buff *skb = sch->gso_skb;

        if (skb) {
                sch->gso_skb = NULL;
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
        } else {
                skb = sch->dequeue(sch);
        }

        return skb;
}
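
/* Example (illustrative sketch): a non-work-conserving qdisc (a shaper)
 * typically implements ->peek() with qdisc_peek_dequeued() and only commits
 * to the packet in ->dequeue() once its rate budget allows transmission,
 * using qdisc_dequeue_peeked().  The allowed_now parameter stands in for
 * the qdisc's own conformance check (e.g. a token-bucket test).
 */
static inline struct sk_buff *example_shaper_dequeue(struct Qdisc *sch,
                                                     bool allowed_now)
{
        struct sk_buff *skb = qdisc_peek_dequeued(sch);

        if (!skb)
                return NULL;

        if (!allowed_now)
                return NULL;    /* not yet conforming: leave it queued */

        return qdisc_dequeue_peeked(sch);
}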

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
        /*
         * We do not know the backlog in bytes of this list; it is
         * up to the caller to correct it.
         */
        ASSERT_RTNL();
        if (qh->qlen) {
                rtnl_kfree_skbs(qh->head, qh->tail);

                qh->head = NULL;
                qh->tail = NULL;
                qh->qlen = 0;
        }
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
        __qdisc_reset_queue(&sch->q);
        sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
                                          struct Qdisc **pold)
{
        struct Qdisc *old;

        sch_tree_lock(sch);
        old = *pold;
        *pold = new;
        if (old != NULL) {
                qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
                qdisc_reset(old);
        }
        sch_tree_unlock(sch);

        return old;
}
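
/* Example (illustrative sketch): a classful qdisc's ->graft() callback
 * usually boils down to qdisc_replace(), which swaps the child under the
 * tree lock, propagates the old child's qlen/backlog up the tree and
 * resets it.  The single-child private structure and the helper are
 * hypothetical.
 */
struct example_sched_data {
        struct Qdisc            *child;         /* hypothetical single child */
};

static inline struct Qdisc *example_graft(struct Qdisc *sch,
                                          struct example_sched_data *priv,
                                          struct Qdisc *new)
{
        if (!new)
                new = &noop_qdisc;

        return qdisc_replace(sch, new, &priv->child);
}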

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
        rtnl_kfree_skbs(skb, skb);
        qdisc_qstats_drop(sch);
}


static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
                             struct sk_buff **to_free)
{
        __qdisc_drop(skb, to_free);
        qdisc_qstats_drop(sch);

        return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
   long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
{
        int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
        if (slot < 0)
                slot = 0;
        slot >>= rtab->rate.cell_log;
        if (slot > 255)
                return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
        return rtab->data[slot];
}

struct psched_ratecfg {
        u64     rate_bytes_ps; /* bytes per second */
        u32     mult;
        u16     overhead;
        u8      linklayer;
        u8      shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
                                unsigned int len)
{
        len += r->overhead;

        if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
                return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;

        return ((u64)len * r->mult) >> r->shift;
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
                               const struct tc_ratespec *conf,
                               u64 rate64);
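
/* Example (illustrative sketch): token-bucket style qdiscs precompute a
 * struct psched_ratecfg once at configuration time and then convert packet
 * sizes into transmission times in nanoseconds on the fast path.  The
 * tc_ratespec would normally come from a netlink attribute; this helper is
 * hypothetical.
 */
static inline u64 example_pkt_tx_time_ns(const struct tc_ratespec *conf,
                                         u64 rate64, unsigned int pkt_len)
{
        struct psched_ratecfg r;

        psched_ratecfg_precompute(&r, conf, rate64);
        return psched_l2t_ns(&r, pkt_len);
}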

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
                                          const struct psched_ratecfg *r)
{
        memset(res, 0, sizeof(*res));

        /* The legacy struct tc_ratespec has a 32bit @rate field;
         * Qdiscs using 64bit rates should add new attributes
         * in order to maintain compatibility.
         */
        res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

        res->overhead = r->overhead;
        res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

#endif