linux/include/net/sch_generic.h
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
        struct tc_ratespec rate;
        u32             data[256];
        struct qdisc_rate_table *next;
        int             refcnt;
};

enum qdisc_state_t {
        __QDISC_STATE_SCHED,
        __QDISC_STATE_DEACTIVATED,
        __QDISC_STATE_THROTTLED,
};

/*
 * The following bits are only changed while the qdisc lock is held.
 */
enum qdisc___state_t {
        __QDISC___STATE_RUNNING = 1,
};

struct qdisc_size_table {
        struct rcu_head         rcu;
        struct list_head        list;
        struct tc_sizespec      szopts;
        int                     refcnt;
        u16                     data[];
};

struct Qdisc {
        int                     (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
        struct sk_buff *        (*dequeue)(struct Qdisc *dev);
        unsigned int            flags;
#define TCQ_F_BUILTIN           1
#define TCQ_F_INGRESS           2
#define TCQ_F_CAN_BYPASS        4
#define TCQ_F_MQROOT            8
#define TCQ_F_ONETXQUEUE        0x10 /* dequeue_skb() can assume all skbs are for
                                      * q->dev_queue : it can test
                                      * netif_xmit_frozen_or_stopped() before
                                      * dequeuing the next packet.
                                      * This is true for MQ/MQPRIO slaves and for
                                      * non-multiqueue devices.
                                      */
#define TCQ_F_WARN_NONWC        (1 << 16)
        int                     padded;
        const struct Qdisc_ops  *ops;
        struct qdisc_size_table __rcu *stab;
        struct list_head        list;
        u32                     handle;
        u32                     parent;
        atomic_t                refcnt;
        struct gnet_stats_rate_est      rate_est;
        int                     (*reshape_fail)(struct sk_buff *skb,
                                        struct Qdisc *q);

        void                    *u32_node;

        /* This field is deprecated, but it is still used by CBQ
         * and it will live until a better solution is invented.
         */
        struct Qdisc            *__parent;
        struct netdev_queue     *dev_queue;
        struct Qdisc            *next_sched;

        struct sk_buff          *gso_skb;
        /*
         * For performance's sake on SMP, we put the most frequently
         * modified fields at the end.
         */
        unsigned long           state;
        struct sk_buff_head     q;
        struct gnet_stats_basic_packed bstats;
        unsigned int            __state;
        struct gnet_stats_queue qstats;
        struct rcu_head         rcu_head;
        spinlock_t              busylock;
        u32                     limit;
};

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
        return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
        if (qdisc_is_running(qdisc))
                return false;
        qdisc->__state |= __QDISC___STATE_RUNNING;
        return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
        qdisc->__state &= ~__QDISC___STATE_RUNNING;
}
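
/*
 * Illustrative sketch (not part of this header): qdisc_run_begin() and
 * qdisc_run_end() bracket a dispatch pass so that only one CPU runs a
 * given qdisc at a time; since __state is only changed under the qdisc
 * lock, the caller claims the RUNNING bit while holding that lock.
 * A dispatcher would typically do:
 *
 *      if (qdisc_run_begin(q)) {
 *              ...dequeue packets and hand them to the driver...
 *              qdisc_run_end(q);
 *      }
 *
 * If qdisc_run_begin() returns false, another CPU already owns the
 * dispatch loop and will drain the queue.
 */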

static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
{
        return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
}

static inline void qdisc_throttled(struct Qdisc *qdisc)
{
        set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}

static inline void qdisc_unthrottled(struct Qdisc *qdisc)
{
        clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}
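
/*
 * Illustrative sketch (not part of this header): a non-work-conserving
 * qdisc typically marks itself throttled while it is holding back packets
 * (e.g. for rate limiting) and clears the bit when it may dequeue again,
 * for instance from a timer:
 *
 *      qdisc_throttled(sch);           ...nothing may be dequeued for now...
 *      ...arm a timer for the next transmission opportunity...
 *
 * and later, in the timer handler:
 *
 *      qdisc_unthrottled(sch);
 *      __netif_schedule(qdisc_root(sch));      ...kick the dispatch loop...
 */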

struct Qdisc_class_ops {
        /* Child qdisc manipulation */
        struct netdev_queue *   (*select_queue)(struct Qdisc *, struct tcmsg *);
        int                     (*graft)(struct Qdisc *, unsigned long cl,
                                        struct Qdisc *, struct Qdisc **);
        struct Qdisc *          (*leaf)(struct Qdisc *, unsigned long cl);
        void                    (*qlen_notify)(struct Qdisc *, unsigned long);

        /* Class manipulation routines */
        unsigned long           (*get)(struct Qdisc *, u32 classid);
        void                    (*put)(struct Qdisc *, unsigned long);
        int                     (*change)(struct Qdisc *, u32, u32,
                                        struct nlattr **, unsigned long *);
        int                     (*delete)(struct Qdisc *, unsigned long);
        void                    (*walk)(struct Qdisc *, struct qdisc_walker * arg);

        /* Filter manipulation */
        struct tcf_proto **     (*tcf_chain)(struct Qdisc *, unsigned long);
        unsigned long           (*bind_tcf)(struct Qdisc *, unsigned long,
                                        u32 classid);
        void                    (*unbind_tcf)(struct Qdisc *, unsigned long);

        /* rtnetlink specific */
        int                     (*dump)(struct Qdisc *, unsigned long,
                                        struct sk_buff *skb, struct tcmsg*);
        int                     (*dump_stats)(struct Qdisc *, unsigned long,
                                        struct gnet_dump *);
};

struct Qdisc_ops {
        struct Qdisc_ops        *next;
        const struct Qdisc_class_ops    *cl_ops;
        char                    id[IFNAMSIZ];
        int                     priv_size;

        int                     (*enqueue)(struct sk_buff *, struct Qdisc *);
        struct sk_buff *        (*dequeue)(struct Qdisc *);
        struct sk_buff *        (*peek)(struct Qdisc *);
        unsigned int            (*drop)(struct Qdisc *);

        int                     (*init)(struct Qdisc *, struct nlattr *arg);
        void                    (*reset)(struct Qdisc *);
        void                    (*destroy)(struct Qdisc *);
        int                     (*change)(struct Qdisc *, struct nlattr *arg);
        void                    (*attach)(struct Qdisc *);

        int                     (*dump)(struct Qdisc *, struct sk_buff *);
        int                     (*dump_stats)(struct Qdisc *, struct gnet_dump *);

        struct module           *owner;
};
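
/*
 * Illustrative sketch (not part of this header): a minimal classless qdisc
 * fills in a Qdisc_ops table and registers it with register_qdisc(), which
 * is declared in net/pkt_sched.h.  The "example_*" names below are
 * hypothetical; qdisc_peek_head() and qdisc_reset_queue() are the generic
 * helpers defined later in this file:
 *
 *      static struct Qdisc_ops example_qdisc_ops __read_mostly = {
 *              .id             = "example",
 *              .priv_size      = sizeof(struct example_sched_data),
 *              .enqueue        = example_enqueue,
 *              .dequeue        = example_dequeue,
 *              .peek           = qdisc_peek_head,
 *              .init           = example_init,
 *              .reset          = qdisc_reset_queue,
 *              .change         = example_change,
 *              .dump           = example_dump,
 *              .owner          = THIS_MODULE,
 *      };
 */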


struct tcf_result {
        unsigned long   class;
        u32             classid;
};

struct tcf_proto_ops {
        struct tcf_proto_ops    *next;
        char                    kind[IFNAMSIZ];

        int                     (*classify)(struct sk_buff *,
                                            const struct tcf_proto *,
                                            struct tcf_result *);
        int                     (*init)(struct tcf_proto*);
        void                    (*destroy)(struct tcf_proto*);

        unsigned long           (*get)(struct tcf_proto*, u32 handle);
        void                    (*put)(struct tcf_proto*, unsigned long);
        int                     (*change)(struct net *net, struct sk_buff *,
                                        struct tcf_proto*, unsigned long,
                                        u32 handle, struct nlattr **,
                                        unsigned long *);
        int                     (*delete)(struct tcf_proto*, unsigned long);
        void                    (*walk)(struct tcf_proto*, struct tcf_walker *arg);

        /* rtnetlink specific */
        int                     (*dump)(struct tcf_proto*, unsigned long,
                                        struct sk_buff *skb, struct tcmsg*);

        struct module           *owner;
};

struct tcf_proto {
        /* Fast access part */
        struct tcf_proto        *next;
        void                    *root;
        int                     (*classify)(struct sk_buff *,
                                            const struct tcf_proto *,
                                            struct tcf_result *);
        __be16                  protocol;

        /* All the rest */
        u32                     prio;
        u32                     classid;
        struct Qdisc            *q;
        void                    *data;
        const struct tcf_proto_ops      *ops;
};
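
/*
 * Illustrative sketch (not part of this header): filters hang off a qdisc
 * (or class) as a priority-ordered singly linked list, and classification
 * walks that list until some filter claims the packet:
 *
 *      for (; tp; tp = tp->next) {
 *              int err = tp->classify(skb, tp, &res);
 *
 *              if (err >= 0)
 *                      return err;     ...res.class / res.classid are valid...
 *      }
 *      return -1;                      ...no filter matched...
 */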

struct qdisc_skb_cb {
        unsigned int            pkt_len;
        u16                     slave_dev_queue_mapping;
        u16                     _pad;
        unsigned char           data[20];
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
        struct qdisc_skb_cb *qcb;

        BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
        BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
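
/*
 * Illustrative sketch (not part of this header): a qdisc that wants its own
 * per-packet scratch space can lay it out inside qdisc_skb_cb::data and use
 * qdisc_cb_private_validate() to assert at build time that it fits.  The
 * "example_*" names below are hypothetical:
 *
 *      struct example_skb_cb {
 *              u64     time_to_send;
 *      };
 *
 *      static struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
 *      {
 *              qdisc_cb_private_validate(skb, sizeof(struct example_skb_cb));
 *              return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
 *      }
 */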

static inline int qdisc_qlen(const struct Qdisc *q)
{
        return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
        return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
        return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
        return qdisc->dev_queue->qdisc;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
        return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root_sleeping(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
        return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
        spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
        spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)       sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)     sch_tree_unlock((tp)->q)
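
/*
 * Illustrative sketch (not part of this header): a qdisc's ->change()
 * callback typically parses its netlink attributes first and only then
 * takes the tree lock to swap in the new parameters, so the packet path
 * never sees a half-updated configuration:
 *
 *      sch_tree_lock(sch);
 *      q->limit = new_limit;           ...hypothetical private field...
 *      ...update other private state...
 *      sch_tree_unlock(sch);
 */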

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;

struct Qdisc_class_common {
        u32                     classid;
        struct hlist_node       hnode;
};

struct Qdisc_class_hash {
        struct hlist_head       *hash;
        unsigned int            hashsize;
        unsigned int            hashmask;
        unsigned int            hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
        id ^= id >> 8;
        id ^= id >> 4;
        return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
        struct Qdisc_class_common *cl;
        unsigned int h;

        h = qdisc_class_hash(id, hash->hashmask);
        hlist_for_each_entry(cl, &hash->hash[h], hnode) {
                if (cl->classid == id)
                        return cl;
        }
        return NULL;
}
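
/*
 * Illustrative sketch (not part of this header): a classful qdisc embeds
 * Qdisc_class_common in its own class structure, keeps the classes in a
 * Qdisc_class_hash, and resolves a classid with qdisc_class_find() plus
 * container_of().  The "example_*" names are hypothetical; qdisc_priv()
 * comes from net/pkt_sched.h:
 *
 *      struct example_class {
 *              struct Qdisc_class_common common;
 *              ...per-class state...
 *      };
 *
 *      static struct example_class *example_find(u32 classid, struct Qdisc *sch)
 *      {
 *              struct example_sched_data *q = qdisc_priv(sch);
 *              struct Qdisc_class_common *clc;
 *
 *              clc = qdisc_class_find(&q->clhash, classid);
 *              return clc ? container_of(clc, struct example_class, common) : NULL;
 *      }
 */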

extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern void dev_deactivate_many(struct list_head *head);
extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                                     struct Qdisc *qdisc);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                                 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                       struct Qdisc_ops *ops, u32 parentid);
extern void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                                      const struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);

/* Reset all TX qdiscs of a device at or above the given queue index. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
        struct Qdisc *qdisc;

        for (; i < dev->num_tx_queues; i++) {
                qdisc = netdev_get_tx_queue(dev, i)->qdisc;
                if (qdisc) {
                        spin_lock_bh(qdisc_lock(qdisc));
                        qdisc_reset(qdisc);
                        spin_unlock_bh(qdisc_lock(qdisc));
                }
        }
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
        qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
        unsigned int i;
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                const struct Qdisc *q = txq->qdisc;

                if (q->q.qlen)
                        return false;
        }
        return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
        unsigned int i;
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                if (txq->qdisc != txq->qdisc_sleeping)
                        return true;
        }
        return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
        unsigned int i;
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                if (txq->qdisc != &noop_qdisc)
                        return false;
        }
        return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
        return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
        __NET_XMIT_STOLEN = 0x00010000,
        __NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)  ((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)  (1)
#endif
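
/*
 * Illustrative sketch (not part of this header): when a parent qdisc feeds
 * a packet to a child, a failed enqueue only counts as a drop if the packet
 * was not "stolen" by a classifier action.  The usual pattern is:
 *
 *      ret = qdisc_enqueue(skb, child);
 *      if (ret != NET_XMIT_SUCCESS) {
 *              if (net_xmit_drop_count(ret))
 *                      sch->qstats.drops++;
 *              return ret;
 *      }
 */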

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
                                           const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
        struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

        if (stab)
                __qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        qdisc_calculate_pkt_len(skb, sch);
        return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
        qdisc_skb_cb(skb)->pkt_len = skb->len;
        return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}


static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
                                 const struct sk_buff *skb)
{
        bstats->bytes += qdisc_pkt_len(skb);
        bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
                                       const struct sk_buff *skb)
{
        bstats_update(&sch->bstats, skb);
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
                                       struct sk_buff_head *list)
{
        __skb_queue_tail(list, skb);
        sch->qstats.backlog += qdisc_pkt_len(skb);

        return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
        return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
                                                   struct sk_buff_head *list)
{
        struct sk_buff *skb = __skb_dequeue(list);

        if (likely(skb != NULL)) {
                sch->qstats.backlog -= qdisc_pkt_len(skb);
                qdisc_bstats_update(sch, skb);
        }

        return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
        return __qdisc_dequeue_head(sch, &sch->q);
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
                                              struct sk_buff_head *list)
{
        struct sk_buff *skb = __skb_dequeue(list);

        if (likely(skb != NULL)) {
                unsigned int len = qdisc_pkt_len(skb);
                sch->qstats.backlog -= len;
                kfree_skb(skb);
                return len;
        }

        return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
{
        return __qdisc_queue_drop_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
                                                   struct sk_buff_head *list)
{
        struct sk_buff *skb = __skb_dequeue_tail(list);

        if (likely(skb != NULL))
                sch->qstats.backlog -= qdisc_pkt_len(skb);

        return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
        return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
        return skb_peek(&sch->q);
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
        /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
        if (!sch->gso_skb) {
                sch->gso_skb = sch->dequeue(sch);
                if (sch->gso_skb)
                        /* it's still part of the queue */
                        sch->q.qlen++;
        }

        return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
        struct sk_buff *skb = sch->gso_skb;

        if (skb) {
                sch->gso_skb = NULL;
                sch->q.qlen--;
        } else {
                skb = sch->dequeue(sch);
        }

        return skb;
}
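
/*
 * Illustrative sketch (not part of this header): a shaping qdisc typically
 * peeks at its child's head packet, decides whether it may be released yet,
 * and only then removes it with qdisc_dequeue_peeked():
 *
 *      skb = q->qdisc->ops->peek(q->qdisc);
 *      if (skb) {
 *              if (time_to_send(skb))          ...hypothetical check...
 *                      return qdisc_dequeue_peeked(q->qdisc);
 *              ...arm the watchdog and report nothing to send...
 *      }
 */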

static inline void __qdisc_reset_queue(struct Qdisc *sch,
                                       struct sk_buff_head *list)
{
        /*
         * We do not know the backlog in bytes of this list;
         * it is up to the caller to correct it.
         */
        __skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
        __qdisc_reset_queue(sch, &sch->q);
        sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
                                              struct sk_buff_head *list)
{
        struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

        if (likely(skb != NULL)) {
                unsigned int len = qdisc_pkt_len(skb);
                kfree_skb(skb);
                return len;
        }

        return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
        return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
        kfree_skb(skb);
        sch->qstats.drops++;

        return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
        sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
        if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
                goto drop;

        return NET_XMIT_SUCCESS;

drop:
#endif
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
{
        int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
        if (slot < 0)
                slot = 0;
        slot >>= rtab->rate.cell_log;
        if (slot > 255)
                return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
        return rtab->data[slot];
}
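
/*
 * Illustrative sketch (not part of this header): token-bucket style qdiscs
 * use the rate table to convert a packet length into the time it occupies
 * the link, and charge that against their token budget.  "rate_tab" and
 * "tokens" are hypothetical fields:
 *
 *      u32 cost = qdisc_l2t(q->rate_tab, qdisc_pkt_len(skb));
 *
 *      if (tokens >= cost)
 *              tokens -= cost;         ...packet may be sent now...
 *      else
 *              ...throttle and arm the watchdog...
 */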

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
                                            int action)
{
        struct sk_buff *n;

        n = skb_clone(skb, gfp_mask);

        if (n) {
                n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
                n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
                n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
        }
        return n;
}
#endif

struct psched_ratecfg {
        u64     rate_bps;
        u32     mult;
        u16     overhead;
        u8      shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
                                unsigned int len)
{
        return ((u64)(len + r->overhead) * r->mult) >> r->shift;
}

extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
                                          const struct psched_ratecfg *r)
{
        memset(res, 0, sizeof(*res));
        res->rate = r->rate_bps >> 3;
        res->overhead = r->overhead;
}
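
/*
 * Illustrative sketch (not part of this header): instead of a rate table,
 * a qdisc can precompute a psched_ratecfg from the tc_ratespec it received
 * over netlink and then convert packet lengths to transmission time in
 * nanoseconds.  "q->rate" and "rate_spec" are hypothetical names:
 *
 *      psched_ratecfg_precompute(&q->rate, &rate_spec);
 *      ...
 *      u64 ns = psched_l2t_ns(&q->rate, qdisc_pkt_len(skb));
 *
 * psched_ratecfg_getrate() does the reverse translation when the current
 * configuration is dumped back to userspace.
 */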

#endif