/* linux/net/sched/sch_drr.c */
   1/*
   2 * net/sched/sch_drr.c         Deficit Round Robin scheduler
   3 *
   4 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License
   8 * version 2 as published by the Free Software Foundation.
   9 */
  10
  11#include <linux/module.h>
  12#include <linux/slab.h>
  13#include <linux/init.h>
  14#include <linux/errno.h>
  15#include <linux/netdevice.h>
  16#include <linux/pkt_sched.h>
  17#include <net/sch_generic.h>
  18#include <net/pkt_sched.h>
  19#include <net/pkt_cls.h>
  20
/*
 * Per-class state: one DRR class owns one leaf qdisc plus the
 * quantum/deficit bookkeeping used by the round-robin dequeue loop.
 */
struct drr_class {
	struct Qdisc_class_common	common;		/* classid + clhash linkage */
	unsigned int			refcnt;		/* cops->get()/put() references */
	unsigned int			filter_cnt;	/* tc filters bound to this class */

	struct gnet_stats_basic_packed		bstats;	/* byte/packet counters */
	struct gnet_stats_queue		qstats;		/* drop counters */
	struct gnet_stats_rate_est64	rate_est;	/* optional TCA_RATE estimator */
	struct list_head		alist;		/* link in drr_sched->active */
	struct Qdisc			*qdisc;		/* attached leaf qdisc */

	u32				quantum;	/* bytes credited per round */
	u32				deficit;	/* bytes still sendable this round */
};
  35
/* Qdisc private data: the set of classes and the active round-robin list. */
struct drr_sched {
	struct list_head		active;		/* classes with packets queued */
	struct tcf_proto __rcu		*filter_list;	/* root filter chain (RCU) */
	struct Qdisc_class_hash		clhash;		/* classid -> drr_class hash */
};
  41
  42static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
  43{
  44        struct drr_sched *q = qdisc_priv(sch);
  45        struct Qdisc_class_common *clc;
  46
  47        clc = qdisc_class_find(&q->clhash, classid);
  48        if (clc == NULL)
  49                return NULL;
  50        return container_of(clc, struct drr_class, common);
  51}
  52
  53static void drr_purge_queue(struct drr_class *cl)
  54{
  55        unsigned int len = cl->qdisc->q.qlen;
  56        unsigned int backlog = cl->qdisc->qstats.backlog;
  57
  58        qdisc_reset(cl->qdisc);
  59        qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
  60}
  61
/* Netlink attribute policy: the only per-class option is the quantum (u32). */
static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};
  65
/*
 * Create a new class or modify an existing one (RTM_NEWTCLASS).
 *
 * @classid:  class being created/changed
 * @parentid: unused; all DRR classes hang directly off the root
 * @tca:      netlink attributes; TCA_OPTIONS may carry TCA_DRR_QUANTUM,
 *            and TCA_RATE optionally attaches a rate estimator
 * @arg:      in: existing class (or 0 to create); out: the resulting class
 *
 * A quantum of zero is rejected; when no quantum is supplied the device
 * MTU (psched_mtu) is used. Returns 0 on success or a negative errno.
 */
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0)
			return -EINVAL;
	} else
		quantum = psched_mtu(qdisc_dev(sch));

	/* Existing class: replace the estimator first (it can fail),
	 * then update the quantum under the tree lock. */
	if (cl != NULL) {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	/* New class: allocate, attach a default pfifo leaf (falling back
	 * to noop_qdisc on failure), then insert into the class hash. */
	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->refcnt         = 1;
	cl->common.classid = classid;
	cl->quantum        = quantum;
	cl->qdisc          = qdisc_create_dflt(sch->dev_queue,
					       &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
					    qdisc_root_sleeping_lock(sch),
					    tca[TCA_RATE]);
		if (err) {
			qdisc_destroy(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}
 140
/*
 * Final teardown of a class: kill the rate estimator, destroy the leaf
 * qdisc, then free the class itself. Called once refcnt drops to zero.
 */
static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_destroy(cl->qdisc);
	kfree(cl);
}
 147
/*
 * Delete a class (RTM_DELTCLASS). Fails with -EBUSY while tc filters
 * still reference the class. The queue is purged and the class removed
 * from the hash under the tree lock; the actual free happens later in
 * drr_put_class() when the caller drops its cops->get() reference.
 */
static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	drr_purge_queue(cl);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}
 170
 171static unsigned long drr_get_class(struct Qdisc *sch, u32 classid)
 172{
 173        struct drr_class *cl = drr_find_class(sch, classid);
 174
 175        if (cl != NULL)
 176                cl->refcnt++;
 177
 178        return (unsigned long)cl;
 179}
 180
 181static void drr_put_class(struct Qdisc *sch, unsigned long arg)
 182{
 183        struct drr_class *cl = (struct drr_class *)arg;
 184
 185        if (--cl->refcnt == 0)
 186                drr_destroy_class(sch, cl);
 187}
 188
 189static struct tcf_proto __rcu **drr_tcf_chain(struct Qdisc *sch,
 190                                              unsigned long cl)
 191{
 192        struct drr_sched *q = qdisc_priv(sch);
 193
 194        if (cl)
 195                return NULL;
 196
 197        return &q->filter_list;
 198}
 199
 200static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
 201                                  u32 classid)
 202{
 203        struct drr_class *cl = drr_find_class(sch, classid);
 204
 205        if (cl != NULL)
 206                cl->filter_cnt++;
 207
 208        return (unsigned long)cl;
 209}
 210
 211static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
 212{
 213        struct drr_class *cl = (struct drr_class *)arg;
 214
 215        cl->filter_cnt--;
 216}
 217
 218static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
 219                           struct Qdisc *new, struct Qdisc **old)
 220{
 221        struct drr_class *cl = (struct drr_class *)arg;
 222
 223        if (new == NULL) {
 224                new = qdisc_create_dflt(sch->dev_queue,
 225                                        &pfifo_qdisc_ops, cl->common.classid);
 226                if (new == NULL)
 227                        new = &noop_qdisc;
 228        }
 229
 230        *old = qdisc_replace(sch, new, &cl->qdisc);
 231        return 0;
 232}
 233
 234static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
 235{
 236        struct drr_class *cl = (struct drr_class *)arg;
 237
 238        return cl->qdisc;
 239}
 240
 241static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg)
 242{
 243        struct drr_class *cl = (struct drr_class *)arg;
 244
 245        if (cl->qdisc->q.qlen == 0)
 246                list_del(&cl->alist);
 247}
 248
/*
 * Dump one class to netlink: fills the tcmsg header (parent is always
 * the root for DRR) and nests the quantum under TCA_OPTIONS.
 * Returns the attribute length or -EMSGSIZE when the skb is full.
 */
static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	tcm->tcm_info   = cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
 270
/*
 * Dump class statistics: basic counters, rate estimator, queue stats,
 * plus the DRR-specific xstats. The deficit is only meaningful while
 * the class has packets queued, so it is reported as 0 otherwise.
 * Returns 0 on success, -1 when the dump buffer overflows.
 */
static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	__u32 qlen = cl->qdisc->q.qlen;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (qlen)
		xstats.deficit = cl->deficit;

	if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
 289
/*
 * Iterate over all classes, invoking arg->fn for each one past the
 * arg->skip offset. A negative return from fn stops the walk and sets
 * arg->stop so resumed walks bail out immediately.
 */
static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
 313
/*
 * Map an skb to a DRR class. First tries skb->priority as a direct
 * classid (when its major number matches this qdisc's handle), then
 * falls back to the tc filter chain. Returns NULL when no class
 * matched or the packet was consumed/dropped by an action; in that
 * case *qerr distinguishes bypass from stolen for the caller's stats.
 */
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tc_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through: stolen/queued packets return NULL too */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}
 349
/*
 * Enqueue: classify the packet, hand it to the class's leaf qdisc, and
 * — when the leaf transitions from empty to one packet — append the
 * class to the active list with a fresh deficit of one quantum.
 */
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	int err = 0;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		/* Count a drop only for bypass; stolen packets were consumed. */
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	/* First packet in an idle class: (re)activate it for dequeue. */
	if (cl->qdisc->q.qlen == 1) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	sch->q.qlen++;
	return err;
}
 381
/*
 * Deficit Round Robin dequeue. Peek at the head-of-line packet of the
 * first active class: if it fits within the class's deficit, dequeue
 * it and charge its length; otherwise credit the class another quantum
 * and rotate it to the back of the active list. A class whose leaf
 * drains is removed from the active list. A non-work-conserving leaf
 * (peek returns NULL while qlen > 0) is reported via qdisc_warn_nonwc.
 */
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL) {
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(skb == NULL))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);

			bstats_update(&cl->bstats, skb);
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		/* Packet too big for the remaining deficit: give the class
		 * another quantum and move it to the end of the round. */
		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}
 420
/*
 * Drop one packet from the first active class whose leaf supports
 * ->drop. Returns the dropped packet's length, or 0 if nothing could
 * be dropped. The list_del inside the iteration is safe only because
 * we return immediately afterwards.
 */
static unsigned int drr_drop(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->active, alist) {
		if (cl->qdisc->ops->drop) {
			len = cl->qdisc->ops->drop(cl->qdisc);
			if (len > 0) {
				sch->q.qlen--;
				if (cl->qdisc->q.qlen == 0)
					list_del(&cl->alist);
				return len;
			}
		}
	}
	return 0;
}
 440
 441static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 442{
 443        struct drr_sched *q = qdisc_priv(sch);
 444        int err;
 445
 446        err = qdisc_class_hash_init(&q->clhash);
 447        if (err < 0)
 448                return err;
 449        INIT_LIST_HEAD(&q->active);
 450        return 0;
 451}
 452
/*
 * Qdisc reset: deactivate every class that still has packets queued,
 * reset each leaf qdisc, and zero the root queue length.
 */
static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			/* Non-empty classes are on the active list; unlink first. */
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
	sch->q.qlen = 0;
}
 468
/*
 * Qdisc teardown: destroy the filter chain, every class (using the
 * _safe iterator since drr_destroy_class frees the entries), and
 * finally the class hash itself.
 */
static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}
 485
/* Class-level operations exposed to the tc class/filter machinery. */
static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.get		= drr_get_class,
	.put		= drr_put_class,
	.tcf_chain	= drr_tcf_chain,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};
 501
/* Top-level qdisc operations registered under the "drr" id. */
static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= drr_drop,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};
 515
/* Module entry point: register the DRR qdisc with the packet scheduler. */
static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}
 520
/* Module exit point: unregister the DRR qdisc. */
static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}
 525
/* Module registration boilerplate. */
module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");
 529