linux/net/sched/sch_multiq.c
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

struct multiq_sched_data {
        u16 bands;
        u16 max_bands;
        u16 curband;
        struct tcf_proto __rcu *filter_list;
        struct tcf_block *block;
        struct Qdisc **queues;
};

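/* Pick the band (child qdisc) for an skb.  Attached filters are run first
 * so their actions can steal or drop the packet under CONFIG_NET_CLS_ACT;
 * the band itself comes from the skb's queue mapping, falling back to
 * band 0 when the mapping is outside the configured range.
 */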
static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
        struct multiq_sched_data *q = qdisc_priv(sch);
        u32 band;
        struct tcf_result res;
        struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
        int err;

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
        switch (err) {
        case TC_ACT_STOLEN:
        case TC_ACT_QUEUED:
        case TC_ACT_TRAP:
                *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                /* fall through */
        case TC_ACT_SHOT:
                return NULL;
        }
#endif
        band = skb_get_queue_mapping(skb);

        if (band >= q->bands)
                return q->queues[0];

        return q->queues[band];
}

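/* Classify the skb onto one of the per-band child qdiscs and enqueue it
 * there, charging drops against this qdisc when the child rejects it.
 */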
static int
multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
               struct sk_buff **to_free)
{
        struct Qdisc *qdisc;
        int ret;

        qdisc = multiq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
        if (qdisc == NULL) {

                if (ret & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                __qdisc_drop(skb, to_free);
                return ret;
        }
#endif

        ret = qdisc_enqueue(skb, qdisc, to_free);
        if (ret == NET_XMIT_SUCCESS) {
                sch->q.qlen++;
                return NET_XMIT_SUCCESS;
        }
        if (net_xmit_drop_count(ret))
                qdisc_qstats_drop(sch);
        return ret;
}

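/* Round-robin dequeue: advance curband on every attempt and skip bands
 * whose hardware TX queue is stopped, so one stalled subqueue cannot
 * starve the others.
 */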
static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
        struct multiq_sched_data *q = qdisc_priv(sch);
        struct Qdisc *qdisc;
        struct sk_buff *skb;
        int band;

        for (band = 0; band < q->bands; band++) {
                /* cycle through bands to ensure fairness */
                q->curband++;
                if (q->curband >= q->bands)
                        q->curband = 0;

                /* Check that target subqueue is available before
                 * pulling an skb to avoid head-of-line blocking.
                 */
                if (!netif_xmit_stopped(
                    netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
                        qdisc = q->queues[q->curband];
                        skb = qdisc->dequeue(qdisc);
                        if (skb) {
                                qdisc_bstats_update(sch, skb);
                                sch->q.qlen--;
                                return skb;
                        }
                }
        }
        return NULL;

}

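/* Same round-robin scan as multiq_dequeue, but on a local copy of curband
 * and without removing the skb from the child qdisc.
 */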
static struct sk_buff *multiq_peek(struct Qdisc *sch)
{
        struct multiq_sched_data *q = qdisc_priv(sch);
        unsigned int curband = q->curband;
        struct Qdisc *qdisc;
        struct sk_buff *skb;
        int band;

        for (band = 0; band < q->bands; band++) {
                /* cycle through bands to ensure fairness */
                curband++;
                if (curband >= q->bands)
                        curband = 0;

                /* Check that target subqueue is available before
                 * pulling an skb to avoid head-of-line blocking.
                 */
                if (!netif_xmit_stopped(
                    netdev_get_tx_queue(qdisc_dev(sch), curband))) {
                        qdisc = q->queues[curband];
                        skb = qdisc->ops->peek(qdisc);
                        if (skb)
                                return skb;
                }
        }
        return NULL;

}

static void
multiq_reset(struct Qdisc *sch)
{
        u16 band;
        struct multiq_sched_data *q = qdisc_priv(sch);

        for (band = 0; band < q->bands; band++)
                qdisc_reset(q->queues[band]);
        sch->q.qlen = 0;
        q->curband = 0;
}

static void
multiq_destroy(struct Qdisc *sch)
{
        int band;
        struct multiq_sched_data *q = qdisc_priv(sch);

        tcf_block_put(q->block);
        for (band = 0; band < q->bands; band++)
                qdisc_destroy(q->queues[band]);

        kfree(q->queues);
}

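/* (Re)configure the qdisc.  The band count always tracks the device's
 * real_num_tx_queues: bands beyond the new count are torn down, and any
 * active band still backed by noop_qdisc gets a fresh pfifo child.
 */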
static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
                       struct netlink_ext_ack *extack)
{
        struct multiq_sched_data *q = qdisc_priv(sch);
        struct tc_multiq_qopt *qopt;
        int i;

        if (!netif_is_multiqueue(qdisc_dev(sch)))
                return -EOPNOTSUPP;
        if (nla_len(opt) < sizeof(*qopt))
                return -EINVAL;

        qopt = nla_data(opt);

        qopt->bands = qdisc_dev(sch)->real_num_tx_queues;

        sch_tree_lock(sch);
        q->bands = qopt->bands;
        for (i = q->bands; i < q->max_bands; i++) {
                if (q->queues[i] != &noop_qdisc) {
                        struct Qdisc *child = q->queues[i];

                        q->queues[i] = &noop_qdisc;
                        qdisc_tree_reduce_backlog(child, child->q.qlen,
                                                  child->qstats.backlog);
                        qdisc_destroy(child);
                }
        }

        sch_tree_unlock(sch);

        for (i = 0; i < q->bands; i++) {
                if (q->queues[i] == &noop_qdisc) {
                        struct Qdisc *child, *old;

                        child = qdisc_create_dflt(sch->dev_queue,
                                                  &pfifo_qdisc_ops,
                                                  TC_H_MAKE(sch->handle,
                                                            i + 1), extack);
                        if (child) {
                                sch_tree_lock(sch);
                                old = q->queues[i];
                                q->queues[i] = child;
                                if (child != &noop_qdisc)
                                        qdisc_hash_add(child, true);

                                if (old != &noop_qdisc) {
                                        qdisc_tree_reduce_backlog(old,
                                                                  old->q.qlen,
                                                                  old->qstats.backlog);
                                        qdisc_destroy(old);
                                }
                                sch_tree_unlock(sch);
                        }
                }
        }
        return 0;
}

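/* Allocate one child slot per device TX queue (initially noop_qdisc),
 * set up the classifier block, then let multiq_tune create the real
 * per-band children.
 */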
static int multiq_init(struct Qdisc *sch, struct nlattr *opt,
                       struct netlink_ext_ack *extack)
{
        struct multiq_sched_data *q = qdisc_priv(sch);
        int i, err;

        q->queues = NULL;

        if (!opt)
                return -EINVAL;

        err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;

        q->max_bands = qdisc_dev(sch)->num_tx_queues;

        q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
        if (!q->queues)
                return -ENOBUFS;
        for (i = 0; i < q->max_bands; i++)
                q->queues[i] = &noop_qdisc;

        return multiq_tune(sch, opt, extack);
}

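/* Report the active and maximum band counts to userspace via TCA_OPTIONS. */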
static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct multiq_sched_data *q = qdisc_priv(sch);
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_multiq_qopt opt;

        opt.bands = q->bands;
        opt.max_bands = q->max_bands;

        if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

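/* Replace the child qdisc of band (arg - 1); grafting NULL installs
 * noop_qdisc in its place.
 */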
static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                        struct Qdisc **old, struct netlink_ext_ack *extack)
{
        struct multiq_sched_data *q = qdisc_priv(sch);
        unsigned long band = arg - 1;

        if (new == NULL)
                new = &noop_qdisc;

        *old = qdisc_replace(sch, new, &q->queues[band]);
        return 0;
}

static struct Qdisc *
multiq_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct multiq_sched_data *q = qdisc_priv(sch);
        unsigned long band = arg - 1;

        return q->queues[band];
}

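/* Bands are exposed as classes with minor ids 1..bands; 0 means not found. */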
static unsigned long multiq_find(struct Qdisc *sch, u32 classid)
{
        struct multiq_sched_data *q = qdisc_priv(sch);
        unsigned long band = TC_H_MIN(classid);

        if (band - 1 >= q->bands)
                return 0;
        return band;
}

static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
                                 u32 classid)
{
        return multiq_find(sch, classid);
}


static void multiq_unbind(struct Qdisc *q, unsigned long cl)
{
}

static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
                             struct sk_buff *skb, struct tcmsg *tcm)
{
        struct multiq_sched_data *q = qdisc_priv(sch);

        tcm->tcm_handle |= TC_H_MIN(cl);
        tcm->tcm_info = q->queues[cl - 1]->handle;
        return 0;
}

static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                 struct gnet_dump *d)
{
        struct multiq_sched_data *q = qdisc_priv(sch);
        struct Qdisc *cl_q;

        cl_q = q->queues[cl - 1];
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl_q->bstats) < 0 ||
            gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
                return -1;

        return 0;
}

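/* Walk all bands, reporting each as class id (band + 1). */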
static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct multiq_sched_data *q = qdisc_priv(sch);
        int band;

        if (arg->stop)
                return;

        for (band = 0; band < q->bands; band++) {
                if (arg->count < arg->skip) {
                        arg->count++;
                        continue;
                }
                if (arg->fn(sch, band + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}

static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl,
                                          struct netlink_ext_ack *extack)
{
        struct multiq_sched_data *q = qdisc_priv(sch);

        if (cl)
                return NULL;
        return q->block;
}

static const struct Qdisc_class_ops multiq_class_ops = {
        .graft          =       multiq_graft,
        .leaf           =       multiq_leaf,
        .find           =       multiq_find,
        .walk           =       multiq_walk,
        .tcf_block      =       multiq_tcf_block,
        .bind_tcf       =       multiq_bind,
        .unbind_tcf     =       multiq_unbind,
        .dump           =       multiq_dump_class,
        .dump_stats     =       multiq_dump_class_stats,
};

static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
        .next           =       NULL,
        .cl_ops         =       &multiq_class_ops,
        .id             =       "multiq",
        .priv_size      =       sizeof(struct multiq_sched_data),
        .enqueue        =       multiq_enqueue,
        .dequeue        =       multiq_dequeue,
        .peek           =       multiq_peek,
        .init           =       multiq_init,
        .reset          =       multiq_reset,
        .destroy        =       multiq_destroy,
        .change         =       multiq_tune,
        .dump           =       multiq_dump,
        .owner          =       THIS_MODULE,
};

static int __init multiq_module_init(void)
{
        return register_qdisc(&multiq_qdisc_ops);
}

static void __exit multiq_module_exit(void)
{
        unregister_qdisc(&multiq_qdisc_ops);
}

module_init(multiq_module_init)
module_exit(multiq_module_exit)

MODULE_LICENSE("GPL");