/* linux/net/sched/sch_dsmark.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* net/sched/sch_dsmark.c - Differentiated Services field marker */
   3
   4/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
   5
   6
   7#include <linux/module.h>
   8#include <linux/init.h>
   9#include <linux/slab.h>
  10#include <linux/types.h>
  11#include <linux/string.h>
  12#include <linux/errno.h>
  13#include <linux/skbuff.h>
  14#include <linux/rtnetlink.h>
  15#include <linux/bitops.h>
  16#include <net/pkt_sched.h>
  17#include <net/pkt_cls.h>
  18#include <net/dsfield.h>
  19#include <net/inet_ecn.h>
  20#include <asm/byteorder.h>
  21
  22/*
  23 * classid      class           marking
  24 * -------      -----           -------
  25 *   n/a          0             n/a
  26 *   x:0          1             use entry [0]
  27 *   ...         ...            ...
  28 *   x:y y>0     y+1            use entry [y]
  29 *   ...         ...            ...
  30 * x:indices-1  indices         use entry [indices-1]
  31 *   ...         ...            ...
  32 *   x:y         y+1            use entry [y & (indices-1)]
  33 *   ...         ...            ...
  34 * 0xffff       0x10000         use entry [indices-1]
  35 */
  36
  37
/* Sentinel meaning "no default class configured": one past the largest
 * valid 16-bit index, so it can never collide with a real index.
 */
#define NO_DEFAULT_INDEX	(1 << 16)
  39
/* Per-class DS-field rewrite rule applied at dequeue:
 * new_ds = (old_ds & mask) | value.
 */
struct mask_value {
	u8			mask;
	u8			value;
};
  44
/* Private data of a dsmark qdisc instance. */
struct dsmark_qdisc_data {
	struct Qdisc		*q;		/* inner (child) qdisc holding the packets */
	struct tcf_proto __rcu	*filter_list;	/* classifier chain, RCU-protected */
	struct tcf_block	*block;		/* tcf block backing filter_list */
	struct mask_value	*mv;		/* one mask/value entry per class */
	u16			indices;	/* number of mv[] entries; power of two */
	u8			set_tc_index;	/* if set, latch DS field into skb->tc_index */
	u32			default_index;	/* index range is 0...0xffff */
#define DSMARK_EMBEDDED_SZ	16
	/* Small tables live here so init can avoid a kmalloc. */
	struct mask_value	embedded[DSMARK_EMBEDDED_SZ];
};
  56
  57static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
  58{
  59        return index <= p->indices && index > 0;
  60}
  61
  62/* ------------------------- Class/flow operations ------------------------- */
  63
  64static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
  65                        struct Qdisc *new, struct Qdisc **old,
  66                        struct netlink_ext_ack *extack)
  67{
  68        struct dsmark_qdisc_data *p = qdisc_priv(sch);
  69
  70        pr_debug("%s(sch %p,[qdisc %p],new %p,old %p)\n",
  71                 __func__, sch, p, new, old);
  72
  73        if (new == NULL) {
  74                new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
  75                                        sch->handle, NULL);
  76                if (new == NULL)
  77                        new = &noop_qdisc;
  78        }
  79
  80        *old = qdisc_replace(sch, new, &p->q);
  81        return 0;
  82}
  83
  84static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
  85{
  86        struct dsmark_qdisc_data *p = qdisc_priv(sch);
  87        return p->q;
  88}
  89
  90static unsigned long dsmark_find(struct Qdisc *sch, u32 classid)
  91{
  92        return TC_H_MIN(classid) + 1;
  93}
  94
  95static unsigned long dsmark_bind_filter(struct Qdisc *sch,
  96                                        unsigned long parent, u32 classid)
  97{
  98        pr_debug("%s(sch %p,[qdisc %p],classid %x)\n",
  99                 __func__, sch, qdisc_priv(sch), classid);
 100
 101        return dsmark_find(sch, classid);
 102}
 103
/* Nothing to release: classes are static slots in the mv[] table. */
static void dsmark_unbind_filter(struct Qdisc *sch, unsigned long cl)
{
}
 107
/* Netlink validation policy for TCA_DSMARK_* attributes. */
static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
	[TCA_DSMARK_INDICES]		= { .type = NLA_U16 },
	[TCA_DSMARK_DEFAULT_INDEX]	= { .type = NLA_U16 },
	[TCA_DSMARK_SET_TC_INDEX]	= { .type = NLA_FLAG },
	[TCA_DSMARK_MASK]		= { .type = NLA_U8 },
	[TCA_DSMARK_VALUE]		= { .type = NLA_U8 },
};
 115
 116static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
 117                         struct nlattr **tca, unsigned long *arg,
 118                         struct netlink_ext_ack *extack)
 119{
 120        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 121        struct nlattr *opt = tca[TCA_OPTIONS];
 122        struct nlattr *tb[TCA_DSMARK_MAX + 1];
 123        int err = -EINVAL;
 124
 125        pr_debug("%s(sch %p,[qdisc %p],classid %x,parent %x), arg 0x%lx\n",
 126                 __func__, sch, p, classid, parent, *arg);
 127
 128        if (!dsmark_valid_index(p, *arg)) {
 129                err = -ENOENT;
 130                goto errout;
 131        }
 132
 133        if (!opt)
 134                goto errout;
 135
 136        err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
 137                                          dsmark_policy, NULL);
 138        if (err < 0)
 139                goto errout;
 140
 141        if (tb[TCA_DSMARK_VALUE])
 142                p->mv[*arg - 1].value = nla_get_u8(tb[TCA_DSMARK_VALUE]);
 143
 144        if (tb[TCA_DSMARK_MASK])
 145                p->mv[*arg - 1].mask = nla_get_u8(tb[TCA_DSMARK_MASK]);
 146
 147        err = 0;
 148
 149errout:
 150        return err;
 151}
 152
 153static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
 154{
 155        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 156
 157        if (!dsmark_valid_index(p, arg))
 158                return -EINVAL;
 159
 160        p->mv[arg - 1].mask = 0xff;
 161        p->mv[arg - 1].value = 0;
 162
 163        return 0;
 164}
 165
 166static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
 167{
 168        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 169        int i;
 170
 171        pr_debug("%s(sch %p,[qdisc %p],walker %p)\n",
 172                 __func__, sch, p, walker);
 173
 174        if (walker->stop)
 175                return;
 176
 177        for (i = 0; i < p->indices; i++) {
 178                if (p->mv[i].mask == 0xff && !p->mv[i].value)
 179                        goto ignore;
 180                if (walker->count >= walker->skip) {
 181                        if (walker->fn(sch, i + 1, walker) < 0) {
 182                                walker->stop = 1;
 183                                break;
 184                        }
 185                }
 186ignore:
 187                walker->count++;
 188        }
 189}
 190
 191static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
 192                                          struct netlink_ext_ack *extack)
 193{
 194        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 195
 196        return p->block;
 197}
 198
 199/* --------------------------- Qdisc operations ---------------------------- */
 200
/* Enqueue: optionally latch the packet's DS field into skb->tc_index,
 * classify the packet to select the class whose mask/value pair will be
 * applied at dequeue time, then hand it to the inner qdisc.
 */
static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int err;

	pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);

	if (p->set_tc_index) {
		int wlen = skb_network_offset(skb);

		switch (tc_skb_protocol(skb)) {
		case htons(ETH_P_IP):
			wlen += sizeof(struct iphdr);
			/* The header must be present and writable before it
			 * is read; the pull may relocate the data, so the
			 * header pointer is fetched only afterwards.
			 */
			if (!pskb_may_pull(skb, wlen) ||
			    skb_try_make_writable(skb, wlen))
				goto drop;

			/* Keep only the DSCP bits; ECN bits are masked off. */
			skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
				& ~INET_ECN_MASK;
			break;

		case htons(ETH_P_IPV6):
			wlen += sizeof(struct ipv6hdr);
			if (!pskb_may_pull(skb, wlen) ||
			    skb_try_make_writable(skb, wlen))
				goto drop;

			skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
				& ~INET_ECN_MASK;
			break;
		default:
			/* Non-IP traffic carries no DS field. */
			skb->tc_index = 0;
			break;
		}
	}

	/* An skb->priority addressed to this qdisc overrides classification. */
	if (TC_H_MAJ(skb->priority) == sch->handle)
		skb->tc_index = TC_H_MIN(skb->priority);
	else {
		struct tcf_result res;
		struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
		int result = tcf_classify(skb, fl, &res, false);

		pr_debug("result %d class 0x%04x\n", result, res.classid);

		switch (result) {
#ifdef CONFIG_NET_CLS_ACT
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			/* Packet was consumed by an action: report success
			 * to the caller but flag it as stolen.
			 */
			__qdisc_drop(skb, to_free);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;

		case TC_ACT_SHOT:
			goto drop;
#endif
		case TC_ACT_OK:
			skb->tc_index = TC_H_MIN(res.classid);
			break;

		default:
			/* No filter matched: fall back to the configured
			 * default class, if any.
			 */
			if (p->default_index != NO_DEFAULT_INDEX)
				skb->tc_index = p->default_index;
			break;
		}
	}

	err = qdisc_enqueue(skb, p->q, to_free);
	if (err != NET_XMIT_SUCCESS) {
		/* Congestion-notification codes are not counted as drops. */
		if (net_xmit_drop_count(err))
			qdisc_qstats_drop(sch);
		return err;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;

drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
 286
 287static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
 288{
 289        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 290        struct sk_buff *skb;
 291        u32 index;
 292
 293        pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
 294
 295        skb = qdisc_dequeue_peeked(p->q);
 296        if (skb == NULL)
 297                return NULL;
 298
 299        qdisc_bstats_update(sch, skb);
 300        qdisc_qstats_backlog_dec(sch, skb);
 301        sch->q.qlen--;
 302
 303        index = skb->tc_index & (p->indices - 1);
 304        pr_debug("index %d->%d\n", skb->tc_index, index);
 305
 306        switch (tc_skb_protocol(skb)) {
 307        case htons(ETH_P_IP):
 308                ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
 309                                    p->mv[index].value);
 310                        break;
 311        case htons(ETH_P_IPV6):
 312                ipv6_change_dsfield(ipv6_hdr(skb), p->mv[index].mask,
 313                                    p->mv[index].value);
 314                        break;
 315        default:
 316                /*
 317                 * Only complain if a change was actually attempted.
 318                 * This way, we can send non-IP traffic through dsmark
 319                 * and don't need yet another qdisc as a bypass.
 320                 */
 321                if (p->mv[index].mask != 0xff || p->mv[index].value)
 322                        pr_warn("%s: unsupported protocol %d\n",
 323                                __func__, ntohs(tc_skb_protocol(skb)));
 324                break;
 325        }
 326
 327        return skb;
 328}
 329
 330static struct sk_buff *dsmark_peek(struct Qdisc *sch)
 331{
 332        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 333
 334        pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
 335
 336        return p->q->ops->peek(p->q);
 337}
 338
 339static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
 340                       struct netlink_ext_ack *extack)
 341{
 342        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 343        struct nlattr *tb[TCA_DSMARK_MAX + 1];
 344        int err = -EINVAL;
 345        u32 default_index = NO_DEFAULT_INDEX;
 346        u16 indices;
 347        int i;
 348
 349        pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt);
 350
 351        if (!opt)
 352                goto errout;
 353
 354        err = tcf_block_get(&p->block, &p->filter_list, sch, extack);
 355        if (err)
 356                return err;
 357
 358        err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
 359                                          dsmark_policy, NULL);
 360        if (err < 0)
 361                goto errout;
 362
 363        err = -EINVAL;
 364        indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
 365
 366        if (hweight32(indices) != 1)
 367                goto errout;
 368
 369        if (tb[TCA_DSMARK_DEFAULT_INDEX])
 370                default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);
 371
 372        if (indices <= DSMARK_EMBEDDED_SZ)
 373                p->mv = p->embedded;
 374        else
 375                p->mv = kmalloc_array(indices, sizeof(*p->mv), GFP_KERNEL);
 376        if (!p->mv) {
 377                err = -ENOMEM;
 378                goto errout;
 379        }
 380        for (i = 0; i < indices; i++) {
 381                p->mv[i].mask = 0xff;
 382                p->mv[i].value = 0;
 383        }
 384        p->indices = indices;
 385        p->default_index = default_index;
 386        p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
 387
 388        p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle,
 389                                 NULL);
 390        if (p->q == NULL)
 391                p->q = &noop_qdisc;
 392        else
 393                qdisc_hash_add(p->q, true);
 394
 395        pr_debug("%s: qdisc %p\n", __func__, p->q);
 396
 397        err = 0;
 398errout:
 399        return err;
 400}
 401
 402static void dsmark_reset(struct Qdisc *sch)
 403{
 404        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 405
 406        pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
 407        qdisc_reset(p->q);
 408        sch->qstats.backlog = 0;
 409        sch->q.qlen = 0;
 410}
 411
 412static void dsmark_destroy(struct Qdisc *sch)
 413{
 414        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 415
 416        pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
 417
 418        tcf_block_put(p->block);
 419        qdisc_put(p->q);
 420        if (p->mv != p->embedded)
 421                kfree(p->mv);
 422}
 423
 424static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
 425                             struct sk_buff *skb, struct tcmsg *tcm)
 426{
 427        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 428        struct nlattr *opts = NULL;
 429
 430        pr_debug("%s(sch %p,[qdisc %p],class %ld\n", __func__, sch, p, cl);
 431
 432        if (!dsmark_valid_index(p, cl))
 433                return -EINVAL;
 434
 435        tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
 436        tcm->tcm_info = p->q->handle;
 437
 438        opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
 439        if (opts == NULL)
 440                goto nla_put_failure;
 441        if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mv[cl - 1].mask) ||
 442            nla_put_u8(skb, TCA_DSMARK_VALUE, p->mv[cl - 1].value))
 443                goto nla_put_failure;
 444
 445        return nla_nest_end(skb, opts);
 446
 447nla_put_failure:
 448        nla_nest_cancel(skb, opts);
 449        return -EMSGSIZE;
 450}
 451
 452static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
 453{
 454        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 455        struct nlattr *opts = NULL;
 456
 457        opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
 458        if (opts == NULL)
 459                goto nla_put_failure;
 460        if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
 461                goto nla_put_failure;
 462
 463        if (p->default_index != NO_DEFAULT_INDEX &&
 464            nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
 465                goto nla_put_failure;
 466
 467        if (p->set_tc_index &&
 468            nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
 469                goto nla_put_failure;
 470
 471        return nla_nest_end(skb, opts);
 472
 473nla_put_failure:
 474        nla_nest_cancel(skb, opts);
 475        return -EMSGSIZE;
 476}
 477
/* Class operations exported to the traffic-control core. */
static const struct Qdisc_class_ops dsmark_class_ops = {
	.graft		=	dsmark_graft,
	.leaf		=	dsmark_leaf,
	.find		=	dsmark_find,
	.change		=	dsmark_change,
	.delete		=	dsmark_delete,
	.walk		=	dsmark_walk,
	.tcf_block	=	dsmark_tcf_block,
	.bind_tcf	=	dsmark_bind_filter,
	.unbind_tcf	=	dsmark_unbind_filter,
	.dump		=	dsmark_dump_class,
};
 490
/* Qdisc operations for the "dsmark" scheduler. */
static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&dsmark_class_ops,
	.id		=	"dsmark",
	.priv_size	=	sizeof(struct dsmark_qdisc_data),
	.enqueue	=	dsmark_enqueue,
	.dequeue	=	dsmark_dequeue,
	.peek		=	dsmark_peek,
	.init		=	dsmark_init,
	.reset		=	dsmark_reset,
	.destroy	=	dsmark_destroy,
	.change		=	NULL,
	.dump		=	dsmark_dump,
	.owner		=	THIS_MODULE,
};
 506
/* Register the dsmark qdisc with the traffic-control core. */
static int __init dsmark_module_init(void)
{
	return register_qdisc(&dsmark_qdisc_ops);
}
 511
/* Unregister the dsmark qdisc on module removal. */
static void __exit dsmark_module_exit(void)
{
	unregister_qdisc(&dsmark_qdisc_ops);
}
 516
/* Module entry/exit points and license declaration. */
module_init(dsmark_module_init)
module_exit(dsmark_module_exit)

MODULE_LICENSE("GPL");
 521