/* source: linux/net/sched/sch_dsmark.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* net/sched/sch_dsmark.c - Differentiated Services field marker */
   3
   4/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
   5
   6
   7#include <linux/module.h>
   8#include <linux/init.h>
   9#include <linux/slab.h>
  10#include <linux/types.h>
  11#include <linux/string.h>
  12#include <linux/errno.h>
  13#include <linux/skbuff.h>
  14#include <linux/rtnetlink.h>
  15#include <linux/bitops.h>
  16#include <net/pkt_sched.h>
  17#include <net/pkt_cls.h>
  18#include <net/dsfield.h>
  19#include <net/inet_ecn.h>
  20#include <asm/byteorder.h>
  21
  22/*
  23 * classid      class           marking
  24 * -------      -----           -------
  25 *   n/a          0             n/a
  26 *   x:0          1             use entry [0]
  27 *   ...         ...            ...
  28 *   x:y y>0     y+1            use entry [y]
  29 *   ...         ...            ...
  30 * x:indices-1  indices         use entry [indices-1]
  31 *   ...         ...            ...
  32 *   x:y         y+1            use entry [y & (indices-1)]
  33 *   ...         ...            ...
  34 * 0xffff       0x10000         use entry [indices-1]
  35 */
  36
  37
/* Sentinel outside the valid tc_index range (0..0xffff): "no default class". */
#define NO_DEFAULT_INDEX	(1 << 16)
  39
/* Per-class DS-field rewrite rule, applied at dequeue time via
 * ipv4/ipv6_change_dsfield(): new = (old & mask) | value.
 */
struct mask_value {
	u8			mask;
	u8			value;
};
  44
/* Private state of a dsmark qdisc instance. */
struct dsmark_qdisc_data {
	struct Qdisc		*q;		/* inner (child) qdisc */
	struct tcf_proto __rcu	*filter_list;	/* classifier chain */
	struct tcf_block	*block;		/* shared filter block */
	struct mask_value	*mv;		/* per-class (mask, value) table */
	u16			indices;	/* table size; power of two (enforced in init) */
	u8			set_tc_index;	/* copy DS field into skb->tc_index on enqueue */
	u32			default_index;	/* index range is 0...0xffff */
#define DSMARK_EMBEDDED_SZ	16
	/* small tables live inline to avoid a separate allocation */
	struct mask_value	embedded[DSMARK_EMBEDDED_SZ];
};
  56
  57static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
  58{
  59        return index <= p->indices && index > 0;
  60}
  61
  62/* ------------------------- Class/flow operations ------------------------- */
  63
/* Replace the inner qdisc.  A NULL @new is substituted with a fresh pfifo
 * (falling back to noop_qdisc if allocation fails); the previous child is
 * handed back through @old for the caller to release.
 */
static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p],new %p,old %p)\n",
		 __func__, sch, p, new, old);

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					sch->handle, NULL);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &p->q);
	return 0;
}
  83
  84static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
  85{
  86        struct dsmark_qdisc_data *p = qdisc_priv(sch);
  87        return p->q;
  88}
  89
/* Map a classid to a class handle: minor + 1, so that 0 can mean "none". */
static unsigned long dsmark_find(struct Qdisc *sch, u32 classid)
{
	return TC_H_MIN(classid) + 1;
}
  94
/* Binding a filter to a class needs no refcounting here; just resolve the
 * classid to a class handle.
 */
static unsigned long dsmark_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n",
		 __func__, sch, qdisc_priv(sch), classid);

	return dsmark_find(sch, classid);
}
 103
/* Nothing to undo: bind_tcf takes no reference on the class. */
static void dsmark_unbind_filter(struct Qdisc *sch, unsigned long cl)
{
}
 107
/* Netlink attribute policy for both qdisc options and class changes. */
static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
	[TCA_DSMARK_INDICES]		= { .type = NLA_U16 },
	[TCA_DSMARK_DEFAULT_INDEX]	= { .type = NLA_U16 },
	[TCA_DSMARK_SET_TC_INDEX]	= { .type = NLA_FLAG },
	[TCA_DSMARK_MASK]		= { .type = NLA_U8 },
	[TCA_DSMARK_VALUE]		= { .type = NLA_U8 },
};
 115
/* Update the (mask, value) pair of an existing class.  Classes are implicit
 * (one per table slot), so this never creates anything: an out-of-range
 * handle yields -ENOENT and missing options yield -EINVAL.
 */
static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg,
			 struct netlink_ext_ack *extack)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;

	pr_debug("%s(sch %p,[qdisc %p],classid %x,parent %x), arg 0x%lx\n",
		 __func__, sch, p, classid, parent, *arg);

	if (!dsmark_valid_index(p, *arg)) {
		err = -ENOENT;
		goto errout;
	}

	if (!opt)
		goto errout;

	err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
					  dsmark_policy, NULL);
	if (err < 0)
		goto errout;

	/* Either attribute may be given alone; the other keeps its value.
	 * Class handle *arg is 1-based, the table is 0-based.
	 */
	if (tb[TCA_DSMARK_VALUE])
		p->mv[*arg - 1].value = nla_get_u8(tb[TCA_DSMARK_VALUE]);

	if (tb[TCA_DSMARK_MASK])
		p->mv[*arg - 1].mask = nla_get_u8(tb[TCA_DSMARK_MASK]);

	err = 0;

errout:
	return err;
}
 152
 153static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
 154{
 155        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 156
 157        if (!dsmark_valid_index(p, arg))
 158                return -EINVAL;
 159
 160        p->mv[arg - 1].mask = 0xff;
 161        p->mv[arg - 1].value = 0;
 162
 163        return 0;
 164}
 165
/* Enumerate classes for the walker, skipping slots still at the identity
 * marking (mask 0xff, value 0).  Note skipped slots still advance
 * walker->count, keeping the handle <-> count mapping stable.
 */
static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int i;

	pr_debug("%s(sch %p,[qdisc %p],walker %p)\n",
		 __func__, sch, p, walker);

	if (walker->stop)
		return;

	for (i = 0; i < p->indices; i++) {
		if (p->mv[i].mask == 0xff && !p->mv[i].value)
			goto ignore;
		if (walker->count >= walker->skip) {
			/* class handles are 1-based, hence i + 1 */
			if (walker->fn(sch, i + 1, walker) < 0) {
				walker->stop = 1;
				break;
			}
		}
ignore:
		walker->count++;
	}
}
 190
 191static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
 192                                          struct netlink_ext_ack *extack)
 193{
 194        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 195
 196        return p->block;
 197}
 198
 199/* --------------------------- Qdisc operations ---------------------------- */
 200
/* Enqueue: optionally seed skb->tc_index from the packet's DS field, then
 * classify (skb->priority override, else the filter chain, else the
 * configured default), and hand the skb to the inner qdisc.
 */
static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int err;

	pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);

	if (p->set_tc_index) {
		int wlen = skb_network_offset(skb);

		switch (tc_skb_protocol(skb)) {
		case htons(ETH_P_IP):
			/* Header must be present and writable before we read
			 * the DS field; dequeue will rewrite it in place.
			 */
			wlen += sizeof(struct iphdr);
			if (!pskb_may_pull(skb, wlen) ||
			    skb_try_make_writable(skb, wlen))
				goto drop;

			/* ECN bits are masked out: they are not part of the
			 * DSCP codepoint.
			 */
			skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
				& ~INET_ECN_MASK;
			break;

		case htons(ETH_P_IPV6):
			wlen += sizeof(struct ipv6hdr);
			if (!pskb_may_pull(skb, wlen) ||
			    skb_try_make_writable(skb, wlen))
				goto drop;

			skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
				& ~INET_ECN_MASK;
			break;
		default:
			skb->tc_index = 0;
			break;
		}
	}

	/* skb->priority addressed at this qdisc wins over the filters. */
	if (TC_H_MAJ(skb->priority) == sch->handle)
		skb->tc_index = TC_H_MIN(skb->priority);
	else {
		struct tcf_result res;
		struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
		int result = tcf_classify(skb, fl, &res, false);

		pr_debug("result %d class 0x%04x\n", result, res.classid);

		switch (result) {
#ifdef CONFIG_NET_CLS_ACT
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			/* Action consumed the packet; not a drop for stats. */
			__qdisc_drop(skb, to_free);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;

		case TC_ACT_SHOT:
			goto drop;
#endif
		case TC_ACT_OK:
			skb->tc_index = TC_H_MIN(res.classid);
			break;

		default:
			/* No match: fall back to the default class, if any. */
			if (p->default_index != NO_DEFAULT_INDEX)
				skb->tc_index = p->default_index;
			break;
		}
	}

	err = qdisc_enqueue(skb, p->q, to_free);
	if (err != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(err))
			qdisc_qstats_drop(sch);
		return err;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;

drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
 286
/* Dequeue from the inner qdisc and rewrite the packet's DS field according
 * to the class selected by skb->tc_index.  This is where the actual
 * DiffServ marking happens.
 */
static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct sk_buff *skb;
	u32 index;

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	skb = qdisc_dequeue_peeked(p->q);
	if (skb == NULL)
		return NULL;

	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;

	/* indices is a power of two, so this mask folds any tc_index into
	 * a valid table slot (see the classid table at the top of the file).
	 */
	index = skb->tc_index & (p->indices - 1);
	pr_debug("index %d->%d\n", skb->tc_index, index);

	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
				    p->mv[index].value);
		break;
	case htons(ETH_P_IPV6):
		ipv6_change_dsfield(ipv6_hdr(skb), p->mv[index].mask,
				    p->mv[index].value);
		break;
	default:
		/*
		 * Only complain if a change was actually attempted.
		 * This way, we can send non-IP traffic through dsmark
		 * and don't need yet another qdisc as a bypass.
		 */
		if (p->mv[index].mask != 0xff || p->mv[index].value)
			pr_warn("%s: unsupported protocol %d\n",
				__func__, ntohs(tc_skb_protocol(skb)));
		break;
	}

	return skb;
}
 329
 330static struct sk_buff *dsmark_peek(struct Qdisc *sch)
 331{
 332        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 333
 334        pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
 335
 336        return p->q->ops->peek(p->q);
 337}
 338
/* Parse qdisc options, allocate the (mask, value) table and create the
 * default pfifo child.  On any error path after tcf_block_get(), cleanup
 * is left to dsmark_destroy(), which the core invokes on init failure
 * (tcf_block_put/qdisc_put/kfree all tolerate the partially-set-up state).
 */
static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;
	u32 default_index = NO_DEFAULT_INDEX;
	u16 indices;
	int i;

	pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt);

	if (!opt)
		goto errout;

	err = tcf_block_get(&p->block, &p->filter_list, sch, extack);
	if (err)
		return err;

	err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
					  dsmark_policy, NULL);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	if (!tb[TCA_DSMARK_INDICES])
		goto errout;
	indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);

	/* indices must be a non-zero power of two so it can be used as a
	 * mask in dsmark_dequeue().
	 */
	if (hweight32(indices) != 1)
		goto errout;

	if (tb[TCA_DSMARK_DEFAULT_INDEX])
		default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);

	/* Small tables use the embedded array; larger ones are allocated. */
	if (indices <= DSMARK_EMBEDDED_SZ)
		p->mv = p->embedded;
	else
		p->mv = kmalloc_array(indices, sizeof(*p->mv), GFP_KERNEL);
	if (!p->mv) {
		err = -ENOMEM;
		goto errout;
	}
	/* Start every class at the identity marking. */
	for (i = 0; i < indices; i++) {
		p->mv[i].mask = 0xff;
		p->mv[i].value = 0;
	}
	p->indices = indices;
	p->default_index = default_index;
	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);

	p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle,
				 NULL);
	if (p->q == NULL)
		p->q = &noop_qdisc;
	else
		qdisc_hash_add(p->q, true);

	pr_debug("%s: qdisc %p\n", __func__, p->q);

	err = 0;
errout:
	return err;
}
 403
 404static void dsmark_reset(struct Qdisc *sch)
 405{
 406        struct dsmark_qdisc_data *p = qdisc_priv(sch);
 407
 408        pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
 409        qdisc_reset(p->q);
 410        sch->qstats.backlog = 0;
 411        sch->q.qlen = 0;
 412}
 413
/* Release everything init acquired.  Also runs after a failed init, so it
 * relies on tcf_block_put/qdisc_put/kfree accepting NULL, and only frees
 * the table when it was separately allocated (not the embedded array).
 */
static void dsmark_destroy(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	tcf_block_put(p->block);
	qdisc_put(p->q);
	if (p->mv != p->embedded)
		kfree(p->mv);
}
 425
/* Dump one class: its handle (minor = table slot = cl - 1) plus the
 * current mask/value pair, nested under TCA_OPTIONS.
 */
static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	pr_debug("%s(sch %p,[qdisc %p],class %ld\n", __func__, sch, p, cl);

	if (!dsmark_valid_index(p, cl))
		return -EINVAL;

	tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
	tcm->tcm_info = p->q->handle;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mv[cl - 1].mask) ||
	    nla_put_u8(skb, TCA_DSMARK_VALUE, p->mv[cl - 1].value))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
 453
/* Dump the qdisc's own options; optional attributes are emitted only when
 * they differ from their defaults (no default class, flag unset).
 */
static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
		goto nla_put_failure;

	if (p->default_index != NO_DEFAULT_INDEX &&
	    nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
		goto nla_put_failure;

	if (p->set_tc_index &&
	    nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
 479
/* Class-level operations table. */
static const struct Qdisc_class_ops dsmark_class_ops = {
	.graft		=	dsmark_graft,
	.leaf		=	dsmark_leaf,
	.find		=	dsmark_find,
	.change		=	dsmark_change,
	.delete		=	dsmark_delete,
	.walk		=	dsmark_walk,
	.tcf_block	=	dsmark_tcf_block,
	.bind_tcf	=	dsmark_bind_filter,
	.unbind_tcf	=	dsmark_unbind_filter,
	.dump		=	dsmark_dump_class,
};
 492
/* Qdisc-level operations table. */
static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&dsmark_class_ops,
	.id		=	"dsmark",
	.priv_size	=	sizeof(struct dsmark_qdisc_data),
	.enqueue	=	dsmark_enqueue,
	.dequeue	=	dsmark_dequeue,
	.peek		=	dsmark_peek,
	.init		=	dsmark_init,
	.reset		=	dsmark_reset,
	.destroy	=	dsmark_destroy,
	.change		=	NULL,
	.dump		=	dsmark_dump,
	.owner		=	THIS_MODULE,
};
 508
/* Module entry point: register the qdisc with the packet scheduler core. */
static int __init dsmark_module_init(void)
{
	return register_qdisc(&dsmark_qdisc_ops);
}
 513
/* Module exit point: unregister the qdisc. */
static void __exit dsmark_module_exit(void)
{
	unregister_qdisc(&dsmark_qdisc_ops);
}
 518
module_init(dsmark_module_init)
module_exit(dsmark_module_exit)

MODULE_LICENSE("GPL");
 523