linux/net/sched/cls_flower.c
/*
 * net/sched/cls_flower.c               Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>

struct fl_flow_key {
        int     indev_ifindex;
        struct flow_dissector_key_control control;
        struct flow_dissector_key_basic basic;
        struct flow_dissector_key_eth_addrs eth;
        struct flow_dissector_key_addrs ipaddrs;
        union {
                struct flow_dissector_key_ipv4_addrs ipv4;
                struct flow_dissector_key_ipv6_addrs ipv6;
        };
        struct flow_dissector_key_ports tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
        unsigned short int start;
        unsigned short int end;
};

struct fl_flow_mask {
        struct fl_flow_key key;
        struct fl_flow_mask_range range;
        struct rcu_head rcu;
};

struct cls_fl_head {
        struct rhashtable ht;
        struct fl_flow_mask mask;
        struct flow_dissector dissector;
        u32 hgen;
        bool mask_assigned;
        struct list_head filters;
        struct rhashtable_params ht_params;
        struct rcu_head rcu;
};

struct cls_fl_filter {
        struct rhash_head ht_node;
        struct fl_flow_key mkey;
        struct tcf_exts exts;
        struct tcf_result res;
        struct fl_flow_key key;
        struct list_head list;
        u32 handle;
        struct rcu_head rcu;
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
        return mask->range.end - mask->range.start;
}

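/* Find the smallest byte range of the mask that contains all set bits,
 * rounded out to long boundaries so that masking and hashing can be done
 * on whole longs.
 */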
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
        const u8 *bytes = (const u8 *) &mask->key;
        size_t size = sizeof(mask->key);
        size_t i, first = 0, last = size - 1;

        for (i = 0; i < sizeof(mask->key); i++) {
                if (bytes[i]) {
                        if (!first && i)
                                first = i;
                        last = i;
                }
        }
        mask->range.start = rounddown(first, sizeof(long));
        mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
                              const struct fl_flow_mask *mask)
{
        return (u8 *) key + mask->range.start;
}

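/* Build the masked lookup key: AND the flow key with the mask, long by
 * long, over the active mask range only.
 */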
static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
                              struct fl_flow_mask *mask)
{
        const long *lkey = fl_key_get_start(key, mask);
        const long *lmask = fl_key_get_start(&mask->key, mask);
        long *lmkey = fl_key_get_start(mkey, mask);
        int i;

        for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
                *lmkey++ = *lkey++ & *lmask++;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
                                  struct fl_flow_mask *mask)
{
        memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

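/* Fast path: dissect the skb into a flow key, mask it with the single
 * shared mask and look the masked key up in the hash table. On a hit,
 * run the filter's actions.
 */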
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                       struct tcf_result *res)
{
        struct cls_fl_head *head = rcu_dereference_bh(tp->root);
        struct cls_fl_filter *f;
        struct fl_flow_key skb_key;
        struct fl_flow_key skb_mkey;

        fl_clear_masked_range(&skb_key, &head->mask);
        skb_key.indev_ifindex = skb->skb_iif;
        /* skb_flow_dissect() does not set n_proto in case of an unknown
         * protocol, so set it here.
         */
        skb_key.basic.n_proto = skb->protocol;
        skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

        fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

        f = rhashtable_lookup_fast(&head->ht,
                                   fl_key_get_start(&skb_mkey, &head->mask),
                                   head->ht_params);
        if (f) {
                *res = f->res;
                return tcf_exts_exec(skb, &f->exts, res);
        }
        return -1;
}

static int fl_init(struct tcf_proto *tp)
{
        struct cls_fl_head *head;

        head = kzalloc(sizeof(*head), GFP_KERNEL);
        if (!head)
                return -ENOBUFS;

        INIT_LIST_HEAD_RCU(&head->filters);
        rcu_assign_pointer(tp->root, head);

        return 0;
}

static void fl_destroy_filter(struct rcu_head *head)
{
        struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);

        tcf_exts_destroy(&f->exts);
        kfree(f);
}

static bool fl_destroy(struct tcf_proto *tp, bool force)
{
        struct cls_fl_head *head = rtnl_dereference(tp->root);
        struct cls_fl_filter *f, *next;

        if (!force && !list_empty(&head->filters))
                return false;

        list_for_each_entry_safe(f, next, &head->filters, list) {
                list_del_rcu(&f->list);
                call_rcu(&f->rcu, fl_destroy_filter);
        }
        RCU_INIT_POINTER(tp->root, NULL);
        if (head->mask_assigned)
                rhashtable_destroy(&head->ht);
        kfree_rcu(head, rcu);
        return true;
}

static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
{
        struct cls_fl_head *head = rtnl_dereference(tp->root);
        struct cls_fl_filter *f;

        list_for_each_entry(f, &head->filters, list)
                if (f->handle == handle)
                        return (unsigned long) f;
        return 0;
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
        [TCA_FLOWER_UNSPEC]             = { .type = NLA_UNSPEC },
        [TCA_FLOWER_CLASSID]            = { .type = NLA_U32 },
        [TCA_FLOWER_INDEV]              = { .type = NLA_STRING,
                                            .len = IFNAMSIZ },
        [TCA_FLOWER_KEY_ETH_DST]        = { .len = ETH_ALEN },
        [TCA_FLOWER_KEY_ETH_DST_MASK]   = { .len = ETH_ALEN },
        [TCA_FLOWER_KEY_ETH_SRC]        = { .len = ETH_ALEN },
        [TCA_FLOWER_KEY_ETH_SRC_MASK]   = { .len = ETH_ALEN },
        [TCA_FLOWER_KEY_ETH_TYPE]       = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_IP_PROTO]       = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_IPV4_SRC]       = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_IPV4_SRC_MASK]  = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_IPV4_DST]       = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_IPV4_DST_MASK]  = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_IPV6_SRC]       = { .len = sizeof(struct in6_addr) },
        [TCA_FLOWER_KEY_IPV6_SRC_MASK]  = { .len = sizeof(struct in6_addr) },
        [TCA_FLOWER_KEY_IPV6_DST]       = { .len = sizeof(struct in6_addr) },
        [TCA_FLOWER_KEY_IPV6_DST_MASK]  = { .len = sizeof(struct in6_addr) },
        [TCA_FLOWER_KEY_TCP_SRC]        = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_TCP_DST]        = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_UDP_SRC]        = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_UDP_DST]        = { .type = NLA_U16 },
};

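/* Copy one key field from its netlink attribute. If no mask attribute is
 * supplied, the field is matched exactly (all-ones mask).
 */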
static void fl_set_key_val(struct nlattr **tb,
                           void *val, int val_type,
                           void *mask, int mask_type, int len)
{
        if (!tb[val_type])
                return;
        memcpy(val, nla_data(tb[val_type]), len);
        if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
                memset(mask, 0xff, len);
        else
                memcpy(mask, nla_data(tb[mask_type]), len);
}

static int fl_set_key(struct net *net, struct nlattr **tb,
                      struct fl_flow_key *key, struct fl_flow_key *mask)
{
#ifdef CONFIG_NET_CLS_IND
        if (tb[TCA_FLOWER_INDEV]) {
                int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);

                if (err < 0)
                        return err;
                key->indev_ifindex = err;
                mask->indev_ifindex = 0xffffffff;
        }
#endif

        fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
                       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
                       sizeof(key->eth.dst));
        fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
                       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
                       sizeof(key->eth.src));
        fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
                       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
                       sizeof(key->basic.n_proto));
        if (key->basic.n_proto == htons(ETH_P_IP) ||
            key->basic.n_proto == htons(ETH_P_IPV6)) {
                fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
                               &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
                               sizeof(key->basic.ip_proto));
        }

        /* The address type is not passed from user space; derive it from
         * the ethertype so that the address attributes are parsed here and
         * reported by fl_dump().
         */
        if (key->basic.n_proto == htons(ETH_P_IP)) {
                key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
                fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
                               &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
                               sizeof(key->ipv4.src));
                fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
                               &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
                               sizeof(key->ipv4.dst));
        } else if (key->basic.n_proto == htons(ETH_P_IPV6)) {
                key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
                fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
                               &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
                               sizeof(key->ipv6.src));
                fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
                               &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
                               sizeof(key->ipv6.dst));
        }
        if (key->basic.ip_proto == IPPROTO_TCP) {
                fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
                               &mask->tp.src, TCA_FLOWER_UNSPEC,
                               sizeof(key->tp.src));
                fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
                               &mask->tp.dst, TCA_FLOWER_UNSPEC,
                               sizeof(key->tp.dst));
        } else if (key->basic.ip_proto == IPPROTO_UDP) {
                fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
                               &mask->tp.src, TCA_FLOWER_UNSPEC,
                               sizeof(key->tp.src));
                fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
                               &mask->tp.dst, TCA_FLOWER_UNSPEC,
                               sizeof(key->tp.dst));
        }

        return 0;
}

static bool fl_mask_eq(struct fl_flow_mask *mask1,
                       struct fl_flow_mask *mask2)
{
        const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
        const long *lmask2 = fl_key_get_start(&mask2->key, mask2);

        return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
               !memcmp(lmask1, lmask2, fl_mask_range(mask1));
}

static const struct rhashtable_params fl_ht_params = {
        .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
        .head_offset = offsetof(struct cls_fl_filter, ht_node),
        .automatic_shrinking = true,
};

static int fl_init_hashtable(struct cls_fl_head *head,
                             struct fl_flow_mask *mask)
{
        head->ht_params = fl_ht_params;
        head->ht_params.key_len = fl_mask_range(mask);
        head->ht_params.key_offset += mask->range.start;

        return rhashtable_init(&head->ht, &head->ht_params);
}

#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
#define FL_KEY_MEMBER_END_OFFSET(member)                                        \
        (FL_KEY_MEMBER_OFFSET(member) + FL_KEY_MEMBER_SIZE(member))

#define FL_KEY_IN_RANGE(mask, member)                                           \
        (FL_KEY_MEMBER_OFFSET(member) <= (mask)->range.end &&                   \
         FL_KEY_MEMBER_END_OFFSET(member) >= (mask)->range.start)

#define FL_KEY_SET(keys, cnt, id, member)                                       \
        do {                                                                    \
                keys[cnt].key_id = id;                                          \
                keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);                \
                cnt++;                                                          \
        } while (0)

#define FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, id, member)                     \
        do {                                                                    \
                if (FL_KEY_IN_RANGE(mask, member))                              \
                        FL_KEY_SET(keys, cnt, id, member);                      \
        } while (0)

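/* Tell the flow dissector which keys to extract: control and basic are
 * always needed, the remaining keys only if they overlap the used mask
 * range.
 */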
static void fl_init_dissector(struct cls_fl_head *head,
                              struct fl_flow_mask *mask)
{
        struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
        size_t cnt = 0;

        FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
        FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
        FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
                               FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
        FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
                               FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
        FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
                               FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
        FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
                               FLOW_DISSECTOR_KEY_PORTS, tp);

        skb_flow_dissector_init(&head->dissector, keys, cnt);
}

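/* All filters of one classifier instance share a single mask. The first
 * filter assigns it and sizes the hash table; later filters must use an
 * equal mask.
 */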
static int fl_check_assign_mask(struct cls_fl_head *head,
                                struct fl_flow_mask *mask)
{
        int err;

        if (head->mask_assigned) {
                if (!fl_mask_eq(&head->mask, mask))
                        return -EINVAL;
                else
                        return 0;
        }

        /* Mask is not assigned yet, so assign it and initialize the
         * hash table accordingly.
         */
        err = fl_init_hashtable(head, mask);
        if (err)
                return err;
        memcpy(&head->mask, mask, sizeof(head->mask));
        head->mask_assigned = true;

        fl_init_dissector(head, mask);

        return 0;
}

static int fl_set_parms(struct net *net, struct tcf_proto *tp,
                        struct cls_fl_filter *f, struct fl_flow_mask *mask,
                        unsigned long base, struct nlattr **tb,
                        struct nlattr *est, bool ovr)
{
        struct tcf_exts e;
        int err;

        tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
        err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
        if (err < 0)
                return err;

        if (tb[TCA_FLOWER_CLASSID]) {
                f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
                tcf_bind_filter(tp, &f->res, base);
        }

        err = fl_set_key(net, tb, &f->key, &mask->key);
        if (err)
                goto errout;

        fl_mask_update_range(mask);
        fl_set_masked_key(&f->mkey, &f->key, mask);

        tcf_exts_change(tp, &f->exts, &e);

        return 0;
errout:
        tcf_exts_destroy(&e);
        return err;
}

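/* Allocate an unused filter handle by advancing hgen until a free value
 * is found, giving up after 0x80000000 attempts.
 */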
static u32 fl_grab_new_handle(struct tcf_proto *tp,
                              struct cls_fl_head *head)
{
        unsigned int i = 0x80000000;
        u32 handle;

        do {
                if (++head->hgen == 0x7FFFFFFF)
                        head->hgen = 1;
        } while (--i > 0 && fl_get(tp, head->hgen));

        if (unlikely(i == 0)) {
                pr_err("Insufficient number of handles\n");
                handle = 0;
        } else {
                handle = head->hgen;
        }

        return handle;
}

static int fl_change(struct net *net, struct sk_buff *in_skb,
                     struct tcf_proto *tp, unsigned long base,
                     u32 handle, struct nlattr **tca,
                     unsigned long *arg, bool ovr)
{
        struct cls_fl_head *head = rtnl_dereference(tp->root);
        struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
        struct cls_fl_filter *fnew;
        struct nlattr *tb[TCA_FLOWER_MAX + 1];
        struct fl_flow_mask mask = {};
        int err;

        if (!tca[TCA_OPTIONS])
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
        if (err < 0)
                return err;

        if (fold && handle && fold->handle != handle)
                return -EINVAL;

        fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
        if (!fnew)
                return -ENOBUFS;

        tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);

        if (!handle) {
                handle = fl_grab_new_handle(tp, head);
                if (!handle) {
                        err = -EINVAL;
                        goto errout;
                }
        }
        fnew->handle = handle;

        err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
        if (err)
                goto errout;

        err = fl_check_assign_mask(head, &mask);
        if (err)
                goto errout;

        err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
                                     head->ht_params);
        if (err)
                goto errout;
        if (fold)
                rhashtable_remove_fast(&head->ht, &fold->ht_node,
                                       head->ht_params);

        *arg = (unsigned long) fnew;

        if (fold) {
                list_replace_rcu(&fold->list, &fnew->list);
                tcf_unbind_filter(tp, &fold->res);
                call_rcu(&fold->rcu, fl_destroy_filter);
        } else {
                list_add_tail_rcu(&fnew->list, &head->filters);
        }

        return 0;

errout:
        kfree(fnew);
        return err;
}

static int fl_delete(struct tcf_proto *tp, unsigned long arg)
{
        struct cls_fl_head *head = rtnl_dereference(tp->root);
        struct cls_fl_filter *f = (struct cls_fl_filter *) arg;

        rhashtable_remove_fast(&head->ht, &f->ht_node,
                               head->ht_params);
        list_del_rcu(&f->list);
        tcf_unbind_filter(tp, &f->res);
        call_rcu(&f->rcu, fl_destroy_filter);
        return 0;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct cls_fl_head *head = rtnl_dereference(tp->root);
        struct cls_fl_filter *f;

        list_for_each_entry_rcu(f, &head->filters, list) {
                if (arg->count < arg->skip)
                        goto skip;
                if (arg->fn(tp, (unsigned long) f, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
skip:
                arg->count++;
        }
}

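/* Dump one key field and its mask; fields with an all-zero mask were not
 * specified by the user and are skipped.
 */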
static int fl_dump_key_val(struct sk_buff *skb,
                           void *val, int val_type,
                           void *mask, int mask_type, int len)
{
        int err;

        if (!memchr_inv(mask, 0, len))
                return 0;
        err = nla_put(skb, val_type, len, val);
        if (err)
                return err;
        if (mask_type != TCA_FLOWER_UNSPEC) {
                err = nla_put(skb, mask_type, len, mask);
                if (err)
                        return err;
        }
        return 0;
}

static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
                   struct sk_buff *skb, struct tcmsg *t)
{
        struct cls_fl_head *head = rtnl_dereference(tp->root);
        struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
        struct nlattr *nest;
        struct fl_flow_key *key, *mask;

        if (!f)
                return skb->len;

        t->tcm_handle = f->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (!nest)
                goto nla_put_failure;

        if (f->res.classid &&
            nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
                goto nla_put_failure;

        key = &f->key;
        mask = &head->mask.key;

        if (mask->indev_ifindex) {
                struct net_device *dev;

                dev = __dev_get_by_index(net, key->indev_ifindex);
                if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
                        goto nla_put_failure;
        }

        if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
                            mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
                            sizeof(key->eth.dst)) ||
            fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
                            mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
                            sizeof(key->eth.src)) ||
            fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
                            &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
                            sizeof(key->basic.n_proto)))
                goto nla_put_failure;
        if ((key->basic.n_proto == htons(ETH_P_IP) ||
             key->basic.n_proto == htons(ETH_P_IPV6)) &&
            fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
                            &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
                            sizeof(key->basic.ip_proto)))
                goto nla_put_failure;

        if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
            (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
                             &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
                             sizeof(key->ipv4.src)) ||
             fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
                             &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
                             sizeof(key->ipv4.dst))))
                goto nla_put_failure;
        else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
                 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
                                  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
                                  sizeof(key->ipv6.src)) ||
                  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
                                  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
                                  sizeof(key->ipv6.dst))))
                goto nla_put_failure;

        if (key->basic.ip_proto == IPPROTO_TCP &&
            (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
                             &mask->tp.src, TCA_FLOWER_UNSPEC,
                             sizeof(key->tp.src)) ||
             fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
                             &mask->tp.dst, TCA_FLOWER_UNSPEC,
                             sizeof(key->tp.dst))))
                goto nla_put_failure;
        else if (key->basic.ip_proto == IPPROTO_UDP &&
                 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
                                  &mask->tp.src, TCA_FLOWER_UNSPEC,
                                  sizeof(key->tp.src)) ||
                  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
                                  &mask->tp.dst, TCA_FLOWER_UNSPEC,
                                  sizeof(key->tp.dst))))
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &f->exts))
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &f->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static struct tcf_proto_ops cls_fl_ops __read_mostly = {
        .kind           = "flower",
        .classify       = fl_classify,
        .init           = fl_init,
        .destroy        = fl_destroy,
        .get            = fl_get,
        .change         = fl_change,
        .delete         = fl_delete,
        .walk           = fl_walk,
        .dump           = fl_dump,
        .owner          = THIS_MODULE,
};

static int __init cls_fl_init(void)
{
        return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
        unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");