/*
 * net/sched/cls_flow.c         Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_sock.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/flow_dissector.h>

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif

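/* Illustrative userspace usage (the device, handles and values below are
 * examples only, not mandated by this module):
 *
 *   tc filter add dev eth0 parent 1: protocol ip prio 1 \
 *           flow hash keys src,dst,proto,proto-src,proto-dst \
 *           divisor 1024 perturb 60
 *
 * hashes each packet's flow tuple into one of 1024 class minor ids under
 * parent 1:, re-seeding the hash every 60 seconds. FLOW_MODE_MAP is the
 * corresponding "flow map key ..." form operating on a single key.
 */
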
struct flow_head {
        struct list_head        filters;
        struct rcu_head         rcu;
};

struct flow_filter {
        struct list_head        list;
        struct tcf_exts         exts;
        struct tcf_ematch_tree  ematches;
        struct tcf_proto        *tp;
        struct timer_list       perturb_timer;
        u32                     perturb_period;
        u32                     handle;

        u32                     nkeys;
        u32                     keymask;
        u32                     mode;
        u32                     mask;
        u32                     xor;
        u32                     rshift;
        u32                     addend;
        u32                     divisor;
        u32                     baseclass;
        u32                     hashrnd;
        union {
                struct work_struct      work;
                struct rcu_head         rcu;
        };
};

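/* Fold a pointer into 32 bits; on 64 bit systems the upper half is
 * XORed in so it still contributes to the result.
 */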
static inline u32 addr_fold(void *addr)
{
        unsigned long a = (unsigned long)addr;

        return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}

static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
        __be32 src = flow_get_u32_src(flow);

        if (src)
                return ntohl(src);

        return addr_fold(skb->sk);
}

static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
        __be32 dst = flow_get_u32_dst(flow);

        if (dst)
                return ntohl(dst);

        return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}

static u32 flow_get_proto(const struct sk_buff *skb,
                          const struct flow_keys *flow)
{
        return flow->basic.ip_proto;
}

static u32 flow_get_proto_src(const struct sk_buff *skb,
                              const struct flow_keys *flow)
{
        if (flow->ports.ports)
                return ntohs(flow->ports.src);

        return addr_fold(skb->sk);
}

static u32 flow_get_proto_dst(const struct sk_buff *skb,
                              const struct flow_keys *flow)
{
        if (flow->ports.ports)
                return ntohs(flow->ports.dst);

        return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}

static u32 flow_get_iif(const struct sk_buff *skb)
{
        return skb->skb_iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
        return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
        return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        return addr_fold(skb_nfct(skb));
#else
        return 0;
#endif
}

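/* CTTUPLE() evaluates to the given member of the conntrack tuple of
 * skb's connection; for untracked packets, or when conntrack is
 * compiled out, it branches to a "fallback:" label that every caller
 * must provide.
 */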
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#define CTTUPLE(skb, member)                                            \
({                                                                      \
        enum ip_conntrack_info ctinfo;                                  \
        const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);             \
        if (ct == NULL)                                                 \
                goto fallback;                                          \
        ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;                 \
})
#else
#define CTTUPLE(skb, member)                                            \
({                                                                      \
        goto fallback;                                                  \
        0;                                                              \
})
#endif

static u32 flow_get_nfct_src(const struct sk_buff *skb,
                             const struct flow_keys *flow)
{
        switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
                return ntohl(CTTUPLE(skb, src.u3.ip));
        case htons(ETH_P_IPV6):
                return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
        }
fallback:
        return flow_get_src(skb, flow);
}

static u32 flow_get_nfct_dst(const struct sk_buff *skb,
                             const struct flow_keys *flow)
{
        switch (tc_skb_protocol(skb)) {
        case htons(ETH_P_IP):
                return ntohl(CTTUPLE(skb, dst.u3.ip));
        case htons(ETH_P_IPV6):
                return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
        }
fallback:
        return flow_get_dst(skb, flow);
}

static u32 flow_get_nfct_proto_src(const struct sk_buff *skb,
                                   const struct flow_keys *flow)
{
        return ntohs(CTTUPLE(skb, src.u.all));
fallback:
        return flow_get_proto_src(skb, flow);
}

static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb,
                                   const struct flow_keys *flow)
{
        return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
        return flow_get_proto_dst(skb, flow);
}

static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
        if (skb_dst(skb))
                return skb_dst(skb)->tclassid;
#endif
        return 0;
}

static u32 flow_get_skuid(const struct sk_buff *skb)
{
        struct sock *sk = skb_to_full_sk(skb);

        if (sk && sk->sk_socket && sk->sk_socket->file) {
                kuid_t skuid = sk->sk_socket->file->f_cred->fsuid;

                return from_kuid(&init_user_ns, skuid);
        }
        return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
        struct sock *sk = skb_to_full_sk(skb);

        if (sk && sk->sk_socket && sk->sk_socket->file) {
                kgid_t skgid = sk->sk_socket->file->f_cred->fsgid;

                return from_kgid(&init_user_ns, skgid);
        }
        return 0;
}

static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
        u16 uninitialized_var(tag);

        if (vlan_get_tag(skb, &tag) < 0)
                return 0;
        return tag & VLAN_VID_MASK;
}

static u32 flow_get_rxhash(struct sk_buff *skb)
{
        return skb_get_hash(skb);
}

static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
{
        switch (key) {
        case FLOW_KEY_SRC:
                return flow_get_src(skb, flow);
        case FLOW_KEY_DST:
                return flow_get_dst(skb, flow);
        case FLOW_KEY_PROTO:
                return flow_get_proto(skb, flow);
        case FLOW_KEY_PROTO_SRC:
                return flow_get_proto_src(skb, flow);
        case FLOW_KEY_PROTO_DST:
                return flow_get_proto_dst(skb, flow);
        case FLOW_KEY_IIF:
                return flow_get_iif(skb);
        case FLOW_KEY_PRIORITY:
                return flow_get_priority(skb);
        case FLOW_KEY_MARK:
                return flow_get_mark(skb);
        case FLOW_KEY_NFCT:
                return flow_get_nfct(skb);
        case FLOW_KEY_NFCT_SRC:
                return flow_get_nfct_src(skb, flow);
        case FLOW_KEY_NFCT_DST:
                return flow_get_nfct_dst(skb, flow);
        case FLOW_KEY_NFCT_PROTO_SRC:
                return flow_get_nfct_proto_src(skb, flow);
        case FLOW_KEY_NFCT_PROTO_DST:
                return flow_get_nfct_proto_dst(skb, flow);
        case FLOW_KEY_RTCLASSID:
                return flow_get_rtclassid(skb);
        case FLOW_KEY_SKUID:
                return flow_get_skuid(skb);
        case FLOW_KEY_SKGID:
                return flow_get_skgid(skb);
        case FLOW_KEY_VLAN_TAG:
                return flow_get_vlan_tag(skb);
        case FLOW_KEY_RXHASH:
                return flow_get_rxhash(skb);
        default:
                WARN_ON(1);
                return 0;
        }
}

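/* Keys that can only be derived from a dissected flow; all other keys
 * are read directly from skb metadata.
 */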
#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) |                 \
                          (1 << FLOW_KEY_DST) |                 \
                          (1 << FLOW_KEY_PROTO) |               \
                          (1 << FLOW_KEY_PROTO_SRC) |           \
                          (1 << FLOW_KEY_PROTO_DST) |           \
                          (1 << FLOW_KEY_NFCT_SRC) |            \
                          (1 << FLOW_KEY_NFCT_DST) |            \
                          (1 << FLOW_KEY_NFCT_PROTO_SRC) |      \
                          (1 << FLOW_KEY_NFCT_PROTO_DST))

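/* Walk the filter list under RCU. For each filter whose ematches match,
 * extract the configured keys and combine them: jhash2() over all keys
 * in hash mode, or mask/xor/rshift/addend arithmetic on the single key
 * in map mode. The result, reduced modulo the divisor if one is set,
 * is added to the baseclass to form the classid.
 */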
static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                         struct tcf_result *res)
{
        struct flow_head *head = rcu_dereference_bh(tp->root);
        struct flow_filter *f;
        u32 keymask;
        u32 classid;
        unsigned int n, key;
        int r;

        list_for_each_entry_rcu(f, &head->filters, list) {
                u32 keys[FLOW_KEY_MAX + 1];
                struct flow_keys flow_keys;

                if (!tcf_em_tree_match(skb, &f->ematches, NULL))
                        continue;

                keymask = f->keymask;
                if (keymask & FLOW_KEYS_NEEDED)
                        skb_flow_dissect_flow_keys(skb, &flow_keys, 0);

                for (n = 0; n < f->nkeys; n++) {
                        key = ffs(keymask) - 1;
                        keymask &= ~(1 << key);
                        keys[n] = flow_key_get(skb, key, &flow_keys);
                }

                if (f->mode == FLOW_MODE_HASH)
                        classid = jhash2(keys, f->nkeys, f->hashrnd);
                else {
                        classid = keys[0];
                        classid = (classid & f->mask) ^ f->xor;
                        classid = (classid >> f->rshift) + f->addend;
                }

                if (f->divisor)
                        classid %= f->divisor;

                res->class   = 0;
                res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

                r = tcf_exts_exec(skb, &f->exts, res);
                if (r < 0)
                        continue;
                return r;
        }
        return -1;
}

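/* Timer callback: re-seed the hash so that hash-mode mappings change
 * over time, much like SFQ's perturbation.
 */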
static void flow_perturbation(struct timer_list *t)
{
        struct flow_filter *f = from_timer(f, t, perturb_timer);

        get_random_bytes(&f->hashrnd, 4);
        if (f->perturb_period)
                mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}

static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
        [TCA_FLOW_KEYS]         = { .type = NLA_U32 },
        [TCA_FLOW_MODE]         = { .type = NLA_U32 },
        [TCA_FLOW_BASECLASS]    = { .type = NLA_U32 },
        [TCA_FLOW_RSHIFT]       = { .type = NLA_U32 },
        [TCA_FLOW_ADDEND]       = { .type = NLA_U32 },
        [TCA_FLOW_MASK]         = { .type = NLA_U32 },
        [TCA_FLOW_XOR]          = { .type = NLA_U32 },
        [TCA_FLOW_DIVISOR]      = { .type = NLA_U32 },
        [TCA_FLOW_ACT]          = { .type = NLA_NESTED },
        [TCA_FLOW_POLICE]       = { .type = NLA_NESTED },
        [TCA_FLOW_EMATCHES]     = { .type = NLA_NESTED },
        [TCA_FLOW_PERTURB]      = { .type = NLA_U32 },
};

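/* Final teardown, called with RTNL held: stop the perturbation timer
 * and release the extensions, the ematch tree and the filter itself.
 */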
static void __flow_destroy_filter(struct flow_filter *f)
{
        del_timer_sync(&f->perturb_timer);
        tcf_exts_destroy(&f->exts);
        tcf_em_tree_destroy(&f->ematches);
        tcf_exts_put_net(&f->exts);
        kfree(f);
}

static void flow_destroy_filter_work(struct work_struct *work)
{
        struct flow_filter *f = container_of(work, struct flow_filter, work);

        rtnl_lock();
        __flow_destroy_filter(f);
        rtnl_unlock();
}

static void flow_destroy_filter(struct rcu_head *head)
{
        struct flow_filter *f = container_of(head, struct flow_filter, rcu);

        INIT_WORK(&f->work, flow_destroy_filter_work);
        tcf_queue_work(&f->work);
}

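/* Create or replace a filter. Replacement builds a complete new
 * flow_filter, copies the old parameters, applies the netlink
 * attributes, swaps the new filter into the list under RCU and frees
 * the old one after a grace period via tcf_queue_work().
 */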
static int flow_change(struct net *net, struct sk_buff *in_skb,
                       struct tcf_proto *tp, unsigned long base,
                       u32 handle, struct nlattr **tca,
                       void **arg, bool ovr, struct netlink_ext_ack *extack)
{
        struct flow_head *head = rtnl_dereference(tp->root);
        struct flow_filter *fold, *fnew;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_FLOW_MAX + 1];
        unsigned int nkeys = 0;
        unsigned int perturb_period = 0;
        u32 baseclass = 0;
        u32 keymask = 0;
        u32 mode;
        int err;

        if (opt == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy, NULL);
        if (err < 0)
                return err;

        if (tb[TCA_FLOW_BASECLASS]) {
                baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
                if (TC_H_MIN(baseclass) == 0)
                        return -EINVAL;
        }

        if (tb[TCA_FLOW_KEYS]) {
                keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

                nkeys = hweight32(keymask);
                if (nkeys == 0)
                        return -EINVAL;

                if (fls(keymask) - 1 > FLOW_KEY_MAX)
                        return -EOPNOTSUPP;

                /* keymask is a bitmask, so test the SKUID/SKGID bit
                 * positions, not the raw enum values.
                 */
                if ((keymask & ((1 << FLOW_KEY_SKUID) |
                                (1 << FLOW_KEY_SKGID))) &&
                    sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns)
                        return -EOPNOTSUPP;
        }

        fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
        if (!fnew)
                return -ENOBUFS;

        err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
        if (err < 0)
                goto err1;

        err = tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
        if (err < 0)
                goto err2;

        err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr,
                                extack);
        if (err < 0)
                goto err2;

        fold = *arg;
        if (fold) {
                err = -EINVAL;
                if (fold->handle != handle && handle)
                        goto err2;

                /* Copy fold into fnew */
                fnew->tp = fold->tp;
                fnew->handle = fold->handle;
                fnew->nkeys = fold->nkeys;
                fnew->keymask = fold->keymask;
                fnew->mode = fold->mode;
                fnew->mask = fold->mask;
                fnew->xor = fold->xor;
                fnew->rshift = fold->rshift;
                fnew->addend = fold->addend;
                fnew->divisor = fold->divisor;
                fnew->baseclass = fold->baseclass;
                fnew->hashrnd = fold->hashrnd;

                mode = fold->mode;
                if (tb[TCA_FLOW_MODE])
                        mode = nla_get_u32(tb[TCA_FLOW_MODE]);
                if (mode != FLOW_MODE_HASH && nkeys > 1)
                        goto err2;

                if (mode == FLOW_MODE_HASH)
                        perturb_period = fold->perturb_period;
                if (tb[TCA_FLOW_PERTURB]) {
                        if (mode != FLOW_MODE_HASH)
                                goto err2;
                        perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
                }
        } else {
                err = -EINVAL;
                if (!handle)
                        goto err2;
                if (!tb[TCA_FLOW_KEYS])
                        goto err2;

                mode = FLOW_MODE_MAP;
                if (tb[TCA_FLOW_MODE])
                        mode = nla_get_u32(tb[TCA_FLOW_MODE]);
                if (mode != FLOW_MODE_HASH && nkeys > 1)
                        goto err2;

                if (tb[TCA_FLOW_PERTURB]) {
                        if (mode != FLOW_MODE_HASH)
                                goto err2;
                        perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
                }

                if (TC_H_MAJ(baseclass) == 0) {
                        struct Qdisc *q = tcf_block_q(tp->chain->block);

                        baseclass = TC_H_MAKE(q->handle, baseclass);
                }
                if (TC_H_MIN(baseclass) == 0)
                        baseclass = TC_H_MAKE(baseclass, 1);

                fnew->handle = handle;
                fnew->mask  = ~0U;
                fnew->tp = tp;
                get_random_bytes(&fnew->hashrnd, 4);
        }

        timer_setup(&fnew->perturb_timer, flow_perturbation, TIMER_DEFERRABLE);

        tcf_block_netif_keep_dst(tp->chain->block);

        if (tb[TCA_FLOW_KEYS]) {
                fnew->keymask = keymask;
                fnew->nkeys   = nkeys;
        }

        fnew->mode = mode;

        if (tb[TCA_FLOW_MASK])
                fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
        if (tb[TCA_FLOW_XOR])
                fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
        if (tb[TCA_FLOW_RSHIFT])
                fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
        if (tb[TCA_FLOW_ADDEND])
                fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

        if (tb[TCA_FLOW_DIVISOR])
                fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
        if (baseclass)
                fnew->baseclass = baseclass;

        fnew->perturb_period = perturb_period;
        if (perturb_period)
                mod_timer(&fnew->perturb_timer, jiffies + perturb_period);

        if (!*arg)
                list_add_tail_rcu(&fnew->list, &head->filters);
        else
                list_replace_rcu(&fold->list, &fnew->list);

        *arg = fnew;

        if (fold) {
                tcf_exts_get_net(&fold->exts);
                call_rcu(&fold->rcu, flow_destroy_filter);
        }
        return 0;

err2:
        tcf_exts_destroy(&fnew->exts);
        tcf_em_tree_destroy(&fnew->ematches);
err1:
        kfree(fnew);
        return err;
}

static int flow_delete(struct tcf_proto *tp, void *arg, bool *last,
                       struct netlink_ext_ack *extack)
{
        struct flow_head *head = rtnl_dereference(tp->root);
        struct flow_filter *f = arg;

        list_del_rcu(&f->list);
        tcf_exts_get_net(&f->exts);
        call_rcu(&f->rcu, flow_destroy_filter);
        *last = list_empty(&head->filters);
        return 0;
}

static int flow_init(struct tcf_proto *tp)
{
        struct flow_head *head;

        head = kzalloc(sizeof(*head), GFP_KERNEL);
        if (head == NULL)
                return -ENOBUFS;
        INIT_LIST_HEAD(&head->filters);
        rcu_assign_pointer(tp->root, head);
        return 0;
}

static void flow_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
        struct flow_head *head = rtnl_dereference(tp->root);
        struct flow_filter *f, *next;

        list_for_each_entry_safe(f, next, &head->filters, list) {
                list_del_rcu(&f->list);
                if (tcf_exts_get_net(&f->exts))
                        call_rcu(&f->rcu, flow_destroy_filter);
                else
                        __flow_destroy_filter(f);
        }
        kfree_rcu(head, rcu);
}

static void *flow_get(struct tcf_proto *tp, u32 handle)
{
        struct flow_head *head = rtnl_dereference(tp->root);
        struct flow_filter *f;

        list_for_each_entry(f, &head->filters, list)
                if (f->handle == handle)
                        return f;
        return NULL;
}

static int flow_dump(struct net *net, struct tcf_proto *tp, void *fh,
                     struct sk_buff *skb, struct tcmsg *t)
{
        struct flow_filter *f = fh;
        struct nlattr *nest;

        if (f == NULL)
                return skb->len;

        t->tcm_handle = f->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
            nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
                goto nla_put_failure;

        if (f->mask != ~0 || f->xor != 0) {
                if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
                    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
                        goto nla_put_failure;
        }
        if (f->rshift &&
            nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
                goto nla_put_failure;
        if (f->addend &&
            nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
                goto nla_put_failure;

        if (f->divisor &&
            nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
                goto nla_put_failure;
        if (f->baseclass &&
            nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
                goto nla_put_failure;

        if (f->perturb_period &&
            nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &f->exts) < 0)
                goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
        if (f->ematches.hdr.nmatches &&
            tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
                goto nla_put_failure;
#endif
        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &f->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct flow_head *head = rtnl_dereference(tp->root);
        struct flow_filter *f;

        list_for_each_entry(f, &head->filters, list) {
                if (arg->count < arg->skip)
                        goto skip;
                if (arg->fn(tp, f, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
skip:
                arg->count++;
        }
}

static struct tcf_proto_ops cls_flow_ops __read_mostly = {
        .kind           = "flow",
        .classify       = flow_classify,
        .init           = flow_init,
        .destroy        = flow_destroy,
        .change         = flow_change,
        .delete         = flow_delete,
        .get            = flow_get,
        .dump           = flow_dump,
        .walk           = flow_walk,
        .owner          = THIS_MODULE,
};

static int __init cls_flow_init(void)
{
        return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
        unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");