linux/net/sched/cls_route.c
/*
 * net/sched/cls_route.c        ROUTE4 classifier.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
   1. For now we assume that route tags < 256.
      This allows us to use direct table lookups instead of hash tables.
   2. For now we assume that "from TAG" and "fromdev DEV" statements
      are mutually exclusive.
   3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
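
/*
 * A filter handle packs the whole match into a single u32 (see
 * route4_set_parms() and route4_dump()): the low byte carries the
 * destination ("to") realm; bit 15 is set when no "to" realm was
 * specified; the upper 16 bits carry the source ("from") realm,
 * (iif | 0x8000) for a "fromdev" match, or 0xFFFF when neither was
 * given.
 */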

struct route4_fastmap
{
        struct route4_filter    *filter;
        u32                     id;
        int                     iif;
};

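/*
 * fastmap is a small cache of recent classification results, indexed by
 * route4_fastmap_hash().  table has one bucket per destination realm
 * (0..255) plus a final catch-all bucket for filters without a "to" match.
 */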
struct route4_head
{
        struct route4_fastmap   fastmap[16];
        struct route4_bucket    *table[256+1];
};

struct route4_bucket
{
        /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
        struct route4_filter    *ht[16+16+1];
};

struct route4_filter
{
        struct route4_filter    *next;
        u32                     id;
        int                     iif;

        struct tcf_result       res;
        struct tcf_exts         exts;
        u32                     handle;
        struct route4_bucket    *bkt;
};

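/* Sentinel cached in the fastmap when a lookup misses, so repeated
 * failures are answered without rescanning the buckets. */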
#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))

static const struct tcf_ext_map route_ext_map = {
        .police = TCA_ROUTE4_POLICE,
        .action = TCA_ROUTE4_ACT
};

static __inline__ int route4_fastmap_hash(u32 id, int iif)
{
        return id&0xF;
}

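/*
 * Invalidate the whole fastmap under the qdisc root lock.  Called whenever
 * the filter set changes; the id argument is not used, the entire map is
 * cleared.
 */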
static inline
void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
{
        spinlock_t *root_lock = qdisc_root_sleeping_lock(q);

        spin_lock_bh(root_lock);
        memset(head->fastmap, 0, sizeof(head->fastmap));
        spin_unlock_bh(root_lock);
}

static inline void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
                   struct route4_filter *f)
{
        int h = route4_fastmap_hash(id, iif);
        head->fastmap[h].id = id;
        head->fastmap[h].iif = iif;
        head->fastmap[h].filter = f;
}

static __inline__ int route4_hash_to(u32 id)
{
        return id&0xFF;
}

static __inline__ int route4_hash_from(u32 id)
{
        return (id>>16)&0xF;
}

static __inline__ int route4_hash_iif(int iif)
{
        return 16 + ((iif>>16)&0xF);
}

static __inline__ int route4_hash_wild(void)
{
        return 32;
}

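/*
 * Expanded inside the lookup loops of route4_classify(): copy the filter's
 * result and run any attached actions.  A negative action verdict skips
 * this filter and disables fastmap caching for the packet; only matches
 * without extended actions are cached in the fastmap.
 */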
#define ROUTE4_APPLY_RESULT()                                   \
{                                                               \
        *res = f->res;                                          \
        if (tcf_exts_is_available(&f->exts)) {                  \
                int r = tcf_exts_exec(skb, &f->exts, res);      \
                if (r < 0) {                                    \
                        dont_cache = 1;                         \
                        continue;                               \
                }                                               \
                return r;                                       \
        } else if (!dont_cache)                                 \
                route4_set_fastmap(head, id, iif, f);           \
        return 0;                                               \
}

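/*
 * Lookup order: the fastmap cache first, then the bucket for the
 * destination realm (source-realm chains, then iif chains, then the
 * wildcard chain), and finally the catch-all bucket 256 with the source
 * bits of the tag masked off.  Misses are cached as ROUTE4_FAILURE.
 * Without a route4_head, fall back to interpreting tclassid directly as a
 * class handle ("old method").
 */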
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
                           struct tcf_result *res)
{
        struct route4_head *head = (struct route4_head*)tp->root;
        struct dst_entry *dst;
        struct route4_bucket *b;
        struct route4_filter *f;
        u32 id, h;
        int iif, dont_cache = 0;

        if ((dst = skb_dst(skb)) == NULL)
                goto failure;

        id = dst->tclassid;
        if (head == NULL)
                goto old_method;

        iif = ((struct rtable*)dst)->fl.iif;

        h = route4_fastmap_hash(id, iif);
        if (id == head->fastmap[h].id &&
            iif == head->fastmap[h].iif &&
            (f = head->fastmap[h].filter) != NULL) {
                if (f == ROUTE4_FAILURE)
                        goto failure;

                *res = f->res;
                return 0;
        }

        h = route4_hash_to(id);

restart:
        if ((b = head->table[h]) != NULL) {
                for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
                        if (f->id == id)
                                ROUTE4_APPLY_RESULT();

                for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
                        if (f->iif == iif)
                                ROUTE4_APPLY_RESULT();

                for (f = b->ht[route4_hash_wild()]; f; f = f->next)
                        ROUTE4_APPLY_RESULT();

        }
        if (h < 256) {
                h = 256;
                id &= ~0xFFFF;
                goto restart;
        }

        if (!dont_cache)
                route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
        return -1;

old_method:
        if (id && (TC_H_MAJ(id) == 0 ||
                   !(TC_H_MAJ(id^tp->q->handle)))) {
                res->classid = id;
                res->class = 0;
                return 0;
        }
        return -1;
}

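/*
 * to_hash()/from_hash() invert the handle encoding built in
 * route4_set_parms(): to_hash() yields the bucket index (0..256),
 * from_hash() the chain index within the bucket (0..32).  from_hash()
 * returns 256 for a malformed "from" value, which callers reject as out
 * of range.
 */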
static inline u32 to_hash(u32 id)
{
        u32 h = id&0xFF;
        if (id&0x8000)
                h += 256;
        return h;
}

static inline u32 from_hash(u32 id)
{
        id &= 0xFFFF;
        if (id == 0xFFFF)
                return 32;
        if (!(id & 0x8000)) {
                if (id > 255)
                        return 256;
                return id&0xF;
        }
        return 16 + (id&0xF);
}

static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
        struct route4_head *head = (struct route4_head*)tp->root;
        struct route4_bucket *b;
        struct route4_filter *f;
        unsigned h1, h2;

        if (!head)
                return 0;

        h1 = to_hash(handle);
        if (h1 > 256)
                return 0;

        h2 = from_hash(handle>>16);
        if (h2 > 32)
                return 0;

        if ((b = head->table[h1]) != NULL) {
                for (f = b->ht[h2]; f; f = f->next)
                        if (f->handle == handle)
                                return (unsigned long)f;
        }
        return 0;
}

static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}

static int route4_init(struct tcf_proto *tp)
{
        return 0;
}

static inline void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
        tcf_unbind_filter(tp, &f->res);
        tcf_exts_destroy(tp, &f->exts);
        kfree(f);
}

static void route4_destroy(struct tcf_proto *tp)
{
        struct route4_head *head = tp->root;
        int h1, h2;

        if (head == NULL)
                return;

        for (h1=0; h1<=256; h1++) {
                struct route4_bucket *b;

                if ((b = head->table[h1]) != NULL) {
                        for (h2=0; h2<=32; h2++) {
                                struct route4_filter *f;

                                while ((f = b->ht[h2]) != NULL) {
                                        b->ht[h2] = f->next;
                                        route4_delete_filter(tp, f);
                                }
                        }
                        kfree(b);
                }
        }
        kfree(head);
}

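/*
 * Unlink the filter from its chain under the tree lock, invalidate the
 * fastmap, and free the bucket once its last filter is gone.
 */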
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
        struct route4_head *head = (struct route4_head*)tp->root;
        struct route4_filter **fp, *f = (struct route4_filter*)arg;
        unsigned h = 0;
        struct route4_bucket *b;
        int i;

        if (!head || !f)
                return -EINVAL;

        h = f->handle;
        b = f->bkt;

        for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
                if (*fp == f) {
                        tcf_tree_lock(tp);
                        *fp = f->next;
                        tcf_tree_unlock(tp);

                        route4_reset_fastmap(tp->q, head, f->id);
                        route4_delete_filter(tp, f);

                        /* Strip tree */

                        for (i=0; i<=32; i++)
                                if (b->ht[i])
                                        return 0;

                        /* OK, session has no flows */
                        tcf_tree_lock(tp);
                        head->table[to_hash(h)] = NULL;
                        tcf_tree_unlock(tp);

                        kfree(b);
                        return 0;
                }
        }
        return 0;
}

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
        [TCA_ROUTE4_CLASSID]    = { .type = NLA_U32 },
        [TCA_ROUTE4_TO]         = { .type = NLA_U32 },
        [TCA_ROUTE4_FROM]       = { .type = NLA_U32 },
        [TCA_ROUTE4_IIF]        = { .type = NLA_U32 },
};

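/*
 * Validate the TCA_ROUTE4_* attributes, build the new handle, allocate the
 * destination bucket if it does not exist yet, check the target chain for
 * a conflicting filter, and finally commit id/iif/handle/bucket to the
 * filter under the tree lock.
 */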
static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
        struct route4_filter *f, u32 handle, struct route4_head *head,
        struct nlattr **tb, struct nlattr *est, int new)
{
        int err;
        u32 id = 0, to = 0, nhandle = 0x8000;
        struct route4_filter *fp;
        unsigned int h1;
        struct route4_bucket *b;
        struct tcf_exts e;

        err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
        if (err < 0)
                return err;

        err = -EINVAL;
        if (tb[TCA_ROUTE4_TO]) {
                if (new && handle & 0x8000)
                        goto errout;
                to = nla_get_u32(tb[TCA_ROUTE4_TO]);
                if (to > 0xFF)
                        goto errout;
                nhandle = to;
        }

        if (tb[TCA_ROUTE4_FROM]) {
                if (tb[TCA_ROUTE4_IIF])
                        goto errout;
                id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
                if (id > 0xFF)
                        goto errout;
                nhandle |= id << 16;
        } else if (tb[TCA_ROUTE4_IIF]) {
                id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
                if (id > 0x7FFF)
                        goto errout;
                nhandle |= (id | 0x8000) << 16;
        } else
                nhandle |= 0xFFFF << 16;

        if (handle && new) {
                nhandle |= handle & 0x7F00;
                if (nhandle != handle)
                        goto errout;
        }

        h1 = to_hash(nhandle);
        if ((b = head->table[h1]) == NULL) {
                err = -ENOBUFS;
                b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
                if (b == NULL)
                        goto errout;

                tcf_tree_lock(tp);
                head->table[h1] = b;
                tcf_tree_unlock(tp);
        } else {
                unsigned int h2 = from_hash(nhandle >> 16);
                err = -EEXIST;
                for (fp = b->ht[h2]; fp; fp = fp->next)
                        if (fp->handle == f->handle)
                                goto errout;
        }

        tcf_tree_lock(tp);
        if (tb[TCA_ROUTE4_TO])
                f->id = to;

        if (tb[TCA_ROUTE4_FROM])
                f->id = to | id<<16;
        else if (tb[TCA_ROUTE4_IIF])
                f->iif = id;

        f->handle = nhandle;
        f->bkt = b;
        tcf_tree_unlock(tp);

        if (tb[TCA_ROUTE4_CLASSID]) {
                f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
                tcf_bind_filter(tp, &f->res, base);
        }

        tcf_exts_change(tp, &f->exts, &e);

        return 0;
errout:
        tcf_exts_destroy(tp, &e);
        return err;
}

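/*
 * Create or update a filter.  An existing filter is reconfigured in place
 * and then re-linked into its chain in ascending handle order; otherwise a
 * new filter (and the head, on first use) is allocated.  The fastmap is
 * reset afterwards.
 */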
static int route4_change(struct tcf_proto *tp, unsigned long base,
                       u32 handle,
                       struct nlattr **tca,
                       unsigned long *arg)
{
        struct route4_head *head = tp->root;
        struct route4_filter *f, *f1, **fp;
        struct route4_bucket *b;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_ROUTE4_MAX + 1];
        unsigned int h, th;
        u32 old_handle = 0;
        int err;

        if (opt == NULL)
                return handle ? -EINVAL : 0;

        err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy);
        if (err < 0)
                return err;

        if ((f = (struct route4_filter*)*arg) != NULL) {
                if (f->handle != handle && handle)
                        return -EINVAL;

                if (f->bkt)
                        old_handle = f->handle;

                err = route4_set_parms(tp, base, f, handle, head, tb,
                        tca[TCA_RATE], 0);
                if (err < 0)
                        return err;

                goto reinsert;
        }

        err = -ENOBUFS;
        if (head == NULL) {
                head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
                if (head == NULL)
                        goto errout;

                tcf_tree_lock(tp);
                tp->root = head;
                tcf_tree_unlock(tp);
        }

        f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
        if (f == NULL)
                goto errout;

        err = route4_set_parms(tp, base, f, handle, head, tb,
                tca[TCA_RATE], 1);
        if (err < 0)
                goto errout;

reinsert:
        h = from_hash(f->handle >> 16);
        for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
                if (f->handle < f1->handle)
                        break;

        f->next = f1;
        tcf_tree_lock(tp);
        *fp = f;

        if (old_handle && f->handle != old_handle) {
                th = to_hash(old_handle);
                h = from_hash(old_handle >> 16);
                if ((b = head->table[th]) != NULL) {
                        for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
                                if (*fp == f) {
                                        *fp = f->next;
                                        break;
                                }
                        }
                }
        }
        tcf_tree_unlock(tp);

        route4_reset_fastmap(tp->q, head, f->id);
        *arg = (unsigned long)f;
        return 0;

errout:
        kfree(f);
        return err;
}

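/* Iterate over every filter for dumps, honouring the walker's skip/count
 * bookkeeping. */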
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct route4_head *head = tp->root;
        unsigned h, h1;

        if (head == NULL)
                arg->stop = 1;

        if (arg->stop)
                return;

        for (h = 0; h <= 256; h++) {
                struct route4_bucket *b = head->table[h];

                if (b) {
                        for (h1 = 0; h1 <= 32; h1++) {
                                struct route4_filter *f;

                                for (f = b->ht[h1]; f; f = f->next) {
                                        if (arg->count < arg->skip) {
                                                arg->count++;
                                                continue;
                                        }
                                        if (arg->fn(tp, (unsigned long)f, arg) < 0) {
                                                arg->stop = 1;
                                                return;
                                        }
                                        arg->count++;
                                }
                        }
                }
        }
}

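/*
 * Translate the handle encoding back into TCA_ROUTE4_TO/FROM/IIF (plus
 * classid and actions) for a netlink dump.
 */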
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
                       struct sk_buff *skb, struct tcmsg *t)
{
        struct route4_filter *f = (struct route4_filter*)fh;
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;
        u32 id;

        if (f == NULL)
                return skb->len;

        t->tcm_handle = f->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (!(f->handle&0x8000)) {
                id = f->id&0xFF;
                NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
        }
        if (f->handle&0x80000000) {
                if ((f->handle>>16) != 0xFFFF)
                        NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
        } else {
                id = f->id>>16;
                NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
        }
        if (f->res.classid)
                NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid);

        if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
        .kind           =       "route",
        .classify       =       route4_classify,
        .init           =       route4_init,
        .destroy        =       route4_destroy,
        .get            =       route4_get,
        .put            =       route4_put,
        .change         =       route4_change,
        .delete         =       route4_delete,
        .walk           =       route4_walk,
        .dump           =       route4_dump,
        .owner          =       THIS_MODULE,
};

static int __init init_route4(void)
{
        return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
        unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");