/*
 * net/sched/cls_tcindex.c      Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */
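
/*
 * Example setup (an illustrative sketch only -- device name, qdisc handle
 * and class IDs are placeholders; tcindex is classically paired with the
 * dsmark qdisc for diffserv):
 *
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *       tcindex mask 0xfc shift 2
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 1 \
 *       handle 0x2e tcindex classid 1:1 pass_on
 *
 * With the DS field stored in tc_index, (tc_index & 0xfc) >> 2 yields the
 * DSCP, so the second command maps EF-marked packets (DSCP 0x2e) to 1:1.
 */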

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD  64      /* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE       64      /* optimized for diffserv */


#define PRIV(tp)        ((struct tcindex_data *) (tp)->root)


struct tcindex_filter_result {
        struct tcf_exts         exts;
        struct tcf_result       res;
};

struct tcindex_filter {
        u16 key;
        struct tcindex_filter_result result;
        struct tcindex_filter *next;
};


struct tcindex_data {
        struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
        struct tcindex_filter **h; /* imperfect hash; only used if !perfect;
                                      NULL if unused */
        u16 mask;               /* AND key with mask */
        int shift;              /* shift ANDed key to the right */
        int hash;               /* hash table size; 0 if undefined */
        int alloc_hash;         /* allocated size */
        int fall_through;       /* 0: only classify if explicit match */
};

static const struct tcf_ext_map tcindex_ext_map = {
        .police = TCA_TCINDEX_POLICE,
        .action = TCA_TCINDEX_ACT
};

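/* A result slot is in use if it has a classid or any extensions attached. */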
static inline int
tcindex_filter_is_set(struct tcindex_filter_result *r)
{
        return tcf_exts_is_predicative(&r->exts) || r->res.classid;
}

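/*
 * Perfect hash: the key indexes the table directly. Imperfect hash: walk
 * the chain at key % hash and match on the stored key.
 */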
static struct tcindex_filter_result *
tcindex_lookup(struct tcindex_data *p, u16 key)
{
        struct tcindex_filter *f;

        if (p->perfect)
                return tcindex_filter_is_set(p->perfect + key) ?
                        p->perfect + key : NULL;
        else if (p->h) {
                for (f = p->h[key % p->hash]; f; f = f->next)
                        if (f->key == key)
                                return &f->result;
        }

        return NULL;
}


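/*
 * Classify on (skb->tc_index & mask) >> shift. If there is no explicit
 * match and fall_through is set, synthesize a classid from the qdisc's
 * major handle and the key instead of failing the lookup.
 */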
static int tcindex_classify(struct sk_buff *skb, struct tcf_proto *tp,
                            struct tcf_result *res)
{
        struct tcindex_data *p = PRIV(tp);
        struct tcindex_filter_result *f;
        int key = (skb->tc_index & p->mask) >> p->shift;

        pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
                 skb, tp, res, p);

        f = tcindex_lookup(p, key);
        if (!f) {
                if (!p->fall_through)
                        return -1;
                res->classid = TC_H_MAKE(TC_H_MAJ(tp->q->handle), key);
                res->class = 0;
                pr_debug("alg 0x%x\n", res->classid);
                return 0;
        }
        *res = f->res;
        pr_debug("map 0x%x\n", res->classid);

        return tcf_exts_exec(skb, &f->exts, res);
}


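/* Map a handle to an internal element reference for the tcf core; 0 means
 * no such element. */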
static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle)
{
        struct tcindex_data *p = PRIV(tp);
        struct tcindex_filter_result *r;

        pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
        if (p->perfect && handle >= p->alloc_hash)
                return 0;
        r = tcindex_lookup(p, handle);
        return r && tcindex_filter_is_set(r) ? (unsigned long) r : 0UL;
}


static void tcindex_put(struct tcf_proto *tp, unsigned long f)
{
        pr_debug("tcindex_put(tp %p,f 0x%lx)\n", tp, f);
}


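/* Defaults: full 16-bit mask, 64-bucket hash, fall-through enabled. */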
static int tcindex_init(struct tcf_proto *tp)
{
        struct tcindex_data *p;

        pr_debug("tcindex_init(tp %p)\n", tp);
        p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        p->mask = 0xffff;
        p->hash = DEFAULT_HASH_SIZE;
        p->fall_through = 1;

        tp->root = p;
        return 0;
}


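/*
 * Unlink and destroy one element. @lock selects whether the tree lock is
 * taken around the unlink; tcindex_destroy() passes 0, as no further
 * locking is needed during teardown.
 */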
static int
__tcindex_delete(struct tcf_proto *tp, unsigned long arg, int lock)
{
        struct tcindex_data *p = PRIV(tp);
        struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg;
        struct tcindex_filter *f = NULL;

        pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p,f %p\n", tp, arg, p, f);
        if (p->perfect) {
                if (!r->res.class)
                        return -ENOENT;
        } else {
                int i;
                struct tcindex_filter **walk = NULL;

                for (i = 0; i < p->hash; i++)
                        for (walk = p->h + i; *walk; walk = &(*walk)->next)
                                if (&(*walk)->result == r)
                                        goto found;
                return -ENOENT;

found:
                f = *walk;
                if (lock)
                        tcf_tree_lock(tp);
                *walk = f->next;
                if (lock)
                        tcf_tree_unlock(tp);
        }
        tcf_unbind_filter(tp, &r->res);
        tcf_exts_destroy(tp, &r->exts);
        kfree(f);
        return 0;
}

static int tcindex_delete(struct tcf_proto *tp, unsigned long arg)
{
        return __tcindex_delete(tp, arg, 1);
}

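/* A perfect hash must cover the whole key space: hash > (mask >> shift). */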
static inline int
valid_perfect_hash(struct tcindex_data *p)
{
        return p->hash > (p->mask >> p->shift);
}

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
        [TCA_TCINDEX_HASH]              = { .type = NLA_U32 },
        [TCA_TCINDEX_MASK]              = { .type = NLA_U16 },
        [TCA_TCINDEX_SHIFT]             = { .type = NLA_U32 },
        [TCA_TCINDEX_FALL_THROUGH]      = { .type = NLA_U32 },
        [TCA_TCINDEX_CLASSID]           = { .type = NLA_U32 },
};

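/*
 * Build modified copies (cp, cr) of the parameters and the affected result,
 * validate and allocate outside the lock, then commit both copies back
 * under tcf_tree_lock().
 */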
static int
tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
                  struct tcindex_data *p, struct tcindex_filter_result *r,
                  struct nlattr **tb, struct nlattr *est)
{
        int err, balloc = 0;
        struct tcindex_filter_result new_filter_result, *old_r = r;
        struct tcindex_filter_result cr;
        struct tcindex_data cp;
        struct tcindex_filter *f = NULL; /* make gcc behave */
        struct tcf_exts e;

        err = tcf_exts_validate(tp, tb, est, &e, &tcindex_ext_map);
        if (err < 0)
                return err;

        memcpy(&cp, p, sizeof(cp));
        memset(&new_filter_result, 0, sizeof(new_filter_result));

        if (old_r)
                memcpy(&cr, r, sizeof(cr));
        else
                memset(&cr, 0, sizeof(cr));

        if (tb[TCA_TCINDEX_HASH])
                cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

        if (tb[TCA_TCINDEX_MASK])
                cp.mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

        if (tb[TCA_TCINDEX_SHIFT])
                cp.shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);

        err = -EBUSY;
        /* Hash already allocated, make sure that we still meet the
         * requirements for the allocated hash.
         */
        if (cp.perfect) {
                if (!valid_perfect_hash(&cp) ||
                    cp.hash > cp.alloc_hash)
                        goto errout;
        } else if (cp.h && cp.hash != cp.alloc_hash)
                goto errout;

        err = -EINVAL;
        if (tb[TCA_TCINDEX_FALL_THROUGH])
                cp.fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

        if (!cp.hash) {
                /* Hash not specified, use perfect hash if the upper limit
                 * of the hashing index is below the threshold.
                 */
                if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD)
                        cp.hash = (cp.mask >> cp.shift) + 1;
                else
                        cp.hash = DEFAULT_HASH_SIZE;
        }

        if (!cp.perfect && !cp.h)
                cp.alloc_hash = cp.hash;

        /* Note: this could be as restrictive as if (handle & ~(mask >> shift))
         * but then, we'd fail handles that may become valid after some future
         * mask change. While this is extremely unlikely to ever matter,
         * the check below is safer (and also more backwards-compatible).
         */
        if (cp.perfect || valid_perfect_hash(&cp))
                if (handle >= cp.alloc_hash)
                        goto errout;


        err = -ENOMEM;
        if (!cp.perfect && !cp.h) {
                if (valid_perfect_hash(&cp)) {
                        cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
                        if (!cp.perfect)
                                goto errout;
                        balloc = 1;
                } else {
                        cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
                        if (!cp.h)
                                goto errout;
                        balloc = 2;
                }
        }

        if (cp.perfect)
                r = cp.perfect + handle;
        else
                r = tcindex_lookup(&cp, handle) ? : &new_filter_result;

        if (r == &new_filter_result) {
                f = kzalloc(sizeof(*f), GFP_KERNEL);
                if (!f)
                        goto errout_alloc;
        }

        if (tb[TCA_TCINDEX_CLASSID]) {
                cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
                tcf_bind_filter(tp, &cr.res, base);
        }

        tcf_exts_change(tp, &cr.exts, &e);

        tcf_tree_lock(tp);
        if (old_r && old_r != r)
                memset(old_r, 0, sizeof(*old_r));

        memcpy(p, &cp, sizeof(cp));
        memcpy(r, &cr, sizeof(cr));

        if (r == &new_filter_result) {
                struct tcindex_filter **fp;

                f->key = handle;
                f->result = new_filter_result;
                f->next = NULL;
                for (fp = p->h + (handle % p->hash); *fp; fp = &(*fp)->next)
                        /* nothing */;
                *fp = f;
        }
        tcf_tree_unlock(tp);

        return 0;

errout_alloc:
        if (balloc == 1)
                kfree(cp.perfect);
        else if (balloc == 2)
                kfree(cp.h);
errout:
        tcf_exts_destroy(tp, &e);
        return err;
}

static int
tcindex_change(struct tcf_proto *tp, unsigned long base, u32 handle,
               struct nlattr **tca, unsigned long *arg)
{
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_TCINDEX_MAX + 1];
        struct tcindex_data *p = PRIV(tp);
        struct tcindex_filter_result *r = (struct tcindex_filter_result *) *arg;
        int err;

        pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
                 "p %p,r %p,*arg 0x%lx\n",
                 tp, handle, tca, arg, opt, p, r, arg ? *arg : 0L);

        if (!opt)
                return 0;

        err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy);
        if (err < 0)
                return err;

        return tcindex_set_parms(tp, base, handle, p, r, tb, tca[TCA_RATE]);
}


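/*
 * Iterate over all set elements, honouring walker->skip and walker->count,
 * and stop when the callback returns a negative value.
 */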
static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
{
        struct tcindex_data *p = PRIV(tp);
        struct tcindex_filter *f, *next;
        int i;

        pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
        if (p->perfect) {
                for (i = 0; i < p->hash; i++) {
                        if (!p->perfect[i].res.class)
                                continue;
                        if (walker->count >= walker->skip) {
                                if (walker->fn(tp,
                                    (unsigned long) (p->perfect + i), walker)
                                     < 0) {
                                        walker->stop = 1;
                                        return;
                                }
                        }
                        walker->count++;
                }
        }
        if (!p->h)
                return;
        for (i = 0; i < p->hash; i++) {
                for (f = p->h[i]; f; f = next) {
                        next = f->next;
                        if (walker->count >= walker->skip) {
                                if (walker->fn(tp, (unsigned long) &f->result,
                                    walker) < 0) {
                                        walker->stop = 1;
                                        return;
                                }
                        }
                        walker->count++;
                }
        }
}


static int tcindex_destroy_element(struct tcf_proto *tp,
    unsigned long arg, struct tcf_walker *walker)
{
        return __tcindex_delete(tp, arg, 0);
}


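/* Delete every element via the walker, then free both hash tables. */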
static void tcindex_destroy(struct tcf_proto *tp)
{
        struct tcindex_data *p = PRIV(tp);
        struct tcf_walker walker;

        pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
        walker.count = 0;
        walker.skip = 0;
        walker.fn = &tcindex_destroy_element;
        tcindex_walk(tp, &walker);
        kfree(p->perfect);
        kfree(p->h);
        kfree(p);
        tp->root = NULL;
}


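/*
 * With fh == 0, dump the per-protocol parameters (hash, mask, shift,
 * fall_through); otherwise dump one element, recovering its handle from
 * the perfect-table offset or the imperfect chain's stored key.
 */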
static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
    struct sk_buff *skb, struct tcmsg *t)
{
        struct tcindex_data *p = PRIV(tp);
        struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;

        pr_debug("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p,b %p\n",
                 tp, fh, skb, t, p, r, b);
        pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (!fh) {
                t->tcm_handle = ~0; /* whatever ... */
                NLA_PUT_U32(skb, TCA_TCINDEX_HASH, p->hash);
                NLA_PUT_U16(skb, TCA_TCINDEX_MASK, p->mask);
                NLA_PUT_U32(skb, TCA_TCINDEX_SHIFT, p->shift);
                NLA_PUT_U32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through);
                nla_nest_end(skb, nest);
        } else {
                if (p->perfect) {
                        t->tcm_handle = r - p->perfect;
                } else {
                        struct tcindex_filter *f;
                        int i;

                        t->tcm_handle = 0;
                        for (i = 0; !t->tcm_handle && i < p->hash; i++) {
                                for (f = p->h[i]; !t->tcm_handle && f;
                                     f = f->next) {
                                        if (&f->result == r)
                                                t->tcm_handle = f->key;
                                }
                        }
                }
                pr_debug("handle = %d\n", t->tcm_handle);
                if (r->res.class)
                        NLA_PUT_U32(skb, TCA_TCINDEX_CLASSID, r->res.classid);

                if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0)
                        goto nla_put_failure;
                nla_nest_end(skb, nest);

                if (tcf_exts_dump_stats(skb, &r->exts, &tcindex_ext_map) < 0)
                        goto nla_put_failure;
        }

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
        .kind           =       "tcindex",
        .classify       =       tcindex_classify,
        .init           =       tcindex_init,
        .destroy        =       tcindex_destroy,
        .get            =       tcindex_get,
        .put            =       tcindex_put,
        .change         =       tcindex_change,
        .delete         =       tcindex_delete,
        .walk           =       tcindex_walk,
        .dump           =       tcindex_dump,
        .owner          =       THIS_MODULE,
};

static int __init init_tcindex(void)
{
        return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
        unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");