linux/net/sched/cls_matchall.c
<<
>>
Prefs
   1/*
   2 * net/sched/cls_matchall.c             Match-all classifier
   3 *
   4 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 */
  11
  12#include <linux/kernel.h>
  13#include <linux/init.h>
  14#include <linux/module.h>
  15
  16#include <net/sch_generic.h>
  17#include <net/pkt_cls.h>
  18
/* Per-tcf_proto state for the match-all classifier.  At most one filter
 * instance exists per tcf_proto (mall_change() returns -EEXIST otherwise),
 * so this head IS the whole filter list.
 */
struct cls_mall_head {
	struct tcf_exts exts;	/* actions attached to this filter */
	struct tcf_result res;	/* classification result (classid) */
	u32 handle;		/* filter handle; defaults to 1 in mall_change() */
	u32 flags;		/* TCA_CLS_FLAGS_SKIP_{HW,SW} offload flags */
	struct rcu_head rcu;	/* deferred free via call_rcu() */
};
  26
  27static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
  28                         struct tcf_result *res)
  29{
  30        struct cls_mall_head *head = rcu_dereference_bh(tp->root);
  31
  32        if (tc_skip_sw(head->flags))
  33                return -1;
  34
  35        return tcf_exts_exec(skb, &head->exts, res);
  36}
  37
/* ->init: per-tcf_proto setup.  Nothing is allocated up front; tp->root
 * remains NULL until the first (and only) filter is added by mall_change().
 */
static int mall_init(struct tcf_proto *tp)
{
	return 0;
}
  42
/* RCU callback: free a filter head once the grace period has elapsed,
 * so no mall_classify() reader can still hold a reference to it or to
 * the actions in head->exts.
 */
static void mall_destroy_rcu(struct rcu_head *rcu)
{
	struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
						  rcu);

	tcf_exts_destroy(&head->exts);
	kfree(head);
}
  51
  52static int mall_replace_hw_filter(struct tcf_proto *tp,
  53                                  struct cls_mall_head *head,
  54                                  unsigned long cookie)
  55{
  56        struct net_device *dev = tp->q->dev_queue->dev;
  57        struct tc_to_netdev offload;
  58        struct tc_cls_matchall_offload mall_offload = {0};
  59
  60        offload.type = TC_SETUP_MATCHALL;
  61        offload.cls_mall = &mall_offload;
  62        offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
  63        offload.cls_mall->exts = &head->exts;
  64        offload.cls_mall->cookie = cookie;
  65
  66        return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
  67                                             &offload);
  68}
  69
  70static void mall_destroy_hw_filter(struct tcf_proto *tp,
  71                                   struct cls_mall_head *head,
  72                                   unsigned long cookie)
  73{
  74        struct net_device *dev = tp->q->dev_queue->dev;
  75        struct tc_to_netdev offload;
  76        struct tc_cls_matchall_offload mall_offload = {0};
  77
  78        offload.type = TC_SETUP_MATCHALL;
  79        offload.cls_mall = &mall_offload;
  80        offload.cls_mall->command = TC_CLSMATCHALL_DESTROY;
  81        offload.cls_mall->exts = NULL;
  82        offload.cls_mall->cookie = cookie;
  83
  84        dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
  85                                             &offload);
  86}
  87
/* ->destroy: tear down the (single) filter instance.
 *
 * Always returns true so the core destroys the tcf_proto; @force is
 * effectively ignored since matchall holds at most one filter.
 */
static bool mall_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct net_device *dev = tp->q->dev_queue->dev;

	if (!head)
		return true;

	/* Remove the hardware rule first, if one was offloaded.  The cookie
	 * is the head pointer itself, matching mall_replace_hw_filter().
	 */
	if (tc_should_offload(dev, tp, head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head);

	/* Defer the free past the RCU grace period; mall_classify() readers
	 * may still be traversing head.
	 */
	call_rcu(&head->rcu, mall_destroy_rcu);
	return true;
}
 102
/* ->get: look up a filter by handle.
 *
 * Always reports "not found" (0).  NOTE(review): this makes
 * "tc filter get/change" unable to locate the installed instance even
 * when the handle matches; later kernels return the head here — confirm
 * whether that behavior is wanted before relying on this.
 */
static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
{
	return 0UL;
}
 107
 108static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
 109        [TCA_MATCHALL_UNSPEC]           = { .type = NLA_UNSPEC },
 110        [TCA_MATCHALL_CLASSID]          = { .type = NLA_U32 },
 111};
 112
/* Parse and apply the CLASSID and action attributes onto @head.
 *
 * Actions are validated into a temporary tcf_exts first, so @head is only
 * modified after validation succeeds; tcf_exts_change() then swaps the
 * validated actions in (RCU-safely).  Returns 0 or a negative errno.
 */
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_head *head,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	tcf_exts_init(&e, TCA_MATCHALL_ACT, 0);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		/* Tell the qdisc a filter now references this class. */
		tcf_bind_filter(tp, &head->res, base);
	}

	tcf_exts_change(tp, &head->exts, &e);

	return 0;
}
 135
 136static int mall_change(struct net *net, struct sk_buff *in_skb,
 137                       struct tcf_proto *tp, unsigned long base,
 138                       u32 handle, struct nlattr **tca,
 139                       unsigned long *arg, bool ovr)
 140{
 141        struct cls_mall_head *head = rtnl_dereference(tp->root);
 142        struct net_device *dev = tp->q->dev_queue->dev;
 143        struct nlattr *tb[TCA_MATCHALL_MAX + 1];
 144        struct cls_mall_head *new;
 145        u32 flags = 0;
 146        int err;
 147
 148        if (!tca[TCA_OPTIONS])
 149                return -EINVAL;
 150
 151        if (head)
 152                return -EEXIST;
 153
 154        err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
 155                               tca[TCA_OPTIONS], mall_policy);
 156        if (err < 0)
 157                return err;
 158
 159        if (tb[TCA_MATCHALL_FLAGS]) {
 160                flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
 161                if (!tc_flags_valid(flags))
 162                        return -EINVAL;
 163        }
 164
 165        new = kzalloc(sizeof(*new), GFP_KERNEL);
 166        if (!new)
 167                return -ENOBUFS;
 168
 169        tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
 170
 171        if (!handle)
 172                handle = 1;
 173        new->handle = handle;
 174        new->flags = flags;
 175
 176        err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
 177        if (err)
 178                goto errout;
 179
 180        if (tc_should_offload(dev, tp, flags)) {
 181                err = mall_replace_hw_filter(tp, new, (unsigned long) new);
 182                if (err) {
 183                        if (tc_skip_sw(flags))
 184                                goto errout;
 185                        else
 186                                err = 0;
 187                }
 188        }
 189
 190        *arg = (unsigned long) head;
 191        rcu_assign_pointer(tp->root, new);
 192        if (head)
 193                call_rcu(&head->rcu, mall_destroy_rcu);
 194        return 0;
 195
 196errout:
 197        kfree(new);
 198        return err;
 199}
 200
/* ->delete: removing the single instance individually is not supported;
 * the filter only goes away when the whole tcf_proto is destroyed
 * (see mall_destroy()).
 */
static int mall_delete(struct tcf_proto *tp, unsigned long arg)
{
	return -EOPNOTSUPP;
}
 205
 206static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 207{
 208        struct cls_mall_head *head = rtnl_dereference(tp->root);
 209
 210        if (arg->count < arg->skip)
 211                goto skip;
 212        if (arg->fn(tp, (unsigned long) head, arg) < 0)
 213                arg->stop = 1;
 214skip:
 215        arg->count++;
 216}
 217
/* ->dump: emit this filter's attributes (classid, actions, stats) into
 * @skb as a nested TCA_OPTIONS block.  Returns skb->len on success or
 * -1 on failure (partial attributes are cancelled).
 */
static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_mall_head *head = (struct cls_mall_head *) fh;
	struct nlattr *nest;

	/* fh == 0 means "no filter"; report an empty (but valid) dump. */
	if (!head)
		return skb->len;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	/* classid 0 means "not set"; omit the attribute in that case. */
	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	/* Action statistics go outside the options nest. */
	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
 251
/* Classifier operations registered with the tc core under kind "matchall". */
static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.dump		= mall_dump,
	.owner		= THIS_MODULE,
};
 264
/* Register the "matchall" classifier with the tc core on module load. */
static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

/* Unregister the classifier on module unload. */
static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");
 281