linux/net/sched/cls_cgroup.c
/*
 * net/sched/cls_cgroup.c       Control Group Classifier
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Thomas Graf <tgraf@suug.ch>
 */

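/* Illustrative setup (device and cgroup names are only examples):
 *
 *   tc qdisc add dev eth0 root handle 1: htb
 *   tc filter add dev eth0 parent 1: handle 1: cgroup
 *   echo 0x10001 > /sys/fs/cgroup/net_cls/example/net_cls.classid
 *
 * The value written to net_cls.classid encodes major:minor (0x10001 is
 * class 1:1); traffic from tasks in that cgroup is then steered to the
 * matching class.
 */
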
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>

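/* Per-filter state: a single head holds the configured extensions and
 * ematch tree and is replaced as a whole, under RCU, whenever the filter
 * is reconfigured.
 */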
struct cls_cgroup_head {
        u32                     handle;
        struct tcf_exts         exts;
        struct tcf_ematch_tree  ematches;
        struct tcf_proto        *tp;
        struct rcu_head         rcu;
};

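/* Classify based on the net_cls cgroup classid of the transmitting task;
 * in softirq context, where 'current' is not meaningful, fall back to the
 * classid cached in the socket.
 */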
static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                               struct tcf_result *res)
{
        struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
        u32 classid;

        classid = task_cls_state(current)->classid;

        /*
         * Due to the nature of the classifier it is required to ignore all
         * packets originating from softirq context as accessing `current'
         * would lead to false results.
         *
         * This test assumes that all callers of dev_queue_xmit() explicitly
         * disable bh. Knowing this, it is possible to detect softirq based
         * calls by looking at the number of nested bh disable calls because
         * softirqs always disable bh.
         */
        if (in_serving_softirq()) {
                /* If there is an sk_classid we'll use that. */
                if (!skb->sk)
                        return -1;
                classid = skb->sk->sk_classid;
        }

        if (!classid)
                return -1;

        if (!tcf_em_tree_match(skb, &head->ematches, NULL))
                return -1;

        res->classid = classid;
        res->class = 0;
        return tcf_exts_exec(skb, &head->exts, res);
}

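/* This classifier has no individually addressable elements, so there is
 * nothing to look up by handle.
 */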
static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
        return 0UL;
}

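/* Nothing to set up at attach time; the head is allocated on the first
 * change request.
 */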
static int cls_cgroup_init(struct tcf_proto *tp)
{
        return 0;
}

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
        [TCA_CGROUP_EMATCHES]   = { .type = NLA_NESTED },
};

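/* RCU callback: release the extensions and ematch tree once no reader can
 * still be using the old head.
 */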
static void cls_cgroup_destroy_rcu(struct rcu_head *root)
{
        struct cls_cgroup_head *head = container_of(root,
                                                    struct cls_cgroup_head,
                                                    rcu);

        tcf_exts_destroy(&head->exts);
        tcf_em_tree_destroy(&head->ematches);
        kfree(head);
}

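/* Create or update the filter: build and validate a new head, publish it
 * with rcu_assign_pointer() and, if an old head existed, free it after a
 * grace period via call_rcu().
 */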
static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
                             struct tcf_proto *tp, unsigned long base,
                             u32 handle, struct nlattr **tca,
                             unsigned long *arg, bool ovr)
{
        struct nlattr *tb[TCA_CGROUP_MAX + 1];
        struct cls_cgroup_head *head = rtnl_dereference(tp->root);
        struct cls_cgroup_head *new;
        struct tcf_ematch_tree t;
        struct tcf_exts e;
        int err;

        if (!tca[TCA_OPTIONS])
                return -EINVAL;

        if (!head && !handle)
                return -EINVAL;

        if (head && handle != head->handle)
                return -ENOENT;

        new = kzalloc(sizeof(*head), GFP_KERNEL);
        if (!new)
                return -ENOBUFS;

        tcf_exts_init(&new->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
        new->handle = handle;
        new->tp = tp;
        err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
                               cgroup_policy);
        if (err < 0)
                goto errout;

        tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
        err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
        if (err < 0)
                goto errout;

        err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
        if (err < 0) {
                tcf_exts_destroy(&e);
                goto errout;
        }

        tcf_exts_change(tp, &new->exts, &e);
        tcf_em_tree_change(tp, &new->ematches, &t);

        rcu_assign_pointer(tp->root, new);
        if (head)
                call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
        return 0;
errout:
        kfree(new);
        return err;
}

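/* Tear down the filter. The single element cannot be removed on its own,
 * so destruction only proceeds when forced; the head is unlinked and
 * freed via RCU.
 */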
static bool cls_cgroup_destroy(struct tcf_proto *tp, bool force)
{
        struct cls_cgroup_head *head = rtnl_dereference(tp->root);

        if (!force)
                return false;

        if (head) {
                RCU_INIT_POINTER(tp->root, NULL);
                call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
        }
        return true;
}

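/* Individual elements cannot be deleted; the filter is torn down as a
 * whole instead.
 */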
static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
        return -EOPNOTSUPP;
}

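/* Walk the single element, honouring the walker's skip/count bookkeeping. */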
static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct cls_cgroup_head *head = rtnl_dereference(tp->root);

        if (arg->count < arg->skip)
                goto skip;

        if (arg->fn(tp, (unsigned long) head, arg) < 0) {
                arg->stop = 1;
                return;
        }
skip:
        arg->count++;
}

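/* Dump the filter configuration (handle, extensions and ematches) back to
 * user space via netlink.
 */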
static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
                           struct sk_buff *skb, struct tcmsg *t)
{
        struct cls_cgroup_head *head = rtnl_dereference(tp->root);
        struct nlattr *nest;

        t->tcm_handle = head->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &head->exts) < 0 ||
            tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &head->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

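/* Classifier operations registered with the traffic control core. */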
static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
        .kind           =       "cgroup",
        .init           =       cls_cgroup_init,
        .change         =       cls_cgroup_change,
        .classify       =       cls_cgroup_classify,
        .destroy        =       cls_cgroup_destroy,
        .get            =       cls_cgroup_get,
        .delete         =       cls_cgroup_delete,
        .walk           =       cls_cgroup_walk,
        .dump           =       cls_cgroup_dump,
        .owner          =       THIS_MODULE,
};

static int __init init_cgroup_cls(void)
{
        return register_tcf_proto_ops(&cls_cgroup_ops);
}

static void __exit exit_cgroup_cls(void)
{
        unregister_tcf_proto_ops(&cls_cgroup_ops);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");