linux/net/sched/cls_cgroup.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_cgroup.c       Control Group Classifier
 *
 * Authors:     Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>

struct cls_cgroup_head {
        u32                     handle;
        struct tcf_exts         exts;
        struct tcf_ematch_tree  ematches;
        struct tcf_proto        *tp;
        struct rcu_work         rwork;
};

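/*
 * Classify a packet by the net_cls cgroup classid attached to the
 * originating task's socket (task_get_classid()).  A packet with no
 * classid, or one that fails the optional ematch tree, is left
 * unclassified (-1).
 */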
static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                               struct tcf_result *res)
{
        struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
        u32 classid = task_get_classid(skb);

        if (unlikely(!head))
                return -1;
        if (!classid)
                return -1;
        if (!tcf_em_tree_match(skb, &head->ematches, NULL))
                return -1;

        res->classid = classid;
        res->class = 0;

        return tcf_exts_exec(skb, &head->exts, res);
}

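/*
 * cls_cgroup keeps at most one filter per tcf_proto, so there is
 * nothing to look up by handle and nothing to prepare at init time.
 */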
static void *cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
        return NULL;
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
        return 0;
}

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
        [TCA_CGROUP_EMATCHES]   = { .type = NLA_NESTED },
};

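/*
 * Teardown of an old head is deferred: tcf_queue_work() runs
 * cls_cgroup_destroy_work() after an RCU grace period, and the work
 * item takes the RTNL lock before freeing the extensions and ematches.
 */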
static void __cls_cgroup_destroy(struct cls_cgroup_head *head)
{
        tcf_exts_destroy(&head->exts);
        tcf_em_tree_destroy(&head->ematches);
        tcf_exts_put_net(&head->exts);
        kfree(head);
}

static void cls_cgroup_destroy_work(struct work_struct *work)
{
        struct cls_cgroup_head *head = container_of(to_rcu_work(work),
                                                    struct cls_cgroup_head,
                                                    rwork);
        rtnl_lock();
        __cls_cgroup_destroy(head);
        rtnl_unlock();
}

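/*
 * Create or replace the filter: allocate a fresh head, validate the
 * actions and ematches supplied via netlink, publish it with
 * rcu_assign_pointer() and queue the previous head (if any) for
 * deferred destruction.
 */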
static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
                             struct tcf_proto *tp, unsigned long base,
                             u32 handle, struct nlattr **tca,
                             void **arg, u32 flags,
                             struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_CGROUP_MAX + 1];
        struct cls_cgroup_head *head = rtnl_dereference(tp->root);
        struct cls_cgroup_head *new;
        int err;

        if (!tca[TCA_OPTIONS])
                return -EINVAL;

        if (!head && !handle)
                return -EINVAL;

        if (head && handle != head->handle)
                return -ENOENT;

        new = kzalloc(sizeof(*head), GFP_KERNEL);
        if (!new)
                return -ENOBUFS;

        err = tcf_exts_init(&new->exts, net, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
        if (err < 0)
                goto errout;
        new->handle = handle;
        new->tp = tp;
        err = nla_parse_nested_deprecated(tb, TCA_CGROUP_MAX,
                                          tca[TCA_OPTIONS], cgroup_policy,
                                          NULL);
        if (err < 0)
                goto errout;

        err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &new->exts, flags,
                                extack);
        if (err < 0)
                goto errout;

        err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &new->ematches);
        if (err < 0)
                goto errout;

        rcu_assign_pointer(tp->root, new);
        if (head) {
                tcf_exts_get_net(&head->exts);
                tcf_queue_work(&head->rwork, cls_cgroup_destroy_work);
        }
        return 0;
errout:
        tcf_exts_destroy(&new->exts);
        kfree(new);
        return err;
}

static void cls_cgroup_destroy(struct tcf_proto *tp, bool rtnl_held,
                               struct netlink_ext_ack *extack)
{
        struct cls_cgroup_head *head = rtnl_dereference(tp->root);

        /* Head can still be NULL due to cls_cgroup_init(). */
        if (head) {
                if (tcf_exts_get_net(&head->exts))
                        tcf_queue_work(&head->rwork, cls_cgroup_destroy_work);
                else
                        __cls_cgroup_destroy(head);
        }
}

static int cls_cgroup_delete(struct tcf_proto *tp, void *arg, bool *last,
                             bool rtnl_held, struct netlink_ext_ack *extack)
{
        return -EOPNOTSUPP;
}

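/* Report the single head (if configured) to the walker. */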
static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg,
                            bool rtnl_held)
{
        struct cls_cgroup_head *head = rtnl_dereference(tp->root);

        if (arg->count < arg->skip)
                goto skip;

        if (!head)
                return;
        if (arg->fn(tp, head, arg) < 0) {
                arg->stop = 1;
                return;
        }
skip:
        arg->count++;
}

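/* Dump the handle, actions, ematches and action stats of the filter. */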
static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, void *fh,
                           struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
        struct cls_cgroup_head *head = rtnl_dereference(tp->root);
        struct nlattr *nest;

        t->tcm_handle = head->handle;

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &head->exts) < 0 ||
            tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &head->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
        .kind           =       "cgroup",
        .init           =       cls_cgroup_init,
        .change         =       cls_cgroup_change,
        .classify       =       cls_cgroup_classify,
        .destroy        =       cls_cgroup_destroy,
        .get            =       cls_cgroup_get,
        .delete         =       cls_cgroup_delete,
        .walk           =       cls_cgroup_walk,
        .dump           =       cls_cgroup_dump,
        .owner          =       THIS_MODULE,
};

static int __init init_cgroup_cls(void)
{
        return register_tcf_proto_ops(&cls_cgroup_ops);
}

static void __exit exit_cgroup_cls(void)
{
        unregister_tcf_proto_ops(&cls_cgroup_ops);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");