linux/net/sched/cls_bpf.c
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

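/*
 * Example usage (a sketch; exact iproute2 syntax varies by version):
 *
 *   # classic BPF, bytecode passed inline via TCA_BPF_OPS:
 *   tc filter add dev eth0 parent 1: bpf bytecode '4,40 0 0 12,...' \
 *           classid 1:1
 *
 *   # eBPF, program loaded beforehand via the bpf(2) syscall and
 *   # passed as a file descriptor in TCA_BPF_FD:
 *   tc filter add dev eth0 parent 1: bpf obj prog.o classid 1:1
 */
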
#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN        256

struct cls_bpf_head {
        struct list_head plist;
        u32 hgen;
        struct rcu_head rcu;
};

struct cls_bpf_prog {
        struct bpf_prog *filter;
        struct list_head link;
        struct tcf_result res;
        struct tcf_exts exts;
        u32 handle;
        union {
                u32 bpf_fd;
                u16 bpf_num_ops;
        };
        struct sock_filter *bpf_ops;
        const char *bpf_name;
        struct tcf_proto *tp;
        struct rcu_head rcu;
};

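/*
 * A filter is configured either from classic BPF bytecode
 * (TCA_BPF_OPS_LEN plus TCA_BPF_OPS) or from an already loaded eBPF
 * program (TCA_BPF_FD, optionally named via TCA_BPF_NAME); the union
 * in struct cls_bpf_prog above mirrors this either/or choice.
 */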
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
        [TCA_BPF_CLASSID]       = { .type = NLA_U32 },
        [TCA_BPF_FD]            = { .type = NLA_U32 },
        [TCA_BPF_NAME]          = { .type = NLA_NUL_STRING, .len = CLS_BPF_NAME_LEN },
        [TCA_BPF_OPS_LEN]       = { .type = NLA_U16 },
        [TCA_BPF_OPS]           = { .type = NLA_BINARY,
                                    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

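/*
 * Run each attached program on the skb in list order. A return value
 * of 0 means no match (fall through to the next program), -1 selects
 * the classid configured via TCA_BPF_CLASSID, and any other value is
 * used as the classid directly.
 */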
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                            struct tcf_result *res)
{
        struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
        struct cls_bpf_prog *prog;
        int ret = -1;

        if (unlikely(!skb_mac_header_was_set(skb)))
                return -1;

        /* Needed here for accessing maps. */
        rcu_read_lock();
        list_for_each_entry_rcu(prog, &head->plist, link) {
                int filter_res = BPF_PROG_RUN(prog->filter, skb);

                if (filter_res == 0)
                        continue;

                *res = prog->res;
                if (filter_res != -1)
                        res->classid = filter_res;

                ret = tcf_exts_exec(skb, &prog->exts, res);
                if (ret < 0)
                        continue;

                break;
        }
        rcu_read_unlock();

        return ret;
}

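/* Only classic BPF keeps a copy of its ops around (for dumping them
 * back to userspace), so a NULL bpf_ops identifies an eBPF program.
 */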
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
        return !prog->bpf_ops;
}

static int cls_bpf_init(struct tcf_proto *tp)
{
        struct cls_bpf_head *head;

        head = kzalloc(sizeof(*head), GFP_KERNEL);
        if (head == NULL)
                return -ENOBUFS;

        INIT_LIST_HEAD_RCU(&head->plist);
        rcu_assign_pointer(tp->root, head);

        return 0;
}

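/*
 * Teardown is RCU-deferred: a filter is first unlinked from the list,
 * then freed via call_rcu() once no classify call still running under
 * rcu_read_lock() can see it.
 */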
static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
        tcf_exts_destroy(&prog->exts);

        if (cls_bpf_is_ebpf(prog))
                bpf_prog_put(prog->filter);
        else
                bpf_prog_destroy(prog->filter);

        kfree(prog->bpf_name);
        kfree(prog->bpf_ops);
        kfree(prog);
}

static void __cls_bpf_delete_prog(struct rcu_head *rcu)
{
        struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

        cls_bpf_delete_prog(prog->tp, prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
        struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;

        list_del_rcu(&prog->link);
        tcf_unbind_filter(tp, &prog->res);
        call_rcu(&prog->rcu, __cls_bpf_delete_prog);

        return 0;
}

static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog, *tmp;

        if (!force && !list_empty(&head->plist))
                return false;

        list_for_each_entry_safe(prog, tmp, &head->plist, link) {
                list_del_rcu(&prog->link);
                tcf_unbind_filter(tp, &prog->res);
                call_rcu(&prog->rcu, __cls_bpf_delete_prog);
        }

        RCU_INIT_POINTER(tp->root, NULL);
        kfree_rcu(head, rcu);
        return true;
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;
        unsigned long ret = 0UL;

        if (head == NULL)
                return 0UL;

        list_for_each_entry(prog, &head->plist, link) {
                if (prog->handle == handle) {
                        ret = (unsigned long) prog;
                        break;
                }
        }

        return ret;
}

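/*
 * Classic BPF setup: copy the sock_filter array out of the netlink
 * attribute, check its size against TCA_BPF_OPS_LEN, and have
 * bpf_prog_create() translate (and possibly JIT) it into a bpf_prog.
 */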
static int cls_bpf_prog_from_ops(struct nlattr **tb,
                                 struct cls_bpf_prog *prog, u32 classid)
{
        struct sock_filter *bpf_ops;
        struct sock_fprog_kern fprog_tmp;
        struct bpf_prog *fp;
        u16 bpf_size, bpf_num_ops;
        int ret;

        bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
        if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
                return -EINVAL;

        bpf_size = bpf_num_ops * sizeof(*bpf_ops);
        if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
                return -EINVAL;

        bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
        if (bpf_ops == NULL)
                return -ENOMEM;

        memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

        fprog_tmp.len = bpf_num_ops;
        fprog_tmp.filter = bpf_ops;

        ret = bpf_prog_create(&fp, &fprog_tmp);
        if (ret < 0) {
                kfree(bpf_ops);
                return ret;
        }

        prog->bpf_ops = bpf_ops;
        prog->bpf_num_ops = bpf_num_ops;
        prog->bpf_name = NULL;

        prog->filter = fp;
        prog->res.classid = classid;

        return 0;
}

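/*
 * eBPF setup: the program was already loaded and verified through the
 * bpf(2) syscall, so only a reference is taken on the given fd, and
 * programs not of type BPF_PROG_TYPE_SCHED_CLS are rejected.
 */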
static int cls_bpf_prog_from_efd(struct nlattr **tb,
                                 struct cls_bpf_prog *prog, u32 classid)
{
        struct bpf_prog *fp;
        char *name = NULL;
        u32 bpf_fd;

        bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

        fp = bpf_prog_get(bpf_fd);
        if (IS_ERR(fp))
                return PTR_ERR(fp);

        if (fp->type != BPF_PROG_TYPE_SCHED_CLS) {
                bpf_prog_put(fp);
                return -EINVAL;
        }

        if (tb[TCA_BPF_NAME]) {
                name = kmemdup(nla_data(tb[TCA_BPF_NAME]),
                               nla_len(tb[TCA_BPF_NAME]),
                               GFP_KERNEL);
                if (!name) {
                        bpf_prog_put(fp);
                        return -ENOMEM;
                }
        }

        prog->bpf_ops = NULL;
        prog->bpf_fd = bpf_fd;
        prog->bpf_name = name;

        prog->filter = fp;
        prog->res.classid = classid;

        return 0;
}

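/*
 * Common configuration step for new and replaced filters: exactly one
 * of the classic BPF attributes or TCA_BPF_FD must be present, along
 * with TCA_BPF_CLASSID, and the extended actions (tcf_exts) are
 * validated before the program itself is set up.
 */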
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
                                   struct cls_bpf_prog *prog,
                                   unsigned long base, struct nlattr **tb,
                                   struct nlattr *est, bool ovr)
{
        struct tcf_exts exts;
        bool is_bpf, is_ebpf;
        u32 classid;
        int ret;

        is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
        is_ebpf = tb[TCA_BPF_FD];

        if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) ||
            !tb[TCA_BPF_CLASSID])
                return -EINVAL;

        tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
        ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
        if (ret < 0)
                return ret;

        classid = nla_get_u32(tb[TCA_BPF_CLASSID]);

        ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog, classid) :
                       cls_bpf_prog_from_efd(tb, prog, classid);
        if (ret < 0) {
                tcf_exts_destroy(&exts);
                return ret;
        }

        tcf_bind_filter(tp, &prog->res, base);
        tcf_exts_change(tp, &prog->exts, &exts);

        return 0;
}

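/*
 * Auto-allocate a handle: keep incrementing hgen (wrapping from
 * 0x7FFFFFFF back to 1) until an unused handle is found, giving up
 * after 0x80000000 attempts.
 */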
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
                                   struct cls_bpf_head *head)
{
        unsigned int i = 0x80000000;
        u32 handle;

        do {
                if (++head->hgen == 0x7FFFFFFF)
                        head->hgen = 1;
        } while (--i > 0 && cls_bpf_get(tp, head->hgen));

        if (unlikely(i == 0)) {
                pr_err("Insufficient number of handles\n");
                handle = 0;
        } else {
                handle = head->hgen;
        }

        return handle;
}

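/*
 * Create or replace a filter. A replacement is fully set up first and
 * then swapped in with list_replace_rcu(), so concurrent classify
 * calls always see either the complete old or the complete new
 * program.
 */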
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                          struct tcf_proto *tp, unsigned long base,
                          u32 handle, struct nlattr **tca,
                          unsigned long *arg, bool ovr)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
        struct nlattr *tb[TCA_BPF_MAX + 1];
        struct cls_bpf_prog *prog;
        int ret;

        if (tca[TCA_OPTIONS] == NULL)
                return -EINVAL;

        ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
        if (ret < 0)
                return ret;

        prog = kzalloc(sizeof(*prog), GFP_KERNEL);
        if (!prog)
                return -ENOBUFS;

        tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);

        if (oldprog) {
                if (handle && oldprog->handle != handle) {
                        ret = -EINVAL;
                        goto errout;
                }
        }

        if (handle == 0)
                prog->handle = cls_bpf_grab_new_handle(tp, head);
        else
                prog->handle = handle;
        if (prog->handle == 0) {
                ret = -EINVAL;
                goto errout;
        }

        ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
        if (ret < 0)
                goto errout;

        if (oldprog) {
                list_replace_rcu(&prog->link, &oldprog->link);
                tcf_unbind_filter(tp, &oldprog->res);
                call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
        } else {
                list_add_rcu(&prog->link, &head->plist);
        }

        *arg = (unsigned long) prog;
        return 0;
errout:
        kfree(prog);

        return ret;
}

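/*
 * Dump helpers re-serialize the configuration for netlink dumps: the
 * classic BPF variant emits the stored bytecode, while the eBPF
 * variant emits only the original fd number and optional name, since
 * the instructions themselves stay in the kernel.
 */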
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
                                 struct sk_buff *skb)
{
        struct nlattr *nla;

        if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
                return -EMSGSIZE;

        nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
                          sizeof(struct sock_filter));
        if (nla == NULL)
                return -EMSGSIZE;

        memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

        return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
                                  struct sk_buff *skb)
{
        if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
                return -EMSGSIZE;

        if (prog->bpf_name &&
            nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
                return -EMSGSIZE;

        return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
                        struct sk_buff *skb, struct tcmsg *tm)
{
        struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
        struct nlattr *nest;
        int ret;

        if (prog == NULL)
                return skb->len;

        tm->tcm_handle = prog->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
                goto nla_put_failure;

        if (cls_bpf_is_ebpf(prog))
                ret = cls_bpf_dump_ebpf_info(prog, skb);
        else
                ret = cls_bpf_dump_bpf_info(prog, skb);
        if (ret)
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &prog->exts) < 0)
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;

        list_for_each_entry(prog, &head->plist, link) {
                if (arg->count < arg->skip)
                        goto skip;
                if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
skip:
                arg->count++;
        }
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
        .kind           =       "bpf",
        .owner          =       THIS_MODULE,
        .classify       =       cls_bpf_classify,
        .init           =       cls_bpf_init,
        .destroy        =       cls_bpf_destroy,
        .get            =       cls_bpf_get,
        .change         =       cls_bpf_change,
        .delete         =       cls_bpf_delete,
        .walk           =       cls_bpf_walk,
        .dump           =       cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
        return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
        unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);