linux/net/sched/cls_bpf.c
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
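
/*
 * Illustrative user-space usage (assuming an iproute2 build with eBPF
 * support; the object file name and section title are placeholders, not
 * anything mandated by this module):
 *
 *   tc qdisc add dev em1 clsact
 *   tc filter add dev em1 ingress bpf da obj cls.o sec classifier
 *
 * "da" requests direct-action mode (TCA_BPF_FLAG_ACT_DIRECT below), in
 * which the program's return code is used directly as the TC verdict.
 */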

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

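/* Per-classifier instance state: the list of attached programs plus a
 * handle generation counter used when the user does not supply one.
 */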
struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

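/* One attached filter. Classic BPF programs keep their original
 * sock_filter ops around for dumping (bpf_ops/bpf_num_ops), while eBPF
 * programs are referenced by fd plus an optional name; the two cases
 * are told apart by bpf_ops being NULL (see cls_bpf_is_ebpf()).
 */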
struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	bool offloaded;
	u32 gen_flags;
	struct tcf_exts exts;
	u32 handle;
	union {
		u32 bpf_fd;
		u16 bpf_num_ops;
	};
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

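/* In direct-action mode the program's return value is the TC verdict;
 * clamp anything outside the known opcodes to TC_ACT_UNSPEC so that
 * classification simply continues with the next program in the list.
 */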
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

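/* Main classification path. Runs each attached program in list order
 * until one produces a verdict. At ingress the MAC header is pushed
 * back first so programs see the packet from the link-layer start, as
 * they would at egress. A skip-sw program is never executed here; a
 * fixed "continue" result is used instead, since the hardware is
 * expected to have handled it.
 */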
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

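/* Issue a single offload command (add/replace/destroy/stats) to the
 * device behind the qdisc via its ndo_setup_tc() hook.
 */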
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       enum tc_clsbpf_command cmd)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_bpf_offload bpf_offload = {};
	struct tc_to_netdev offload;

	offload.type = TC_SETUP_CLSBPF;
	offload.cls_bpf = &bpf_offload;

	bpf_offload.command = cmd;
	bpf_offload.exts = &prog->exts;
	bpf_offload.prog = prog->filter;
	bpf_offload.name = prog->bpf_name;
	bpf_offload.exts_integrated = prog->exts_integrated;
	bpf_offload.gen_flags = prog->gen_flags;

	return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					     tp->protocol, &offload);
}

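/* Decide how a new program interacts with the hardware: replace an
 * already offloaded old program, destroy the old offload when the new
 * program cannot be offloaded but may still run in software, or add a
 * fresh offload. Offload failures are only fatal when skip_sw was
 * requested, i.e. when software fallback is not allowed.
 */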
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_bpf_prog *obj = prog;
	enum tc_clsbpf_command cmd;
	bool skip_sw;
	int ret;

	skip_sw = tc_skip_sw(prog->gen_flags) ||
		(oldprog && tc_skip_sw(oldprog->gen_flags));

	if (oldprog && oldprog->offloaded) {
		if (tc_should_offload(dev, tp, prog->gen_flags)) {
			cmd = TC_CLSBPF_REPLACE;
		} else if (!tc_skip_sw(prog->gen_flags)) {
			obj = oldprog;
			cmd = TC_CLSBPF_DESTROY;
		} else {
			return -EINVAL;
		}
	} else {
		if (!tc_should_offload(dev, tp, prog->gen_flags))
			return skip_sw ? -EINVAL : 0;
		cmd = TC_CLSBPF_ADD;
	}

	ret = cls_bpf_offload_cmd(tp, obj, cmd);
	if (ret)
		return skip_sw ? ret : 0;

	obj->offloaded = true;
	if (oldprog)
		oldprog->offloaded = false;

	return 0;
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog)
{
	int err;

	if (!prog->offloaded)
		return;

	err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
	if (err) {
		pr_err("Stopping hardware offload failed: %d\n", err);
		return;
	}

	prog->offloaded = false;
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	if (!prog->offloaded)
		return;

	cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

static void __cls_bpf_delete_prog(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	cls_bpf_delete_prog(prog->tp, prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;

	cls_bpf_stop_offload(tp, prog);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, __cls_bpf_delete_prog);

	return 0;
}

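/* Tear down the whole classifier instance. Unless forced, refuse while
 * filters are still attached; program frees are deferred past an RCU
 * grace period since the classify path walks the list locklessly.
 */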
static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	if (!force && !list_empty(&head->plist))
		return false;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		cls_bpf_stop_offload(tp, prog);
		list_del_rcu(&prog->link);
		tcf_unbind_filter(tp, &prog->res);
		call_rcu(&prog->rcu, __cls_bpf_delete_prog);
	}

	kfree_rcu(head, rcu);
	return true;
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

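/* Classic BPF setup: validate the instruction count advertised in
 * TCA_BPF_OPS_LEN against the attribute payload, copy the sock_filter
 * array, and let bpf_prog_create() check and JIT/convert the program.
 */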
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

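/* eBPF setup: take a reference on an already loaded SCHED_CLS program
 * via its file descriptor, keeping an optional user-supplied name only
 * for dumping it back. If the program may need skb dst entries and we
 * are not at ingress, stop the device from releasing the dst early.
 */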
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = kmemdup(nla_data(tb[TCA_BPF_NAME]),
			       nla_len(tb[TCA_BPF_NAME]),
			       GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_fd = bpf_fd;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}

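/* Common configuration path for new and replaced filters: exactly one
 * of classic BPF (TCA_BPF_OPS/OPS_LEN) or eBPF (TCA_BPF_FD) must be
 * given; actions and flags are validated before the program attaches.
 */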
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	struct tcf_exts exts;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		return ret;
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
			ret = -EINVAL;
			goto errout;
		}

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags)) {
			ret = -EINVAL;
			goto errout;
		}
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, tp);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	tcf_exts_change(tp, &prog->exts, &exts);
	return 0;

errout:
	tcf_exts_destroy(&exts);
	return ret;
}

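/* Auto-allocate a handle when the user passed none: advance the
 * generation counter (wrapping back to 1 before 0x7FFFFFFF) until a
 * free handle is found, giving up after 0x80000000 attempts and
 * returning 0 on failure.
 */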
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

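/* Netlink change/create entry point. Builds and configures a complete
 * new program instance, attempts hardware offload, and only then swaps
 * it into the RCU-protected list; the old program, if any, is released
 * after a grace period so concurrent classify calls stay safe.
 */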
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE],
				      ovr);
	if (ret < 0)
		goto errout;

	ret = cls_bpf_offload(tp, prog, oldprog);
	if (ret) {
		cls_bpf_delete_prog(tp, prog);
		return ret;
	}

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = (unsigned long) prog;
	return 0;

errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
		return -EMSGSIZE;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);