linux/drivers/net/netdevsim/bpf.c
/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/rtnetlink.h>
#include <net/pkt_cls.h>

#include "netdevsim.h"

#define pr_vlog(env, fmt, ...)  \
        bpf_verifier_log_write(env, "[netdevsim] " fmt, ##__VA_ARGS__)

struct nsim_bpf_bound_prog {
        struct nsim_dev *nsim_dev;
        struct bpf_prog *prog;
        struct dentry *ddir;
        const char *state;
        bool is_loaded;
        struct list_head l;
};

#define NSIM_BPF_MAX_KEYS               2

struct nsim_bpf_bound_map {
        struct netdevsim *ns;
        struct bpf_offloaded_map *map;
        struct mutex mutex;
        struct nsim_map_entry {
                void *key;
                void *value;
        } entry[NSIM_BPF_MAX_KEYS];
        struct list_head l;
};

static int nsim_bpf_string_show(struct seq_file *file, void *data)
{
        const char **str = file->private;

        if (*str)
                seq_printf(file, "%s\n", *str);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(nsim_bpf_string);

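/* Per-instruction verifier callback: optionally sleep before the first
 * instruction to simulate a slow verifier, and write a message to the
 * verifier log once the last instruction is reached.
 */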
static int
nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
{
        struct nsim_bpf_bound_prog *state;

        state = env->prog->aux->offload->dev_priv;
        if (state->nsim_dev->bpf_bind_verifier_delay && !insn_idx)
                msleep(state->nsim_dev->bpf_bind_verifier_delay);

        if (insn_idx == env->prog->len - 1)
                pr_vlog(env, "Hello from netdevsim!\n");

        return 0;
}

static int nsim_bpf_finalize(struct bpf_verifier_env *env)
{
        return 0;
}

static bool nsim_xdp_offload_active(struct netdevsim *ns)
{
        return ns->xdp_hw.prog;
}

static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
{
        struct nsim_bpf_bound_prog *state;

        if (!prog || !prog->aux->offload)
                return;

        state = prog->aux->offload->dev_priv;
        state->is_loaded = loaded;
}

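/* Record @prog as the program offloaded to the device: mark the old program
 * as unloaded, warn if the driver state disagrees with @oldprog, then store
 * the new program and its id (exposed in debugfs) and mark it loaded.
 */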
static int
nsim_bpf_offload(struct netdevsim *ns, struct bpf_prog *prog, bool oldprog)
{
        nsim_prog_set_loaded(ns->bpf_offloaded, false);

        WARN(!!ns->bpf_offloaded != oldprog,
             "bad offload state, expected offload %sto be active",
             oldprog ? "" : "not ");
        ns->bpf_offloaded = prog;
        ns->bpf_offloaded_id = prog ? prog->aux->id : 0;
        nsim_prog_set_loaded(prog, true);

        return 0;
}

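/* TC block callback: accept only chain-0 cls_bpf/ETH_P_ALL offload requests
 * permitted by the debugfs knobs, reconcile @oldprog with the driver state,
 * and install or remove the program via nsim_bpf_offload().
 */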
int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
                               void *type_data, void *cb_priv)
{
        struct tc_cls_bpf_offload *cls_bpf = type_data;
        struct bpf_prog *prog = cls_bpf->prog;
        struct netdevsim *ns = cb_priv;
        struct bpf_prog *oldprog;

        if (type != TC_SETUP_CLSBPF) {
                NSIM_EA(cls_bpf->common.extack,
                        "only offload of BPF classifiers supported");
                return -EOPNOTSUPP;
        }

        if (!tc_cls_can_offload_and_chain0(ns->netdev, &cls_bpf->common))
                return -EOPNOTSUPP;

        if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
                NSIM_EA(cls_bpf->common.extack,
                        "only ETH_P_ALL supported as filter protocol");
                return -EOPNOTSUPP;
        }

        if (!ns->bpf_tc_accept) {
                NSIM_EA(cls_bpf->common.extack,
                        "netdevsim configured to reject BPF TC offload");
                return -EOPNOTSUPP;
        }
        /* Note: progs without skip_sw will probably not be dev bound */
        if (prog && !prog->aux->offload && !ns->bpf_tc_non_bound_accept) {
                NSIM_EA(cls_bpf->common.extack,
                        "netdevsim configured to reject unbound programs");
                return -EOPNOTSUPP;
        }

        if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
                return -EOPNOTSUPP;

        oldprog = cls_bpf->oldprog;

        /* Don't remove if oldprog doesn't match driver's state */
        if (ns->bpf_offloaded != oldprog) {
                oldprog = NULL;
                if (!cls_bpf->prog)
                        return 0;
                if (ns->bpf_offloaded) {
                        NSIM_EA(cls_bpf->common.extack,
                                "driver and netdev offload states mismatch");
                        return -EBUSY;
                }
        }

        return nsim_bpf_offload(ns, cls_bpf->prog, oldprog);
}

int nsim_bpf_disable_tc(struct netdevsim *ns)
{
        if (ns->bpf_offloaded && !nsim_xdp_offload_active(ns))
                return -EBUSY;
        return 0;
}

static int nsim_xdp_offload_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
{
        if (!nsim_xdp_offload_active(ns) && !bpf->prog)
                return 0;
        if (!nsim_xdp_offload_active(ns) && bpf->prog && ns->bpf_offloaded) {
                NSIM_EA(bpf->extack, "TC program is already loaded");
                return -EBUSY;
        }

        return nsim_bpf_offload(ns, bpf->prog, nsim_xdp_offload_active(ns));
}

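/* Common XDP attach path for driver and HW modes: validate the attachment
 * flags, honour the per-mode debugfs accept knobs, offload the program when
 * in HW mode, then record the attachment.
 */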
static int
nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf,
                  struct xdp_attachment_info *xdp)
{
        int err;

        if (!xdp_attachment_flags_ok(xdp, bpf))
                return -EBUSY;

        if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
                NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
                return -EOPNOTSUPP;
        }
        if (bpf->command == XDP_SETUP_PROG_HW && !ns->bpf_xdpoffload_accept) {
                NSIM_EA(bpf->extack, "XDP offload disabled in DebugFS");
                return -EOPNOTSUPP;
        }

        if (bpf->command == XDP_SETUP_PROG_HW) {
                err = nsim_xdp_offload_prog(ns, bpf);
                if (err)
                        return err;
        }

        xdp_attachment_setup(xdp, bpf);

        return 0;
}

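/* Allocate the bound-program state and expose it in debugfs in a new
 * directory under bpf_bound_progs/ (program id, verifier state, loaded flag).
 */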
static int nsim_bpf_create_prog(struct nsim_dev *nsim_dev,
                                struct bpf_prog *prog)
{
        struct nsim_bpf_bound_prog *state;
        char name[16];

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return -ENOMEM;

        state->nsim_dev = nsim_dev;
        state->prog = prog;
        state->state = "verify";

        /* Program id is not populated yet when we create the state. */
        sprintf(name, "%u", nsim_dev->prog_id_gen++);
        state->ddir = debugfs_create_dir(name, nsim_dev->ddir_bpf_bound_progs);
        if (IS_ERR_OR_NULL(state->ddir)) {
                kfree(state);
                return -ENOMEM;
        }

        debugfs_create_u32("id", 0400, state->ddir, &prog->aux->id);
        debugfs_create_file("state", 0400, state->ddir,
                            &state->state, &nsim_bpf_string_fops);
        debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);

        list_add_tail(&state->l, &nsim_dev->bpf_bound_progs);

        prog->aux->offload->dev_priv = state;

        return 0;
}

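/* BPF offload lifecycle callbacks: prepare allocates the bound-program state
 * (if binding is accepted), translate marks the program as "xlated", and
 * destroy tears the state down when the program is freed.
 */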
static int nsim_bpf_verifier_prep(struct bpf_prog *prog)
{
        struct nsim_dev *nsim_dev =
                        bpf_offload_dev_priv(prog->aux->offload->offdev);

        if (!nsim_dev->bpf_bind_accept)
                return -EOPNOTSUPP;

        return nsim_bpf_create_prog(nsim_dev, prog);
}

static int nsim_bpf_translate(struct bpf_prog *prog)
{
        struct nsim_bpf_bound_prog *state = prog->aux->offload->dev_priv;

        state->state = "xlated";
        return 0;
}

static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
{
        struct nsim_bpf_bound_prog *state;

        state = prog->aux->offload->dev_priv;
        WARN(state->is_loaded,
             "offload state destroyed while program still bound");
        debugfs_remove_recursive(state->ddir);
        list_del(&state->l);
        kfree(state);
}

static const struct bpf_prog_offload_ops nsim_bpf_dev_ops = {
        .insn_hook      = nsim_bpf_verify_insn,
        .finalize       = nsim_bpf_finalize,
        .prepare        = nsim_bpf_verifier_prep,
        .translate      = nsim_bpf_translate,
        .destroy        = nsim_bpf_destroy_prog,
};

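/* Sanity checks for attaching a program in driver (non-offloaded) XDP mode. */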
static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
{
        if (bpf->prog && bpf->prog->aux->offload) {
                NSIM_EA(bpf->extack, "attempt to load offloaded prog to drv");
                return -EINVAL;
        }
        if (ns->netdev->mtu > NSIM_XDP_MAX_MTU) {
                NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled");
                return -EINVAL;
        }
        return 0;
}

static int
nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
{
        struct nsim_bpf_bound_prog *state;

        if (!bpf->prog)
                return 0;

        if (!bpf->prog->aux->offload) {
                NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
                return -EINVAL;
        }
        if (!bpf_offload_dev_match(bpf->prog, ns->netdev)) {
                NSIM_EA(bpf->extack, "program bound to different dev");
                return -EINVAL;
        }

        state = bpf->prog->aux->offload->dev_priv;
        if (WARN_ON(strcmp(state->state, "xlated"))) {
                NSIM_EA(bpf->extack, "offloading program in bad state");
                return -EINVAL;
        }
        return 0;
}

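/* A bound map is simulated as a small array of kmalloc'ed key/value pairs
 * protected by a mutex; lookups are a linear scan over the occupied slots.
 */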
static bool
nsim_map_key_match(struct bpf_map *map, struct nsim_map_entry *e, void *key)
{
        return e->key && !memcmp(key, e->key, map->key_size);
}

static int nsim_map_key_find(struct bpf_offloaded_map *offmap, void *key)
{
        struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(nmap->entry); i++)
                if (nsim_map_key_match(&offmap->map, &nmap->entry[i], key))
                        return i;

        return -ENOENT;
}

static int
nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx)
{
        struct nsim_bpf_bound_map *nmap = offmap->dev_priv;

        nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER);
        if (!nmap->entry[idx].key)
                return -ENOMEM;
        nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER);
        if (!nmap->entry[idx].value) {
                kfree(nmap->entry[idx].key);
                nmap->entry[idx].key = NULL;
                return -ENOMEM;
        }

        return 0;
}

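/* Return the key of the first occupied entry after @key, or the first
 * occupied entry at all when @key is NULL or not found.
 */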
static int
nsim_map_get_next_key(struct bpf_offloaded_map *offmap,
                      void *key, void *next_key)
{
        struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
        int idx = -ENOENT;

        mutex_lock(&nmap->mutex);

        if (key)
                idx = nsim_map_key_find(offmap, key);
        if (idx == -ENOENT)
                idx = 0;
        else
                idx++;

        for (; idx < ARRAY_SIZE(nmap->entry); idx++) {
                if (nmap->entry[idx].key) {
                        memcpy(next_key, nmap->entry[idx].key,
                               offmap->map.key_size);
                        break;
                }
        }

        mutex_unlock(&nmap->mutex);

        if (idx == ARRAY_SIZE(nmap->entry))
                return -ENOENT;
        return 0;
}

static int
nsim_map_lookup_elem(struct bpf_offloaded_map *offmap, void *key, void *value)
{
        struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
        int idx;

        mutex_lock(&nmap->mutex);

        idx = nsim_map_key_find(offmap, key);
        if (idx >= 0)
                memcpy(value, nmap->entry[idx].value, offmap->map.value_size);

        mutex_unlock(&nmap->mutex);

        return idx < 0 ? idx : 0;
}

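/* Insert or update an entry, honouring BPF_EXIST/BPF_NOEXIST; new keys take
 * the first free slot and fail with -E2BIG once all slots are in use.
 */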
static int
nsim_map_update_elem(struct bpf_offloaded_map *offmap,
                     void *key, void *value, u64 flags)
{
        struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
        int idx, err = 0;

        mutex_lock(&nmap->mutex);

        idx = nsim_map_key_find(offmap, key);
        if (idx < 0 && flags == BPF_EXIST) {
                err = idx;
                goto exit_unlock;
        }
        if (idx >= 0 && flags == BPF_NOEXIST) {
                err = -EEXIST;
                goto exit_unlock;
        }

        if (idx < 0) {
                for (idx = 0; idx < ARRAY_SIZE(nmap->entry); idx++)
                        if (!nmap->entry[idx].key)
                                break;
                if (idx == ARRAY_SIZE(nmap->entry)) {
                        err = -E2BIG;
                        goto exit_unlock;
                }

                err = nsim_map_alloc_elem(offmap, idx);
                if (err)
                        goto exit_unlock;
        }

        memcpy(nmap->entry[idx].key, key, offmap->map.key_size);
        memcpy(nmap->entry[idx].value, value, offmap->map.value_size);
exit_unlock:
        mutex_unlock(&nmap->mutex);

        return err;
}

static int nsim_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
        struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
        int idx;

        if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
                return -EINVAL;

        mutex_lock(&nmap->mutex);

        idx = nsim_map_key_find(offmap, key);
        if (idx >= 0) {
                kfree(nmap->entry[idx].key);
                kfree(nmap->entry[idx].value);
                memset(&nmap->entry[idx], 0, sizeof(nmap->entry[idx]));
        }

        mutex_unlock(&nmap->mutex);

        return idx < 0 ? idx : 0;
}

static const struct bpf_map_dev_ops nsim_bpf_map_ops = {
        .map_get_next_key       = nsim_map_get_next_key,
        .map_lookup_elem        = nsim_map_lookup_elem,
        .map_update_elem        = nsim_map_update_elem,
        .map_delete_elem        = nsim_map_delete_elem,
};

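/* Allocate the simulated map state: only array and hash maps of at most
 * NSIM_BPF_MAX_KEYS entries and no flags are accepted; array maps are
 * pre-populated with an entry for every index so lookups always succeed.
 */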
static int
nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
{
        struct nsim_bpf_bound_map *nmap;
        int i, err;

        if (WARN_ON(offmap->map.map_type != BPF_MAP_TYPE_ARRAY &&
                    offmap->map.map_type != BPF_MAP_TYPE_HASH))
                return -EINVAL;
        if (offmap->map.max_entries > NSIM_BPF_MAX_KEYS)
                return -ENOMEM;
        if (offmap->map.map_flags)
                return -EINVAL;

        nmap = kzalloc(sizeof(*nmap), GFP_USER);
        if (!nmap)
                return -ENOMEM;

        offmap->dev_priv = nmap;
        nmap->ns = ns;
        nmap->map = offmap;
        mutex_init(&nmap->mutex);

        if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) {
                for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
                        u32 *key;

                        err = nsim_map_alloc_elem(offmap, i);
                        if (err)
                                goto err_free;
                        key = nmap->entry[i].key;
                        *key = i;
                }
        }

        offmap->dev_ops = &nsim_bpf_map_ops;
        list_add_tail(&nmap->l, &ns->nsim_dev->bpf_bound_maps);

        return 0;

err_free:
        while (--i >= 0) {
                kfree(nmap->entry[i].key);
                kfree(nmap->entry[i].value);
        }
        kfree(nmap);
        return err;
}

static void nsim_bpf_map_free(struct bpf_offloaded_map *offmap)
{
        struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
                kfree(nmap->entry[i].key);
                kfree(nmap->entry[i].value);
        }
        list_del_init(&nmap->l);
        mutex_destroy(&nmap->mutex);
        kfree(nmap);
}

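/* Entry point for netdev_bpf commands: XDP program queries and attachments
 * plus offloaded map allocation and freeing.
 */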
int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
        struct netdevsim *ns = netdev_priv(dev);
        int err;

        ASSERT_RTNL();

        switch (bpf->command) {
        case XDP_QUERY_PROG:
                return xdp_attachment_query(&ns->xdp, bpf);
        case XDP_QUERY_PROG_HW:
                return xdp_attachment_query(&ns->xdp_hw, bpf);
        case XDP_SETUP_PROG:
                err = nsim_setup_prog_checks(ns, bpf);
                if (err)
                        return err;

                return nsim_xdp_set_prog(ns, bpf, &ns->xdp);
        case XDP_SETUP_PROG_HW:
                err = nsim_setup_prog_hw_checks(ns, bpf);
                if (err)
                        return err;

                return nsim_xdp_set_prog(ns, bpf, &ns->xdp_hw);
        case BPF_OFFLOAD_MAP_ALLOC:
                if (!ns->bpf_map_accept)
                        return -EOPNOTSUPP;

                return nsim_bpf_map_alloc(ns, bpf->offmap);
        case BPF_OFFLOAD_MAP_FREE:
                nsim_bpf_map_free(bpf->offmap);
                return 0;
        default:
                return -EINVAL;
        }
}

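/* Per-device init: create the bpf_bound_progs debugfs directory, register
 * the BPF offload device and expose the bind accept/verifier-delay knobs.
 */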
int nsim_bpf_dev_init(struct nsim_dev *nsim_dev)
{
        int err;

        INIT_LIST_HEAD(&nsim_dev->bpf_bound_progs);
        INIT_LIST_HEAD(&nsim_dev->bpf_bound_maps);

        nsim_dev->ddir_bpf_bound_progs = debugfs_create_dir("bpf_bound_progs",
                                                            nsim_dev->ddir);
        if (IS_ERR_OR_NULL(nsim_dev->ddir_bpf_bound_progs))
                return -ENOMEM;

        nsim_dev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops, nsim_dev);
        err = PTR_ERR_OR_ZERO(nsim_dev->bpf_dev);
        if (err)
                return err;

        nsim_dev->bpf_bind_accept = true;
        debugfs_create_bool("bpf_bind_accept", 0600, nsim_dev->ddir,
                            &nsim_dev->bpf_bind_accept);
        debugfs_create_u32("bpf_bind_verifier_delay", 0600, nsim_dev->ddir,
                           &nsim_dev->bpf_bind_verifier_delay);
        return 0;
}

void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev)
{
        WARN_ON(!list_empty(&nsim_dev->bpf_bound_progs));
        WARN_ON(!list_empty(&nsim_dev->bpf_bound_maps));
        bpf_offload_dev_destroy(nsim_dev->bpf_dev);
}

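/* Per-port init: register the netdev with the offload device and expose the
 * offloaded program id and the per-port accept knobs in debugfs.
 */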
int nsim_bpf_init(struct netdevsim *ns)
{
        struct dentry *ddir = ns->nsim_dev_port->ddir;
        int err;

        err = bpf_offload_dev_netdev_register(ns->nsim_dev->bpf_dev,
                                              ns->netdev);
        if (err)
                return err;

        debugfs_create_u32("bpf_offloaded_id", 0400, ddir,
                           &ns->bpf_offloaded_id);

        ns->bpf_tc_accept = true;
        debugfs_create_bool("bpf_tc_accept", 0600, ddir,
                            &ns->bpf_tc_accept);
        debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ddir,
                            &ns->bpf_tc_non_bound_accept);
        ns->bpf_xdpdrv_accept = true;
        debugfs_create_bool("bpf_xdpdrv_accept", 0600, ddir,
                            &ns->bpf_xdpdrv_accept);
        ns->bpf_xdpoffload_accept = true;
        debugfs_create_bool("bpf_xdpoffload_accept", 0600, ddir,
                            &ns->bpf_xdpoffload_accept);

        ns->bpf_map_accept = true;
        debugfs_create_bool("bpf_map_accept", 0600, ddir,
                            &ns->bpf_map_accept);

        return 0;
}

void nsim_bpf_uninit(struct netdevsim *ns)
{
        WARN_ON(ns->xdp.prog);
        WARN_ON(ns->xdp_hw.prog);
        WARN_ON(ns->bpf_offloaded);
        bpf_offload_dev_netdev_unregister(ns->nsim_dev->bpf_dev, ns->netdev);
}