linux/drivers/net/ethernet/netronome/nfp/bpf/offload.c
/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * bpf/offload.c
 * Netronome network device driver: BPF offload functions for PF and VF
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/mm.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

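/* Copy the program's instructions into per-instruction metadata entries
 * on the nfp_prog list and run the pre-JIT preparation pass over them.
 */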
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
		 unsigned int cnt)
{
	struct nfp_insn_meta *meta;
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
		if (!meta)
			return -ENOMEM;

		meta->insn = prog[i];
		meta->n = i;

		list_add_tail(&meta->l, &nfp_prog->insns);
	}

	nfp_bpf_jit_prepare(nfp_prog, cnt);

	return 0;
}

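/* Release all instruction metadata together with nfp_prog itself. */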
static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *tmp;

	list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
		list_del(&meta->l);
		kfree(meta);
	}
	kfree(nfp_prog);
}

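/* BPF_OFFLOAD_VERIFIER_PREP: allocate the driver-private program state
 * and point the verifier at the device-specific callbacks before the
 * kernel verifier runs.
 */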
static int
nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
		      struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->verifier.prog;
	struct nfp_prog *nfp_prog;
	int ret;

	nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
	if (!nfp_prog)
		return -ENOMEM;
	prog->aux->offload->dev_priv = nfp_prog;

	INIT_LIST_HEAD(&nfp_prog->insns);
	nfp_prog->type = prog->type;
	nfp_prog->bpf = app->priv;

	ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
	if (ret)
		goto err_free;

	nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
	bpf->verifier.ops = &nfp_bpf_analyzer_ops;

	return 0;

err_free:
	nfp_prog_free(nfp_prog);

	return ret;
}

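/* BPF_OFFLOAD_TRANSLATE: check the program against the stack size (FW
 * reports it in 64 byte units) and instruction count limits, then JIT
 * it into NFP machine code.
 */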
static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int stack_size;
	unsigned int max_instr;
	int err;

	stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
	if (prog->aux->stack_depth > stack_size) {
		nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
			prog->aux->stack_depth, stack_size);
		return -EOPNOTSUPP;
	}
	nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4);

	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
	nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

	nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
	if (!nfp_prog->prog)
		return -ENOMEM;

	err = nfp_bpf_jit(nfp_prog);
	if (err)
		return err;

	prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
	prog->aux->offload->jited_image = nfp_prog->prog;

	return 0;
}

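/* BPF_OFFLOAD_DESTROY: free the JITed image and the driver-private state. */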
static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

	kvfree(nfp_prog->prog);
	nfp_prog_free(nfp_prog);

	return 0;
}

/* The atomic engine requires values to be in big endian, so we need to
 * byte swap the value words used with xadd.
 */
static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value)
{
	u32 *word = value;
	unsigned int i;

	for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
		if (nfp_map->use_map[i] == NFP_MAP_USE_ATOMIC_CNT)
			word[i] = (__force u32)cpu_to_be32(word[i]);
}

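/* Lookup reads the element from the device, then swaps any words used
 * as atomic counters back into host byte order.
 */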
static int
nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap,
			 void *key, void *value)
{
	int err;

	err = nfp_bpf_ctrl_lookup_entry(offmap, key, value);
	if (err)
		return err;

	nfp_map_bpf_byte_swap(offmap->dev_priv, value);
	return 0;
}

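/* Update swaps atomic counter words into device (big endian) byte order
 * before writing the element out.
 */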
static int
nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap,
			 void *key, void *value, u64 flags)
{
	nfp_map_bpf_byte_swap(offmap->dev_priv, value);
	return nfp_bpf_ctrl_update_entry(offmap, key, value, flags);
}

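/* A NULL key asks for the first entry, anything else for the successor
 * of @key.
 */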
static int
nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
			 void *key, void *next_key)
{
	if (!key)
		return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
	return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
}

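/* Array map elements cannot be deleted, only overwritten. */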
static int
nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
		return -EINVAL;
	return nfp_bpf_ctrl_del_entry(offmap, key);
}

static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
	.map_get_next_key	= nfp_bpf_map_get_next_key,
	.map_lookup_elem	= nfp_bpf_map_lookup_entry,
	.map_update_elem	= nfp_bpf_map_update_entry,
	.map_delete_elem	= nfp_bpf_map_delete_elem,
};

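/* BPF_OFFLOAD_MAP_ALLOC: validate the map against the limits the FW
 * advertised (type, flags, map and element counts, key/value sizes)
 * and allocate a table on the device.
 */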
static int
nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
	struct nfp_bpf_map *nfp_map;
	unsigned int use_map_size;
	long long int res;

	if (!bpf->maps.types)
		return -EOPNOTSUPP;

	if (offmap->map.map_flags ||
	    offmap->map.numa_node != NUMA_NO_NODE) {
		pr_info("map flags are not supported\n");
		return -EINVAL;
	}

	if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
		pr_info("map type not supported\n");
		return -EOPNOTSUPP;
	}
	if (bpf->maps.max_maps == bpf->maps_in_use) {
		pr_info("too many maps for a device\n");
		return -ENOMEM;
	}
	if (bpf->maps.max_elems - bpf->map_elems_in_use <
	    offmap->map.max_entries) {
		pr_info("map with too many elements: %u, left: %u\n",
			offmap->map.max_entries,
			bpf->maps.max_elems - bpf->map_elems_in_use);
		return -ENOMEM;
	}
	if (offmap->map.key_size > bpf->maps.max_key_sz ||
	    offmap->map.value_size > bpf->maps.max_val_sz ||
	    round_up(offmap->map.key_size, 8) +
	    round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
		pr_info("elements don't fit in device constraints\n");
		return -ENOMEM;
	}

	use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) *
		       FIELD_SIZEOF(struct nfp_bpf_map, use_map[0]);

	nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER);
	if (!nfp_map)
		return -ENOMEM;

	offmap->dev_priv = nfp_map;
	nfp_map->offmap = offmap;
	nfp_map->bpf = bpf;

	res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
	if (res < 0) {
		kfree(nfp_map);
		return res;
	}

	nfp_map->tid = res;
	offmap->dev_ops = &nfp_bpf_map_ops;
	bpf->maps_in_use++;
	bpf->map_elems_in_use += offmap->map.max_entries;
	list_add_tail(&nfp_map->l, &bpf->map_list);

	return 0;
}

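/* BPF_OFFLOAD_MAP_FREE: release the device table and roll back the
 * usage accounting.
 */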
static int
nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
	struct nfp_bpf_map *nfp_map = offmap->dev_priv;

	nfp_bpf_ctrl_free_map(bpf, nfp_map);
	list_del_init(&nfp_map->l);
	bpf->map_elems_in_use -= offmap->map.max_entries;
	bpf->maps_in_use--;
	kfree(nfp_map);

	return 0;
}

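/* Dispatch ndo_bpf offload commands the BPF core sends to the driver. */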
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case BPF_OFFLOAD_VERIFIER_PREP:
		return nfp_bpf_verifier_prep(app, nn, bpf);
	case BPF_OFFLOAD_TRANSLATE:
		return nfp_bpf_translate(nn, bpf->offload.prog);
	case BPF_OFFLOAD_DESTROY:
		return nfp_bpf_destroy(nn, bpf->offload.prog);
	case BPF_OFFLOAD_MAP_ALLOC:
		return nfp_bpf_map_alloc(app->priv, bpf->offmap);
	case BPF_OFFLOAD_MAP_FREE:
		return nfp_bpf_map_free(app->priv, bpf->offmap);
	default:
		return -EINVAL;
	}
}

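/* Relocate the image for this vNIC, DMA it to the device and ask the
 * FW to load it.  Offload is refused if the netdev MTU exceeds what
 * the FW can handle in front of the HW packet split boundary.
 */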
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
		 struct netlink_ext_ack *extack)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int max_mtu;
	dma_addr_t dma_addr;
	void *img;
	int err;

	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	if (max_mtu < nn->dp.netdev->mtu) {
		NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with MTU larger than HW packet split boundary");
		return -EOPNOTSUPP;
	}

	img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
	if (IS_ERR(img))
		return PTR_ERR(img);

	dma_addr = dma_map_single(nn->dp.dev, img,
				  nfp_prog->prog_len * sizeof(u64),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(nn->dp.dev, dma_addr)) {
		kfree(img);
		return -ENOMEM;
	}

	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

	/* Load up the JITed code */
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "FW command error while loading BPF");

	dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
			 DMA_TO_DEVICE);
	kfree(img);

	return err;
}

static void
nfp_net_bpf_start(struct nfp_net *nn, struct netlink_ext_ack *extack)
{
	int err;

	/* Enable passing packets through BPF function */
	nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "FW command error while enabling BPF");
}

static int nfp_net_bpf_stop(struct nfp_net *nn)
{
	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
		return 0;

	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

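/* Install, replace or remove the offloaded program on a vNIC.  @prog
 * is the new program (NULL to remove it), @old_prog tells us whether
 * one is currently loaded.  Live replace requires FW reload support.
 */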
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack)
{
	int err;

	if (prog) {
		struct bpf_prog_offload *offload = prog->aux->offload;

		if (!offload)
			return -EINVAL;
		if (offload->netdev != nn->dp.netdev)
			return -EINVAL;
	}

	if (prog && old_prog) {
		u8 cap;

		cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
		if (!(cap & NFP_NET_BPF_CAP_RELO)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "FW does not support live reload");
			return -EBUSY;
		}
	}

	/* Something else is loaded, different program type? */
	if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return -EBUSY;

	if (old_prog && !prog)
		return nfp_net_bpf_stop(nn);

	err = nfp_net_bpf_load(nn, prog, extack);
	if (err)
		return err;

	if (!old_prog)
		nfp_net_bpf_start(nn, extack);

	return 0;
}