linux/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/timekeeping.h>

#include "../ccm.h"
#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"

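/* Allocate a control message skb with @size bytes of payload opened up
 * via skb_put().  Returns NULL if allocation fails.
 */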
static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{
        struct sk_buff *skb;

        skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
        if (!skb)
                return NULL;
        skb_put(skb, size);

        return skb;
}

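/* A map request/reply carrying @n entries is the fixed header plus @n
 * key/value slots of bpf->cmsg_key_sz and bpf->cmsg_val_sz bytes each.
 */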
static unsigned int
nfp_bpf_cmsg_map_req_size(struct nfp_app_bpf *bpf, unsigned int n)
{
        unsigned int size;

        size = sizeof(struct cmsg_req_map_op);
        size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;

        return size;
}

static struct sk_buff *
nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
{
        return nfp_bpf_cmsg_alloc(bpf, nfp_bpf_cmsg_map_req_size(bpf, n));
}

static unsigned int
nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
{
        unsigned int size;

        size = sizeof(struct cmsg_reply_map_op);
        size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;

        return size;
}

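/* Map a firmware return code from the reply header onto a negative errno,
 * treating out-of-range codes as an I/O error.
 */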
static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
                         struct cmsg_reply_map_simple *reply)
{
        static const int res_table[] = {
                [CMSG_RC_SUCCESS]       = 0,
                [CMSG_RC_ERR_MAP_FD]    = -EBADFD,
                [CMSG_RC_ERR_MAP_NOENT] = -ENOENT,
                [CMSG_RC_ERR_MAP_ERR]   = -EINVAL,
                [CMSG_RC_ERR_MAP_PARSE] = -EIO,
                [CMSG_RC_ERR_MAP_EXIST] = -EEXIST,
                [CMSG_RC_ERR_MAP_NOMEM] = -ENOMEM,
                [CMSG_RC_ERR_MAP_E2BIG] = -E2BIG,
        };
        u32 rc;

        rc = be32_to_cpu(reply->rc);
        if (rc >= ARRAY_SIZE(res_table)) {
                cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
                return -EIO;
        }

        return res_table[rc];
}

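/* Ask the firmware to allocate an offloaded table matching @map's key
 * size, value size, max_entries and type.  Returns the firmware table ID
 * on success, negative errno on failure.
 */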
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
{
        struct cmsg_reply_map_alloc_tbl *reply;
        struct cmsg_req_map_alloc_tbl *req;
        struct sk_buff *skb;
        u32 tid;
        int err;

        skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
        if (!skb)
                return -ENOMEM;

        req = (void *)skb->data;
        req->key_size = cpu_to_be32(map->key_size);
        req->value_size = cpu_to_be32(map->value_size);
        req->max_entries = cpu_to_be32(map->max_entries);
        req->map_type = cpu_to_be32(map->map_type);
        req->map_flags = 0;

        skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_ALLOC,
                                  sizeof(*reply));
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        reply = (void *)skb->data;
        err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
        if (err)
                goto err_free;

        tid = be32_to_cpu(reply->tid);
        dev_consume_skb_any(skb);

        return tid;
err_free:
        dev_kfree_skb_any(skb);
        return err;
}

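/* Ask the firmware to free table @nfp_map->tid.  Errors cannot be
 * propagated to the caller, so failures only leak the table on the
 * device and are logged.
 */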
void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
{
        struct cmsg_reply_map_free_tbl *reply;
        struct cmsg_req_map_free_tbl *req;
        struct sk_buff *skb;
        int err;

        skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
        if (!skb) {
                cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
                return;
        }

        req = (void *)skb->data;
        req->tid = cpu_to_be32(nfp_map->tid);

        skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_FREE,
                                  sizeof(*reply));
        if (IS_ERR(skb)) {
                cmsg_warn(bpf, "leaking map - I/O error\n");
                return;
        }

        reply = (void *)skb->data;
        err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
        if (err)
                cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);

        dev_consume_skb_any(skb);
}

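/* Entries in map request/reply payloads are laid out as consecutive
 * (key, value) pairs; the helpers below return pointers to the key or
 * value of pair @n.
 */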
static void *
nfp_bpf_ctrl_req_key(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
                     unsigned int n)
{
        return &req->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
}

static void *
nfp_bpf_ctrl_req_val(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
                     unsigned int n)
{
        return &req->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
}

static void *
nfp_bpf_ctrl_reply_key(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
                       unsigned int n)
{
        return &reply->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
}

static void *
nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
                       unsigned int n)
{
        return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
}

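/* Classify ops with respect to the lookup/getnext cache: updates and
 * deletes invalidate it, lookup/getnext may be served from it, and
 * getfirst/getnext replies may be used to fill it.
 */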
static bool nfp_bpf_ctrl_op_cache_invalidate(enum nfp_ccm_type op)
{
        return op == NFP_CCM_TYPE_BPF_MAP_UPDATE ||
               op == NFP_CCM_TYPE_BPF_MAP_DELETE;
}

static bool nfp_bpf_ctrl_op_cache_capable(enum nfp_ccm_type op)
{
        return op == NFP_CCM_TYPE_BPF_MAP_LOOKUP ||
               op == NFP_CCM_TYPE_BPF_MAP_GETNEXT;
}

static bool nfp_bpf_ctrl_op_cache_fill(enum nfp_ccm_type op)
{
        return op == NFP_CCM_TYPE_BPF_MAP_GETFIRST ||
               op == NFP_CCM_TYPE_BPF_MAP_GETNEXT;
}

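/* Try to serve @op from the cached reply.  Returns the number of entries
 * to request from the firmware, or 0 if the result was already copied to
 * @out_key/@out_value from the cache.  The cache generation is reported
 * through @cache_gen so nfp_bpf_ctrl_op_cache_put() can detect races.
 */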
static unsigned int
nfp_bpf_ctrl_op_cache_get(struct nfp_bpf_map *nfp_map, enum nfp_ccm_type op,
                          const u8 *key, u8 *out_key, u8 *out_value,
                          u32 *cache_gen)
{
        struct bpf_map *map = &nfp_map->offmap->map;
        struct nfp_app_bpf *bpf = nfp_map->bpf;
        unsigned int i, count, n_entries;
        struct cmsg_reply_map_op *reply;

        n_entries = nfp_bpf_ctrl_op_cache_fill(op) ? bpf->cmsg_cache_cnt : 1;

        spin_lock(&nfp_map->cache_lock);
        *cache_gen = nfp_map->cache_gen;
        if (nfp_map->cache_blockers)
                n_entries = 1;

        if (nfp_bpf_ctrl_op_cache_invalidate(op))
                goto exit_block;
        if (!nfp_bpf_ctrl_op_cache_capable(op))
                goto exit_unlock;

        if (!nfp_map->cache)
                goto exit_unlock;
        if (nfp_map->cache_to < ktime_get_ns())
                goto exit_invalidate;

        reply = (void *)nfp_map->cache->data;
        count = be32_to_cpu(reply->count);

        for (i = 0; i < count; i++) {
                void *cached_key;

                cached_key = nfp_bpf_ctrl_reply_key(bpf, reply, i);
                if (memcmp(cached_key, key, map->key_size))
                        continue;

                if (op == NFP_CCM_TYPE_BPF_MAP_LOOKUP)
                        memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, i),
                               map->value_size);
                if (op == NFP_CCM_TYPE_BPF_MAP_GETNEXT) {
                        if (i + 1 == count)
                                break;

                        memcpy(out_key,
                               nfp_bpf_ctrl_reply_key(bpf, reply, i + 1),
                               map->key_size);
                }

                n_entries = 0;
                goto exit_unlock;
        }
        goto exit_unlock;

exit_block:
        nfp_map->cache_blockers++;
exit_invalidate:
        dev_consume_skb_any(nfp_map->cache);
        nfp_map->cache = NULL;
exit_unlock:
        spin_unlock(&nfp_map->cache_lock);
        return n_entries;
}

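/* Finish @op's interaction with the cache: release the blocker taken by
 * cache-invalidating ops and bump the generation, then install @skb as
 * the new cached reply if @op was a fill, no blockers remain and nothing
 * invalidated the cache in the meantime.
 */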
static void
nfp_bpf_ctrl_op_cache_put(struct nfp_bpf_map *nfp_map, enum nfp_ccm_type op,
                          struct sk_buff *skb, u32 cache_gen)
{
        bool blocker, filler;

        blocker = nfp_bpf_ctrl_op_cache_invalidate(op);
        filler = nfp_bpf_ctrl_op_cache_fill(op);
        if (blocker || filler) {
                u64 to = 0;

                if (filler)
                        to = ktime_get_ns() + NFP_BPF_MAP_CACHE_TIME_NS;

                spin_lock(&nfp_map->cache_lock);
                if (blocker) {
                        nfp_map->cache_blockers--;
                        nfp_map->cache_gen++;
                }
                if (filler && !nfp_map->cache_blockers &&
                    nfp_map->cache_gen == cache_gen) {
                        nfp_map->cache_to = to;
                        swap(nfp_map->cache, skb);
                }
                spin_unlock(&nfp_map->cache_lock);
        }

        dev_consume_skb_any(skb);
}

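/* Common implementation of all map entry ops: consult the cache, build
 * and send the request, sanity check the reply length against the entry
 * count reported by the firmware, and copy any outputs back to the caller.
 */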
static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op,
                      u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
        struct nfp_bpf_map *nfp_map = offmap->dev_priv;
        unsigned int n_entries, reply_entries, count;
        struct nfp_app_bpf *bpf = nfp_map->bpf;
        struct bpf_map *map = &offmap->map;
        struct cmsg_reply_map_op *reply;
        struct cmsg_req_map_op *req;
        struct sk_buff *skb;
        u32 cache_gen;
        int err;

        /* FW messages have no space for more than 32 bits of flags */
        if (flags >> 32)
                return -EOPNOTSUPP;

        /* Handle op cache */
        n_entries = nfp_bpf_ctrl_op_cache_get(nfp_map, op, key, out_key,
                                              out_value, &cache_gen);
        if (!n_entries)
                return 0;

        skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
        if (!skb) {
                err = -ENOMEM;
                goto err_cache_put;
        }

        req = (void *)skb->data;
        req->tid = cpu_to_be32(nfp_map->tid);
        req->count = cpu_to_be32(n_entries);
        req->flags = cpu_to_be32(flags);

        /* Copy inputs */
        if (key)
                memcpy(nfp_bpf_ctrl_req_key(bpf, req, 0), key, map->key_size);
        if (value)
                memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
                       map->value_size);

        skb = nfp_ccm_communicate(&bpf->ccm, skb, op, 0);
        if (IS_ERR(skb)) {
                err = PTR_ERR(skb);
                goto err_cache_put;
        }

        if (skb->len < sizeof(*reply)) {
                cmsg_warn(bpf, "cmsg drop - type 0x%02x too short %d!\n",
                          op, skb->len);
                err = -EIO;
                goto err_free;
        }

        reply = (void *)skb->data;
        count = be32_to_cpu(reply->count);
        err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
        /* FW responds with message sized to hold the good entries,
         * plus one extra entry if there was an error.
         */
        reply_entries = count + !!err;
        if (n_entries > 1 && count)
                err = 0;
        if (err)
                goto err_free;

        if (skb->len != nfp_bpf_cmsg_map_reply_size(bpf, reply_entries)) {
                cmsg_warn(bpf, "cmsg drop - type 0x%02x too short %d for %d entries!\n",
                          op, skb->len, reply_entries);
                err = -EIO;
                goto err_free;
        }

        /* Copy outputs */
        if (out_key)
                memcpy(out_key, nfp_bpf_ctrl_reply_key(bpf, reply, 0),
                       map->key_size);
        if (out_value)
                memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0),
                       map->value_size);

        nfp_bpf_ctrl_op_cache_put(nfp_map, op, skb, cache_gen);

        return 0;
err_free:
        dev_kfree_skb_any(skb);
err_cache_put:
        nfp_bpf_ctrl_op_cache_put(nfp_map, op, NULL, cache_gen);
        return err;
}

int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
                              void *key, void *value, u64 flags)
{
        return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_UPDATE,
                                     key, value, flags, NULL, NULL);
}

int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{
        return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_DELETE,
                                     key, NULL, 0, NULL, NULL);
}

int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
                              void *key, void *value)
{
        return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_LOOKUP,
                                     key, NULL, 0, NULL, value);
}

int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
                                void *next_key)
{
        return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETFIRST,
                                     NULL, NULL, 0, next_key, NULL);
}

int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
                               void *key, void *next_key)
{
        return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETNEXT,
                                     key, NULL, 0, next_key, NULL);
}

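/* Control channel MTU bounds: the minimum that fits a single-entry
 * request and reply, and the size needed to carry a full cache fill.
 */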
unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf)
{
        return max(nfp_bpf_cmsg_map_req_size(bpf, 1),
                   nfp_bpf_cmsg_map_reply_size(bpf, 1));
}

unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
{
        return max3(NFP_NET_DEFAULT_MTU,
                    nfp_bpf_cmsg_map_req_size(bpf, NFP_BPF_MAP_CACHE_CNT),
                    nfp_bpf_cmsg_map_reply_size(bpf, NFP_BPF_MAP_CACHE_CNT));
}

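/* Number of entries to request per cache fill: as many as fit in both
 * the request and the reply at the control channel MTU, capped at
 * NFP_BPF_MAP_CACHE_CNT.
 */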
unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf *bpf)
{
        unsigned int mtu, req_max, reply_max, entry_sz;

        mtu = bpf->app->ctrl->dp.mtu;
        entry_sz = bpf->cmsg_key_sz + bpf->cmsg_val_sz;
        req_max = (mtu - sizeof(struct cmsg_req_map_op)) / entry_sz;
        reply_max = (mtu - sizeof(struct cmsg_reply_map_op)) / entry_sz;

        return min3(req_max, reply_max, NFP_BPF_MAP_CACHE_CNT);
}

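/* Receive handler for control messages delivered as skbs; BPF perf
 * events are forwarded to nfp_bpf_event_output(), everything else goes
 * through the generic CCM receive path.
 */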
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_app_bpf *bpf = app->priv;

        if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
                cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
                dev_kfree_skb_any(skb);
                return;
        }

        if (nfp_ccm_get_type(skb) == NFP_CCM_TYPE_BPF_BPF_EVENT) {
                if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
                        dev_consume_skb_any(skb);
                else
                        dev_kfree_skb_any(skb);
                return;
        }

        nfp_ccm_rx(&bpf->ccm, skb);
}

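/* Receive handler for control messages delivered in a raw buffer; only
 * BPF perf events can be handled without an skb.
 */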
void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
{
        const struct nfp_ccm_hdr *hdr = data;
        struct nfp_app_bpf *bpf = app->priv;

        if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) {
                cmsg_warn(bpf, "cmsg drop - too short %d!\n", len);
                return;
        }

        if (hdr->type == NFP_CCM_TYPE_BPF_BPF_EVENT)
                nfp_bpf_event_output(bpf, data, len);
        else
                cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n",
                          hdr->type);
}