linux/drivers/net/ethernet/netronome/nfp/bpf/main.h
/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __NFP_BPF_H__
#define __NFP_BPF_H__ 1

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "../nfp_asm.h"
#include "fw.h"

/* For relocation logic, use the up-most byte of the branch instruction as
 * a scratch area.  Remember to clear this before sending instructions to
 * HW!
 */
#define OP_RELO_TYPE    0xff00000000000000ULL

enum nfp_relo_type {
        RELO_NONE = 0,
        /* standard internal jumps */
        RELO_BR_REL,
        /* internal jumps to parts of the outro */
        RELO_BR_GO_OUT,
        RELO_BR_GO_ABORT,
        /* external jumps to fixed addresses */
        RELO_BR_NEXT_PKT,
        RELO_BR_HELPER,
        /* immediate relocation against load address */
        RELO_IMMED_REL,
};
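
/* Illustrative sketch, not a helper from this header: the relocation type
 * sits in the OP_RELO_TYPE scratch byte, so it can be read with FIELD_GET()
 * from <linux/bitfield.h> and must be cleared before the instruction is
 * loaded onto the device:
 *
 *      enum nfp_relo_type type = FIELD_GET(OP_RELO_TYPE, insn);
 *
 *      insn &= ~OP_RELO_TYPE;          (clear scratch byte for HW)
 */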

/* To make absolute relocated branches (branches other than RELO_BR_REL)
 * distinguishable in user space dumps from normal jumps, add a large offset
 * to them.
 */
#define BR_OFF_RELO             15000
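
/* Example (illustrative): an absolute relocated branch such as
 * RELO_BR_GO_OUT is emitted with its target at/near BR_OFF_RELO, i.e.
 * around instruction address 15000 in a user space dump - far beyond any
 * real program address, and therefore easy to tell apart from an ordinary
 * RELO_BR_REL jump.
 */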

enum static_regs {
        STATIC_REG_IMMA         = 20, /* Bank AB */
        STATIC_REG_IMM          = 21, /* Bank AB */
        STATIC_REG_STACK        = 22, /* Bank A */
        STATIC_REG_PKT_LEN      = 22, /* Bank B */
};

enum pkt_vec {
        PKT_VEC_PKT_LEN         = 0,
        PKT_VEC_PKT_PTR         = 2,
};

#define pv_len(np)      reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np)  reg_lm(1, PKT_VEC_PKT_PTR)

#define stack_reg(np)   reg_a(STATIC_REG_STACK)
#define stack_imm(np)   imm_b(np)
#define plen_reg(np)    reg_b(STATIC_REG_PKT_LEN)
#define pptr_reg(np)    pv_ctm_ptr(np)
#define imm_a(np)       reg_a(STATIC_REG_IMM)
#define imm_b(np)       reg_b(STATIC_REG_IMM)
#define imma_a(np)      reg_a(STATIC_REG_IMMA)
#define imma_b(np)      reg_b(STATIC_REG_IMMA)
#define imm_both(np)    reg_both(STATIC_REG_IMM)
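
/* Usage sketch (illustrative; wrp_immed() and emit_alu() are the JIT's
 * emit helpers, their exact signatures are assumed here): the IMM/IMMA
 * scratch registers exist in both banks, so an emitter can pick whichever
 * side of the ALU needs the temporary, e.g.:
 *
 *      wrp_immed(nfp_prog, imm_b(nfp_prog), 0x12345678);
 *      emit_alu(nfp_prog, dst, reg_a(src), ALU_OP_XOR, imm_b(nfp_prog));
 */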

#define NFP_BPF_ABI_FLAGS       reg_imm(0)
#define   NFP_BPF_ABI_FLAG_MARK 1

/**
 * struct nfp_app_bpf - bpf app priv structure
 * @app:                backpointer to the app
 *
 * @tag_allocator:      bitmap of control message tags in use
 * @tag_alloc_next:     next tag bit to allocate
 * @tag_alloc_last:     next tag bit to be freed
 *
 * @cmsg_replies:       received cmsg replies waiting to be consumed
 * @cmsg_wq:            work queue for waiting for cmsg replies
 *
 * @map_list:           list of offloaded maps
 * @maps_in_use:        number of currently offloaded maps
 * @map_elems_in_use:   number of elements allocated to offloaded maps
 *
 * @adjust_head:        adjust head capability
 * @adjust_head.flags:          extra flags for adjust head
 * @adjust_head.off_min:        minimal packet offset within buffer required
 * @adjust_head.off_max:        maximum packet offset within buffer required
 * @adjust_head.guaranteed_sub: negative adjustment guaranteed possible
 * @adjust_head.guaranteed_add: positive adjustment guaranteed possible
 *
 * @maps:               map capability
 * @maps.types:                 supported map types
 * @maps.max_maps:              max number of maps supported
 * @maps.max_elems:             max number of entries in each map
 * @maps.max_key_sz:            max size of map key
 * @maps.max_val_sz:            max size of map value
 * @maps.max_elem_sz:           max size of map entry (key + value)
 *
 * @helpers:            helper addresses for various calls
 * @helpers.map_lookup:         map lookup helper address
 * @helpers.map_update:         map update helper address
 * @helpers.map_delete:         map delete helper address
 *
 * @pseudo_random:      FW initialized the pseudo-random machinery (CSRs)
 */
struct nfp_app_bpf {
        struct nfp_app *app;

        DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
        u16 tag_alloc_next;
        u16 tag_alloc_last;

        struct sk_buff_head cmsg_replies;
        struct wait_queue_head cmsg_wq;

        struct list_head map_list;
        unsigned int maps_in_use;
        unsigned int map_elems_in_use;

        struct nfp_bpf_cap_adjust_head {
                u32 flags;
                int off_min;
                int off_max;
                int guaranteed_sub;
                int guaranteed_add;
        } adjust_head;

        struct {
                u32 types;
                u32 max_maps;
                u32 max_elems;
                u32 max_key_sz;
                u32 max_val_sz;
                u32 max_elem_sz;
        } maps;

        struct {
                u32 map_lookup;
                u32 map_update;
                u32 map_delete;
        } helpers;

        bool pseudo_random;
};
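
/* Tag allocator sketch (illustrative, following the field descriptions
 * above): @tag_allocator is a ring of U16_MAX + 1 tag bits; tags are
 * allocated at @tag_alloc_next and freed in order at @tag_alloc_last,
 * both u16 so they wrap naturally:
 *
 *      tag = bpf->tag_alloc_next;
 *      if (test_bit(tag, bpf->tag_allocator))
 *              return -EAGAIN;         (all tags outstanding)
 *      set_bit(tag, bpf->tag_allocator);
 *      bpf->tag_alloc_next++;
 */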

enum nfp_bpf_map_use {
        NFP_MAP_UNUSED = 0,
        NFP_MAP_USE_READ,
        NFP_MAP_USE_WRITE,
        NFP_MAP_USE_ATOMIC_CNT,
};

/**
 * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
 * @offmap:     pointer to the offloaded BPF map
 * @bpf:        back pointer to bpf app private structure
 * @tid:        table id identifying map on datapath
 * @l:          link on the nfp_app_bpf->map_list list
 * @use_map:    map of how the value is used (in 4B chunks)
 */
struct nfp_bpf_map {
        struct bpf_offloaded_map *offmap;
        struct nfp_app_bpf *bpf;
        u32 tid;
        struct list_head l;
        enum nfp_bpf_map_use use_map[];
};
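
/* Allocation sketch (illustrative): @use_map is a flexible array with one
 * entry per 4B word of the map value, so the structure would be allocated
 * along these lines:
 *
 *      n = DIV_ROUND_UP(offmap->map.value_size, 4);
 *      nfp_map = kzalloc(sizeof(*nfp_map) +
 *                        n * sizeof(nfp_map->use_map[0]), GFP_KERNEL);
 */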

struct nfp_prog;
struct nfp_insn_meta;
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);

#define nfp_prog_first_meta(nfp_prog)                                   \
        list_first_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_prog_last_meta(nfp_prog)                                    \
        list_last_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_meta_next(meta)     list_next_entry(meta, l)
#define nfp_meta_prev(meta)     list_prev_entry(meta, l)
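
/* Iteration sketch (illustrative): together with list_for_each_entry()
 * these accessors give the usual walk over the instruction wrappers,
 * e.g. running an instr_cb_t on everything not optimized out:
 *
 *      struct nfp_insn_meta *meta;
 *
 *      list_for_each_entry(meta, &nfp_prog->insns, l)
 *              if (!meta->skip)
 *                      err = cb(nfp_prog, meta);
 */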

/**
 * struct nfp_bpf_reg_state - register state for calls
 * @reg: BPF register state from latest path
 * @var_off: for stack arg - changes stack offset on different paths
 */
struct nfp_bpf_reg_state {
        struct bpf_reg_state reg;
        bool var_off;
};

#define FLAG_INSN_IS_JUMP_DST   BIT(0)

/**
 * struct nfp_insn_meta - BPF instruction wrapper
 * @insn: BPF instruction
 * @ptr: pointer type for memory operations
 * @ldst_gather_len: memcpy length gathered from load/store sequence
 * @paired_st: the paired store insn at the head of the sequence
 * @ptr_not_const: pointer is not always constant
 * @pkt_cache: packet data cache information
 * @pkt_cache.range_start: start offset for associated packet data cache
 * @pkt_cache.range_end: end offset for associated packet data cache
 * @pkt_cache.do_init: this read needs to initialize packet data cache
 * @xadd_over_16bit: 16bit immediate is not guaranteed
 * @xadd_maybe_16bit: 16bit immediate is possible
 * @jmp_dst: destination info for jump instructions
 * @func_id: function id for call instructions
 * @arg1: arg1 for call instructions
 * @arg2: arg2 for call instructions
 * @off: index of first generated machine instruction (in nfp_prog.prog)
 * @n: eBPF instruction number
 * @flags: eBPF instruction extra optimization flags
 * @skip: skip this instruction (optimized out)
 * @double_cb: callback for second part of the instruction
 * @l: link on nfp_prog->insns list
 */
struct nfp_insn_meta {
        struct bpf_insn insn;
        union {
                /* pointer ops (ld/st/xadd) */
                struct {
                        struct bpf_reg_state ptr;
                        struct bpf_insn *paired_st;
                        s16 ldst_gather_len;
                        bool ptr_not_const;
                        struct {
                                s16 range_start;
                                s16 range_end;
                                bool do_init;
                        } pkt_cache;
                        bool xadd_over_16bit;
                        bool xadd_maybe_16bit;
                };
                /* jump */
                struct nfp_insn_meta *jmp_dst;
                /* function calls */
                struct {
                        u32 func_id;
                        struct bpf_reg_state arg1;
                        struct nfp_bpf_reg_state arg2;
                };
        };
        unsigned int off;
        unsigned short n;
        unsigned short flags;
        bool skip;
        instr_cb_t double_cb;

        struct list_head l;
};
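
/* The union above is discriminated by instruction type (summary of the
 * intended access pattern; the is_mbpf_*() helpers below implement the
 * actual checks):
 *
 *      pointer ops (ld/st/xadd) -> @ptr, @paired_st, @pkt_cache, ...
 *      jumps                    -> @jmp_dst
 *      helper calls             -> @func_id, @arg1, @arg2
 */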

#define BPF_SIZE_MASK   0x18
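
/* Worked example: BPF_LDX | BPF_MEM | BPF_W is 0x61, and the size field
 * (BPF_W/BPF_H/BPF_B/BPF_DW = 0x00/0x08/0x10/0x18) occupies exactly the
 * bits in BPF_SIZE_MASK, so "code & ~BPF_SIZE_MASK" compares equal to
 * BPF_LDX | BPF_MEM for a load of any size.
 */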

static inline u8 mbpf_class(const struct nfp_insn_meta *meta)
{
        return BPF_CLASS(meta->insn.code);
}

static inline u8 mbpf_src(const struct nfp_insn_meta *meta)
{
        return BPF_SRC(meta->insn.code);
}

static inline u8 mbpf_op(const struct nfp_insn_meta *meta)
{
        return BPF_OP(meta->insn.code);
}

static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
{
        return BPF_MODE(meta->insn.code);
}

static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
{
        return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
}

static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
{
        return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
}

static inline bool is_mbpf_load_pkt(const struct nfp_insn_meta *meta)
{
        return is_mbpf_load(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_store_pkt(const struct nfp_insn_meta *meta)
{
        return is_mbpf_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_classic_load(const struct nfp_insn_meta *meta)
{
        u8 code = meta->insn.code;

        return BPF_CLASS(code) == BPF_LD &&
               (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND);
}

static inline bool is_mbpf_classic_store(const struct nfp_insn_meta *meta)
{
        u8 code = meta->insn.code;

        return BPF_CLASS(code) == BPF_ST && BPF_MODE(code) == BPF_MEM;
}

static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
{
        return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
{
        return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
}
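
/* Dispatch sketch (illustrative): verifier/JIT code is expected to use
 * these predicates rather than re-decode insn.code by hand, e.g.:
 *
 *      if (is_mbpf_store_pkt(meta) || is_mbpf_classic_store_pkt(meta))
 *              treat as a packet write;
 *      else if (is_mbpf_xadd(meta))
 *              check meta->xadd_over_16bit / meta->xadd_maybe_16bit;
 */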

/**
 * struct nfp_prog - nfp BPF program
 * @bpf: backpointer to the bpf app priv structure
 * @prog: machine code
 * @prog_len: number of valid instructions in @prog array
 * @__prog_alloc_len: alloc size of @prog array
 * @verifier_meta: temporary storage for verifier's insn meta
 * @type: BPF program type
 * @last_bpf_off: address of the last instruction translated from BPF
 * @tgt_out: jump target for normal exit
 * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
 * @n_translated: number of successfully translated instructions (for errors)
 * @error: error code if something went wrong
 * @stack_depth: max stack depth from the verifier
 * @adjust_head_location: if the program has a single adjust head call - the insn no.
 * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
 */
struct nfp_prog {
        struct nfp_app_bpf *bpf;

        u64 *prog;
        unsigned int prog_len;
        unsigned int __prog_alloc_len;

        struct nfp_insn_meta *verifier_meta;

        enum bpf_prog_type type;

        unsigned int last_bpf_off;
        unsigned int tgt_out;
        unsigned int tgt_abort;

        unsigned int n_translated;
        int error;

        unsigned int stack_depth;
        unsigned int adjust_head_location;

        struct list_head insns;
};
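
/* Error handling sketch (illustrative, assuming the pattern @error and
 * @n_translated suggest): emit helpers latch the first failure in @error
 * and become no-ops, so translation runs to completion and is checked
 * once at the end:
 *
 *      err = nfp_bpf_jit(nfp_prog);
 *      if (err)
 *              return err;
 */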

/**
 * struct nfp_bpf_vnic - per-vNIC BPF priv structure
 * @tc_prog:    currently loaded cls_bpf program
 * @start_off:  address of the first instruction in memory
 * @tgt_done:   jump target to get the next packet
 */
struct nfp_bpf_vnic {
        struct bpf_prog *tc_prog;
        unsigned int start_off;
        unsigned int tgt_done;
};

void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);

extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;

struct netdev_bpf;
struct nfp_app;
struct nfp_net;

int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
                struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
                        bool old_prog, struct netlink_ext_ack *extack);

struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
                  unsigned int insn_idx, unsigned int n_insns);

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);

long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
                                void *next_key);
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
                              void *key, void *value, u64 flags);
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
                              void *key, void *value);
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
                               void *key, void *next_key);
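
/* Wiring sketch (illustrative; the actual binding lives elsewhere in the
 * driver): the nfp_bpf_ctrl_*_entry() calls above carry the offloaded map
 * operations over the control message channel, e.g. a user space lookup
 * on an offloaded map would end up as:
 *
 *      err = nfp_bpf_ctrl_lookup_entry(offmap, key, value);
 */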

void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
#endif