linux/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
/*
 * Copyright (C) 2016-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>

#include "fw.h"
#include "main.h"

#define pr_vlog(env, fmt, ...)  \
        bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)

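/* Editorial note: instruction metadata lives on a doubly-linked list, but
 * the verifier hands us flat instruction indices.  This helper seeks from
 * the cached cursor to insn_idx along the cheapest route: forward or
 * backward from the current node, backward from the list tail
 * (n_insns - insn_idx - 1 steps), or forward from the head (insn_idx
 * steps).  forward and backward are unsigned, so whichever direction is
 * "wrong" underflows to a huge value and loses the min() comparisons.
 * E.g. with the cursor at insn 10 and insn_idx 12, forward is 2 and wins.
 */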
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
                  unsigned int insn_idx, unsigned int n_insns)
{
        unsigned int forward, backward, i;

        backward = meta->n - insn_idx;
        forward = insn_idx - meta->n;

        if (min(forward, backward) > n_insns - insn_idx - 1) {
                backward = n_insns - insn_idx - 1;
                meta = nfp_prog_last_meta(nfp_prog);
        }
        if (min(forward, backward) > insn_idx && backward > insn_idx) {
                forward = insn_idx;
                meta = nfp_prog_first_meta(nfp_prog);
        }

        if (forward < backward)
                for (i = 0; i < forward; i++)
                        meta = nfp_meta_next(meta);
        else
                for (i = 0; i < backward; i++)
                        meta = nfp_meta_prev(meta);

        return meta;
}

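/* Editorial note: records whether the program performs exactly one
 * bpf_xdp_adjust_head() by a constant within the FW-guaranteed range, so
 * the translator can drop the runtime checks for that call.  Any second
 * call site, differing constant, or out-of-range amount falls through to
 * exit_set_location with location still UINT_MAX, which disables the
 * optimization.  ETH_ZLEN - ETH_HLEN (46) bounds the shrink so the
 * minimum packet length is preserved without a runtime check.
 */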
static void
nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
                       struct nfp_insn_meta *meta,
                       const struct bpf_reg_state *reg2)
{
        unsigned int location = UINT_MAX;
        int imm;

        /* Datapath usually can give us guarantees on how much adjust head
         * can be done without the need for any checks.  Optimize the simple
         * case where there is only one adjust head by a constant.
         */
        if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off))
                goto exit_set_location;
        imm = reg2->var_off.value;
        /* Translator will skip all checks, we need to guarantee min pkt len */
        if (imm > ETH_ZLEN - ETH_HLEN)
                goto exit_set_location;
        if (imm > (int)bpf->adjust_head.guaranteed_add ||
            imm < -bpf->adjust_head.guaranteed_sub)
                goto exit_set_location;

        if (nfp_prog->adjust_head_location) {
                /* Only one call per program allowed */
                if (nfp_prog->adjust_head_location != meta->n)
                        goto exit_set_location;

                if (meta->arg2.reg.var_off.value != imm)
                        goto exit_set_location;
        }

        location = meta->n;
exit_set_location:
        nfp_prog->adjust_head_location = location;
}

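/* Editorial note: a stack pointer passed as a helper argument must be a
 * constant PTR_TO_STACK and 4-byte aligned (the datapath works on 32-bit
 * words).  When the same instruction is re-parsed on another verifier
 * path, old_arg->var_off additionally records whether the offset differed
 * between paths.
 */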
static bool
nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env,
                     const struct bpf_reg_state *reg,
                     struct nfp_bpf_reg_state *old_arg)
{
        s64 off, old_off;

        if (reg->type != PTR_TO_STACK) {
                pr_vlog(env, "%s: unsupported ptr type %d\n",
                        fname, reg->type);
                return false;
        }
        if (!tnum_is_const(reg->var_off)) {
                pr_vlog(env, "%s: variable pointer\n", fname);
                return false;
        }

        off = reg->var_off.value + reg->off;
        if (-off % 4) {
                pr_vlog(env, "%s: unaligned stack pointer %lld\n", fname, -off);
                return false;
        }

        /* Rest of the checks is only if we re-parse the same insn */
        if (!old_arg)
                return true;

        old_off = old_arg->reg.var_off.value + old_arg->reg.off;
        old_arg->var_off |= off != old_off;

        return true;
}

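/* Editorial note: a map helper is usable only if the FW advertises it
 * (helper_tgt is non-zero) and, when the instruction is re-parsed, only
 * if every verifier path calls it with the same map that was recorded
 * the first time around.
 */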
static bool
nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env,
                    struct nfp_insn_meta *meta,
                    u32 helper_tgt, const struct bpf_reg_state *reg1)
{
        if (!helper_tgt) {
                pr_vlog(env, "%s: not supported by FW\n", fname);
                return false;
        }

        /* Rest of the checks is only if we re-parse the same insn */
        if (!meta->func_id)
                return true;

        if (meta->arg1.map_ptr != reg1->map_ptr) {
                pr_vlog(env, "%s: called for different map\n", fname);
                return false;
        }

        return true;
}

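/* Editorial note: helper calls are screened against FW capabilities.
 * Per the BPF calling convention R1..R3 hold the first three helper
 * arguments, so reg1..reg3 below are the argument registers at the call
 * site.  On success the func_id and argument state are cached in meta
 * for the translation pass and for cross-path consistency checks.
 */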
static int
nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
                   struct nfp_insn_meta *meta)
{
        const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
        const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
        const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
        struct nfp_app_bpf *bpf = nfp_prog->bpf;
        u32 func_id = meta->insn.imm;

        switch (func_id) {
        case BPF_FUNC_xdp_adjust_head:
                if (!bpf->adjust_head.off_max) {
                        pr_vlog(env, "adjust_head not supported by FW\n");
                        return -EOPNOTSUPP;
                }
                if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
                        pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
                        return -EOPNOTSUPP;
                }

                nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
                break;

        case BPF_FUNC_map_lookup_elem:
                if (!nfp_bpf_map_call_ok("map_lookup", env, meta,
                                         bpf->helpers.map_lookup, reg1) ||
                    !nfp_bpf_stack_arg_ok("map_lookup", env, reg2,
                                          meta->func_id ? &meta->arg2 : NULL))
                        return -EOPNOTSUPP;
                break;

        case BPF_FUNC_map_update_elem:
                if (!nfp_bpf_map_call_ok("map_update", env, meta,
                                         bpf->helpers.map_update, reg1) ||
                    !nfp_bpf_stack_arg_ok("map_update", env, reg2,
                                          meta->func_id ? &meta->arg2 : NULL) ||
                    !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL))
                        return -EOPNOTSUPP;
                break;

        case BPF_FUNC_map_delete_elem:
                if (!nfp_bpf_map_call_ok("map_delete", env, meta,
                                         bpf->helpers.map_delete, reg1) ||
                    !nfp_bpf_stack_arg_ok("map_delete", env, reg2,
                                          meta->func_id ? &meta->arg2 : NULL))
                        return -EOPNOTSUPP;
                break;

        case BPF_FUNC_get_prandom_u32:
                if (bpf->pseudo_random)
                        break;
                pr_vlog(env, "bpf_get_prandom_u32(): FW doesn't support random number generation\n");
                return -EOPNOTSUPP;

        default:
                pr_vlog(env, "unsupported function id: %d\n", func_id);
                return -EOPNOTSUPP;
        }

        meta->func_id = func_id;
        meta->arg1 = *reg1;
        meta->arg2.reg = *reg2;

        return 0;
}

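/* Editorial note: XDP return codes need no checking here.  For TC
 * (BPF_PROG_TYPE_SCHED_CLS) the return value must be a verifier-known
 * constant, and constants in the standard action range (up to
 * TC_ACT_REDIRECT) are accepted only if they are TC_ACT_SHOT,
 * TC_ACT_STOLEN or TC_ACT_QUEUED -- presumably the set the offload
 * datapath can encode.
 */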
static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
                   struct bpf_verifier_env *env)
{
        const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0;
        u64 imm;

        if (nfp_prog->type == BPF_PROG_TYPE_XDP)
                return 0;

        if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) {
                char tn_buf[48];

                tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
                pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
                        reg0->type, tn_buf);
                return -EINVAL;
        }

        imm = reg0->var_off.value;
        if (nfp_prog->type == BPF_PROG_TYPE_SCHED_CLS &&
            imm <= TC_ACT_REDIRECT &&
            imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
            imm != TC_ACT_QUEUED) {
                pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
                        reg0->type, imm);
                return -EINVAL;
        }

        return 0;
}

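/* Editorial note: a stack load/store must use a constant pointer.  If
 * the same instruction is seen with different stack offsets on different
 * verifier paths it is still accepted, provided all offsets share the
 * same position within the 32-bit word (old_off % 4 == new_off % 4),
 * since the code generator only needs the sub-word alignment to be
 * stable.
 */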
static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
                           struct nfp_insn_meta *meta,
                           const struct bpf_reg_state *reg,
                           struct bpf_verifier_env *env)
{
        s32 old_off, new_off;

        if (!tnum_is_const(reg->var_off)) {
                pr_vlog(env, "variable ptr stack access\n");
                return -EINVAL;
        }

        if (meta->ptr.type == NOT_INIT)
                return 0;

        old_off = meta->ptr.off + meta->ptr.var_off.value;
        new_off = reg->off + reg->var_off.value;

        meta->ptr_not_const |= old_off != new_off;

        if (!meta->ptr_not_const)
                return 0;

        if (old_off % 4 == new_off % 4)
                return 0;

        pr_vlog(env, "stack access changed location was:%d is:%d\n",
                old_off, new_off);
        return -EINVAL;
}

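/* Editorial note: each offloaded map tracks, per 32-bit word of its
 * value, how the program uses that word (use_map[]): read, write, or
 * atomic counter.  Mixing uses of the same word is refused; the names
 * below exist only for diagnostics.
 */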
static const char *nfp_bpf_map_use_name(enum nfp_bpf_map_use use)
{
        static const char * const names[] = {
                [NFP_MAP_UNUSED]        = "unused",
                [NFP_MAP_USE_READ]      = "read",
                [NFP_MAP_USE_WRITE]     = "write",
                [NFP_MAP_USE_ATOMIC_CNT] = "atomic",
        };

        if (use >= ARRAY_SIZE(names) || !names[use])
                return "unknown";
        return names[use];
}

static int
nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env,
                          struct nfp_bpf_map *nfp_map,
                          unsigned int off, enum nfp_bpf_map_use use)
{
        if (nfp_map->use_map[off / 4] != NFP_MAP_UNUSED &&
            nfp_map->use_map[off / 4] != use) {
                pr_vlog(env, "map value use type conflict %s vs %s off: %u\n",
                        nfp_bpf_map_use_name(nfp_map->use_map[off / 4]),
                        nfp_bpf_map_use_name(use), off);
                return -EOPNOTSUPP;
        }

        nfp_map->use_map[off / 4] = use;

        return 0;
}

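/* Editorial note: marks every value word touched by a map access.  The
 * loop step 4 - (off + i) % 4 advances to the next word boundary, so an
 * unaligned access marks each word it overlaps exactly once: e.g.
 * off = 2, size = 6 visits i = 0 (word 0) and i = 2 (word 1).
 */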
static int
nfp_bpf_map_mark_used(struct bpf_verifier_env *env, struct nfp_insn_meta *meta,
                      const struct bpf_reg_state *reg,
                      enum nfp_bpf_map_use use)
{
        struct bpf_offloaded_map *offmap;
        struct nfp_bpf_map *nfp_map;
        unsigned int size, off;
        int i, err;

        if (!tnum_is_const(reg->var_off)) {
                pr_vlog(env, "map value offset is variable\n");
                return -EOPNOTSUPP;
        }

        off = reg->var_off.value + meta->insn.off + reg->off;
        size = BPF_LDST_BYTES(&meta->insn);
        offmap = map_to_offmap(reg->map_ptr);
        nfp_map = offmap->dev_priv;

        if (off + size > offmap->map.value_size) {
                pr_vlog(env, "map value access out-of-bounds\n");
                return -EINVAL;
        }

        for (i = 0; i < size; i += 4 - (off + i) % 4) {
                err = nfp_bpf_map_mark_used_one(env, nfp_map, off + i, use);
                if (err)
                        return err;
        }

        return 0;
}

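/* Editorial note: loads and stores are accepted only through pointer
 * types the JIT understands (ctx, stack, map value, packet).  Map reads
 * and atomic adds mark the value words they touch; map writes are
 * rejected outright.  The pointer type must also stay identical every
 * time the verifier revisits the instruction, since translation picks
 * the memory target from the type recorded in meta->ptr.
 */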
static int
nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
                  struct bpf_verifier_env *env, u8 reg_no)
{
        const struct bpf_reg_state *reg = cur_regs(env) + reg_no;
        int err;

        if (reg->type != PTR_TO_CTX &&
            reg->type != PTR_TO_STACK &&
            reg->type != PTR_TO_MAP_VALUE &&
            reg->type != PTR_TO_PACKET) {
                pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
                return -EINVAL;
        }

        if (reg->type == PTR_TO_STACK) {
                err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
                if (err)
                        return err;
        }

        if (reg->type == PTR_TO_MAP_VALUE) {
                if (is_mbpf_load(meta)) {
                        err = nfp_bpf_map_mark_used(env, meta, reg,
                                                    NFP_MAP_USE_READ);
                        if (err)
                                return err;
                }
                if (is_mbpf_store(meta)) {
                        pr_vlog(env, "map writes not supported\n");
                        return -EOPNOTSUPP;
                }
                if (is_mbpf_xadd(meta)) {
                        err = nfp_bpf_map_mark_used(env, meta, reg,
                                                    NFP_MAP_USE_ATOMIC_CNT);
                        if (err)
                                return err;
                }
        }

        if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
                pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
                        meta->ptr.type, reg->type);
                return -EINVAL;
        }

        meta->ptr = *reg;

        return 0;
}

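/* Editorial note: BPF_XADD is offloaded only as an atomic add of a
 * scalar to a map value.  The source operand's tnum tracks known bits
 * in var_off.value and unknown bits in var_off.mask, so:
 *   - the addend may exceed 16 bits if a known or unknown bit above
 *     bit 15 is set (xadd_over_16bit);
 *   - it may still fit in 16 bits if no known bit above bit 15 is set
 *     (xadd_maybe_16bit).
 * Both flags can end up set for an unknown value, in which case the JIT
 * presumably has to emit code for either width.
 */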
static int
nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
                   struct bpf_verifier_env *env)
{
        const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
        const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;

        if (dreg->type != PTR_TO_MAP_VALUE) {
                pr_vlog(env, "atomic add not to a map value pointer: %d\n",
                        dreg->type);
                return -EOPNOTSUPP;
        }
        if (sreg->type != SCALAR_VALUE) {
                pr_vlog(env, "atomic add not of a scalar: %d\n", sreg->type);
                return -EOPNOTSUPP;
        }

        meta->xadd_over_16bit |=
                sreg->var_off.value > 0xffff || sreg->var_off.mask > 0xffff;
        meta->xadd_maybe_16bit |=
                (sreg->var_off.value & ~sreg->var_off.mask) <= 0xffff;

        return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
}

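/* Editorial note: per-instruction hook called by the core verifier
 * through nfp_bpf_analyzer_ops.  The meta cursor from the previous call
 * is cached in nfp_prog->verifier_meta, so consecutive lookups are
 * usually a short walk rather than a scan from the list head.
 */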
static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
        struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
        struct nfp_insn_meta *meta = nfp_prog->verifier_meta;

        meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
        nfp_prog->verifier_meta = meta;

        if (!nfp_bpf_supported_opcode(meta->insn.code)) {
                pr_vlog(env, "instruction %#02x not supported\n",
                        meta->insn.code);
                return -EINVAL;
        }

        if (meta->insn.src_reg >= MAX_BPF_REG ||
            meta->insn.dst_reg >= MAX_BPF_REG) {
                pr_vlog(env, "program uses extended registers - jit hardening?\n");
                return -EINVAL;
        }

        if (meta->insn.code == (BPF_JMP | BPF_CALL))
                return nfp_bpf_check_call(nfp_prog, env, meta);
        if (meta->insn.code == (BPF_JMP | BPF_EXIT))
                return nfp_bpf_check_exit(nfp_prog, env);

        if (is_mbpf_load(meta))
                return nfp_bpf_check_ptr(nfp_prog, meta, env,
                                         meta->insn.src_reg);
        if (is_mbpf_store(meta))
                return nfp_bpf_check_ptr(nfp_prog, meta, env,
                                         meta->insn.dst_reg);
        if (is_mbpf_xadd(meta))
                return nfp_bpf_check_xadd(nfp_prog, meta, env);

        return 0;
}

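/* Editorial note: the core verifier calls insn_hook for every
 * instruction it walks on an offloaded program, which is how all of the
 * checks above run.
 */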
const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
        .insn_hook = nfp_verify_insn,
};