linux/tools/testing/selftests/bpf/test_verifier.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <limits.h>
#include <assert.h>

#include <sys/capability.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/btf.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "bpf_rlimit.h"
#include "bpf_rand.h"
#include "bpf_util.h"
#include "test_btf.h"
#include "../../../include/linux/filter.h"

#define MAX_INSNS       BPF_MAXINSNS
#define MAX_TEST_INSNS  1000000
#define MAX_FIXUPS      8
#define MAX_NR_MAPS     20
#define MAX_TEST_RUNS   8
#define POINTER_VALUE   0xcafe4all
#define TEST_DATA_LEN   64

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS      (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT            (1 << 1)

#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
static int skips;
static bool verbose = false;

struct bpf_test {
        const char *descr;
        struct bpf_insn insns[MAX_INSNS];
        struct bpf_insn *fill_insns;
        int fixup_map_hash_8b[MAX_FIXUPS];
        int fixup_map_hash_48b[MAX_FIXUPS];
        int fixup_map_hash_16b[MAX_FIXUPS];
        int fixup_map_array_48b[MAX_FIXUPS];
        int fixup_map_sockmap[MAX_FIXUPS];
        int fixup_map_sockhash[MAX_FIXUPS];
        int fixup_map_xskmap[MAX_FIXUPS];
        int fixup_map_stacktrace[MAX_FIXUPS];
        int fixup_prog1[MAX_FIXUPS];
        int fixup_prog2[MAX_FIXUPS];
        int fixup_map_in_map[MAX_FIXUPS];
        int fixup_cgroup_storage[MAX_FIXUPS];
        int fixup_percpu_cgroup_storage[MAX_FIXUPS];
        int fixup_map_spin_lock[MAX_FIXUPS];
        int fixup_map_array_ro[MAX_FIXUPS];
        int fixup_map_array_wo[MAX_FIXUPS];
        int fixup_map_array_small[MAX_FIXUPS];
        int fixup_sk_storage_map[MAX_FIXUPS];
        int fixup_map_event_output[MAX_FIXUPS];
        int fixup_map_reuseport_array[MAX_FIXUPS];
        const char *errstr;
        const char *errstr_unpriv;
        uint32_t insn_processed;
        int prog_len;
        enum {
                UNDEF,
                ACCEPT,
                REJECT,
                VERBOSE_ACCEPT,
        } result, result_unpriv;
        enum bpf_prog_type prog_type;
        uint8_t flags;
        void (*fill_helper)(struct bpf_test *self);
        uint8_t runs;
#define bpf_testdata_struct_t                                   \
        struct {                                                \
                uint32_t retval, retval_unpriv;                 \
                union {                                         \
                        __u8 data[TEST_DATA_LEN];               \
                        __u64 data64[TEST_DATA_LEN / 8];        \
                };                                              \
        }
        union {
                bpf_testdata_struct_t;
                bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
        };
        enum bpf_attach_type expected_attach_type;
};
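
/* The tests themselves are pulled into the tests[] array further below
 * from verifier/*.c. A minimal (hypothetical) entry looks roughly like
 * this; the field names match struct bpf_test above, the values are
 * illustrative only:
 *
 *	{
 *		"mov64 + exit",
 *		.insns = {
 *		BPF_MOV64_IMM(BPF_REG_0, 1),
 *		BPF_EXIT_INSN(),
 *		},
 *		.result = ACCEPT,
 *		.retval = 1,
 *	},
 */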

/* Note we want this to be 64-bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
        unsigned int index;
        int foo[MAX_ENTRIES];
};

struct other_val {
        long long foo;
        long long bar;
};

static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{
        /* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
#define PUSH_CNT 51
        /* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */
        unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
        struct bpf_insn *insn = self->fill_insns;
        int i = 0, j, k = 0;

        insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
loop:
        for (j = 0; j < PUSH_CNT; j++) {
                insn[i++] = BPF_LD_ABS(BPF_B, 0);
                /* jump to error label */
                insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
                i++;
                insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
                insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
                insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
                insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                         BPF_FUNC_skb_vlan_push);
                insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
                i++;
        }

        for (j = 0; j < PUSH_CNT; j++) {
                insn[i++] = BPF_LD_ABS(BPF_B, 0);
                insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
                i++;
                insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
                insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                         BPF_FUNC_skb_vlan_pop);
                insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
                i++;
        }
        if (++k < 5)
                goto loop;

        for (; i < len - 3; i++)
                insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef);
        insn[len - 3] = BPF_JMP_A(1);
        /* error label */
        insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0);
        insn[len - 1] = BPF_EXIT_INSN();
        self->prog_len = len;
}
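
/* A fill helper is hooked up through a test's .fill_helper member; the
 * helper above would be driven by an entry of roughly this shape
 * (hypothetical, values chosen to match the generated program):
 *
 *	{
 *		"ld_abs: vlan + abs, test 1",
 *		.insns = { },
 *		.data = { 0x34 },
 *		.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
 *		.result = ACCEPT,
 *		.retval = 0xbef,
 *	},
 */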

static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
        struct bpf_insn *insn = self->fill_insns;
        /* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns,
         * but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted
         * to extend the error value of the inlined ld_abs sequence which then
         * contains 7 insns. so, set the divisor to 7 so the testcase can
         * work on all arches.
         */
        unsigned int len = (1 << 15) / 7;
        int i = 0;

        insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
        insn[i++] = BPF_LD_ABS(BPF_B, 0);
        insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
        i++;
        while (i < len - 1)
                insn[i++] = BPF_LD_ABS(BPF_B, 1);
        insn[i] = BPF_EXIT_INSN();
        self->prog_len = i + 1;
}

static void bpf_fill_rand_ld_dw(struct bpf_test *self)
{
        struct bpf_insn *insn = self->fill_insns;
        uint64_t res = 0;
        int i = 0;

        insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
        while (i < self->retval) {
                uint64_t val = bpf_semi_rand_get();
                struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };

                res ^= val;
                insn[i++] = tmp[0];
                insn[i++] = tmp[1];
                insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
        }
        insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
        insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
        insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
        insn[i] = BPF_EXIT_INSN();
        self->prog_len = i + 1;
        res ^= (res >> 32);
        self->retval = (uint32_t)res;
}
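
/* Note the dual use of self->retval in bpf_fill_rand_ld_dw(): on entry it
 * holds the number of instructions to emit, on exit it is overwritten with
 * the expected 32-bit XOR checksum. A driving entry might look like this
 * (hypothetical, values illustrative):
 *
 *	{
 *		"ld_dw: xor semi-random 64-bit imms",
 *		.insns = { },
 *		.fill_helper = bpf_fill_rand_ld_dw,
 *		.retval = 4090,
 *		.result = ACCEPT,
 *	},
 */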

#define MAX_JMP_SEQ 8192

/* test the sequence of 8k jumps */
static void bpf_fill_scale1(struct bpf_test *self)
{
        struct bpf_insn *insn = self->fill_insns;
        int i = 0, k = 0;

        insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
        /* test to check that the long sequence of jumps is acceptable */
        while (k++ < MAX_JMP_SEQ) {
                insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                         BPF_FUNC_get_prandom_u32);
                insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
                insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
                insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
                                        -8 * (k % 64 + 1));
        }
        /* is_state_visited() doesn't allocate state for pruning for every jump.
         * Hence multiply jmps by 4 to accommodate that heuristic
         */
        while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
                insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
        insn[i] = BPF_EXIT_INSN();
        self->prog_len = i + 1;
        self->retval = 42;
}

/* test the sequence of 8k jumps in innermost function (function depth 8) */
static void bpf_fill_scale2(struct bpf_test *self)
{
        struct bpf_insn *insn = self->fill_insns;
        int i = 0, k = 0;

#define FUNC_NEST 7
        for (k = 0; k < FUNC_NEST; k++) {
                insn[i++] = BPF_CALL_REL(1);
                insn[i++] = BPF_EXIT_INSN();
        }
        insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
        /* test to check that the long sequence of jumps is acceptable */
        k = 0;
        while (k++ < MAX_JMP_SEQ) {
                insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                         BPF_FUNC_get_prandom_u32);
                insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
                insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
                insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
                                        -8 * (k % (64 - 4 * FUNC_NEST) + 1));
        }
        while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
                insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
        insn[i] = BPF_EXIT_INSN();
        self->prog_len = i + 1;
        self->retval = 42;
}

static void bpf_fill_scale(struct bpf_test *self)
{
        switch (self->retval) {
        case 1:
                return bpf_fill_scale1(self);
        case 2:
                return bpf_fill_scale2(self);
        default:
                self->prog_len = 0;
                break;
        }
}

/* BPF_SK_LOOKUP contains 13 instructions; account for them when computing
 * map fixup offsets in tests that use it.
 */
#define BPF_SK_LOOKUP(func)                                             \
        /* struct bpf_sock_tuple tuple = {} */                          \
        BPF_MOV64_IMM(BPF_REG_2, 0),                                    \
        BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),                  \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),                \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),                \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),                \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),                \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),                \
        /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */                \
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),                           \
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),                         \
        BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),        \
        BPF_MOV64_IMM(BPF_REG_4, 0),                                    \
        BPF_MOV64_IMM(BPF_REG_5, 0),                                    \
        BPF_EMIT_CALL(BPF_FUNC_ ## func)
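
/* Hypothetical use in a test's .insns: since BPF_SK_LOOKUP expands to 13
 * instructions (indices 0-12), an instruction placed right after it sits
 * at index 13, which any fixup offsets must account for:
 *
 *	.insns = {
 *	BPF_SK_LOOKUP(sk_lookup_tcp),
 *	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),	// insn index 13
 *	...
 *	},
 */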

/* BPF_DIRECT_PKT_R2 contains 7 instructions, it initializes the default
 * return value to 0 and does the necessary preparation for direct packet
 * access through r2. The allowed access range is 8 bytes.
 */
#define BPF_DIRECT_PKT_R2                                               \
        BPF_MOV64_IMM(BPF_REG_0, 0),                                    \
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,                        \
                    offsetof(struct __sk_buff, data)),                  \
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,                        \
                    offsetof(struct __sk_buff, data_end)),              \
        BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),                            \
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),                           \
        BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),                  \
        BPF_EXIT_INSN()

/* BPF_RAND_UEXT_R7 contains 4 instructions, it initializes R7 to a random
 * positive u32 and zero-extends it to 64 bits.
 */
#define BPF_RAND_UEXT_R7                                                \
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,                       \
                     BPF_FUNC_get_prandom_u32),                         \
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),                            \
        BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33),                          \
        BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)

/* BPF_RAND_SEXT_R7 contains 5 instructions, it initializes R7 to a random
 * negative u32 and sign-extends it to 64 bits.
 */
#define BPF_RAND_SEXT_R7                                                \
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,                       \
                     BPF_FUNC_get_prandom_u32),                         \
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),                            \
        BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000),                   \
        BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32),                          \
        BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
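
/* Worked example for the two macros above, assuming bpf_get_prandom_u32()
 * returned 0xdeadbeef:
 *
 *   UEXT: 0xdeadbeef << 33, then logical >> 33   = 0x5eadbeef (bit 31 cleared)
 *   SEXT: (0xdeadbeef | 0x80000000) << 32,
 *         then arithmetic >> 32                  = 0xffffffffdeadbeef
 */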

static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};

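/* Find the length of a test's static insns[] by scanning backwards for the
 * last non-zero instruction.
 */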
static int probe_filter_length(const struct bpf_insn *fp)
{
        int len;

        for (len = MAX_INSNS - 1; len > 0; --len)
                if (fp[len].code != 0 || fp[len].imm != 0)
                        break;
        return len + 1;
}

static bool skip_unsupported_map(enum bpf_map_type map_type)
{
        if (!bpf_probe_map_type(map_type, 0)) {
                printf("SKIP (unsupported map type %d)\n", map_type);
                skips++;
                return true;
        }
        return false;
}

static int __create_map(uint32_t type, uint32_t size_key,
                        uint32_t size_value, uint32_t max_elem,
                        uint32_t extra_flags)
{
        int fd;

        fd = bpf_create_map(type, size_key, size_value, max_elem,
                            (type == BPF_MAP_TYPE_HASH ?
                             BPF_F_NO_PREALLOC : 0) | extra_flags);
        if (fd < 0) {
                if (skip_unsupported_map(type))
                        return -1;
                printf("Failed to create map '%s'!\n", strerror(errno));
        }

        return fd;
}

static int create_map(uint32_t type, uint32_t size_key,
                      uint32_t size_value, uint32_t max_elem)
{
        return __create_map(type, size_key, size_value, max_elem, 0);
}

static void update_map(int fd, int index)
{
        struct test_val value = {
                .index = (6 + 1) * sizeof(int),
                .foo[6] = 0xabcdef12,
        };

        assert(!bpf_map_update_elem(fd, &index, &value, 0));
}

static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret)
{
        struct bpf_insn prog[] = {
                BPF_MOV64_IMM(BPF_REG_0, ret),
                BPF_EXIT_INSN(),
        };

        return bpf_load_program(prog_type, prog,
                                ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}

static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd,
                                  int idx, int ret)
{
        struct bpf_insn prog[] = {
                BPF_MOV64_IMM(BPF_REG_3, idx),
                BPF_LD_MAP_FD(BPF_REG_2, mfd),
                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                             BPF_FUNC_tail_call),
                BPF_MOV64_IMM(BPF_REG_0, ret),
                BPF_EXIT_INSN(),
        };

        return bpf_load_program(prog_type, prog,
                                ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}

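/* Build a BPF_MAP_TYPE_PROG_ARRAY populated with three dummy programs at
 * p1key/p2key/p3key. The program installed at p2key tail-calls its own slot
 * in the array, giving tests a ready-made tail-call target.
 */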
static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
                             int p1key, int p2key, int p3key)
{
        int mfd, p1fd, p2fd, p3fd;

        mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
                             sizeof(int), max_elem, 0);
        if (mfd < 0) {
                if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
                        return -1;
                printf("Failed to create prog array '%s'!\n", strerror(errno));
                return -1;
        }

        p1fd = create_prog_dummy_simple(prog_type, 42);
        p2fd = create_prog_dummy_loop(prog_type, mfd, p2key, 41);
        p3fd = create_prog_dummy_simple(prog_type, 24);
        if (p1fd < 0 || p2fd < 0 || p3fd < 0)
                goto err;
        if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
                goto err;
        if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
                goto err;
        if (bpf_map_update_elem(mfd, &p3key, &p3fd, BPF_ANY) < 0) {
err:
                close(mfd);
                mfd = -1;
        }
        close(p3fd);
        close(p2fd);
        close(p1fd);
        return mfd;
}

static int create_map_in_map(void)
{
        int inner_map_fd, outer_map_fd;

        inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                                      sizeof(int), 1, 0);
        if (inner_map_fd < 0) {
                if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
                        return -1;
                printf("Failed to create array '%s'!\n", strerror(errno));
                return inner_map_fd;
        }

        outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
                                             sizeof(int), inner_map_fd, 1, 0);
        if (outer_map_fd < 0) {
                if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
                        return -1;
                printf("Failed to create array of maps '%s'!\n",
                       strerror(errno));
        }

        close(inner_map_fd);

        return outer_map_fd;
}

static int create_cgroup_storage(bool percpu)
{
        enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
                BPF_MAP_TYPE_CGROUP_STORAGE;
        int fd;

        fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
                            TEST_DATA_LEN, 0, 0);
        if (fd < 0) {
                if (skip_unsupported_map(type))
                        return -1;
                printf("Failed to create cgroup storage '%s'!\n",
                       strerror(errno));
        }

        return fd;
}

/* struct bpf_spin_lock {
 *   int val;
 * };
 * struct val {
 *   int cnt;
 *   struct bpf_spin_lock l;
 * };
 */
static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
static __u32 btf_raw_types[] = {
        /* int */
        BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
        /* struct bpf_spin_lock */                      /* [2] */
        BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
        BTF_MEMBER_ENC(15, 1, 0), /* int val; */
        /* struct val */                                /* [3] */
        BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
        BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
        BTF_MEMBER_ENC(23, 2, 32), /* struct bpf_spin_lock l; */
};

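/* Build the raw BTF blob (header, then type section, then string section)
 * for the two structs above and load it with bpf_load_btf().
 */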
static int load_btf(void)
{
        struct btf_header hdr = {
                .magic = BTF_MAGIC,
                .version = BTF_VERSION,
                .hdr_len = sizeof(struct btf_header),
                .type_len = sizeof(btf_raw_types),
                .str_off = sizeof(btf_raw_types),
                .str_len = sizeof(btf_str_sec),
        };
        void *ptr, *raw_btf;
        int btf_fd;

        ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
                               sizeof(btf_str_sec));
        if (!raw_btf)
                return -1;

        memcpy(ptr, &hdr, sizeof(hdr));
        ptr += sizeof(hdr);
        memcpy(ptr, btf_raw_types, hdr.type_len);
        ptr += hdr.type_len;
        memcpy(ptr, btf_str_sec, hdr.str_len);
        ptr += hdr.str_len;

        btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0);
        free(raw_btf);
        if (btf_fd < 0)
                return -1;
        return btf_fd;
}

static int create_map_spin_lock(void)
{
        struct bpf_create_map_attr attr = {
                .name = "test_map",
                .map_type = BPF_MAP_TYPE_ARRAY,
                .key_size = 4,
                .value_size = 8,
                .max_entries = 1,
                .btf_key_type_id = 1,
                .btf_value_type_id = 3,
        };
        int fd, btf_fd;

        btf_fd = load_btf();
        if (btf_fd < 0)
                return -1;
        attr.btf_fd = btf_fd;
        fd = bpf_create_map_xattr(&attr);
        if (fd < 0)
                printf("Failed to create map with spin_lock\n");
        return fd;
}

static int create_sk_storage_map(void)
{
        struct bpf_create_map_attr attr = {
                .name = "test_map",
                .map_type = BPF_MAP_TYPE_SK_STORAGE,
                .key_size = 4,
                .value_size = 8,
                .max_entries = 0,
                .map_flags = BPF_F_NO_PREALLOC,
                .btf_key_type_id = 1,
                .btf_value_type_id = 3,
        };
        int fd, btf_fd;

        btf_fd = load_btf();
        if (btf_fd < 0)
                return -1;
        attr.btf_fd = btf_fd;
        fd = bpf_create_map_xattr(&attr);
        close(attr.btf_fd);
        if (fd < 0)
                printf("Failed to create sk_storage_map\n");
        return fd;
}

static char bpf_vlog[UINT_MAX >> 8];

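/* Patch up a test's instructions before loading: each fixup_* array lists
 * the instruction indices whose imm field must be rewritten with the fd of
 * a freshly created map. The arrays are zero-terminated, which is why
 * instruction index 0 can never be a fixup target.
 */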
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
                          struct bpf_insn *prog, int *map_fds)
{
        int *fixup_map_hash_8b = test->fixup_map_hash_8b;
        int *fixup_map_hash_48b = test->fixup_map_hash_48b;
        int *fixup_map_hash_16b = test->fixup_map_hash_16b;
        int *fixup_map_array_48b = test->fixup_map_array_48b;
        int *fixup_map_sockmap = test->fixup_map_sockmap;
        int *fixup_map_sockhash = test->fixup_map_sockhash;
        int *fixup_map_xskmap = test->fixup_map_xskmap;
        int *fixup_map_stacktrace = test->fixup_map_stacktrace;
        int *fixup_prog1 = test->fixup_prog1;
        int *fixup_prog2 = test->fixup_prog2;
        int *fixup_map_in_map = test->fixup_map_in_map;
        int *fixup_cgroup_storage = test->fixup_cgroup_storage;
        int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
        int *fixup_map_spin_lock = test->fixup_map_spin_lock;
        int *fixup_map_array_ro = test->fixup_map_array_ro;
        int *fixup_map_array_wo = test->fixup_map_array_wo;
        int *fixup_map_array_small = test->fixup_map_array_small;
        int *fixup_sk_storage_map = test->fixup_sk_storage_map;
        int *fixup_map_event_output = test->fixup_map_event_output;
        int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;

        if (test->fill_helper) {
                test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
                test->fill_helper(test);
        }

        /* Allocating HTs with 1 elem is fine here, since we only exercise
         * the verifier and never do a runtime lookup, so the only thing
         * that really matters is the value size in this case.
         */
        if (*fixup_map_hash_8b) {
                map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
                                        sizeof(long long), 1);
                do {
                        prog[*fixup_map_hash_8b].imm = map_fds[0];
                        fixup_map_hash_8b++;
                } while (*fixup_map_hash_8b);
        }

        if (*fixup_map_hash_48b) {
                map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
                                        sizeof(struct test_val), 1);
                do {
                        prog[*fixup_map_hash_48b].imm = map_fds[1];
                        fixup_map_hash_48b++;
                } while (*fixup_map_hash_48b);
        }

        if (*fixup_map_hash_16b) {
                map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
                                        sizeof(struct other_val), 1);
                do {
                        prog[*fixup_map_hash_16b].imm = map_fds[2];
                        fixup_map_hash_16b++;
                } while (*fixup_map_hash_16b);
        }

        if (*fixup_map_array_48b) {
                map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                                        sizeof(struct test_val), 1);
                update_map(map_fds[3], 0);
                do {
                        prog[*fixup_map_array_48b].imm = map_fds[3];
                        fixup_map_array_48b++;
                } while (*fixup_map_array_48b);
        }

        if (*fixup_prog1) {
                map_fds[4] = create_prog_array(prog_type, 4, 0, 1, 2);
                do {
                        prog[*fixup_prog1].imm = map_fds[4];
                        fixup_prog1++;
                } while (*fixup_prog1);
        }

        if (*fixup_prog2) {
                map_fds[5] = create_prog_array(prog_type, 8, 7, 1, 2);
                do {
                        prog[*fixup_prog2].imm = map_fds[5];
                        fixup_prog2++;
                } while (*fixup_prog2);
        }

        if (*fixup_map_in_map) {
                map_fds[6] = create_map_in_map();
                do {
                        prog[*fixup_map_in_map].imm = map_fds[6];
                        fixup_map_in_map++;
                } while (*fixup_map_in_map);
        }

        if (*fixup_cgroup_storage) {
                map_fds[7] = create_cgroup_storage(false);
                do {
                        prog[*fixup_cgroup_storage].imm = map_fds[7];
                        fixup_cgroup_storage++;
                } while (*fixup_cgroup_storage);
        }

        if (*fixup_percpu_cgroup_storage) {
                map_fds[8] = create_cgroup_storage(true);
                do {
                        prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
                        fixup_percpu_cgroup_storage++;
                } while (*fixup_percpu_cgroup_storage);
        }
        if (*fixup_map_sockmap) {
                map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
                                        sizeof(int), 1);
                do {
                        prog[*fixup_map_sockmap].imm = map_fds[9];
                        fixup_map_sockmap++;
                } while (*fixup_map_sockmap);
        }
        if (*fixup_map_sockhash) {
                map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
                                         sizeof(int), 1);
                do {
                        prog[*fixup_map_sockhash].imm = map_fds[10];
                        fixup_map_sockhash++;
                } while (*fixup_map_sockhash);
        }
        if (*fixup_map_xskmap) {
                map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
                                         sizeof(int), 1);
                do {
                        prog[*fixup_map_xskmap].imm = map_fds[11];
                        fixup_map_xskmap++;
                } while (*fixup_map_xskmap);
        }
        if (*fixup_map_stacktrace) {
                map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
                                         sizeof(u64), 1);
                do {
                        prog[*fixup_map_stacktrace].imm = map_fds[12];
                        fixup_map_stacktrace++;
                } while (*fixup_map_stacktrace);
        }
        if (*fixup_map_spin_lock) {
                map_fds[13] = create_map_spin_lock();
                do {
                        prog[*fixup_map_spin_lock].imm = map_fds[13];
                        fixup_map_spin_lock++;
                } while (*fixup_map_spin_lock);
        }
        if (*fixup_map_array_ro) {
                map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                                           sizeof(struct test_val), 1,
                                           BPF_F_RDONLY_PROG);
                update_map(map_fds[14], 0);
                do {
                        prog[*fixup_map_array_ro].imm = map_fds[14];
                        fixup_map_array_ro++;
                } while (*fixup_map_array_ro);
        }
        if (*fixup_map_array_wo) {
                map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                                           sizeof(struct test_val), 1,
                                           BPF_F_WRONLY_PROG);
                update_map(map_fds[15], 0);
                do {
                        prog[*fixup_map_array_wo].imm = map_fds[15];
                        fixup_map_array_wo++;
                } while (*fixup_map_array_wo);
        }
        if (*fixup_map_array_small) {
                map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                                           1, 1, 0);
                update_map(map_fds[16], 0);
                do {
                        prog[*fixup_map_array_small].imm = map_fds[16];
                        fixup_map_array_small++;
                } while (*fixup_map_array_small);
        }
        if (*fixup_sk_storage_map) {
                map_fds[17] = create_sk_storage_map();
                do {
                        prog[*fixup_sk_storage_map].imm = map_fds[17];
                        fixup_sk_storage_map++;
                } while (*fixup_sk_storage_map);
        }
        if (*fixup_map_event_output) {
                map_fds[18] = __create_map(BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                                           sizeof(int), sizeof(int), 1, 0);
                do {
                        prog[*fixup_map_event_output].imm = map_fds[18];
                        fixup_map_event_output++;
                } while (*fixup_map_event_output);
        }
        if (*fixup_map_reuseport_array) {
                map_fds[19] = __create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
                                           sizeof(u32), sizeof(u64), 1, 0);
                do {
                        prog[*fixup_map_reuseport_array].imm = map_fds[19];
                        fixup_map_reuseport_array++;
                } while (*fixup_map_reuseport_array);
        }
}

struct libcap {
        struct __user_cap_header_struct hdr;
        struct __user_cap_data_struct data[2];
};

static int set_admin(bool admin)
{
        cap_t caps;
        /* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */
        const cap_value_t cap_net_admin = CAP_NET_ADMIN;
        const cap_value_t cap_sys_admin = CAP_SYS_ADMIN;
        struct libcap *cap;
        int ret = -1;

        caps = cap_get_proc();
        if (!caps) {
                perror("cap_get_proc");
                return -1;
        }
        cap = (struct libcap *)caps;
        if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_sys_admin, CAP_CLEAR)) {
                perror("cap_set_flag clear admin");
                goto out;
        }
        if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_net_admin,
                                admin ? CAP_SET : CAP_CLEAR)) {
                perror("cap_set_flag set_or_clear net");
                goto out;
        }
        /* libcap is likely old and simply ignores CAP_BPF and CAP_PERFMON,
         * so update the effective bits manually
         */
        if (admin) {
                cap->data[1].effective |= 1 << (38 /* CAP_PERFMON */ - 32);
                cap->data[1].effective |= 1 << (39 /* CAP_BPF */ - 32);
        } else {
                cap->data[1].effective &= ~(1 << (38 - 32));
                cap->data[1].effective &= ~(1 << (39 - 32));
        }
        if (cap_set_proc(caps)) {
                perror("cap_set_proc");
                goto out;
        }
        ret = 0;
out:
        if (cap_free(caps))
                perror("cap_free");
        return ret;
}

static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
                            void *data, size_t size_data)
{
        __u8 tmp[TEST_DATA_LEN << 2];
        __u32 size_tmp = sizeof(tmp);
        uint32_t retval;
        int err, saved_errno;

        if (unpriv)
                set_admin(true);
        err = bpf_prog_test_run(fd_prog, 1, data, size_data,
                                tmp, &size_tmp, &retval, NULL);
        /* save errno before set_admin() can clobber it */
        saved_errno = errno;
        if (unpriv)
                set_admin(false);
        if (err && saved_errno != 524 /* ENOTSUPP */ && saved_errno != EPERM) {
                printf("Unexpected bpf_prog_test_run error: %s ",
                       strerror(saved_errno));
                return err;
        }
        if (!err && retval != expected_val &&
            expected_val != POINTER_VALUE) {
                printf("FAIL retval %d != %d ", retval, expected_val);
                return 1;
        }

        return 0;
}

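/* Check that every tab-separated substring of exp occurs in log, in order.
 */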
static bool cmp_str_seq(const char *log, const char *exp)
{
        char needle[80];
        const char *p, *q;
        int len;

        do {
                p = strchr(exp, '\t');
                if (!p)
                        p = exp + strlen(exp);

                len = p - exp;
                if (len >= sizeof(needle) || !len) {
                        printf("FAIL\nTestcase bug\n");
                        return false;
                }
                strncpy(needle, exp, len);
                needle[len] = 0;
                q = strstr(log, needle);
                if (!q) {
                        printf("FAIL\nExpected sequence not found in verifier log!\n"
                               "EXP: %s\nRES:\n", needle);
                        return false;
                }
                log = q + len;
                exp = p + 1;
        } while (*p);
        return true;
}

static void do_test_single(struct bpf_test *test, bool unpriv,
                           int *passes, int *errors)
{
        int fd_prog, expected_ret, alignment_prevented_execution;
        int prog_len, prog_type = test->prog_type;
        struct bpf_insn *prog = test->insns;
        struct bpf_load_program_attr attr;
        int run_errs, run_successes;
        int map_fds[MAX_NR_MAPS];
        const char *expected_err;
        int fixup_skips;
        __u32 pflags;
        int i, err;

        for (i = 0; i < MAX_NR_MAPS; i++)
                map_fds[i] = -1;

        if (!prog_type)
                prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
        fixup_skips = skips;
        do_test_fixup(test, prog_type, prog, map_fds);
        if (test->fill_insns) {
                prog = test->fill_insns;
                prog_len = test->prog_len;
        } else {
                prog_len = probe_filter_length(prog);
        }
        /* If there were some map skips during fixup due to missing bpf
         * features, skip this test.
         */
        if (fixup_skips != skips)
                return;

        pflags = BPF_F_TEST_RND_HI32;
        if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
                pflags |= BPF_F_STRICT_ALIGNMENT;
        if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
                pflags |= BPF_F_ANY_ALIGNMENT;
        /* any test flags beyond the two alignment bits are passed straight
         * through as prog_flags
         */
        if (test->flags & ~3)
                pflags |= test->flags;

        expected_ret = unpriv && test->result_unpriv != UNDEF ?
                       test->result_unpriv : test->result;
        expected_err = unpriv && test->errstr_unpriv ?
                       test->errstr_unpriv : test->errstr;
        memset(&attr, 0, sizeof(attr));
        attr.prog_type = prog_type;
        attr.expected_attach_type = test->expected_attach_type;
        attr.insns = prog;
        attr.insns_cnt = prog_len;
        attr.license = "GPL";
        if (verbose)
                attr.log_level = 1;
        else if (expected_ret == VERBOSE_ACCEPT)
                attr.log_level = 2;
        else
                attr.log_level = 4;
        attr.prog_flags = pflags;

        fd_prog = bpf_load_program_xattr(&attr, bpf_vlog, sizeof(bpf_vlog));
        if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
                printf("SKIP (unsupported program type %d)\n", prog_type);
                skips++;
                goto close_fds;
        }

        alignment_prevented_execution = 0;

        if (expected_ret == ACCEPT || expected_ret == VERBOSE_ACCEPT) {
                if (fd_prog < 0) {
                        printf("FAIL\nFailed to load prog '%s'!\n",
                               strerror(errno));
                        goto fail_log;
                }
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
                if (fd_prog >= 0 &&
                    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
                        alignment_prevented_execution = 1;
#endif
                if (expected_ret == VERBOSE_ACCEPT && !cmp_str_seq(bpf_vlog, expected_err))
                        goto fail_log;
        } else {
                if (fd_prog >= 0) {
                        printf("FAIL\nLoad succeeded unexpectedly!\n");
                        goto fail_log;
                }
                if (!expected_err || !strstr(bpf_vlog, expected_err)) {
                        printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
                               expected_err, bpf_vlog);
                        goto fail_log;
                }
        }

        if (test->insn_processed) {
                uint32_t insn_processed;
                char *proc;

                proc = strstr(bpf_vlog, "processed ");
                insn_processed = proc ? atoi(proc + 10) : 0;
                if (test->insn_processed != insn_processed) {
                        printf("FAIL\nUnexpected insn_processed %u vs %u\n",
                               insn_processed, test->insn_processed);
                        goto fail_log;
                }
        }

        if (verbose)
                printf(", verifier log:\n%s", bpf_vlog);

        run_errs = 0;
        run_successes = 0;
        if (!alignment_prevented_execution && fd_prog >= 0) {
                uint32_t expected_val;
                int i;

                if (!test->runs)
                        test->runs = 1;

                for (i = 0; i < test->runs; i++) {
                        if (unpriv && test->retvals[i].retval_unpriv)
                                expected_val = test->retvals[i].retval_unpriv;
                        else
                                expected_val = test->retvals[i].retval;

                        err = do_prog_test_run(fd_prog, unpriv, expected_val,
                                               test->retvals[i].data,
                                               sizeof(test->retvals[i].data));
                        if (err) {
                                printf("(run %d/%d) ", i + 1, test->runs);
                                run_errs++;
                        } else {
                                run_successes++;
                        }
                }
        }

        if (!run_errs) {
                (*passes)++;
                if (run_successes > 1)
                        printf("%d cases ", run_successes);
                printf("OK");
                if (alignment_prevented_execution)
                        printf(" (NOTE: not executed due to unknown alignment)");
                printf("\n");
        } else {
                printf("\n");
                goto fail_log;
        }
close_fds:
        if (test->fill_insns)
                free(test->fill_insns);
        close(fd_prog);
        for (i = 0; i < MAX_NR_MAPS; i++)
                close(map_fds[i]);
        sched_yield();
        return;
fail_log:
        (*errors)++;
        printf("%s", bpf_vlog);
        goto close_fds;
}

static bool is_admin(void)
{
        cap_flag_value_t net_priv = CAP_CLEAR;
        bool perfmon_priv = false;
        bool bpf_priv = false;
        struct libcap *cap;
        cap_t caps;

#ifdef CAP_IS_SUPPORTED
        if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
                perror("CAP_IS_SUPPORTED");
                return false;
        }
#endif
        caps = cap_get_proc();
        if (!caps) {
                perror("cap_get_proc");
                return false;
        }
        cap = (struct libcap *)caps;
        bpf_priv = cap->data[1].effective & (1 << (39 /* CAP_BPF */ - 32));
        perfmon_priv = cap->data[1].effective & (1 << (38 /* CAP_PERFMON */ - 32));
        if (cap_get_flag(caps, CAP_NET_ADMIN, CAP_EFFECTIVE, &net_priv))
                perror("cap_get_flag NET");
        if (cap_free(caps))
                perror("cap_free");
        return bpf_priv && perfmon_priv && net_priv == CAP_SET;
}

static void get_unpriv_disabled(void)
{
        char buf[2];
        FILE *fd;

        fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
        if (!fd) {
                perror("fopen /proc/sys/"UNPRIV_SYSCTL);
                unpriv_disabled = true;
                return;
        }
        if (fgets(buf, 2, fd) == buf && atoi(buf))
                unpriv_disabled = true;
        fclose(fd);
}

static bool test_as_unpriv(struct bpf_test *test)
{
        return !test->prog_type ||
               test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
               test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
}

static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
        int i, passes = 0, errors = 0;

        for (i = from; i < to; i++) {
                struct bpf_test *test = &tests[i];

                /* Tests whose program type can be loaded by non-root are
                 * also run as unprivileged, unless unprivileged BPF is
                 * disabled via sysctl, in which case the unprivileged
                 * flavor is skipped right away.
                 */
                if (test_as_unpriv(test) && unpriv_disabled) {
                        printf("#%d/u %s SKIP\n", i, test->descr);
                        skips++;
                } else if (test_as_unpriv(test)) {
                        if (!unpriv)
                                set_admin(false);
                        printf("#%d/u %s ", i, test->descr);
                        do_test_single(test, true, &passes, &errors);
                        if (!unpriv)
                                set_admin(true);
                }

                if (unpriv) {
                        printf("#%d/p %s SKIP\n", i, test->descr);
                        skips++;
                } else {
                        printf("#%d/p %s ", i, test->descr);
                        do_test_single(test, false, &passes, &errors);
                }
        }

        printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
               skips, errors);
        return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}

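/* Usage: test_verifier [-v] [<test_id> | <from> <to>]
 * With no arguments all tests are run; a single number runs that one test,
 * and two numbers run the inclusive range between them. -v also prints the
 * verifier log for accepted programs.
 */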
int main(int argc, char **argv)
{
        unsigned int from = 0, to = ARRAY_SIZE(tests);
        bool unpriv = !is_admin();
        int arg = 1;

        if (argc > 1 && strcmp(argv[1], "-v") == 0) {
                arg++;
                verbose = true;
                argc--;
        }

        if (argc == 3) {
                unsigned int l = atoi(argv[arg]);
                unsigned int u = atoi(argv[arg + 1]);

                if (l < to && u < to) {
                        from = l;
                        to   = u + 1;
                }
        } else if (argc == 2) {
                unsigned int t = atoi(argv[arg]);

                if (t < to) {
                        from = t;
                        to   = t + 1;
                }
        }

        get_unpriv_disabled();
        if (unpriv && unpriv_disabled) {
                printf("Cannot run as unprivileged user with sysctl %s.\n",
                       UNPRIV_SYSCTL);
                return EXIT_FAILURE;
        }

        bpf_semi_rand_init();
        return do_test(unpriv, from, to);
}