linux/tools/testing/selftests/bpf/test_verifier.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <limits.h>
#include <assert.h>

#include <sys/capability.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/btf.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "bpf_rlimit.h"
#include "bpf_rand.h"
#include "bpf_util.h"
#include "test_btf.h"
#include "../../../include/linux/filter.h"

#define MAX_INSNS       BPF_MAXINSNS
#define MAX_TEST_INSNS  1000000
#define MAX_FIXUPS      8
#define MAX_NR_MAPS     19
#define MAX_TEST_RUNS   8
#define POINTER_VALUE   0xcafe4all
#define TEST_DATA_LEN   64

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS      (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT            (1 << 1)

#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
static int skips;
static bool verbose = false;
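/* A single verifier test case. The fixup_* arrays list instruction
 * indices (zero-terminated lists, so instruction 0 cannot be patched)
 * whose imm field is rewritten at runtime with the fd of a freshly
 * created map; see do_test_fixup(). result/result_unpriv select the
 * expected verifier verdict for privileged and unprivileged loads.
 * A single-run test reads ->retval/->data through the anonymous
 * bpf_testdata_struct_t member, while multi-run tests (->runs > 1)
 * index ->retvals[] instead; both views share the same storage.
 */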
struct bpf_test {
        const char *descr;
        struct bpf_insn insns[MAX_INSNS];
        struct bpf_insn *fill_insns;
        int fixup_map_hash_8b[MAX_FIXUPS];
        int fixup_map_hash_48b[MAX_FIXUPS];
        int fixup_map_hash_16b[MAX_FIXUPS];
        int fixup_map_array_48b[MAX_FIXUPS];
        int fixup_map_sockmap[MAX_FIXUPS];
        int fixup_map_sockhash[MAX_FIXUPS];
        int fixup_map_xskmap[MAX_FIXUPS];
        int fixup_map_stacktrace[MAX_FIXUPS];
        int fixup_prog1[MAX_FIXUPS];
        int fixup_prog2[MAX_FIXUPS];
        int fixup_map_in_map[MAX_FIXUPS];
        int fixup_cgroup_storage[MAX_FIXUPS];
        int fixup_percpu_cgroup_storage[MAX_FIXUPS];
        int fixup_map_spin_lock[MAX_FIXUPS];
        int fixup_map_array_ro[MAX_FIXUPS];
        int fixup_map_array_wo[MAX_FIXUPS];
        int fixup_map_array_small[MAX_FIXUPS];
        int fixup_sk_storage_map[MAX_FIXUPS];
        int fixup_map_event_output[MAX_FIXUPS];
        const char *errstr;
        const char *errstr_unpriv;
        uint32_t insn_processed;
        int prog_len;
        enum {
                UNDEF,
                ACCEPT,
                REJECT,
                VERBOSE_ACCEPT,
        } result, result_unpriv;
        enum bpf_prog_type prog_type;
        uint8_t flags;
        void (*fill_helper)(struct bpf_test *self);
        uint8_t runs;
#define bpf_testdata_struct_t                                   \
        struct {                                                \
                uint32_t retval, retval_unpriv;                 \
                union {                                         \
                        __u8 data[TEST_DATA_LEN];               \
                        __u64 data64[TEST_DATA_LEN / 8];        \
                };                                              \
        }
        union {
                bpf_testdata_struct_t;
                bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
        };
        enum bpf_attach_type expected_attach_type;
};

/* Note we want this to be 64 bit aligned so that the end of our array
 * is actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
        unsigned int index;
        int foo[MAX_ENTRIES];
};

struct other_val {
        long long foo;
        long long bar;
};

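/* The bpf_fill_* helpers below generate programs far too large to spell
 * out in a test's .insns[]. A test names one via ->fill_helper;
 * do_test_fixup() then allocates a MAX_TEST_INSNS buffer in
 * ->fill_insns and calls the helper, which populates it and sets
 * ->prog_len to the number of instructions emitted.
 */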
static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{
        /* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
#define PUSH_CNT 51
        /* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */
        unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
        struct bpf_insn *insn = self->fill_insns;
        int i = 0, j, k = 0;

        insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
loop:
        for (j = 0; j < PUSH_CNT; j++) {
                insn[i++] = BPF_LD_ABS(BPF_B, 0);
                /* jump to error label */
                insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
                i++;
                insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
                insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
                insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
                insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                         BPF_FUNC_skb_vlan_push);
                insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
                i++;
        }

        for (j = 0; j < PUSH_CNT; j++) {
                insn[i++] = BPF_LD_ABS(BPF_B, 0);
                insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
                i++;
                insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
                insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                         BPF_FUNC_skb_vlan_pop);
                insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
                i++;
        }
        if (++k < 5)
                goto loop;

        for (; i < len - 3; i++)
                insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef);
        insn[len - 3] = BPF_JMP_A(1);
        /* error label */
        insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0);
        insn[len - 1] = BPF_EXIT_INSN();
        self->prog_len = len;
}

static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
        struct bpf_insn *insn = self->fill_insns;
        /* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns,
         * but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted
         * to extend the error value of the inlined ld_abs sequence which then
         * contains 7 insns. so, set the divisor to 7 so the testcase could
         * work on all arches.
         */
        unsigned int len = (1 << 15) / 7;
        int i = 0;

        insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
        insn[i++] = BPF_LD_ABS(BPF_B, 0);
        insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
        i++;
        while (i < len - 1)
                insn[i++] = BPF_LD_ABS(BPF_B, 1);
        insn[i] = BPF_EXIT_INSN();
        self->prog_len = i + 1;
}

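/* Fill the program with BPF_LD_IMM64 loads of semi-random values that
 * are XOR-folded into r0. Note that ->retval is abused as an input
 * here: it gives the desired program length, and is then overwritten
 * with the expected 32-bit XOR-folded result the program must return.
 */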
static void bpf_fill_rand_ld_dw(struct bpf_test *self)
{
        struct bpf_insn *insn = self->fill_insns;
        uint64_t res = 0;
        int i = 0;

        insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
        while (i < self->retval) {
                uint64_t val = bpf_semi_rand_get();
                struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };

                res ^= val;
                insn[i++] = tmp[0];
                insn[i++] = tmp[1];
                insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
        }
        insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
        insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
        insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
        insn[i] = BPF_EXIT_INSN();
        self->prog_len = i + 1;
        res ^= (res >> 32);
        self->retval = (uint32_t)res;
}

#define MAX_JMP_SEQ 8192

/* test the sequence of 8k jumps */
static void bpf_fill_scale1(struct bpf_test *self)
{
        struct bpf_insn *insn = self->fill_insns;
        int i = 0, k = 0;

        insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
        /* test to check that the long sequence of jumps is acceptable */
        while (k++ < MAX_JMP_SEQ) {
                insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                         BPF_FUNC_get_prandom_u32);
                insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
                insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
                insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
                                        -8 * (k % 64 + 1));
        }
        /* is_state_visited() doesn't allocate state for pruning for every jump.
         * Hence multiply jmps by 4 to accommodate that heuristic
         */
        while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
                insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
        insn[i] = BPF_EXIT_INSN();
        self->prog_len = i + 1;
        self->retval = 42;
}

/* test the sequence of 8k jumps in the innermost function (function depth 8) */
static void bpf_fill_scale2(struct bpf_test *self)
{
        struct bpf_insn *insn = self->fill_insns;
        int i = 0, k = 0;

#define FUNC_NEST 7
        for (k = 0; k < FUNC_NEST; k++) {
                insn[i++] = BPF_CALL_REL(1);
                insn[i++] = BPF_EXIT_INSN();
        }
        insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
        /* test to check that the long sequence of jumps is acceptable */
        k = 0;
        while (k++ < MAX_JMP_SEQ) {
                insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                         BPF_FUNC_get_prandom_u32);
                insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
                insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
                insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
                                        -8 * (k % (64 - 4 * FUNC_NEST) + 1));
        }
        while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
                insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
        insn[i] = BPF_EXIT_INSN();
        self->prog_len = i + 1;
        self->retval = 42;
}

static void bpf_fill_scale(struct bpf_test *self)
{
        switch (self->retval) {
        case 1:
                return bpf_fill_scale1(self);
        case 2:
                return bpf_fill_scale2(self);
        default:
                self->prog_len = 0;
                break;
        }
}

/* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */
#define BPF_SK_LOOKUP(func)                                             \
        /* struct bpf_sock_tuple tuple = {} */                          \
        BPF_MOV64_IMM(BPF_REG_2, 0),                                    \
        BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),                  \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),                \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),                \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),                \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),                \
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),                \
        /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */                \
        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),                           \
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),                         \
        BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),        \
        BPF_MOV64_IMM(BPF_REG_4, 0),                                    \
        BPF_MOV64_IMM(BPF_REG_5, 0),                                    \
        BPF_EMIT_CALL(BPF_FUNC_ ## func)

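/* Illustrative use inside a test's .insns[], expanding to the zeroed
 * tuple on the stack followed by the helper call:
 *
 *	BPF_SK_LOOKUP(sk_lookup_tcp),
 *	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
 *	...
 */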
/* BPF_DIRECT_PKT_R2 contains 7 instructions, it initializes the default
 * return value to 0 and does the necessary preparation for direct packet
 * access through r2. The allowed access range is 8 bytes.
 */
#define BPF_DIRECT_PKT_R2                                               \
        BPF_MOV64_IMM(BPF_REG_0, 0),                                    \
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,                        \
                    offsetof(struct __sk_buff, data)),                  \
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,                        \
                    offsetof(struct __sk_buff, data_end)),              \
        BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),                            \
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),                           \
        BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),                  \
        BPF_EXIT_INSN()

/* BPF_RAND_UEXT_R7 contains 4 instructions, it initializes R7 to a random
 * positive u32 and zero-extends it to 64 bits.
 */
#define BPF_RAND_UEXT_R7                                                \
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,                       \
                     BPF_FUNC_get_prandom_u32),                         \
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),                            \
        BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33),                          \
        BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)

/* BPF_RAND_SEXT_R7 contains 5 instructions, it initializes R7 to a random
 * negative u32 and sign-extends it to 64 bits.
 */
#define BPF_RAND_SEXT_R7                                                \
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,                       \
                     BPF_FUNC_get_prandom_u32),                         \
        BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),                            \
        BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000),                   \
        BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32),                         \
        BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)

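/* The test cases themselves live in the verifier/*.c snippets; the
 * selftests Makefile concatenates them into the generated
 * verifier/tests.h, which expands here under FILL_ARRAY.
 */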
static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};

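/* For tests without a fill helper, scan .insns[] backwards past the
 * all-zero padding to find the actual program length.
 */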
static int probe_filter_length(const struct bpf_insn *fp)
{
        int len;

        for (len = MAX_INSNS - 1; len > 0; --len)
                if (fp[len].code != 0 || fp[len].imm != 0)
                        break;
        return len + 1;
}

static bool skip_unsupported_map(enum bpf_map_type map_type)
{
        if (!bpf_probe_map_type(map_type, 0)) {
                printf("SKIP (unsupported map type %d)\n", map_type);
                skips++;
                return true;
        }
        return false;
}

static int __create_map(uint32_t type, uint32_t size_key,
                        uint32_t size_value, uint32_t max_elem,
                        uint32_t extra_flags)
{
        int fd;

        fd = bpf_create_map(type, size_key, size_value, max_elem,
                            (type == BPF_MAP_TYPE_HASH ?
                             BPF_F_NO_PREALLOC : 0) | extra_flags);
        if (fd < 0) {
                if (skip_unsupported_map(type))
                        return -1;
                printf("Failed to create map '%s'!\n", strerror(errno));
        }

        return fd;
}

static int create_map(uint32_t type, uint32_t size_key,
                      uint32_t size_value, uint32_t max_elem)
{
        return __create_map(type, size_key, size_value, max_elem, 0);
}

static void update_map(int fd, int index)
{
        struct test_val value = {
                .index = (6 + 1) * sizeof(int),
                .foo[6] = 0xabcdef12,
        };

        assert(!bpf_map_update_elem(fd, &index, &value, 0));
}

static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret)
{
        struct bpf_insn prog[] = {
                BPF_MOV64_IMM(BPF_REG_0, ret),
                BPF_EXIT_INSN(),
        };

        return bpf_load_program(prog_type, prog,
                                ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}

static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd,
                                  int idx, int ret)
{
        struct bpf_insn prog[] = {
                BPF_MOV64_IMM(BPF_REG_3, idx),
                BPF_LD_MAP_FD(BPF_REG_2, mfd),
                BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                             BPF_FUNC_tail_call),
                BPF_MOV64_IMM(BPF_REG_0, ret),
                BPF_EXIT_INSN(),
        };

        return bpf_load_program(prog_type, prog,
                                ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}

static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
                             int p1key, int p2key, int p3key)
{
        int mfd, p1fd, p2fd, p3fd;

        mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
                             sizeof(int), max_elem, 0);
        if (mfd < 0) {
                if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
                        return -1;
                printf("Failed to create prog array '%s'!\n", strerror(errno));
                return -1;
        }

        p1fd = create_prog_dummy_simple(prog_type, 42);
        p2fd = create_prog_dummy_loop(prog_type, mfd, p2key, 41);
        p3fd = create_prog_dummy_simple(prog_type, 24);
        if (p1fd < 0 || p2fd < 0 || p3fd < 0)
                goto err;
        if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
                goto err;
        if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
                goto err;
        if (bpf_map_update_elem(mfd, &p3key, &p3fd, BPF_ANY) < 0) {
err:
                close(mfd);
                mfd = -1;
        }
        close(p3fd);
        close(p2fd);
        close(p1fd);
        return mfd;
}

static int create_map_in_map(void)
{
        int inner_map_fd, outer_map_fd;

        inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                                      sizeof(int), 1, 0);
        if (inner_map_fd < 0) {
                if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
                        return -1;
                printf("Failed to create array '%s'!\n", strerror(errno));
                return inner_map_fd;
        }

        outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
                                             sizeof(int), inner_map_fd, 1, 0);
        if (outer_map_fd < 0) {
                if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
                        return -1;
                printf("Failed to create array of maps '%s'!\n",
                       strerror(errno));
        }

        close(inner_map_fd);

        return outer_map_fd;
}

static int create_cgroup_storage(bool percpu)
{
        enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
                BPF_MAP_TYPE_CGROUP_STORAGE;
        int fd;

        fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
                            TEST_DATA_LEN, 0, 0);
        if (fd < 0) {
                if (skip_unsupported_map(type))
                        return -1;
                printf("Failed to create cgroup storage '%s'!\n",
                       strerror(errno));
        }

        return fd;
}

/* struct bpf_spin_lock {
 *   int val;
 * };
 * struct val {
 *   int cnt;
 *   struct bpf_spin_lock l;
 * };
 */
static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
static __u32 btf_raw_types[] = {
        /* int */
        BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
        /* struct bpf_spin_lock */                      /* [2] */
        BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
        BTF_MEMBER_ENC(15, 1, 0), /* int val; */
        /* struct val */                                /* [3] */
        BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
        BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
        BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
};

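/* Build an in-memory raw BTF blob (header + type section + string
 * section) describing the two structs in the comment above, and load
 * it. Type ids: [1] int, [2] struct bpf_spin_lock, [3] struct val.
 */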
static int load_btf(void)
{
        struct btf_header hdr = {
                .magic = BTF_MAGIC,
                .version = BTF_VERSION,
                .hdr_len = sizeof(struct btf_header),
                .type_len = sizeof(btf_raw_types),
                .str_off = sizeof(btf_raw_types),
                .str_len = sizeof(btf_str_sec),
        };
        void *ptr, *raw_btf;
        int btf_fd;

        ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
                               sizeof(btf_str_sec));
        if (!raw_btf)
                return -1;

        memcpy(ptr, &hdr, sizeof(hdr));
        ptr += sizeof(hdr);
        memcpy(ptr, btf_raw_types, hdr.type_len);
        ptr += hdr.type_len;
        memcpy(ptr, btf_str_sec, hdr.str_len);
        ptr += hdr.str_len;

        btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0);
        free(raw_btf);
        if (btf_fd < 0)
                return -1;
        return btf_fd;
}

static int create_map_spin_lock(void)
{
        struct bpf_create_map_attr attr = {
                .name = "test_map",
                .map_type = BPF_MAP_TYPE_ARRAY,
                .key_size = 4,
                .value_size = 8,
                .max_entries = 1,
                .btf_key_type_id = 1,
                .btf_value_type_id = 3,
        };
        int fd, btf_fd;

        btf_fd = load_btf();
        if (btf_fd < 0)
                return -1;
        attr.btf_fd = btf_fd;
        fd = bpf_create_map_xattr(&attr);
        if (fd < 0)
                printf("Failed to create map with spin_lock\n");
        return fd;
}

static int create_sk_storage_map(void)
{
        struct bpf_create_map_attr attr = {
                .name = "test_map",
                .map_type = BPF_MAP_TYPE_SK_STORAGE,
                .key_size = 4,
                .value_size = 8,
                .max_entries = 0,
                .map_flags = BPF_F_NO_PREALLOC,
                .btf_key_type_id = 1,
                .btf_value_type_id = 3,
        };
        int fd, btf_fd;

        btf_fd = load_btf();
        if (btf_fd < 0)
                return -1;
        attr.btf_fd = btf_fd;
        fd = bpf_create_map_xattr(&attr);
        close(attr.btf_fd);
        if (fd < 0)
                printf("Failed to create sk_storage_map\n");
        return fd;
}

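/* Verifier log buffer, ~16 MB (UINT_MAX >> 8 bytes). */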
static char bpf_vlog[UINT_MAX >> 8];

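/* Walk every fixup_* list of the test and, for each listed instruction
 * index, create the corresponding map (or prog array) once and patch
 * the instruction's imm field with its fd. Created fds are recorded in
 * map_fds[] so do_test_single() can close them afterwards.
 */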
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
                          struct bpf_insn *prog, int *map_fds)
{
        int *fixup_map_hash_8b = test->fixup_map_hash_8b;
        int *fixup_map_hash_48b = test->fixup_map_hash_48b;
        int *fixup_map_hash_16b = test->fixup_map_hash_16b;
        int *fixup_map_array_48b = test->fixup_map_array_48b;
        int *fixup_map_sockmap = test->fixup_map_sockmap;
        int *fixup_map_sockhash = test->fixup_map_sockhash;
        int *fixup_map_xskmap = test->fixup_map_xskmap;
        int *fixup_map_stacktrace = test->fixup_map_stacktrace;
        int *fixup_prog1 = test->fixup_prog1;
        int *fixup_prog2 = test->fixup_prog2;
        int *fixup_map_in_map = test->fixup_map_in_map;
        int *fixup_cgroup_storage = test->fixup_cgroup_storage;
        int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
        int *fixup_map_spin_lock = test->fixup_map_spin_lock;
        int *fixup_map_array_ro = test->fixup_map_array_ro;
        int *fixup_map_array_wo = test->fixup_map_array_wo;
        int *fixup_map_array_small = test->fixup_map_array_small;
        int *fixup_sk_storage_map = test->fixup_sk_storage_map;
        int *fixup_map_event_output = test->fixup_map_event_output;

        if (test->fill_helper) {
                test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
                test->fill_helper(test);
        }

        /* Allocating HTs with 1 elem is fine here, since we only test
         * the verifier and never do a runtime lookup, so the only thing
         * that really matters is the value size in this case.
         */
        if (*fixup_map_hash_8b) {
                map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
                                        sizeof(long long), 1);
                do {
                        prog[*fixup_map_hash_8b].imm = map_fds[0];
                        fixup_map_hash_8b++;
                } while (*fixup_map_hash_8b);
        }

        if (*fixup_map_hash_48b) {
                map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
                                        sizeof(struct test_val), 1);
                do {
                        prog[*fixup_map_hash_48b].imm = map_fds[1];
                        fixup_map_hash_48b++;
                } while (*fixup_map_hash_48b);
        }

        if (*fixup_map_hash_16b) {
                map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
                                        sizeof(struct other_val), 1);
                do {
                        prog[*fixup_map_hash_16b].imm = map_fds[2];
                        fixup_map_hash_16b++;
                } while (*fixup_map_hash_16b);
        }

        if (*fixup_map_array_48b) {
                map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                                        sizeof(struct test_val), 1);
                update_map(map_fds[3], 0);
                do {
                        prog[*fixup_map_array_48b].imm = map_fds[3];
                        fixup_map_array_48b++;
                } while (*fixup_map_array_48b);
        }

        if (*fixup_prog1) {
                map_fds[4] = create_prog_array(prog_type, 4, 0, 1, 2);
                do {
                        prog[*fixup_prog1].imm = map_fds[4];
                        fixup_prog1++;
                } while (*fixup_prog1);
        }

        if (*fixup_prog2) {
                map_fds[5] = create_prog_array(prog_type, 8, 7, 1, 2);
                do {
                        prog[*fixup_prog2].imm = map_fds[5];
                        fixup_prog2++;
                } while (*fixup_prog2);
        }

        if (*fixup_map_in_map) {
                map_fds[6] = create_map_in_map();
                do {
                        prog[*fixup_map_in_map].imm = map_fds[6];
                        fixup_map_in_map++;
                } while (*fixup_map_in_map);
        }

        if (*fixup_cgroup_storage) {
                map_fds[7] = create_cgroup_storage(false);
                do {
                        prog[*fixup_cgroup_storage].imm = map_fds[7];
                        fixup_cgroup_storage++;
                } while (*fixup_cgroup_storage);
        }

        if (*fixup_percpu_cgroup_storage) {
                map_fds[8] = create_cgroup_storage(true);
                do {
                        prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
                        fixup_percpu_cgroup_storage++;
                } while (*fixup_percpu_cgroup_storage);
        }
        if (*fixup_map_sockmap) {
                map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
                                        sizeof(int), 1);
                do {
                        prog[*fixup_map_sockmap].imm = map_fds[9];
                        fixup_map_sockmap++;
                } while (*fixup_map_sockmap);
        }
        if (*fixup_map_sockhash) {
                map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
                                         sizeof(int), 1);
                do {
                        prog[*fixup_map_sockhash].imm = map_fds[10];
                        fixup_map_sockhash++;
                } while (*fixup_map_sockhash);
        }
        if (*fixup_map_xskmap) {
                map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
                                         sizeof(int), 1);
                do {
                        prog[*fixup_map_xskmap].imm = map_fds[11];
                        fixup_map_xskmap++;
                } while (*fixup_map_xskmap);
        }
        if (*fixup_map_stacktrace) {
                map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
                                         sizeof(u64), 1);
                do {
                        prog[*fixup_map_stacktrace].imm = map_fds[12];
                        fixup_map_stacktrace++;
                } while (*fixup_map_stacktrace);
        }
        if (*fixup_map_spin_lock) {
                map_fds[13] = create_map_spin_lock();
                do {
                        prog[*fixup_map_spin_lock].imm = map_fds[13];
                        fixup_map_spin_lock++;
                } while (*fixup_map_spin_lock);
        }
        if (*fixup_map_array_ro) {
                map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                                           sizeof(struct test_val), 1,
                                           BPF_F_RDONLY_PROG);
                update_map(map_fds[14], 0);
                do {
                        prog[*fixup_map_array_ro].imm = map_fds[14];
                        fixup_map_array_ro++;
                } while (*fixup_map_array_ro);
        }
        if (*fixup_map_array_wo) {
                map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                                           sizeof(struct test_val), 1,
                                           BPF_F_WRONLY_PROG);
                update_map(map_fds[15], 0);
                do {
                        prog[*fixup_map_array_wo].imm = map_fds[15];
                        fixup_map_array_wo++;
                } while (*fixup_map_array_wo);
        }
        if (*fixup_map_array_small) {
                map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                                           1, 1, 0);
                update_map(map_fds[16], 0);
                do {
                        prog[*fixup_map_array_small].imm = map_fds[16];
                        fixup_map_array_small++;
                } while (*fixup_map_array_small);
        }
        if (*fixup_sk_storage_map) {
                map_fds[17] = create_sk_storage_map();
                do {
                        prog[*fixup_sk_storage_map].imm = map_fds[17];
                        fixup_sk_storage_map++;
                } while (*fixup_sk_storage_map);
        }
        if (*fixup_map_event_output) {
                map_fds[18] = __create_map(BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                                           sizeof(int), sizeof(int), 1, 0);
                do {
                        prog[*fixup_map_event_output].imm = map_fds[18];
                        fixup_map_event_output++;
                } while (*fixup_map_event_output);
        }
}

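/* Toggle CAP_SYS_ADMIN in the effective capability set, so the same
 * process can switch between privileged and "unprivileged" test runs
 * without forking.
 */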
static int set_admin(bool admin)
{
        cap_t caps;
        const cap_value_t cap_val = CAP_SYS_ADMIN;
        int ret = -1;

        caps = cap_get_proc();
        if (!caps) {
                perror("cap_get_proc");
                return -1;
        }
        if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
                         admin ? CAP_SET : CAP_CLEAR)) {
                perror("cap_set_flag");
                goto out;
        }
        if (cap_set_proc(caps)) {
                perror("cap_set_proc");
                goto out;
        }
        ret = 0;
out:
        if (cap_free(caps))
                perror("cap_free");
        return ret;
}

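/* Run the loaded program once via BPF_PROG_TEST_RUN and compare the
 * return value. CAP_SYS_ADMIN is temporarily re-acquired for unpriv
 * tests, since the test-run command itself requires privilege.
 */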
static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
                            void *data, size_t size_data)
{
        __u8 tmp[TEST_DATA_LEN << 2];
        __u32 size_tmp = sizeof(tmp);
        uint32_t retval;
        int err;

        if (unpriv)
                set_admin(true);
        err = bpf_prog_test_run(fd_prog, 1, data, size_data,
                                tmp, &size_tmp, &retval, NULL);
        if (unpriv)
                set_admin(false);
        if (err && errno != 524 /* ENOTSUPP */ && errno != EPERM) {
                printf("Unexpected bpf_prog_test_run error ");
                return err;
        }
        if (!err && retval != expected_val &&
            expected_val != POINTER_VALUE) {
                printf("FAIL retval %d != %d ", retval, expected_val);
                return 1;
        }

        return 0;
}

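/* Match a tab-separated sequence of expected substrings against the
 * verifier log, in order: each needle must occur after the previous
 * match. Used by VERBOSE_ACCEPT tests.
 */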
static bool cmp_str_seq(const char *log, const char *exp)
{
        char needle[80];
        const char *p, *q;
        int len;

        do {
                p = strchr(exp, '\t');
                if (!p)
                        p = exp + strlen(exp);

                len = p - exp;
                if (len >= sizeof(needle) || !len) {
                        printf("FAIL\nTestcase bug\n");
                        return false;
                }
                strncpy(needle, exp, len);
                needle[len] = 0;
                q = strstr(log, needle);
                if (!q) {
                        printf("FAIL\nUnexpected verifier log in successful load!\n"
                               "EXP: %s\nRES:\n", needle);
                        return false;
                }
                log = q + len;
                exp = p + 1;
        } while (*p);
        return true;
}

static void do_test_single(struct bpf_test *test, bool unpriv,
                           int *passes, int *errors)
{
        int fd_prog, expected_ret, alignment_prevented_execution;
        int prog_len, prog_type = test->prog_type;
        struct bpf_insn *prog = test->insns;
        struct bpf_load_program_attr attr;
        int run_errs, run_successes;
        int map_fds[MAX_NR_MAPS];
        const char *expected_err;
        int fixup_skips;
        __u32 pflags;
        int i, err;

        for (i = 0; i < MAX_NR_MAPS; i++)
                map_fds[i] = -1;

        if (!prog_type)
                prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
        fixup_skips = skips;
        do_test_fixup(test, prog_type, prog, map_fds);
        if (test->fill_insns) {
                prog = test->fill_insns;
                prog_len = test->prog_len;
        } else {
                prog_len = probe_filter_length(prog);
        }
        /* If there were some map skips during fixup due to missing bpf
         * features, skip this test.
         */
        if (fixup_skips != skips)
                return;

        pflags = BPF_F_TEST_RND_HI32;
        if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
                pflags |= BPF_F_STRICT_ALIGNMENT;
        if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
                pflags |= BPF_F_ANY_ALIGNMENT;
        /* Flag bits beyond the two F_* test flags are raw BPF_F_* prog
         * flags; if any are set, pass the whole flags word through.
         */
        if (test->flags & ~3)
                pflags |= test->flags;

        expected_ret = unpriv && test->result_unpriv != UNDEF ?
                       test->result_unpriv : test->result;
        expected_err = unpriv && test->errstr_unpriv ?
                       test->errstr_unpriv : test->errstr;
        memset(&attr, 0, sizeof(attr));
        attr.prog_type = prog_type;
        attr.expected_attach_type = test->expected_attach_type;
        attr.insns = prog;
        attr.insns_cnt = prog_len;
        attr.license = "GPL";
        attr.log_level = verbose || expected_ret == VERBOSE_ACCEPT ? 1 : 4;
        attr.prog_flags = pflags;

        fd_prog = bpf_load_program_xattr(&attr, bpf_vlog, sizeof(bpf_vlog));
        if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
                printf("SKIP (unsupported program type %d)\n", prog_type);
                skips++;
                goto close_fds;
        }

        alignment_prevented_execution = 0;

        if (expected_ret == ACCEPT || expected_ret == VERBOSE_ACCEPT) {
                if (fd_prog < 0) {
                        printf("FAIL\nFailed to load prog '%s'!\n",
                               strerror(errno));
                        goto fail_log;
                }
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
                if (fd_prog >= 0 &&
                    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
                        alignment_prevented_execution = 1;
#endif
                if (expected_ret == VERBOSE_ACCEPT && !cmp_str_seq(bpf_vlog, expected_err)) {
                        goto fail_log;
                }
        } else {
                if (fd_prog >= 0) {
                        printf("FAIL\nUnexpectedly succeeded to load!\n");
                        goto fail_log;
                }
                if (!expected_err || !strstr(bpf_vlog, expected_err)) {
                        printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
                               expected_err, bpf_vlog);
                        goto fail_log;
                }
        }

        if (test->insn_processed) {
                uint32_t insn_processed;
                char *proc;

                proc = strstr(bpf_vlog, "processed ");
                insn_processed = atoi(proc + 10);
                if (test->insn_processed != insn_processed) {
                        printf("FAIL\nUnexpected insn_processed %u vs %u\n",
                               insn_processed, test->insn_processed);
                        goto fail_log;
                }
        }

        if (verbose)
                printf(", verifier log:\n%s", bpf_vlog);

        run_errs = 0;
        run_successes = 0;
        if (!alignment_prevented_execution && fd_prog >= 0) {
                uint32_t expected_val;
                int i;

                if (!test->runs)
                        test->runs = 1;

                for (i = 0; i < test->runs; i++) {
                        if (unpriv && test->retvals[i].retval_unpriv)
                                expected_val = test->retvals[i].retval_unpriv;
                        else
                                expected_val = test->retvals[i].retval;

                        err = do_prog_test_run(fd_prog, unpriv, expected_val,
                                               test->retvals[i].data,
                                               sizeof(test->retvals[i].data));
                        if (err) {
                                printf("(run %d/%d) ", i + 1, test->runs);
                                run_errs++;
                        } else {
                                run_successes++;
                        }
                }
        }

        if (!run_errs) {
                (*passes)++;
                if (run_successes > 1)
                        printf("%d cases ", run_successes);
                printf("OK");
                if (alignment_prevented_execution)
                        printf(" (NOTE: not executed due to unknown alignment)");
                printf("\n");
        } else {
                printf("\n");
                goto fail_log;
        }
close_fds:
        if (test->fill_insns)
                free(test->fill_insns);
        close(fd_prog);
        for (i = 0; i < MAX_NR_MAPS; i++)
                close(map_fds[i]);
        sched_yield();
        return;
fail_log:
        (*errors)++;
        printf("%s", bpf_vlog);
        goto close_fds;
}

static bool is_admin(void)
{
        cap_t caps;
        cap_flag_value_t sysadmin = CAP_CLEAR;
        const cap_value_t cap_val = CAP_SYS_ADMIN;

#ifdef CAP_IS_SUPPORTED
        if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
                perror("cap_get_flag");
                return false;
        }
#endif
        caps = cap_get_proc();
        if (!caps) {
                perror("cap_get_proc");
                return false;
        }
        if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
                perror("cap_get_flag");
        if (cap_free(caps))
                perror("cap_free");
        return (sysadmin == CAP_SET);
}

static void get_unpriv_disabled(void)
{
        char buf[2];
        FILE *fd;

        fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
        if (!fd) {
                perror("fopen /proc/sys/"UNPRIV_SYSCTL);
                unpriv_disabled = true;
                return;
        }
        if (fgets(buf, 2, fd) == buf && atoi(buf))
                unpriv_disabled = true;
        fclose(fd);
}

static bool test_as_unpriv(struct bpf_test *test)
{
        return !test->prog_type ||
               test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
               test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
}

static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
        int i, passes = 0, errors = 0;

        for (i = from; i < to; i++) {
                struct bpf_test *test = &tests[i];

                /* Skip right away program types that are not supported
                 * for non-root.
                 */
                if (test_as_unpriv(test) && unpriv_disabled) {
                        printf("#%d/u %s SKIP\n", i, test->descr);
                        skips++;
                } else if (test_as_unpriv(test)) {
                        if (!unpriv)
                                set_admin(false);
                        printf("#%d/u %s ", i, test->descr);
                        do_test_single(test, true, &passes, &errors);
                        if (!unpriv)
                                set_admin(true);
                }

                if (unpriv) {
                        printf("#%d/p %s SKIP\n", i, test->descr);
                        skips++;
                } else {
                        printf("#%d/p %s ", i, test->descr);
                        do_test_single(test, false, &passes, &errors);
                }
        }

        printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
               skips, errors);
        return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}

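/* Usage: test_verifier [-v] [<test_from> [<test_to>]]
 * With no range, all tests run; a single number runs one test; two
 * numbers run the inclusive range between them.
 */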
int main(int argc, char **argv)
{
        unsigned int from = 0, to = ARRAY_SIZE(tests);
        bool unpriv = !is_admin();
        int arg = 1;

        if (argc > 1 && strcmp(argv[1], "-v") == 0) {
                arg++;
                verbose = true;
                argc--;
        }

        if (argc == 3) {
                unsigned int l = atoi(argv[arg]);
                unsigned int u = atoi(argv[arg + 1]);

                if (l < to && u < to) {
                        from = l;
                        to   = u + 1;
                }
        } else if (argc == 2) {
                unsigned int t = atoi(argv[arg]);

                if (t < to) {
                        from = t;
                        to   = t + 1;
                }
        }

        get_unpriv_disabled();
        if (unpriv && unpriv_disabled) {
                printf("Cannot run as unprivileged user with sysctl %s.\n",
                       UNPRIV_SYSCTL);
                return EXIT_FAILURE;
        }

        bpf_semi_rand_init();
        return do_test(unpriv, from, to);
}