/* linux/tools/perf/tests/bpf.c */
#include <errno.h>
#include <stdio.h>
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <util/util.h>
#include <util/bpf-loader.h>
#include <util/evlist.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <api/fs/fs.h>
#include <bpf/bpf.h>
#include "tests.h"
#include "llvm.h"
#include "debug.h"
#define NR_ITERS       111
#define PERF_TEST_BPF_PATH "/sys/fs/bpf/perf_test"

#ifdef HAVE_LIBBPF_SUPPORT

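/*
 * Workload for the basic filtering and pinning tests: issue NR_ITERS
 * epoll_wait() calls with invalid fds so the probed syscall is hit a
 * known number of times.
 */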
static int epoll_wait_loop(void)
{
        int i;

        /* Should fail NR_ITERS times */
        for (i = 0; i < NR_ITERS; i++)
                epoll_wait(-(i + 1), NULL, 0, 0);
        return 0;
}

#ifdef HAVE_BPF_PROLOGUE

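/*
 * Workload for the prologue test: NR_ITERS pairs of lseek() calls on
 * two /dev/null fds, varying fd, offset and whence between calls.
 */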
static int llseek_loop(void)
{
        int fds[2], i;

        fds[0] = open("/dev/null", O_RDONLY);
        fds[1] = open("/dev/null", O_RDWR);

        if (fds[0] < 0 || fds[1] < 0)
                return -1;

        for (i = 0; i < NR_ITERS; i++) {
                lseek(fds[i % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
                lseek(fds[(i + 1) % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
        }
        close(fds[0]);
        close(fds[1]);
        return 0;
}

#endif

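/*
 * One entry per sub-test: the LLVM test case that provides the BPF
 * object, the workload to run and the number of samples the filter
 * is expected to let through.
 */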
static struct {
        enum test_llvm__testcase prog_id;
        const char *desc;
        const char *name;
        const char *msg_compile_fail;
        const char *msg_load_fail;
        int (*target_func)(void);
        int expect_result;
        bool    pin;
} bpf_testcase_table[] = {
        {
                LLVM_TESTCASE_BASE,
                "Basic BPF filtering",
                "[basic_bpf_test]",
                "fix 'perf test LLVM' first",
                "load bpf object failed",
                &epoll_wait_loop,
                (NR_ITERS + 1) / 2,
                false,
        },
        {
                LLVM_TESTCASE_BASE,
                "BPF pinning",
                "[bpf_pinning]",
                "fix kbuild first",
                "check your vmlinux setting?",
                &epoll_wait_loop,
                (NR_ITERS + 1) / 2,
                true,
        },
#ifdef HAVE_BPF_PROLOGUE
        {
                LLVM_TESTCASE_BPF_PROLOGUE,
                "BPF prologue generation",
                "[bpf_prologue_test]",
                "fix kbuild first",
                "check your vmlinux setting?",
                &llseek_loop,
                (NR_ITERS + 1) / 4,
                false,
        },
#endif
        {
                LLVM_TESTCASE_BPF_RELOCATION,
                "BPF relocation checker",
                "[bpf_relocation_test]",
                "fix 'perf test LLVM' first",
                "libbpf error when dealing with relocation",
                NULL,
                0,
                false,
        },
};

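/*
 * Parse the BPF object into an evlist, record the target workload with
 * it and check that the sample count matches what the filter should
 * let through.
 */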
static int do_test(struct bpf_object *obj, int (*func)(void),
                   int expect)
{
        struct record_opts opts = {
                .target = {
                        .uid = UINT_MAX,
                        .uses_mmap = true,
                },
                .freq         = 0,
                .mmap_pages   = 256,
                .default_interval = 1,
        };

        char pid[16];
        char sbuf[STRERR_BUFSIZE];
        struct perf_evlist *evlist;
        int i, ret = TEST_FAIL, err = 0, count = 0;

        struct parse_events_state parse_state;
        struct parse_events_error parse_error;

        bzero(&parse_error, sizeof(parse_error));
        bzero(&parse_state, sizeof(parse_state));
        parse_state.error = &parse_error;
        INIT_LIST_HEAD(&parse_state.list);

        err = parse_events_load_bpf_obj(&parse_state, &parse_state.list, obj, NULL);
        if (err || list_empty(&parse_state.list)) {
                pr_debug("Failed to add events selected by BPF\n");
                return TEST_FAIL;
        }

        snprintf(pid, sizeof(pid), "%d", getpid());
        pid[sizeof(pid) - 1] = '\0';
        opts.target.tid = opts.target.pid = pid;

        /* Instead of perf_evlist__new_default, don't add default events */
        evlist = perf_evlist__new();
        if (!evlist) {
                pr_debug("Not enough memory to create evlist\n");
                return TEST_FAIL;
        }

        err = perf_evlist__create_maps(evlist, &opts.target);
        if (err < 0) {
                pr_debug("Not enough memory to create thread/cpu maps\n");
                goto out_delete_evlist;
        }

        perf_evlist__splice_list_tail(evlist, &parse_state.list);
        evlist->nr_groups = parse_state.nr_groups;

        perf_evlist__config(evlist, &opts, NULL);

        err = perf_evlist__open(evlist);
        if (err < 0) {
                pr_debug("perf_evlist__open: %s\n",
                         str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_delete_evlist;
        }

        err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
        if (err < 0) {
                pr_debug("perf_evlist__mmap: %s\n",
                         str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_delete_evlist;
        }

        perf_evlist__enable(evlist);
        (*func)();
        perf_evlist__disable(evlist);

        for (i = 0; i < evlist->nr_mmaps; i++) {
                union perf_event *event;

                while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
                        const u32 type = event->header.type;

                        if (type == PERF_RECORD_SAMPLE)
                                count++;
                }
        }

        if (count != expect) {
                pr_debug("BPF filter result incorrect\n");
                goto out_delete_evlist;
        }

        ret = TEST_OK;

out_delete_evlist:
        perf_evlist__delete(evlist);
        return ret;
}

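/* Turn a compiled object buffer into a struct bpf_object, NULL on failure. */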
static struct bpf_object *
prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name)
{
        struct bpf_object *obj;

        obj = bpf__prepare_load_buffer(obj_buf, obj_buf_sz, name);
        if (IS_ERR(obj)) {
                pr_debug("Compile BPF program failed.\n");
                return NULL;
        }
        return obj;
}

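/*
 * Run one bpf_testcase_table entry: fetch the object compiled by the
 * LLVM test, load it, run do_test() and, if requested, exercise
 * pinning under /sys/fs/bpf.
 */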
static int __test__bpf(int idx)
{
        int ret;
        void *obj_buf;
        size_t obj_buf_sz;
        struct bpf_object *obj;

        ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
                                       bpf_testcase_table[idx].prog_id,
                                       true, NULL);
        if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
                pr_debug("Unable to get BPF object, %s\n",
                         bpf_testcase_table[idx].msg_compile_fail);
                if (idx == 0)
                        return TEST_SKIP;
                else
                        return TEST_FAIL;
        }

        obj = prepare_bpf(obj_buf, obj_buf_sz,
                          bpf_testcase_table[idx].name);
        if ((!!bpf_testcase_table[idx].target_func) != (!!obj)) {
                if (!obj)
                        pr_debug("Fail to load BPF object: %s\n",
                                 bpf_testcase_table[idx].msg_load_fail);
                else
                        pr_debug("Success unexpectedly: %s\n",
                                 bpf_testcase_table[idx].msg_load_fail);
                ret = TEST_FAIL;
                goto out;
        }

        if (obj) {
                ret = do_test(obj,
                              bpf_testcase_table[idx].target_func,
                              bpf_testcase_table[idx].expect_result);
                if (ret != TEST_OK)
                        goto out;
                if (bpf_testcase_table[idx].pin) {
                        int err;

                        if (!bpf_fs__mount()) {
                                pr_debug("BPF filesystem not mounted\n");
                                ret = TEST_FAIL;
                                goto out;
                        }
                        err = mkdir(PERF_TEST_BPF_PATH, 0777);
                        if (err && errno != EEXIST) {
                                pr_debug("Failed to make perf_test dir: %s\n",
                                         strerror(errno));
                                ret = TEST_FAIL;
                                goto out;
                        }
                        if (bpf_object__pin(obj, PERF_TEST_BPF_PATH))
                                ret = TEST_FAIL;
                        if (rm_rf(PERF_TEST_BPF_PATH))
                                ret = TEST_FAIL;
                }
        }

out:
        bpf__clear();
        return ret;
}

int test__bpf_subtest_get_nr(void)
{
        return (int)ARRAY_SIZE(bpf_testcase_table);
}

const char *test__bpf_subtest_get_desc(int i)
{
        if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
                return NULL;
        return bpf_testcase_table[i].desc;
}

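/*
 * Verify basic BPF support by loading a trivial "return 1" kprobe
 * program for the running kernel version.
 */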
static int check_env(void)
{
        int err;
        unsigned int kver_int;
        char license[] = "GPL";

        struct bpf_insn insns[] = {
                BPF_MOV64_IMM(BPF_REG_0, 1),
                BPF_EXIT_INSN(),
        };

        err = fetch_kernel_version(&kver_int, NULL, 0);
        if (err) {
                pr_debug("Unable to get kernel version\n");
                return err;
        }

        err = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
                               sizeof(insns) / sizeof(insns[0]),
                               license, kver_int, NULL, 0);
        if (err < 0) {
                pr_err("Missing basic BPF support, skip this test: %s\n",
                       strerror(errno));
                return err;
        }
        close(err);

        return 0;
}

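/* Subtest entry point: requires root and a kernel with basic BPF support. */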
int test__bpf(struct test *test __maybe_unused, int i)
{
        int err;

        if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
                return TEST_FAIL;

        if (geteuid() != 0) {
                pr_debug("Only root can run BPF test\n");
                return TEST_SKIP;
        }

        if (check_env())
                return TEST_SKIP;

        err = __test__bpf(i);
        return err;
}

#else
int test__bpf_subtest_get_nr(void)
{
        return 0;
}

const char *test__bpf_subtest_get_desc(int i __maybe_unused)
{
        return NULL;
}

int test__bpf(struct test *test __maybe_unused, int i __maybe_unused)
{
        pr_debug("Skip BPF test because BPF support is not compiled\n");
        return TEST_SKIP;
}
#endif