linux/tools/testing/selftests/bpf/prog_tests/perf_branches.c
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>
#include "bpf/libbpf_internal.h"
#include "test_perf_branches.skel.h"

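/*
 * Positive case: the perf event was opened with branch stack sampling
 * enabled, so the BPF program should have copied branch records into both
 * its stack and global buffers. Only the reported sizes are sanity-checked
 * here; entry contents are not validated (see the comment below).
 */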
static void check_good_sample(struct test_perf_branches *skel)
{
        int written_global = skel->bss->written_global_out;
        int required_size = skel->bss->required_size_out;
        int written_stack = skel->bss->written_stack_out;
        int pbe_size = sizeof(struct perf_branch_entry);
        int duration = 0;

        if (CHECK(!skel->bss->valid, "output not valid",
                 "no valid sample from prog"))
                return;

        /*
         * It's hard to validate the contents of the branch entries b/c it
         * would require some kind of disassembler and also encoding the
         * valid jump instructions for supported architectures. So just check
         * the easy stuff for now.
         */
        CHECK(required_size <= 0, "read_branches_size", "err %d\n", required_size);
        CHECK(written_stack < 0, "read_branches_stack", "err %d\n", written_stack);
        CHECK(written_stack % pbe_size != 0, "read_branches_stack",
              "stack bytes written=%d not multiple of struct size=%d\n",
              written_stack, pbe_size);
        CHECK(written_global < 0, "read_branches_global", "err %d\n", written_global);
        CHECK(written_global % pbe_size != 0, "read_branches_global",
              "global bytes written=%d not multiple of struct size=%d\n",
              written_global, pbe_size);
        CHECK(written_global < written_stack, "read_branches_size",
              "written_global=%d < written_stack=%d\n", written_global, written_stack);
}

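/*
 * Negative case: the perf event was opened without branch stack sampling,
 * so every bpf_read_branch_records() call in the BPF program is expected
 * to fail with -EINVAL or -ENOENT rather than return branch data.
 */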
static void check_bad_sample(struct test_perf_branches *skel)
{
        int written_global = skel->bss->written_global_out;
        int required_size = skel->bss->required_size_out;
        int written_stack = skel->bss->written_stack_out;
        int duration = 0;

        if (CHECK(!skel->bss->valid, "output not valid",
                 "no valid sample from prog"))
                return;

        CHECK((required_size != -EINVAL && required_size != -ENOENT),
              "read_branches_size", "err %d\n", required_size);
        CHECK((written_stack != -EINVAL && written_stack != -ENOENT),
              "read_branches_stack", "written %d\n", written_stack);
        CHECK((written_global != -EINVAL && written_global != -ENOENT),
              "read_branches_global", "written %d\n", written_global);
}

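/*
 * Common driver: load the skeleton, attach its program to the given perf
 * event fd, pin to CPU 0 and spin to trigger samples, then run the supplied
 * callback on the values the BPF program wrote back.
 */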
static void test_perf_branches_common(int perf_fd,
                                      void (*cb)(struct test_perf_branches *))
{
        struct test_perf_branches *skel;
        int err, i, duration = 0;
        bool detached = false;
        struct bpf_link *link;
        volatile int j = 0;
        cpu_set_t cpu_set;

        skel = test_perf_branches__open_and_load();
        if (CHECK(!skel, "test_perf_branches_load",
                  "perf_branches skeleton failed\n"))
                return;

        /* attach perf_event */
        link = bpf_program__attach_perf_event(skel->progs.perf_branches, perf_fd);
        if (!ASSERT_OK_PTR(link, "attach_perf_event"))
                goto out_destroy_skel;

        /* generate some branches on cpu 0 */
        CPU_ZERO(&cpu_set);
        CPU_SET(0, &cpu_set);
        err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
        if (CHECK(err, "set_affinity", "cpu #0, err %d\n", err))
                goto out_destroy;
        /* spin the loop for a while (random high number) */
        for (i = 0; i < 1000000; ++i)
                ++j;

        test_perf_branches__detach(skel);
        detached = true;

        cb(skel);
out_destroy:
        bpf_link__destroy(link);
out_destroy_skel:
        if (!detached)
                test_perf_branches__detach(skel);
        test_perf_branches__destroy(skel);
}

static void test_perf_branches_hw(void)
{
        struct perf_event_attr attr = {0};
        int duration = 0;
        int pfd;

        /* create perf event */
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.freq = 1;
        attr.sample_freq = 4000;
        attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
        attr.branch_sample_type = PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY;
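        /*
         * Branch stack sampling is requested (any branch type, user space
         * only) so that samples carry branch records for the BPF program to
         * read via bpf_read_branch_records().
         */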
        pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);

        /*
         * Some setups don't support branch records (virtual machines, !x86),
         * so skip test in this case.
         */
        if (pfd < 0) {
                if (errno == ENOENT || errno == EOPNOTSUPP) {
                        printf("%s:SKIP:no PERF_SAMPLE_BRANCH_STACK\n",
                               __func__);
                        test__skip();
                        return;
                }
                if (CHECK(pfd < 0, "perf_event_open", "err %d errno %d\n",
                          pfd, errno))
                        return;
        }

        test_perf_branches_common(pfd, check_good_sample);

        close(pfd);
}

/*
 * Tests negative case -- run bpf_read_branch_records() on improperly configured
 * perf event.
 */
static void test_perf_branches_no_hw(void)
{
        struct perf_event_attr attr = {0};
        int duration = 0;
        int pfd;

        /* create perf event */
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_CPU_CLOCK;
        attr.freq = 1;
        attr.sample_freq = 4000;
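        /*
         * Note: PERF_SAMPLE_BRANCH_STACK is intentionally not requested, so
         * samples carry no branch records and the BPF program's reads are
         * expected to fail (see check_bad_sample()).
         */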
        pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
        if (CHECK(pfd < 0, "perf_event_open", "err %d\n", pfd))
                return;

        test_perf_branches_common(pfd, check_bad_sample);

        close(pfd);
}

void test_perf_branches(void)
{
        if (test__start_subtest("perf_branches_hw"))
                test_perf_branches_hw();
        if (test__start_subtest("perf_branches_no_hw"))
                test_perf_branches_no_hw();
}