linux/tools/testing/selftests/bpf/prog_tests/atomics.c
// SPDX-License-Identifier: GPL-2.0

#include <test_progs.h>

#include "atomics.lskel.h"

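/*
 * Each test below runs one program from the atomics light skeleton
 * directly with bpf_prog_test_run_opts() (no attach step) and then checks
 * the global variables the program updated.  The BPF-side source is not
 * part of this file, so the per-test comments only sketch, as an
 * assumption based on the expected values, what it is likely to do.
 */

/*
 * "add" subtest: judging by the checks below, each *_value presumably
 * starts at 1 and the program performs an atomic fetch-and-add of 2,
 * roughly:
 *
 *	add64_result = __sync_fetch_and_add(&add64_value, 2);
 *
 * so the value ends at 3 and the result holds the old value 1.  The
 * stack variant copies its final value into add_stack_value_copy, and
 * the no-return variant only updates the value in place.
 */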
static void test_add(struct atomics_lskel *skel)
{
        int err, prog_fd;
        LIBBPF_OPTS(bpf_test_run_opts, topts);

        /* No need to attach it, just run it directly */
        prog_fd = skel->progs.add.prog_fd;
        err = bpf_prog_test_run_opts(prog_fd, &topts);
        if (!ASSERT_OK(err, "test_run_opts err"))
                return;
        if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
                return;

        ASSERT_EQ(skel->data->add64_value, 3, "add64_value");
        ASSERT_EQ(skel->bss->add64_result, 1, "add64_result");

        ASSERT_EQ(skel->data->add32_value, 3, "add32_value");
        ASSERT_EQ(skel->bss->add32_result, 1, "add32_result");

        ASSERT_EQ(skel->bss->add_stack_value_copy, 3, "add_stack_value");
        ASSERT_EQ(skel->bss->add_stack_result, 1, "add_stack_result");

        ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value");
}

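/*
 * "sub" subtest: presumably the mirror image of "add", an atomic
 * fetch-and-sub of 2 from an initial value of 1, leaving -1 in the value
 * and the old value 1 in the result.
 */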
static void test_sub(struct atomics_lskel *skel)
{
        int err, prog_fd;
        LIBBPF_OPTS(bpf_test_run_opts, topts);

        /* No need to attach it, just run it directly */
        prog_fd = skel->progs.sub.prog_fd;
        err = bpf_prog_test_run_opts(prog_fd, &topts);
        if (!ASSERT_OK(err, "test_run_opts err"))
                return;
        if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
                return;

        ASSERT_EQ(skel->data->sub64_value, -1, "sub64_value");
        ASSERT_EQ(skel->bss->sub64_result, 1, "sub64_result");

        ASSERT_EQ(skel->data->sub32_value, -1, "sub32_value");
        ASSERT_EQ(skel->bss->sub32_result, 1, "sub32_result");

        ASSERT_EQ(skel->bss->sub_stack_value_copy, -1, "sub_stack_value");
        ASSERT_EQ(skel->bss->sub_stack_result, 1, "sub_stack_result");

        ASSERT_EQ(skel->data->sub_noreturn_value, -1, "sub_noreturn_value");
}

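/*
 * "and" subtest: the expected values suggest an initial value of 0x110
 * (shifted into the upper half for the 64-bit case) ANDed with 0x011,
 * leaving 0x010, with the result capturing the old value.
 */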
static void test_and(struct atomics_lskel *skel)
{
        int err, prog_fd;
        LIBBPF_OPTS(bpf_test_run_opts, topts);

        /* No need to attach it, just run it directly */
        prog_fd = skel->progs.and.prog_fd;
        err = bpf_prog_test_run_opts(prog_fd, &topts);
        if (!ASSERT_OK(err, "test_run_opts err"))
                return;
        if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
                return;

        ASSERT_EQ(skel->data->and64_value, 0x010ull << 32, "and64_value");
        ASSERT_EQ(skel->bss->and64_result, 0x110ull << 32, "and64_result");

        ASSERT_EQ(skel->data->and32_value, 0x010, "and32_value");
        ASSERT_EQ(skel->bss->and32_result, 0x110, "and32_result");

        ASSERT_EQ(skel->data->and_noreturn_value, 0x010ull << 32, "and_noreturn_value");
}

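/*
 * "or" subtest: expected values suggest 0x110 | 0x011 = 0x111, with the
 * result capturing the old value 0x110 (upper 32 bits for the 64-bit
 * case).
 */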
static void test_or(struct atomics_lskel *skel)
{
        int err, prog_fd;
        LIBBPF_OPTS(bpf_test_run_opts, topts);

        /* No need to attach it, just run it directly */
        prog_fd = skel->progs.or.prog_fd;
        err = bpf_prog_test_run_opts(prog_fd, &topts);
        if (!ASSERT_OK(err, "test_run_opts err"))
                return;
        if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
                return;

        ASSERT_EQ(skel->data->or64_value, 0x111ull << 32, "or64_value");
        ASSERT_EQ(skel->bss->or64_result, 0x110ull << 32, "or64_result");

        ASSERT_EQ(skel->data->or32_value, 0x111, "or32_value");
        ASSERT_EQ(skel->bss->or32_result, 0x110, "or32_result");

        ASSERT_EQ(skel->data->or_noreturn_value, 0x111ull << 32, "or_noreturn_value");
}

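/*
 * "xor" subtest: expected values suggest 0x110 ^ 0x011 = 0x101, with the
 * result capturing the old value 0x110 (upper 32 bits for the 64-bit
 * case).
 */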
static void test_xor(struct atomics_lskel *skel)
{
        int err, prog_fd;
        LIBBPF_OPTS(bpf_test_run_opts, topts);

        /* No need to attach it, just run it directly */
        prog_fd = skel->progs.xor.prog_fd;
        err = bpf_prog_test_run_opts(prog_fd, &topts);
        if (!ASSERT_OK(err, "test_run_opts err"))
                return;
        if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
                return;

        ASSERT_EQ(skel->data->xor64_value, 0x101ull << 32, "xor64_value");
        ASSERT_EQ(skel->bss->xor64_result, 0x110ull << 32, "xor64_result");

        ASSERT_EQ(skel->data->xor32_value, 0x101, "xor32_value");
        ASSERT_EQ(skel->bss->xor32_result, 0x110, "xor32_result");

        ASSERT_EQ(skel->data->xor_noreturn_value, 0x101ull << 32, "xor_noreturn_value");
}

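/*
 * "cmpxchg" subtest: presumably the program starts from a value of 1 and
 * issues one failing and one succeeding compare-and-swap, roughly:
 *
 *	cmpxchg64_result_fail = __sync_val_compare_and_swap(&cmpxchg64_value, 0, 3);
 *	cmpxchg64_result_succeed = __sync_val_compare_and_swap(&cmpxchg64_value, 1, 2);
 *
 * Both calls return the old value 1; only the second swaps, so the value
 * ends at 2.
 */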
static void test_cmpxchg(struct atomics_lskel *skel)
{
        int err, prog_fd;
        LIBBPF_OPTS(bpf_test_run_opts, topts);

        /* No need to attach it, just run it directly */
        prog_fd = skel->progs.cmpxchg.prog_fd;
        err = bpf_prog_test_run_opts(prog_fd, &topts);
        if (!ASSERT_OK(err, "test_run_opts err"))
                return;
        if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
                return;

        ASSERT_EQ(skel->data->cmpxchg64_value, 2, "cmpxchg64_value");
        ASSERT_EQ(skel->bss->cmpxchg64_result_fail, 1, "cmpxchg64_result_fail");
        ASSERT_EQ(skel->bss->cmpxchg64_result_succeed, 1, "cmpxchg64_result_succeed");

        ASSERT_EQ(skel->data->cmpxchg32_value, 2, "cmpxchg32_value");
        ASSERT_EQ(skel->bss->cmpxchg32_result_fail, 1, "cmpxchg32_result_fail");
        ASSERT_EQ(skel->bss->cmpxchg32_result_succeed, 1, "cmpxchg32_result_succeed");
}

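/*
 * "xchg" subtest: presumably an atomic exchange of 2 into a value that
 * starts at 1, so the value ends at 2 and the result holds the old
 * value 1.
 */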
static void test_xchg(struct atomics_lskel *skel)
{
        int err, prog_fd;
        LIBBPF_OPTS(bpf_test_run_opts, topts);

        /* No need to attach it, just run it directly */
        prog_fd = skel->progs.xchg.prog_fd;
        err = bpf_prog_test_run_opts(prog_fd, &topts);
        if (!ASSERT_OK(err, "test_run_opts err"))
                return;
        if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
                return;

        ASSERT_EQ(skel->data->xchg64_value, 2, "xchg64_value");
        ASSERT_EQ(skel->bss->xchg64_result, 1, "xchg64_result");

        ASSERT_EQ(skel->data->xchg32_value, 2, "xchg32_value");
        ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result");
}

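/*
 * Entry point: load the light skeleton, skip if the BPF object was built
 * without atomics support, and run each operation as its own subtest.
 */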
void test_atomics(void)
{
        struct atomics_lskel *skel;

        skel = atomics_lskel__open_and_load();
        if (!ASSERT_OK_PTR(skel, "atomics skeleton load"))
                return;

        if (skel->data->skip_tests) {
                printf("%s:SKIP:no ENABLE_ATOMICS_TESTS (missing Clang BPF atomics support)",
                       __func__);
                test__skip();
                goto cleanup;
        }
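        /*
         * Hand our pid to the BPF side; the programs are expected to bail
         * out early for any other task.
         */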
        skel->bss->pid = getpid();

        if (test__start_subtest("add"))
                test_add(skel);
        if (test__start_subtest("sub"))
                test_sub(skel);
        if (test__start_subtest("and"))
                test_and(skel);
        if (test__start_subtest("or"))
                test_or(skel);
        if (test__start_subtest("xor"))
                test_xor(skel);
        if (test__start_subtest("cmpxchg"))
                test_cmpxchg(skel);
        if (test__start_subtest("xchg"))
                test_xchg(skel);

cleanup:
        atomics_lskel__destroy(skel);
}