linux/tools/testing/selftests/bpf/get_cgroup_id_user.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include "cgroup_helpers.h"
#include "bpf_rlimit.h"

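/* CHECK() evaluates the condition, prints a per-step PASS/FAIL line and
 * expands to the boolean result, so callers can branch to cleanup on failure.
 */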
#define CHECK(condition, tag, format...) ({		\
	int __ret = !!(condition);			\
	if (__ret) {					\
		printf("%s:FAIL:%s ", __func__, tag);	\
		printf(format);				\
	} else {					\
		printf("%s:PASS:%s\n", __func__, tag);	\
	}						\
	__ret;						\
})

static int bpf_find_map(const char *test, struct bpf_object *obj,
			const char *name)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, name);
	if (!map)
		return -1;
	return bpf_map__fd(map);
}

#define TEST_CGROUP "/test-bpf-get-cgroup-id/"

int main(int argc, char **argv)
{
	const char *probe_name = "syscalls/sys_enter_nanosleep";
	const char *file = "get_cgroup_id_kern.o";
	int err, bytes, efd, prog_fd, pmu_fd;
	int cgroup_fd, cgidmap_fd, pidmap_fd;
	struct perf_event_attr attr = {};
	struct bpf_object *obj;
	__u64 kcgid = 0, ucgid;
	__u32 key = 0, pid;
	int exit_code = 1;
	char buf[256];
	const struct timespec req = {
		.tv_sec = 1,
		.tv_nsec = 0,
	};

	cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
	if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno))
		return 1;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
		goto cleanup_cgroup_env;

	cgidmap_fd = bpf_find_map(__func__, obj, "cg_ids");
	if (CHECK(cgidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
		  cgidmap_fd, errno))
		goto close_prog;

	pidmap_fd = bpf_find_map(__func__, obj, "pidmap");
	if (CHECK(pidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
		  pidmap_fd, errno))
		goto close_prog;

	pid = getpid();
	bpf_map_update_elem(pidmap_fd, &key, &pid, 0);

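	/* Read the tracepoint's numeric id from tracefs; it becomes
	 * attr.config for the perf event opened below.
	 */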
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/%s/id", probe_name);
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;
	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
		  "bytes %d errno %d\n", bytes, errno))
		goto close_prog;

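	/* Set up a tracepoint perf event for sys_enter_nanosleep. */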
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	/* attach to this pid so that all bpf invocations will be in the
	 * cgroup associated with this pid.
	 */
	pmu_fd = syscall(__NR_perf_event_open, &attr, getpid(), -1, -1, 0);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
		  errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
		  errno))
		goto close_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
		  errno))
		goto close_pmu;

	/* trigger some syscalls */
	syscall(__NR_nanosleep, &req, NULL);

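	/* Compare the cgroup id recorded by the BPF program (cg_ids[0])
	 * with the one obtained from user space via get_cgroup_id().
	 */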
	err = bpf_map_lookup_elem(cgidmap_fd, &key, &kcgid);
	if (CHECK(err, "bpf_map_lookup_elem", "err %d errno %d\n", err, errno))
		goto close_pmu;

	ucgid = get_cgroup_id(TEST_CGROUP);
	if (CHECK(kcgid != ucgid, "compare_cgroup_id",
		  "kern cgid %llx user cgid %llx\n", kcgid, ucgid))
		goto close_pmu;

	exit_code = 0;
	printf("%s:PASS\n", argv[0]);

close_pmu:
	close(pmu_fd);
close_prog:
	bpf_object__close(obj);
cleanup_cgroup_env:
	cleanup_cgroup_environment();
	return exit_code;
}