#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include "cgroup_helpers.h"
#include "bpf_rlimit.h"

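/* Print a PASS/FAIL line for each step; the expression evaluates to the
 * boolean result of the condition so callers can bail out on failure.
 */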
#define CHECK(condition, tag, format...) ({		\
	int __ret = !!(condition);			\
	if (__ret) {					\
		printf("%s:FAIL:%s ", __func__, tag);	\
		printf(format);				\
	} else {					\
		printf("%s:PASS:%s\n", __func__, tag);	\
	}						\
	__ret;						\
})

static int bpf_find_map(const char *test, struct bpf_object *obj,
			const char *name)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, name);
	if (!map) {
		printf("%s:FAIL:map '%s' not found\n", test, name);
		return -1;
	}
	return bpf_map__fd(map);
}

#define TEST_CGROUP "/test-bpf-get-cgroup-id/"

int main(int argc, char **argv)
{
	const char *probe_name = "syscalls/sys_enter_nanosleep";
	const char *file = "get_cgroup_id_kern.o";
	int err, bytes, efd, prog_fd, pmu_fd;
	int cgroup_fd, cgidmap_fd, pidmap_fd;
	struct perf_event_attr attr = {};
	struct bpf_object *obj;
	__u64 kcgid = 0, ucgid;
	__u32 key = 0, pid;
	int exit_code = 1;
	char buf[256];

	err = setup_cgroup_environment();
	if (CHECK(err, "setup_cgroup_environment", "err %d errno %d\n", err,
		  errno))
		return 1;

	cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
	if (CHECK(cgroup_fd < 0, "create_and_get_cgroup", "err %d errno %d\n",
		  cgroup_fd, errno))
		goto cleanup_cgroup_env;

	err = join_cgroup(TEST_CGROUP);
	if (CHECK(err, "join_cgroup", "err %d errno %d\n", err, errno))
		goto cleanup_cgroup_env;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
		goto cleanup_cgroup_env;

	cgidmap_fd = bpf_find_map(__func__, obj, "cg_ids");
	if (CHECK(cgidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
		  cgidmap_fd, errno))
		goto close_prog;

	pidmap_fd = bpf_find_map(__func__, obj, "pidmap");
	if (CHECK(pidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
		  pidmap_fd, errno))
		goto close_prog;

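	/* Store this test's pid in the pid map so the BPF side can key off
	 * it and only record the cgroup id for events from this process.
	 */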
	pid = getpid();
	bpf_map_update_elem(pidmap_fd, &key, &pid, 0);

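	/* Look up the tracepoint id in tracefs; it becomes the perf event
	 * config below.
	 */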
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/%s/id", probe_name);
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;
	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
		  "bytes %d errno %d\n", bytes, errno))
		goto close_prog;

	buf[bytes] = '\0';
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

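	/* Open the tracepoint event for this pid on any CPU so the BPF
	 * program runs in the context of this process and its cgroup.
	 */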
	pmu_fd = syscall(__NR_perf_event_open, &attr, getpid(), -1, -1, 0);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
		  errno))
		goto close_prog;

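	/* Enable the event, then attach the BPF program to it. */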
	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
		  errno))
		goto close_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
		  errno))
		goto close_pmu;

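	/* sleep() should reach the nanosleep tracepoint and fire the BPF
	 * program at least once.
	 */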
	sleep(1);

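	/* Read the cgroup id recorded by the BPF program. */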
	err = bpf_map_lookup_elem(cgidmap_fd, &key, &kcgid);
	if (CHECK(err, "bpf_map_lookup_elem", "err %d errno %d\n", err, errno))
		goto close_pmu;

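	/* Compare it with the cgroup id obtained from user space for the
	 * same cgroup.
	 */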
	ucgid = get_cgroup_id(TEST_CGROUP);
	if (CHECK(kcgid != ucgid, "compare_cgroup_id",
		  "kern cgid %llx user cgid %llx\n", kcgid, ucgid))
		goto close_pmu;

	exit_code = 0;
	printf("%s:PASS\n", argv[0]);

close_pmu:
	close(pmu_fd);
close_prog:
	bpf_object__close(obj);
cleanup_cgroup_env:
	cleanup_cgroup_environment();
	return exit_code;
}