#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>

#define MAX_CNT_RAWTP 10ull
#define MAX_STACK_RAWTP 100

static int duration = 0; /* referenced by the CHECK() macro */

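/* Event layout written by the BPF program; must match the struct
 * emitted by test_get_stack_rawtp.c (loaded below as
 * test_get_stack_rawtp.o).
 */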
struct get_stack_trace_t {
	int pid;
	int kern_stack_size;
	int user_stack_size;
	int user_stack_buildid_size;
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};

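/* perf buffer sample callback: validate the kernel and user stacks
 * carried by each sample.
 */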
static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)
{
	bool good_kern_stack = false, good_user_stack = false;
	const char *nonjit_func = "___bpf_prog_run";
	struct get_stack_trace_t *e = data;
	int i, num_stack;
	static __u64 cnt;
	struct ksym *ks;

	cnt++;

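	/* Samples come in two shapes: the full struct above, or a
	 * shorter record carrying only raw __u64 stack addresses;
	 * tell them apart by size.
	 */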
	if (size < sizeof(struct get_stack_trace_t)) {
		__u64 *raw_data = data;
		bool found = false;

		num_stack = size / sizeof(__u64);
		/* With JIT enabled there is no interpreter frame such
		 * as ___bpf_prog_run to look for, so the best sanity
		 * check is that the stack is non-empty.
		 */
		if (env.jit_enabled) {
			found = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(raw_data[i]);
				if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
					found = true;
					break;
				}
			}
		}
		if (found) {
			good_kern_stack = true;
			good_user_stack = true;
		}
	} else {
		num_stack = e->kern_stack_size / sizeof(__u64);
		if (env.jit_enabled) {
			good_kern_stack = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(e->kern_stack[i]);
				if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
					good_kern_stack = true;
					break;
				}
			}
		}
		if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
			good_user_stack = true;
	}

	/* Only invoke CHECK() on failure so we do not log a PASS
	 * line for every sample.
	 */
	if (!good_kern_stack)
		CHECK(!good_kern_stack, "kern_stack", "corrupted kernel stack\n");
	if (!good_user_stack)
		CHECK(!good_user_stack, "user_stack", "corrupted user stack\n");
}

void test_get_stack_raw_tp(void)
{
	const char *file = "./test_get_stack_rawtp.o";
	const char *prog_name = "raw_tracepoint/sys_enter";
	int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP;
	struct perf_buffer_opts pb_opts = {};
	struct perf_buffer *pb = NULL;
	struct bpf_link *link = NULL;
	struct timespec tv = {0, 10};
	struct bpf_program *prog;
	struct bpf_object *obj;
	struct bpf_map *map;
	cpu_set_t cpu_set;

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	prog = bpf_object__find_program_by_title(obj, prog_name);
	if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
		goto close_prog;

	map = bpf_object__find_map_by_name(obj, "perfmap");
	if (CHECK(!map, "bpf_find_map", "not found\n"))
		goto close_prog;

	err = load_kallsyms();
	if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
		goto close_prog;

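	/* Pin to CPU 0 so the syscalls triggered below, and the samples
	 * the BPF program emits for them, all stay on one CPU's ring.
	 */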
	CPU_ZERO(&cpu_set);
	CPU_SET(0, &cpu_set);
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
	if (CHECK(err, "set_affinity", "err %d, errno %d\n", err, errno))
		goto close_prog;

	link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
	if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link)))
		goto close_prog;

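	/* Set up a perf buffer (8 pages per CPU) that delivers samples
	 * to get_stack_print_output().
	 */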
	pb_opts.sample_cb = get_stack_print_output;
	pb = perf_buffer__new(bpf_map__fd(map), 8, &pb_opts);
	if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
		goto close_prog;

	/* trigger some syscall action so the raw tracepoint fires */
	for (i = 0; i < MAX_CNT_RAWTP; i++)
		nanosleep(&tv, NULL);

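	/* perf_buffer__poll() returns the number of samples consumed;
	 * keep polling until all MAX_CNT_RAWTP expected samples arrive.
	 */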
	while (exp_cnt > 0) {
		err = perf_buffer__poll(pb, 100);
		if (err < 0 && CHECK(err < 0, "pb__poll", "err %d\n", err))
			goto close_prog;
		exp_cnt -= err;
	}

close_prog:
	if (!IS_ERR_OR_NULL(link))
		bpf_link__destroy(link);
	if (!IS_ERR_OR_NULL(pb))
		perf_buffer__free(pb);
	bpf_object__close(obj);
}