#include <test_progs.h>

void test_tp_attach_query(void)
{
	const int num_progs = 3;
	int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
	__u32 duration = 0, info_len, saved_prog_ids[num_progs];
	const char *file = "./test_tracepoint.o";
	struct perf_event_query_bpf *query;
	struct perf_event_attr attr = {};
	struct bpf_object *obj[num_progs];
	struct bpf_prog_info prog_info;
	char buf[256];

	for (i = 0; i < num_progs; i++)
		obj[i] = NULL;
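
	/* Read the sched_switch tracepoint id from debugfs; it is used
	 * below as attr.config to select this tracepoint when opening
	 * the perf event.
	 */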
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		return;
	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))
		return;
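
	/* Set up a tracepoint event that fires on every sched_switch
	 * (sample_period = 1) and collects raw and callchain samples.
	 */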
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
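
	/* The query buffer must hold the fixed header plus one __u32
	 * id per program that may be reported.
	 */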
	query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
	for (i = 0; i < num_progs; i++) {
		err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
				    &prog_fd[i]);
		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
			goto cleanup1;
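
		/* Save the kernel-assigned id of the loaded program so
		 * the ids returned by the query can be verified against
		 * it later.
		 */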
		bzero(&prog_info, sizeof(prog_info));
		prog_info.jited_prog_len = 0;
		prog_info.xlated_prog_len = 0;
		prog_info.nr_map_ids = 0;
		info_len = sizeof(prog_info);
		err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
		if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
			  err, errno))
			goto cleanup1;
		saved_prog_ids[i] = prog_info.id;
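
		/* Each iteration opens and enables its own perf event,
		 * all on the same sched_switch tracepoint.
		 */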
		pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
				    0 /* cpu */, -1 /* group_fd */,
				    0 /* flags */);
		if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
			  pmu_fd[i], errno))
			goto cleanup2;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
		if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 0) {
			/* Query before any program is attached: the
			 * kernel must report a prog_cnt of 0.
			 */
			query->ids_len = num_progs;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 0,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}
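
		/* Attach the i-th program.  Programs attach to the
		 * underlying tracepoint, so a query on any of these
		 * events is expected to see every program attached so
		 * far.
		 */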
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
		if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 1) {
			/* An ids_len of 0 asks for the program count
			 * only; two programs are attached at this point.
			 */
			query->ids_len = 0;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;

			/* Negative test: an invalid query pointer must
			 * fail with EFAULT.
			 */
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
				    (struct perf_event_query_bpf *)0x1);
			if (CHECK(!err || errno != EFAULT,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d\n", err, errno))
				goto cleanup3;

			/* Negative test: room for only one id must fail
			 * with ENOSPC while still reporting the full
			 * count of two.
			 */
			query->ids_len = 1;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}
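
		/* A query with enough room must report all i + 1
		 * programs attached so far, with ids matching the
		 * saved ones in attachment order.
		 */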
		query->ids_len = num_progs;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
		if (CHECK(err || query->prog_cnt != (i + 1),
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d query->prog_cnt %u\n",
			  err, errno, query->prog_cnt))
			goto cleanup3;
		for (j = 0; j < i + 1; j++)
			if (CHECK(saved_prog_ids[j] != query->ids[j],
				  "perf_event_ioc_query_bpf",
				  "#%d saved_prog_id %x query prog_id %x\n",
				  j, saved_prog_ids[j], query->ids[j]))
				goto cleanup3;
	}
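
	/* Tear down in reverse order.  The cleanup labels sit inside
	 * the loop so that the error paths above unwind only the
	 * resources that were actually created.
	 */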
	i = num_progs - 1;
	for (; i >= 0; i--) {
 cleanup3:
		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
 cleanup2:
		close(pmu_fd[i]);
 cleanup1:
		bpf_object__close(obj[i]);
	}
	free(query);
}