1
2#include <errno.h>
3#include <unistd.h>
4#include <sys/syscall.h>
5#include <perf/evsel.h>
6#include <perf/cpumap.h>
7#include <perf/threadmap.h>
8#include <linux/list.h>
9#include <internal/evsel.h>
10#include <linux/zalloc.h>
11#include <stdlib.h>
12#include <internal/xyarray.h>
13#include <internal/cpumap.h>
14#include <internal/mmap.h>
15#include <internal/threadmap.h>
16#include <internal/lib.h>
17#include <linux/string.h>
18#include <sys/ioctl.h>
19#include <sys/mman.h>
20#include <asm/bug.h>
21
22void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
23 int idx)
24{
25 INIT_LIST_HEAD(&evsel->node);
26 evsel->attr = *attr;
27 evsel->idx = idx;
28 evsel->leader = evsel;
29}
30
31struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
32{
33 struct perf_evsel *evsel = zalloc(sizeof(*evsel));
34
35 if (evsel != NULL)
36 perf_evsel__init(evsel, attr, 0);
37
38 return evsel;
39}
40
/*
 * Free an evsel allocated with perf_evsel__new(). Note this only
 * releases the object itself; any open fds or mmaps must be torn down
 * first (perf_evsel__close()/perf_evsel__munmap()) — nothing here does
 * that for the caller.
 */
void perf_evsel__delete(struct perf_evsel *evsel)
{
	free(evsel);
}
45
/* Per-(cpu index, thread index) event fd slot; holds -1 when not opened. */
#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
/* Per-(cpu, thread) mmap slot, or NULL when this evsel has no mmap array. */
#define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)
48
49int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
50{
51 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
52
53 if (evsel->fd) {
54 int cpu, thread;
55 for (cpu = 0; cpu < ncpus; cpu++) {
56 for (thread = 0; thread < nthreads; thread++) {
57 FD(evsel, cpu, thread) = -1;
58 }
59 }
60 }
61
62 return evsel->fd != NULL ? 0 : -ENOMEM;
63}
64
65static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads)
66{
67 evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap));
68
69 return evsel->mmap != NULL ? 0 : -ENOMEM;
70}
71
/*
 * Thin wrapper around the raw syscall: libc provides no
 * perf_event_open() symbol, so go through syscall(2) directly.
 * Returns the new event fd, or -1 with errno set on failure.
 */
static int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}
79
/*
 * Resolve the group fd to pass to perf_event_open() for this
 * (cpu, thread) slot: -1 for a group leader, otherwise the leader's
 * already-open fd. Returns 0 on success or a negative errno.
 */
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (evsel == leader) {
		*group_fd = -1;
		return 0;
	}

	/*
	 * Leader must be already processed/open,
	 * if not it's a fatal error.
	 */
	if (!leader->fd)
		return -ENOTCONN;

	fd = FD(leader, cpu, thread);
	if (fd == -1)
		return -EBADF;

	*group_fd = fd;

	return 0;
}
105
106int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
107 struct perf_thread_map *threads)
108{
109 int cpu, thread, err = 0;
110
111 if (cpus == NULL) {
112 static struct perf_cpu_map *empty_cpu_map;
113
114 if (empty_cpu_map == NULL) {
115 empty_cpu_map = perf_cpu_map__dummy_new();
116 if (empty_cpu_map == NULL)
117 return -ENOMEM;
118 }
119
120 cpus = empty_cpu_map;
121 }
122
123 if (threads == NULL) {
124 static struct perf_thread_map *empty_thread_map;
125
126 if (empty_thread_map == NULL) {
127 empty_thread_map = perf_thread_map__new_dummy();
128 if (empty_thread_map == NULL)
129 return -ENOMEM;
130 }
131
132 threads = empty_thread_map;
133 }
134
135 if (evsel->fd == NULL &&
136 perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
137 return -ENOMEM;
138
139 for (cpu = 0; cpu < cpus->nr; cpu++) {
140 for (thread = 0; thread < threads->nr; thread++) {
141 int fd, group_fd;
142
143 err = get_group_fd(evsel, cpu, thread, &group_fd);
144 if (err < 0)
145 return err;
146
147 fd = sys_perf_event_open(&evsel->attr,
148 threads->map[thread].pid,
149 cpus->map[cpu], group_fd, 0);
150
151 if (fd < 0)
152 return -errno;
153
154 FD(evsel, cpu, thread) = fd;
155 }
156 }
157
158 return err;
159}
160
161static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
162{
163 int thread;
164
165 for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
166 if (FD(evsel, cpu, thread) >= 0)
167 close(FD(evsel, cpu, thread));
168 FD(evsel, cpu, thread) = -1;
169 }
170}
171
172void perf_evsel__close_fd(struct perf_evsel *evsel)
173{
174 int cpu;
175
176 for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
177 perf_evsel__close_fd_cpu(evsel, cpu);
178}
179
/*
 * Release the fd table itself. Fds stored in it are NOT closed here;
 * callers are expected to run perf_evsel__close_fd() first.
 */
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}
185
186void perf_evsel__close(struct perf_evsel *evsel)
187{
188 if (evsel->fd == NULL)
189 return;
190
191 perf_evsel__close_fd(evsel);
192 perf_evsel__free_fd(evsel);
193}
194
195void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu)
196{
197 if (evsel->fd == NULL)
198 return;
199
200 perf_evsel__close_fd_cpu(evsel, cpu);
201}
202
203void perf_evsel__munmap(struct perf_evsel *evsel)
204{
205 int cpu, thread;
206
207 if (evsel->fd == NULL || evsel->mmap == NULL)
208 return;
209
210 for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
211 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
212 int fd = FD(evsel, cpu, thread);
213 struct perf_mmap *map = MMAP(evsel, cpu, thread);
214
215 if (fd < 0)
216 continue;
217
218 perf_mmap__munmap(map);
219 }
220 }
221
222 xyarray__delete(evsel->mmap);
223 evsel->mmap = NULL;
224}
225
226int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
227{
228 int ret, cpu, thread;
229 struct perf_mmap_param mp = {
230 .prot = PROT_READ | PROT_WRITE,
231 .mask = (pages * page_size) - 1,
232 };
233
234 if (evsel->fd == NULL || evsel->mmap)
235 return -EINVAL;
236
237 if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0)
238 return -ENOMEM;
239
240 for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
241 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
242 int fd = FD(evsel, cpu, thread);
243 struct perf_mmap *map = MMAP(evsel, cpu, thread);
244
245 if (fd < 0)
246 continue;
247
248 perf_mmap__init(map, NULL, false, NULL);
249
250 ret = perf_mmap__mmap(map, &mp, fd, cpu);
251 if (ret) {
252 perf_evsel__munmap(evsel);
253 return ret;
254 }
255 }
256 }
257
258 return 0;
259}
260
261void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
262{
263 if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL)
264 return NULL;
265
266 return MMAP(evsel, cpu, thread)->base;
267}
268
269int perf_evsel__read_size(struct perf_evsel *evsel)
270{
271 u64 read_format = evsel->attr.read_format;
272 int entry = sizeof(u64);
273 int size = 0;
274 int nr = 1;
275
276 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
277 size += sizeof(u64);
278
279 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
280 size += sizeof(u64);
281
282 if (read_format & PERF_FORMAT_ID)
283 entry += sizeof(u64);
284
285 if (read_format & PERF_FORMAT_GROUP) {
286 nr = evsel->nr_members;
287 size += sizeof(u64);
288 }
289
290 size += entry * nr;
291 return size;
292}
293
294int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
295 struct perf_counts_values *count)
296{
297 size_t size = perf_evsel__read_size(evsel);
298
299 memset(count, 0, sizeof(*count));
300
301 if (FD(evsel, cpu, thread) < 0)
302 return -EINVAL;
303
304 if (MMAP(evsel, cpu, thread) &&
305 !perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
306 return 0;
307
308 if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
309 return -errno;
310
311 return 0;
312}
313
314static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
315 int ioc, void *arg,
316 int cpu)
317{
318 int thread;
319
320 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
321 int fd = FD(evsel, cpu, thread),
322 err = ioctl(fd, ioc, arg);
323
324 if (err)
325 return err;
326 }
327
328 return 0;
329}
330
/* Enable the event on every thread of one cpu row; 0 or ioctl error. */
int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu);
}
335
336int perf_evsel__enable(struct perf_evsel *evsel)
337{
338 int i;
339 int err = 0;
340
341 for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
342 err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i);
343 return err;
344}
345
/* Disable the event on every thread of one cpu row; 0 or ioctl error. */
int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu);
}
350
351int perf_evsel__disable(struct perf_evsel *evsel)
352{
353 int i;
354 int err = 0;
355
356 for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
357 err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i);
358 return err;
359}
360
361int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
362{
363 int err = 0, i;
364
365 for (i = 0; i < evsel->cpus->nr && !err; i++)
366 err = perf_evsel__run_ioctl(evsel,
367 PERF_EVENT_IOC_SET_FILTER,
368 (void *)filter, i);
369 return err;
370}
371
/* Accessor: the evsel's cpu map (may be NULL if never assigned). */
struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return evsel->cpus;
}
376
/* Accessor: the evsel's thread map (may be NULL if never assigned). */
struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel)
{
	return evsel->threads;
}
381
/* Accessor: pointer to the evsel's embedded perf_event_attr. */
struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel)
{
	return &evsel->attr;
}
386
387int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
388{
389 if (ncpus == 0 || nthreads == 0)
390 return 0;
391
392 if (evsel->system_wide)
393 nthreads = 1;
394
395 evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
396 if (evsel->sample_id == NULL)
397 return -ENOMEM;
398
399 evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
400 if (evsel->id == NULL) {
401 xyarray__delete(evsel->sample_id);
402 evsel->sample_id = NULL;
403 return -ENOMEM;
404 }
405
406 return 0;
407}
408
/* Free the sample_id table and id array and reset the id count. */
void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
	evsel->ids = 0;
}
416