// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <linux/list.h>
#include <internal/evsel.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <internal/xyarray.h>
#include <internal/cpumap.h>
#include <internal/mmap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <asm/bug.h>
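/*
 * Typical counting-mode use of the evsel API in this file — a minimal
 * sketch with all error handling omitted; the software clock attr is
 * just one valid choice of event:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.config	= PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *	struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *	struct perf_counts_values counts;
 *
 *	perf_thread_map__set_pid(threads, 0, 0);   // measure this process
 *	perf_evsel__open(evsel, NULL, threads);    // NULL cpus => dummy map
 *	perf_evsel__enable(evsel);
 *	// ... workload to be measured ...
 *	perf_evsel__disable(evsel);
 *	perf_evsel__read(evsel, 0, 0, &counts);
 *	perf_evsel__close(evsel);
 *	perf_evsel__delete(evsel);
 *	perf_thread_map__put(threads);
 */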
void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
		      int idx)
{
	INIT_LIST_HEAD(&evsel->node);
	evsel->attr = *attr;
	evsel->idx = idx;
	evsel->leader = evsel;
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, 0);

	return evsel;
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	free(evsel);
}
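/*
 * fd and mmap are xyarrays indexed by (cpu map index, thread map index).
 * FD() and MMAP() return a pointer into the respective array, or NULL
 * when the array (or the requested slot) does not exist.
 */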
#define FD(_evsel, _cpu_map_idx, _thread)				\
	((int *)xyarray__entry(_evsel->fd, _cpu_map_idx, _thread))
#define MMAP(_evsel, _cpu_map_idx, _thread)				\
	(_evsel->mmap ? ((struct perf_mmap *) xyarray__entry(_evsel->mmap, _cpu_map_idx, _thread)) \
		      : NULL)
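/*
 * Allocate the ncpus x nthreads fd table and mark every slot as unused
 * by initializing it to -1.
 */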
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		int idx, thread;

		for (idx = 0; idx < ncpus; idx++) {
			for (thread = 0; thread < nthreads; thread++) {
				int *fd = FD(evsel, idx, thread);

				if (fd)
					*fd = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap));

	return evsel->mmap != NULL ? 0 : -ENOMEM;
}

static int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, struct perf_cpu cpu, int group_fd,
		    unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu.cpu, group_fd, flags);
}
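/*
 * Resolve the group fd to pass to perf_event_open(): -1 for a group
 * leader, otherwise the leader's already-open fd for the same
 * (cpu, thread) slot.
 */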
static int get_group_fd(struct perf_evsel *evsel, int cpu_map_idx, int thread, int *group_fd)
{
	struct perf_evsel *leader = evsel->leader;
	int *fd;

	if (evsel == leader) {
		*group_fd = -1;
		return 0;
	}

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	if (!leader->fd)
		return -ENOTCONN;

	fd = FD(leader, cpu_map_idx, thread);
	if (fd == NULL || *fd == -1)
		return -EBADF;

	*group_fd = *fd;

	return 0;
}
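/*
 * Open one perf event per (cpu, thread) pair. A NULL cpu or thread map
 * is replaced by a shared static dummy map with a single -1 entry, the
 * perf_event_open() wildcard value.
 */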
int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
		     struct perf_thread_map *threads)
{
	struct perf_cpu cpu;
	int idx, thread, err = 0;

	if (cpus == NULL) {
		static struct perf_cpu_map *empty_cpu_map;

		if (empty_cpu_map == NULL) {
			empty_cpu_map = perf_cpu_map__dummy_new();
			if (empty_cpu_map == NULL)
				return -ENOMEM;
		}

		cpus = empty_cpu_map;
	}

	if (threads == NULL) {
		static struct perf_thread_map *empty_thread_map;

		if (empty_thread_map == NULL) {
			empty_thread_map = perf_thread_map__new_dummy();
			if (empty_thread_map == NULL)
				return -ENOMEM;
		}

		threads = empty_thread_map;
	}

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
		return -ENOMEM;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
		for (thread = 0; thread < threads->nr; thread++) {
			int fd, group_fd, *evsel_fd;

			evsel_fd = FD(evsel, idx, thread);
			if (evsel_fd == NULL)
				return -EINVAL;

			err = get_group_fd(evsel, idx, thread, &group_fd);
			if (err < 0)
				return err;

			fd = sys_perf_event_open(&evsel->attr,
						 threads->map[thread].pid,
						 cpu, group_fd, 0);

			if (fd < 0)
				return -errno;

			*evsel_fd = fd;
		}
	}

	return err;
}
static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
	int thread;

	for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
		int *fd = FD(evsel, cpu_map_idx, thread);

		if (fd && *fd >= 0) {
			close(*fd);
			*fd = -1;
		}
	}
}

void perf_evsel__close_fd(struct perf_evsel *evsel)
{
	for (int idx = 0; idx < xyarray__max_x(evsel->fd); idx++)
		perf_evsel__close_fd_cpu(evsel, idx);
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__close(struct perf_evsel *evsel)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel);
	perf_evsel__free_fd(evsel);
}

void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd_cpu(evsel, cpu_map_idx);
}
void perf_evsel__munmap(struct perf_evsel *evsel)
{
	int idx, thread;

	if (evsel->fd == NULL || evsel->mmap == NULL)
		return;

	for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int *fd = FD(evsel, idx, thread);

			if (fd == NULL || *fd < 0)
				continue;

			perf_mmap__munmap(MMAP(evsel, idx, thread));
		}
	}

	xyarray__delete(evsel->mmap);
	evsel->mmap = NULL;
}
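/*
 * Mmap a ring buffer for every open fd. @pages is the number of data
 * pages; the kernel requires a power-of-two count, which is what makes
 * (pages * page_size) - 1 usable as the ring buffer size mask below.
 */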
int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
{
	int ret, idx, thread;
	struct perf_mmap_param mp = {
		.prot = PROT_READ | PROT_WRITE,
		.mask = (pages * page_size) - 1,
	};

	if (evsel->fd == NULL || evsel->mmap)
		return -EINVAL;

	if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0)
		return -ENOMEM;

	for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int *fd = FD(evsel, idx, thread);
			struct perf_mmap *map;
			struct perf_cpu cpu = perf_cpu_map__cpu(evsel->cpus, idx);

			if (fd == NULL || *fd < 0)
				continue;

			map = MMAP(evsel, idx, thread);
			perf_mmap__init(map, NULL, false, NULL);

			ret = perf_mmap__mmap(map, &mp, *fd, cpu);
			if (ret) {
				perf_evsel__munmap(evsel);
				return ret;
			}
		}
	}

	return 0;
}

void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread)
{
	int *fd = FD(evsel, cpu_map_idx, thread);

	if (fd == NULL || *fd < 0 || MMAP(evsel, cpu_map_idx, thread) == NULL)
		return NULL;

	return MMAP(evsel, cpu_map_idx, thread)->base;
}
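/*
 * Number of bytes a read(2) from the event fd returns, derived from
 * attr.read_format as laid out in perf_event_open(2): optional
 * TOTAL_TIME_ENABLED/TOTAL_TIME_RUNNING fields, then one value (plus
 * optional id) per counter, with a leading "nr" field for groups.
 */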
int perf_evsel__read_size(struct perf_evsel *evsel)
{
	u64 read_format = evsel->attr.read_format;
	int entry = sizeof(u64);
	int size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (read_format & PERF_FORMAT_GROUP) {
		nr = evsel->nr_members;
		size += sizeof(u64);
	}

	size += entry * nr;
	return size;
}
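/*
 * Read the counter, preferring the self-monitoring fast path through
 * the mmap'ed user page (perf_mmap__read_self()) and falling back to
 * read(2) on the event fd.
 */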
int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
		     struct perf_counts_values *count)
{
	size_t size = perf_evsel__read_size(evsel);
	int *fd = FD(evsel, cpu_map_idx, thread);

	memset(count, 0, sizeof(*count));

	if (fd == NULL || *fd < 0)
		return -EINVAL;

	if (MMAP(evsel, cpu_map_idx, thread) &&
	    !perf_mmap__read_self(MMAP(evsel, cpu_map_idx, thread), count))
		return 0;

	if (readn(*fd, count->values, size) <= 0)
		return -errno;

	return 0;
}
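/*
 * Apply an ioctl to every open fd of one cpu map index, stopping at the
 * first missing fd or ioctl failure.
 */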
static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
				 int ioc, void *arg,
				 int cpu_map_idx)
{
	int thread;

	for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
		int err;
		int *fd = FD(evsel, cpu_map_idx, thread);

		if (fd == NULL || *fd < 0)
			return -1;

		err = ioctl(*fd, ioc, arg);

		if (err)
			return err;
	}

	return 0;
}
int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu_map_idx);
}

int perf_evsel__enable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i);
	return err;
}

int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu_map_idx);
}

int perf_evsel__disable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i);
	return err;
}

int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
	int err = 0, i;

	for (i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++)
		err = perf_evsel__run_ioctl(evsel,
					    PERF_EVENT_IOC_SET_FILTER,
					    (void *)filter, i);
	return err;
}

struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return evsel->cpus;
}

struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel)
{
	return evsel->threads;
}

struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel)
{
	return &evsel->attr;
}
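/*
 * Allocate the sample_id xyarray and the flat id array used to map
 * kernel event IDs back to this evsel. A system-wide evsel gets one
 * entry per cpu, hence nthreads is forced to 1.
 */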
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
	evsel->ids = 0;
}
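/*
 * Compensate for multiplexing by extrapolating the raw value with
 * enabled/running time. *pscaled is set to -1 if the counter never ran,
 * 1 if the value was scaled, 0 if it is exact.
 */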
void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, __s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double)count->val * count->ena / count->run);
		}
	}

	if (pscaled)
		*pscaled = scaled;
}