#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <linux/list.h>
#include <internal/evsel.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <internal/xyarray.h>
#include <internal/cpumap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/string.h>
#include <sys/ioctl.h>

void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr)
{
	INIT_LIST_HEAD(&evsel->node);
	evsel->attr = *attr;
}

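/*
 * Allocate a zeroed evsel and initialize it from @attr; the matching
 * perf_evsel__delete() below frees it.
 */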
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr);

	return evsel;
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	free(evsel);
}

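/*
 * FD(evsel, cpu, thread) names the int slot in the evsel->fd xyarray that
 * holds the perf_event_open() file descriptor for that (cpu, thread) pair;
 * -1 marks a slot that is not (or no longer) open.
 */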
#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))

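/* Allocate the ncpus x nthreads fd matrix with every slot marked unused. */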
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		int cpu, thread;

		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

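/*
 * Thin wrapper around the raw perf_event_open(2) syscall, for which the C
 * library provides no wrapper.
 */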
static int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

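/*
 * Open one counter per (cpu, thread) pair described by the maps.  A NULL
 * cpu or thread map is replaced by a single-entry dummy map, and the fd
 * matrix is allocated on first use.
 *
 * An illustrative usage sketch (the attr values are only an assumed
 * example and error handling is omitted):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.config	= PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *	struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *	struct perf_counts_values counts;
 *
 *	perf_thread_map__set_pid(threads, 0, 0);	pid 0: this process
 *	perf_evsel__open(evsel, NULL, threads);
 *	perf_evsel__enable(evsel);
 *	... run the workload ...
 *	perf_evsel__disable(evsel);
 *	perf_evsel__read(evsel, 0, 0, &counts);
 *	perf_evsel__close(evsel);
 *	perf_evsel__delete(evsel);
 *	perf_thread_map__put(threads);
 */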
int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
		     struct perf_thread_map *threads)
{
	int cpu, thread, err = 0;

	if (cpus == NULL) {
		static struct perf_cpu_map *empty_cpu_map;

		if (empty_cpu_map == NULL) {
			empty_cpu_map = perf_cpu_map__dummy_new();
			if (empty_cpu_map == NULL)
				return -ENOMEM;
		}

		cpus = empty_cpu_map;
	}

	if (threads == NULL) {
		static struct perf_thread_map *empty_thread_map;

		if (empty_thread_map == NULL) {
			empty_thread_map = perf_thread_map__new_dummy();
			if (empty_thread_map == NULL)
				return -ENOMEM;
		}

		threads = empty_thread_map;
	}

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		for (thread = 0; thread < threads->nr; thread++) {
			int fd;

			fd = sys_perf_event_open(&evsel->attr,
						 threads->map[thread].pid,
						 cpus->map[cpu], -1, 0);

			if (fd < 0)
				return -errno;

			FD(evsel, cpu, thread) = fd;
		}
	}

	return err;
}

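/* Close every open fd for @cpu and mark its slots unused again. */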
static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
{
	int thread;

	for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
		if (FD(evsel, cpu, thread) >= 0)
			close(FD(evsel, cpu, thread));
		FD(evsel, cpu, thread) = -1;
	}
}

void perf_evsel__close_fd(struct perf_evsel *evsel)
{
	int cpu;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
		perf_evsel__close_fd_cpu(evsel, cpu);
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__close(struct perf_evsel *evsel)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel);
	perf_evsel__free_fd(evsel);
}

void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd_cpu(evsel, cpu);
}

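/*
 * Number of bytes a read() on one of the counter fds will return, derived
 * from attr.read_format as described in perf_event_open(2): one u64 per
 * value (plus one per value when PERF_FORMAT_ID is set), one u64 each for
 * the enabled/running times if requested, and one u64 for the member count
 * when PERF_FORMAT_GROUP is set.  For example, a single counter with
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING yields
 * 8 + 8 + 8 = 24 bytes.
 */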
int perf_evsel__read_size(struct perf_evsel *evsel)
{
	u64 read_format = evsel->attr.read_format;
	int entry = sizeof(u64);
	int size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (read_format & PERF_FORMAT_GROUP) {
		nr = evsel->nr_members;
		size += sizeof(u64);
	}

	size += entry * nr;
	return size;
}

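/*
 * Read the current count (and the enabled/running times, if requested via
 * read_format) for one (cpu, thread) instance into @count.  The data lands
 * in count->values, so read_format must not request more than fits in the
 * three u64s of struct perf_counts_values.
 */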
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
		     struct perf_counts_values *count)
{
	size_t size = perf_evsel__read_size(evsel);

	memset(count, 0, sizeof(*count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
		return -errno;

	return 0;
}

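/*
 * Issue @ioc (with @arg) on the fd of every thread opened for @cpu,
 * stopping at the first ioctl() failure.
 */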
static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
				 int ioc, void *arg,
				 int cpu)
{
	int thread;

	for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
		int fd = FD(evsel, cpu, thread),
		    err = ioctl(fd, ioc, arg);

		if (err)
			return err;
	}

	return 0;
}

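/*
 * Start counting via PERF_EVENT_IOC_ENABLE, either on a single CPU or on
 * every opened fd.  perf_evsel__disable_cpu()/perf_evsel__disable() below
 * are the symmetric PERF_EVENT_IOC_DISABLE counterparts.
 */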
int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu);
}

int perf_evsel__enable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i);
	return err;
}

int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu);
}

int perf_evsel__disable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i);
	return err;
}

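/*
 * Attach a tracepoint filter string to every opened fd via
 * PERF_EVENT_IOC_SET_FILTER.
 */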
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
	int err = 0, i;

	for (i = 0; i < evsel->cpus->nr && !err; i++)
		err = perf_evsel__run_ioctl(evsel,
					    PERF_EVENT_IOC_SET_FILTER,
					    (void *)filter, i);
	return err;
}

struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return evsel->cpus;
}

struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel)
{
	return evsel->threads;
}

struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel)
{
	return &evsel->attr;
}

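/*
 * Allocate the per-(cpu, thread) perf_sample_id slots and the flat id[]
 * array used to map sample identifiers back to this evsel; system-wide
 * events keep only one thread slot per CPU.
 */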
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
	evsel->ids = 0;
}