1
2#include <errno.h>
3#include <unistd.h>
4#include <sys/syscall.h>
5#include <perf/evsel.h>
6#include <perf/cpumap.h>
7#include <perf/threadmap.h>
8#include <linux/list.h>
9#include <internal/evsel.h>
10#include <linux/zalloc.h>
11#include <stdlib.h>
12#include <internal/xyarray.h>
13#include <internal/cpumap.h>
14#include <internal/threadmap.h>
15#include <internal/lib.h>
16#include <linux/string.h>
17#include <sys/ioctl.h>
18
19void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr)
20{
21 INIT_LIST_HEAD(&evsel->node);
22 evsel->attr = *attr;
23}
24
25struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
26{
27 struct perf_evsel *evsel = zalloc(sizeof(*evsel));
28
29 if (evsel != NULL)
30 perf_evsel__init(evsel, attr);
31
32 return evsel;
33}
34
/* Free an evsel allocated with perf_evsel__new(); free(NULL) is a no-op. */
void perf_evsel__delete(struct perf_evsel *evsel)
{
	free(evsel);
}
39
/* lvalue access to the per-(cpu, thread) event fd stored in e->fd. */
#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
41
42int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
43{
44 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
45
46 if (evsel->fd) {
47 int cpu, thread;
48 for (cpu = 0; cpu < ncpus; cpu++) {
49 for (thread = 0; thread < nthreads; thread++) {
50 FD(evsel, cpu, thread) = -1;
51 }
52 }
53 }
54
55 return evsel->fd != NULL ? 0 : -ENOMEM;
56}
57
/*
 * Thin wrapper around the perf_event_open(2) syscall, for which glibc
 * provides no prototype. Returns the new event fd, or -1 with errno set.
 */
static int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}
65
66int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
67 struct perf_thread_map *threads)
68{
69 int cpu, thread, err = 0;
70
71 if (cpus == NULL) {
72 static struct perf_cpu_map *empty_cpu_map;
73
74 if (empty_cpu_map == NULL) {
75 empty_cpu_map = perf_cpu_map__dummy_new();
76 if (empty_cpu_map == NULL)
77 return -ENOMEM;
78 }
79
80 cpus = empty_cpu_map;
81 }
82
83 if (threads == NULL) {
84 static struct perf_thread_map *empty_thread_map;
85
86 if (empty_thread_map == NULL) {
87 empty_thread_map = perf_thread_map__new_dummy();
88 if (empty_thread_map == NULL)
89 return -ENOMEM;
90 }
91
92 threads = empty_thread_map;
93 }
94
95 if (evsel->fd == NULL &&
96 perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
97 return -ENOMEM;
98
99 for (cpu = 0; cpu < cpus->nr; cpu++) {
100 for (thread = 0; thread < threads->nr; thread++) {
101 int fd;
102
103 fd = sys_perf_event_open(&evsel->attr,
104 threads->map[thread].pid,
105 cpus->map[cpu], -1, 0);
106
107 if (fd < 0)
108 return -errno;
109
110 FD(evsel, cpu, thread) = fd;
111 }
112 }
113
114 return err;
115}
116
117void perf_evsel__close_fd(struct perf_evsel *evsel)
118{
119 int cpu, thread;
120
121 for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
122 for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
123 close(FD(evsel, cpu, thread));
124 FD(evsel, cpu, thread) = -1;
125 }
126}
127
/* Release the fd matrix; fds must already be closed (perf_evsel__close_fd). */
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}
133
134void perf_evsel__close(struct perf_evsel *evsel)
135{
136 if (evsel->fd == NULL)
137 return;
138
139 perf_evsel__close_fd(evsel);
140 perf_evsel__free_fd(evsel);
141}
142
143int perf_evsel__read_size(struct perf_evsel *evsel)
144{
145 u64 read_format = evsel->attr.read_format;
146 int entry = sizeof(u64);
147 int size = 0;
148 int nr = 1;
149
150 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
151 size += sizeof(u64);
152
153 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
154 size += sizeof(u64);
155
156 if (read_format & PERF_FORMAT_ID)
157 entry += sizeof(u64);
158
159 if (read_format & PERF_FORMAT_GROUP) {
160 nr = evsel->nr_members;
161 size += sizeof(u64);
162 }
163
164 size += entry * nr;
165 return size;
166}
167
168int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
169 struct perf_counts_values *count)
170{
171 size_t size = perf_evsel__read_size(evsel);
172
173 memset(count, 0, sizeof(*count));
174
175 if (FD(evsel, cpu, thread) < 0)
176 return -EINVAL;
177
178 if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
179 return -errno;
180
181 return 0;
182}
183
184static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
185 int ioc, void *arg)
186{
187 int cpu, thread;
188
189 for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
190 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
191 int fd = FD(evsel, cpu, thread),
192 err = ioctl(fd, ioc, arg);
193
194 if (err)
195 return err;
196 }
197 }
198
199 return 0;
200}
201
/* Start counting on all of this evsel's event fds. */
int perf_evsel__enable(struct perf_evsel *evsel)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, 0);
}
206
/* Stop counting on all of this evsel's event fds. */
int perf_evsel__disable(struct perf_evsel *evsel)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, 0);
}
211
/* Attach a tracepoint filter string to all of this evsel's event fds. */
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
	return perf_evsel__run_ioctl(evsel,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
}
218
/* Accessor: the cpu map this evsel was configured with (may be NULL). */
struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return evsel->cpus;
}
223
/* Accessor: the thread map this evsel was configured with (may be NULL). */
struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel)
{
	return evsel->threads;
}
228
/* Accessor: pointer to the evsel's embedded perf_event_attr. */
struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel)
{
	return &evsel->attr;
}
233
234int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
235{
236 if (ncpus == 0 || nthreads == 0)
237 return 0;
238
239 if (evsel->system_wide)
240 nthreads = 1;
241
242 evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
243 if (evsel->sample_id == NULL)
244 return -ENOMEM;
245
246 evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
247 if (evsel->id == NULL) {
248 xyarray__delete(evsel->sample_id);
249 evsel->sample_id = NULL;
250 return -ENOMEM;
251 }
252
253 return 0;
254}
255
/* Release the id bookkeeping set up by perf_evsel__alloc_id(). */
void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
	evsel->ids = 0;
}
263