1
2#ifndef __PERF_EVLIST_H
3#define __PERF_EVLIST_H 1
4
5#include <linux/compiler.h>
6#include <linux/kernel.h>
7#include <linux/refcount.h>
8#include <linux/list.h>
9#include <api/fd/array.h>
10#include <stdio.h>
11#include "../perf.h"
12#include "event.h"
13#include "evsel.h"
14#include "util.h"
15#include "auxtrace.h"
16#include <signal.h>
17#include <unistd.h>
18
19struct pollfd;
20struct thread_map;
21struct cpu_map;
22struct record_opts;
23
/* Bucket count of the sample-id -> evsel hash table (heads[] in struct perf_evlist). */
#define PERF_EVLIST__HLIST_BITS 8
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
26
27
28
29
30
31
/*
 * One mmap'ed perf event ring buffer.
 *
 * The first page of the mapping is the control page (struct
 * perf_event_mmap_page -- see perf_mmap__read_head()/write_tail() below);
 * the following mask + 1 bytes are the data area (see perf_mmap__mmap_len()).
 */
struct perf_mmap {
	void		 *base;		/* start of the mmap'ed region */
	int		 mask;		/* data area size minus one */
	int		 fd;		/* fd this ring buffer was mapped from */
	refcount_t	 refcnt;	/* reference count for this map */
	u64		 prev;		/* NOTE(review): presumably the last consumed head position -- confirm in evlist.c */
	struct auxtrace_mmap auxtrace_mmap;	/* associated AUX area mapping */
	/* scratch buffer -- presumably for events that wrap past the end of the ring; verify in evlist.c */
	char		 event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
};
41
42static inline size_t
43perf_mmap__mmap_len(struct perf_mmap *map)
44{
45 return map->mask + 1 + page_size;
46}
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
/*
 * State of the backward (overwrite) ring buffers (backward_mmap in
 * struct perf_evlist), driven via perf_evlist__toggle_bkw_mmap().
 * NOTE(review): per-state meanings below are inferred from the names --
 * confirm the transitions against evlist.c.
 */
enum bkw_mmap_state {
	BKW_MMAP_NOTREADY,	/* backward mmaps not set up yet */
	BKW_MMAP_RUNNING,	/* kernel may be writing into the buffers */
	BKW_MMAP_DATA_PENDING,	/* writing paused, data awaits consumption */
	BKW_MMAP_EMPTY,		/* pending data has been consumed */
};
75
/*
 * A list of perf_evsel's plus the mmap, poll and workload state needed
 * to open, read and control them as a group.
 */
struct perf_evlist {
	struct list_head entries;	/* perf_evsel's, linked via evsel 'node' (see evlist__for_each_entry) */
	struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; /* sample id -> evsel hash (perf_evlist__id2evsel) */
	int		 nr_entries;	/* number of evsels on 'entries' */
	int		 nr_groups;
	int		 nr_mmaps;
	bool		 overwrite;
	bool		 enabled;
	bool		 has_user_cpus;
	size_t		 mmap_len;	/* per-map length, see perf_evlist__mmap_size() */
	int		 id_pos;	/* sample id offset, set by perf_evlist__set_id_pos() */
	int		 is_pos;
	u64		 combined_sample_type;	/* cache for perf_evlist__combined_sample_type() -- presumably; verify */
	enum bkw_mmap_state bkw_mmap_state;	/* see perf_evlist__toggle_bkw_mmap() */
	struct {
		int	cork_fd;	/* presumably holds the workload stopped until start_workload() -- confirm */
		pid_t	pid;		/* forked workload pid (perf_evlist__prepare_workload) */
	} workload;
	struct fdarray	 pollfd;	/* fds polled by perf_evlist__poll() */
	struct perf_mmap *mmap;		/* forward ring buffers */
	struct perf_mmap *backward_mmap; /* backward (overwrite) ring buffers */
	struct thread_map *threads;
	struct cpu_map	  *cpus;
	struct perf_evsel *selected;	/* set via perf_evlist__set_selected() */
	struct events_stats stats;
	struct perf_env	*env;
};
103
/* Pairs an event name with an opaque handler pointer for bulk registration. */
struct perf_evsel_str_handler {
	const char *name;
	void	   *handler;
};
108
/* Constructors / destructor. */
struct perf_evlist *perf_evlist__new(void);
struct perf_evlist *perf_evlist__new_default(void);
struct perf_evlist *perf_evlist__new_dummy(void);
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads);
void perf_evlist__exit(struct perf_evlist *evlist);
void perf_evlist__delete(struct perf_evlist *evlist);

/* Add/remove a single evsel to/from the 'entries' list. */
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel);

int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise);
121
/* Convenience wrapper: add the default event, asking for precise ip. */
static inline int perf_evlist__add_default(struct perf_evlist *evlist)
{
	const bool precise = true;

	return __perf_evlist__add_default(evlist, precise);
}
126
/* Add nr_attrs events described by the 'attrs' array. */
int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs);

/* 'array' must be a real array -- ARRAY_SIZE() doesn't work on pointers. */
#define perf_evlist__add_default_attrs(evlist, array) \
	__perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))

int perf_evlist__add_dummy(struct perf_evlist *evlist);

/* Add a new tracepoint event "sys:name" with the given handler. */
int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler);

/* Set/clear a PERF_SAMPLE_* bit on every evsel in the list. */
void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
				   enum perf_event_sample_format bit);
void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
				     enum perf_event_sample_format bit);

/* 'bit' is the PERF_SAMPLE_ suffix, e.g. perf_evlist__set_sample_bit(evlist, TIME). */
#define perf_evlist__set_sample_bit(evlist, bit) \
	__perf_evlist__set_sample_bit(evlist, PERF_SAMPLE_##bit)

#define perf_evlist__reset_sample_bit(evlist, bit) \
	__perf_evlist__reset_sample_bit(evlist, PERF_SAMPLE_##bit)

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter);
int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid);
int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids);
152
/* Tracepoint lookup; return NULL when not found -- presumably, verify in evlist.c. */
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name);

/* Register a sample id for (evsel, cpu, thread) in the heads[] hash. */
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id);
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd);

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask);

/* Poll all registered fds; 'timeout' in milliseconds -- presumably, as with poll(2). */
int perf_evlist__poll(struct perf_evlist *evlist, int timeout);

/* Map a sample id back to the evsel that produced it. */
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
						u64 id);

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);

void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state);
179
/* Pull the next event out of a single ring buffer, in ring direction. */
union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup);
union perf_event *perf_mmap__read_backward(struct perf_mmap *map);

void perf_mmap__read_catchup(struct perf_mmap *md);
void perf_mmap__consume(struct perf_mmap *md, bool overwrite);

/* Same operations addressed by mmap index within the evlist. */
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);

union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist,
						 int idx);
union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist,
						  int idx);
void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx);

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);

int perf_evlist__open(struct perf_evlist *evlist);
void perf_evlist__close(struct perf_evlist *evlist);
198
struct callchain_param;

void perf_evlist__set_id_pos(struct perf_evlist *evlist);
/* Capability probes for the running kernel. */
bool perf_can_sample_identifier(void);
bool perf_can_record_switch_events(void);
bool perf_can_record_cpu_wide(void);
/* Apply record options to every evsel in the list. */
void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
			 struct callchain_param *callchain);
int record_opts__config(struct record_opts *opts);

/*
 * Fork the workload (argv) but keep it parked until
 * perf_evlist__start_workload() releases it; exec_error is invoked on
 * exec failure -- presumably via a signal handler, given the siginfo_t
 * signature; confirm in evlist.c.
 */
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info,
						     void *ucontext));
int perf_evlist__start_workload(struct perf_evlist *evlist);

struct option;

/* Parse a --mmap-pages style string into a page count. */
int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str);
int perf_evlist__parse_mmap_pages(const struct option *opt,
				  const char *str,
				  int unset);

unsigned long perf_event_mlock_kb_in_pages(void);

/* mmap the ring buffers; _ex also maps auxtrace_pages for the AUX area. */
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
			 bool overwrite, unsigned int auxtrace_pages,
			 bool auxtrace_overwrite);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite);
void perf_evlist__munmap(struct perf_evlist *evlist);

size_t perf_evlist__mmap_size(unsigned long pages);
233
/* Enable/disable counting on all evsels. */
void perf_evlist__disable(struct perf_evlist *evlist);
void perf_evlist__enable(struct perf_evlist *evlist);
void perf_evlist__toggle_enable(struct perf_evlist *evlist);

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx);

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel);

/* Attach cpu/thread maps to the evlist (and, presumably, its evsels -- verify). */
void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
			   struct thread_map *threads);
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
/* On failure, *err_evsel points at the evsel whose filter failed. */
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel);

void __perf_evlist__set_leader(struct list_head *list);
void perf_evlist__set_leader(struct perf_evlist *evlist);

/* Queries aggregated over all evsels in the list. */
u64 perf_evlist__read_format(struct perf_evlist *evlist);
u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist);
u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist);
u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist);
bool perf_evlist__sample_id_all(struct perf_evlist *evlist);
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample);

/* Sanity checks that all evsels agree on sample layout. */
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist);
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
bool perf_evlist__valid_read_format(struct perf_evlist *evlist);

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list);
268
269static inline bool perf_evlist__empty(struct perf_evlist *evlist)
270{
271 return list_empty(&evlist->entries);
272}
273
274static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
275{
276 return list_entry(evlist->entries.next, struct perf_evsel, node);
277}
278
279static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
280{
281 return list_entry(evlist->entries.prev, struct perf_evsel, node);
282}
283
/* Dump the evlist to fp; returns the number of characters printed. */
size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);

/* Render a human-readable message for 'err' into buf. */
int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);
288
/*
 * Read the kernel's producer position (data_head) from the control page.
 * The rmb() pairs with the kernel's barrier before it publishes data_head,
 * so that subsequent reads of the ring payload see data up to 'head'.
 */
static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->base;
	u64 head = ACCESS_ONCE(pc->data_head);
	rmb();
	return head;
}
296
/*
 * Publish the consumer position (data_tail) back to the kernel,
 * telling it the space up to 'tail' may be reused.
 */
static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
	struct perf_event_mmap_page *pc = md->base;

	/*
	 * ensure all reads are done before we write the tail out.
	 */
	mb();
	pc->data_tail = tail;
}
307
/* Probe whether the event described by 'str' can be opened on this system. */
bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str);
/* Move move_evsel (and, presumably, its group members -- verify) to the list head. */
void perf_evlist__to_front(struct perf_evlist *evlist,
			   struct perf_evsel *move_evsel);
311
312
313
314
315
316
/**
 * __evlist__for_each_entry - iterate thru all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define __evlist__for_each_entry(list, evsel) \
	list_for_each_entry(evsel, list, node)

/**
 * evlist__for_each_entry - iterate thru all the evsels
 * @evlist: perf_evlist instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define evlist__for_each_entry(evlist, evsel) \
	__evlist__for_each_entry(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_continue - continue iteration thru all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct perf_evsel iterator, resumes after its current position
 */
#define __evlist__for_each_entry_continue(list, evsel) \
	list_for_each_entry_continue(evsel, list, node)

/**
 * evlist__for_each_entry_continue - continue iteration thru all the evsels
 * @evlist: perf_evlist instance to iterate
 * @evsel: struct perf_evsel iterator, resumes after its current position
 */
#define evlist__for_each_entry_continue(evlist, evsel) \
	__evlist__for_each_entry_continue(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_reverse - iterate thru all the evsels in reverse order
 * @list: list_head instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define __evlist__for_each_entry_reverse(list, evsel) \
	list_for_each_entry_reverse(evsel, list, node)

/**
 * evlist__for_each_entry_reverse - iterate thru all the evsels in reverse order
 * @evlist: perf_evlist instance to iterate
 * @evsel: struct perf_evsel iterator
 */
#define evlist__for_each_entry_reverse(evlist, evsel) \
	__evlist__for_each_entry_reverse(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_safe - safely iterate thru all the evsels
 * @list: list_head instance to iterate
 * @tmp: struct evsel temp iterator (allows deleting evsel while iterating)
 * @evsel: struct perf_evsel iterator
 */
#define __evlist__for_each_entry_safe(list, tmp, evsel) \
	list_for_each_entry_safe(evsel, tmp, list, node)

/**
 * evlist__for_each_entry_safe - safely iterate thru all the evsels
 * @evlist: perf_evlist instance to iterate
 * @tmp: struct evsel temp iterator (allows deleting evsel while iterating)
 * @evsel: struct perf_evsel iterator
 */
#define evlist__for_each_entry_safe(evlist, tmp, evsel) \
	__evlist__for_each_entry_safe(&(evlist)->entries, tmp, evsel)
377
/* Pick the evsel that carries tracking (mmap/comm/task) records. */
void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
				     struct perf_evsel *tracking_evsel);

void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);

/* Find an evsel by (part of -- presumably, verify) its name string. */
struct perf_evsel *
perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);

/* Map an event record back to the evsel that produced it. */
struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
					    union perf_event *event);
388#endif
389