1#ifndef __PERF_EVSEL_H
2#define __PERF_EVSEL_H 1
3
4#include <linux/list.h>
5#include <stdbool.h>
6#include <stddef.h>
7#include <linux/perf_event.h>
8#include <linux/types.h>
9#include "xyarray.h"
10#include "symbol.h"
11#include "cpumap.h"
12#include "counts.h"
13
14struct perf_evsel;
15
16
17
18
19
/*
 * Maps one kernel-assigned sample id back to the evsel that produced
 * it; @node links it into a hash table (see perf_evsel__alloc_id()).
 */
struct perf_sample_id {
	struct hlist_node	node;	/* hash-chain linkage */
	u64			id;	/* kernel-assigned event id */
	struct perf_evsel	*evsel;	/* evsel this id belongs to */
	int			idx;	/* NOTE(review): evsel/map index — confirm in evsel.c */
	int			cpu;	/* cpu the id was allocated for — confirm */
	pid_t			tid;	/* thread the id was allocated for — confirm */

	/* NOTE(review): presumably accumulates the period for
	 * PERF_SAMPLE_READ handling — confirm against evsel.c. */
	u64			period;
};
31
32struct cgroup_sel;
33
34
35
36
37
38
39
/*
 * Discriminants for per-event configuration terms; each value names
 * the member of struct perf_evsel_config_term's @val union that holds
 * the term's payload.
 */
enum {
	PERF_EVSEL__CONFIG_TERM_PERIOD,
	PERF_EVSEL__CONFIG_TERM_FREQ,
	PERF_EVSEL__CONFIG_TERM_TIME,
	PERF_EVSEL__CONFIG_TERM_CALLGRAPH,
	PERF_EVSEL__CONFIG_TERM_STACK_USER,
	PERF_EVSEL__CONFIG_TERM_INHERIT,
	PERF_EVSEL__CONFIG_TERM_MAX_STACK,
	PERF_EVSEL__CONFIG_TERM_OVERWRITE,
	PERF_EVSEL__CONFIG_TERM_DRV_CFG,
	PERF_EVSEL__CONFIG_TERM_MAX,	/* sentinel: number of term types */
};
52
/*
 * One per-event configuration term, kept on evsel->config_terms.
 * @type (a PERF_EVSEL__CONFIG_TERM_* value) selects which member of
 * @val is valid.
 */
struct perf_evsel_config_term {
	struct list_head	list;		/* linkage into evsel->config_terms */
	int	type;				/* PERF_EVSEL__CONFIG_TERM_* discriminant */
	union {
		u64	period;			/* ..._PERIOD */
		u64	freq;			/* ..._FREQ */
		bool	time;			/* ..._TIME */
		char	*callgraph;		/* ..._CALLGRAPH */
		char	*drv_cfg;		/* ..._DRV_CFG */
		u64	stack_user;		/* ..._STACK_USER */
		int	max_stack;		/* ..._MAX_STACK */
		bool	inherit;		/* ..._INHERIT */
		bool	overwrite;		/* ..._OVERWRITE */
	} val;
};
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
/*
 * struct perf_evsel - event selector: one perf event configuration
 * (@attr) plus the state needed to open, read, print and identify it
 * on the cpus/threads it counts on.
 *
 * Field comments are derived from this header alone; items marked
 * "confirm" should be checked against evsel.c before being relied on.
 */
struct perf_evsel {
	struct list_head	node;		/* linkage in the owning evlist */
	struct perf_evlist	*evlist;	/* back pointer to the owning evlist */
	struct perf_event_attr	attr;		/* attributes used to open the event */
	char			*filter;	/* filter string, see perf_evsel__set_filter() */
	struct xyarray		*fd;		/* event fds, presumably cpu x thread — confirm */
	struct xyarray		*sample_id;	/* sample ids, presumably cpu x thread — confirm */
	u64			*id;		/* array of @ids event ids */
	struct perf_counts	*counts;	/* counter values read from the event */
	struct perf_counts	*prev_raw_counts; /* previous snapshot, for deltas — confirm */
	int			idx;		/* position in the evlist, see perf_evsel__group_idx() */
	u32			ids;		/* number of entries in @id */
	char			*name;		/* forced/cached event name, see perf_evsel__name() */
	double			scale;		/* scaling factor applied when printing */
	const char		*unit;		/* unit string for printing */
	struct event_format	*tp_format;	/* tracepoint format (tracepoint events only) */
	off_t			id_offset;	/* NOTE(review): file offset of ids — confirm */
	void			*priv;		/* tool-private data */
	u64			db_id;		/* NOTE(review): export/database id — confirm */
	struct cgroup_sel	*cgrp;		/* cgroup the event is restricted to, if any */
	void			*handler;	/* tool-supplied event handler callback */
	struct cpu_map		*cpus;		/* cpus the event is opened on */
	struct cpu_map		*own_cpus;	/* cpus requested for this event itself — confirm */
	struct thread_map	*threads;	/* threads the event is opened on */
	unsigned int		sample_size;	/* fixed sample size, see __perf_evsel__sample_size() */
	int			id_pos;		/* id position in samples, see perf_evsel__calc_id_pos() */
	int			is_pos;		/* id position in non-sample events — confirm */
	bool			snapshot;
	bool			supported;	/* the event opened successfully */
	bool			needs_swap;	/* recorded data is in foreign endianness */
	bool			no_aux_samples;
	bool			immediate;
	bool			system_wide;
	bool			tracking;
	bool			per_pkg;	/* per-package counting, see @per_pkg_mask */
	bool			precise_max;

	int			exclude_GH;	/* guest/host exclusion — confirm semantics */
	int			nr_members;	/* group size; see perf_evsel__is_group_event() */
	int			sample_read;
	unsigned long		*per_pkg_mask;	/* packages already counted — confirm */
	struct perf_evsel	*leader;	/* group leader; points to self for leaders */
	char			*group_name;
	bool			cmdline_group_boundary;
	struct list_head	config_terms;	/* list of struct perf_evsel_config_term */
	int			bpf_fd;		/* NOTE(review): fd of attached BPF prog — confirm */
};
132
/*
 * Type-punning view of a u64 as two u32 halves; the name suggests it
 * is used when byte-swapping 64-bit sample values — confirm in users.
 */
union u64_swap {
	u64 val64;
	u32 val32[2];
};
137
138struct cpu_map;
139struct target;
140struct thread_map;
141struct record_opts;
142
143static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
144{
145 return evsel->cpus;
146}
147
148static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
149{
150 return perf_evsel__cpus(evsel)->nr;
151}
152
153void perf_counts_values__scale(struct perf_counts_values *count,
154 bool scale, s8 *pscaled);
155
156void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
157 struct perf_counts_values *count);
158
159int perf_evsel__object_config(size_t object_size,
160 int (*init)(struct perf_evsel *evsel),
161 void (*fini)(struct perf_evsel *evsel));
162
163struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx);
164
/* Allocate a new evsel for @attr, placed at index 0. */
static inline struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
{
	const int first_idx = 0;

	return perf_evsel__new_idx(attr, first_idx);
}
169
170struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx);
171
172
173
174
/* Allocate a new tracepoint evsel for @sys:@name, placed at index 0. */
static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name)
{
	const int first_idx = 0;

	return perf_evsel__newtp_idx(sys, name, first_idx);
}
179
180struct perf_evsel *perf_evsel__new_cycles(void);
181
182struct event_format *event_format__new(const char *sys, const char *name);
183
184void perf_evsel__init(struct perf_evsel *evsel,
185 struct perf_event_attr *attr, int idx);
186void perf_evsel__exit(struct perf_evsel *evsel);
187void perf_evsel__delete(struct perf_evsel *evsel);
188
189struct callchain_param;
190
191void perf_evsel__config(struct perf_evsel *evsel,
192 struct record_opts *opts,
193 struct callchain_param *callchain);
194void perf_evsel__config_callchain(struct perf_evsel *evsel,
195 struct record_opts *opts,
196 struct callchain_param *callchain);
197
198int __perf_evsel__sample_size(u64 sample_type);
199void perf_evsel__calc_id_pos(struct perf_evsel *evsel);
200
201bool perf_evsel__is_cache_op_valid(u8 type, u8 op);
202
/* Alias slots per entry in the perf_evsel__hw_cache* name tables below. */
#define PERF_EVSEL__MAX_ALIASES 8
204
205extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
206 [PERF_EVSEL__MAX_ALIASES];
207extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
208 [PERF_EVSEL__MAX_ALIASES];
209extern const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
210 [PERF_EVSEL__MAX_ALIASES];
211extern const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX];
212extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX];
213int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
214 char *bf, size_t size);
215const char *perf_evsel__name(struct perf_evsel *evsel);
216
217const char *perf_evsel__group_name(struct perf_evsel *evsel);
218int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);
219
220int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
221void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
222
223void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
224 enum perf_event_sample_format bit);
225void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
226 enum perf_event_sample_format bit);
227
/*
 * Convenience wrappers: paste the short @bit name into the full
 * PERF_SAMPLE_##bit flag and forward to the __ helpers above,
 * e.g. perf_evsel__set_sample_bit(evsel, TIME).
 */
#define perf_evsel__set_sample_bit(evsel, bit) \
	__perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)

#define perf_evsel__reset_sample_bit(evsel, bit) \
	__perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
233
234void perf_evsel__set_sample_id(struct perf_evsel *evsel,
235 bool use_sample_identifier);
236
237int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter);
238int perf_evsel__append_tp_filter(struct perf_evsel *evsel, const char *filter);
239int perf_evsel__append_addr_filter(struct perf_evsel *evsel,
240 const char *filter);
241int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
242 const char *filter);
243int perf_evsel__enable(struct perf_evsel *evsel);
244int perf_evsel__disable(struct perf_evsel *evsel);
245
246int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
247 struct cpu_map *cpus);
248int perf_evsel__open_per_thread(struct perf_evsel *evsel,
249 struct thread_map *threads);
250int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
251 struct thread_map *threads);
252void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads);
253
254struct perf_sample;
255
256void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
257 const char *name);
258u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
259 const char *name);
260
/*
 * Fetch tracepoint field @name from @sample as a string: the raw
 * payload pointer is returned directly as a char pointer.
 */
static inline char *perf_evsel__strval(struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       const char *name)
{
	void *raw = perf_evsel__rawptr(evsel, sample, name);

	return raw;
}
267
268struct format_field;
269
270u64 format_field__intval(struct format_field *field, struct perf_sample *sample, bool needs_swap);
271
272struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name);
273
/*
 * True if @evsel has type PERF_TYPE_##t and config PERF_COUNT_##c.
 * NOTE: @evsel is expanded twice; avoid side-effecting arguments.
 */
#define perf_evsel__match(evsel, t, c) \
	(evsel->attr.type == PERF_TYPE_##t && \
	 evsel->attr.config == PERF_COUNT_##c)
277
278static inline bool perf_evsel__match2(struct perf_evsel *e1,
279 struct perf_evsel *e2)
280{
281 return (e1->attr.type == e2->attr.type) &&
282 (e1->attr.config == e2->attr.config);
283}
284
/*
 * NULL-safe evsel comparison: true when both @a and @b are non-NULL
 * and select the same event (same attr.type and attr.config).
 * NOTE: arguments are expanded more than once; avoid side effects.
 */
#define perf_evsel__cmp(a, b) \
	((a) && \
	 (b) && \
	 (a)->attr.type == (b)->attr.type && \
	 (a)->attr.config == (b)->attr.config)
290
291int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
292 struct perf_counts_values *count);
293
294int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
295 int cpu, int thread, bool scale);
296
297
298
299
300
301
302
303
/* Read the counter for @cpu/@thread without applying unit scaling. */
static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
					  int cpu, int thread)
{
	const bool scale = false;

	return __perf_evsel__read_on_cpu(evsel, cpu, thread, scale);
}
309
310
311
312
313
314
315
316
/* Read the counter for @cpu/@thread with unit scaling applied. */
static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
						 int cpu, int thread)
{
	const bool scale = true;

	return __perf_evsel__read_on_cpu(evsel, cpu, thread, scale);
}
322
323int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
324 struct perf_sample *sample);
325
326static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
327{
328 return list_entry(evsel->node.next, struct perf_evsel, node);
329}
330
331static inline struct perf_evsel *perf_evsel__prev(struct perf_evsel *evsel)
332{
333 return list_entry(evsel->node.prev, struct perf_evsel, node);
334}
335
336
337
338
339
340
341
342
343static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel)
344{
345 return evsel->leader == evsel;
346}
347
348
349
350
351
352
353
354
355
356static inline bool perf_evsel__is_group_event(struct perf_evsel *evsel)
357{
358 if (!symbol_conf.event_group)
359 return false;
360
361 return perf_evsel__is_group_leader(evsel) && evsel->nr_members > 1;
362}
363
364bool perf_evsel__is_function_event(struct perf_evsel *evsel);
365
366static inline bool perf_evsel__is_bpf_output(struct perf_evsel *evsel)
367{
368 struct perf_event_attr *attr = &evsel->attr;
369
370 return (attr->config == PERF_COUNT_SW_BPF_OUTPUT) &&
371 (attr->type == PERF_TYPE_SOFTWARE);
372}
373
/*
 * Output options for perf_evsel__fprintf(); each flag toggles extra
 * detail in the printed event description (exact effect of each flag
 * is implemented in the fprintf code — confirm there).
 */
struct perf_attr_details {
	bool freq;
	bool verbose;
	bool event_group;
	bool force;
	bool trace_fields;
};
381
382int perf_evsel__fprintf(struct perf_evsel *evsel,
383 struct perf_attr_details *details, FILE *fp);
384
/* Flag bits for the print_opts argument of the sample__fprintf_*() calls. */
#define EVSEL__PRINT_IP (1<<0)
#define EVSEL__PRINT_SYM (1<<1)
#define EVSEL__PRINT_DSO (1<<2)
#define EVSEL__PRINT_SYMOFFSET (1<<3)
#define EVSEL__PRINT_ONELINE (1<<4)
#define EVSEL__PRINT_SRCLINE (1<<5)
#define EVSEL__PRINT_UNKNOWN_AS_ADDR (1<<6)
392
393struct callchain_cursor;
394
395int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
396 unsigned int print_opts,
397 struct callchain_cursor *cursor, FILE *fp);
398
399int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
400 int left_alignment, unsigned int print_opts,
401 struct callchain_cursor *cursor, FILE *fp);
402
403bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
404 char *msg, size_t msgsize);
405int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
406 int err, char *msg, size_t size);
407
408static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
409{
410 return evsel->idx - evsel->leader->idx;
411}
412
/*
 * Iterate @_evsel over the members of @_leader's group, starting with
 * the entry after the leader and stopping at the first evsel whose
 * leader differs (or a NULL entry).
 */
#define for_each_group_member(_evsel, _leader) \
for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node); \
     (_evsel) && (_evsel)->leader == (_leader); \
     (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
417
418static inline bool perf_evsel__has_branch_callstack(const struct perf_evsel *evsel)
419{
420 return evsel->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
421}
422
423typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *);
424
425int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
426 attr__fprintf_f attr__fprintf, void *priv);
427
428char *perf_evsel__env_arch(struct perf_evsel *evsel);
429
430#endif
431