/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Paul Mackerras
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <linux/cgroup.h>
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int				(*is_in_guest)(void);
	int				(*is_user_mode)(void);
	unsigned long			(*get_guest_ip)(void);
};
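
/*
 * Example (illustrative sketch, not part of this header): a hypervisor
 * such as KVM can implement these callbacks and hand them to the core
 * via perf_register_guest_info_callbacks() (declared below), so that
 * PMI samples arriving while a guest runs are attributed correctly.
 * All "myhv_*" names are hypothetical:
 *
 *	static int myhv_is_in_guest(void)
 *	{
 *		return myhv_vcpu_running();
 *	}
 *
 *	static struct perf_guest_info_callbacks myhv_guest_cbs = {
 *		.is_in_guest	= myhv_is_in_guest,
 *		.is_user_mode	= myhv_is_user_mode,
 *		.get_guest_ip	= myhv_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&myhv_guest_cbs);
 */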

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32				size;
	void				*data;
};

/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * support for mispred, predicted is optional. In case it
 * is not supported mispred = predicted = 0.
 */
struct perf_branch_entry {
	__u64				from;
	__u64				to;
	__u64				mispred:1,
					predicted:1,
					reserved:62;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample
 * depending on branch filtering used.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};
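
/*
 * Example (illustrative sketch, not part of this header): a PMI handler
 * that captured a last-branch-record buffer publishes it through
 * perf_sample_data::br_stack; consumers then walk the variable-sized
 * entries[] array bounded by nr. The helper name is hypothetical:
 *
 *	static void myarch_dump_lbr(struct perf_branch_stack *bs)
 *	{
 *		u64 i;
 *
 *		for (i = 0; i < bs->nr; i++)
 *			pr_debug("branch %llu: %llx -> %llx%s\n", i,
 *				 bs->entries[i].from, bs->entries[i].to,
 *				 bs->entries[i].mispred ? " (mispredicted)" : "");
 *	}
 */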

struct perf_regs_user {
	__u64		abi;
	struct pt_regs	*regs;
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			struct task_struct	*tp_target;
			/* for tp_event->class */
			struct list_head	tp_list;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * situation with the last_task_ptrace thing.
			 */
			struct task_struct		*bp_target;
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
	};
	int				state;
	local64_t			prev_count;
	u64				sample_period;
	u64				last_period;
	local64_t			period_left;
	u64				interrupts_seq;
	u64				interrupts;

	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct device			*dev;
	const struct attribute_group	**attr_groups;
	char				*name;
	int				type;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	int				task_ctx_nr;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for ensuring unscheduled updates don't go unnoticed.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 * Should return -ENOENT when the @event doesn't match this PMU.
	 */
	int (*event_init)		(struct perf_event *event);

#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside
	 * a transaction, see the ->*_txn() methods.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU.
	 * The PMI handler should stop the counter when perf_event_overflow()
	 * returns !0. ->start() will be used to continue.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction, add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction, after this ->add() doesn't need to
	 * do schedulability tests.
	 */
	void (*start_txn)		(struct pmu *pmu); /* optional */

	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int  (*commit_txn)		(struct pmu *pmu); /* optional */

	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 */
	void (*cancel_txn)		(struct pmu *pmu); /* optional */

	/*
	 * Will return the value for perf_event_mmap_page::index for this
	 * event.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * flush branch stack on context-switches (needed in cpu-wide mode)
	 */
	void (*flush_branch_stack)	(void);
};
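
/*
 * Example (illustrative sketch, not part of this header): the minimal
 * set of callbacks a software-style PMU provides before calling
 * perf_pmu_register() (declared below). All "mypmu_*" names are
 * hypothetical:
 *
 *	static struct pmu mypmu = {
 *		.task_ctx_nr	= perf_sw_context,
 *		.event_init	= mypmu_event_init,	// -ENOENT if not ours
 *		.add		= mypmu_add,		// PERF_EF_START: also start
 *		.del		= mypmu_del,
 *		.start		= mypmu_start,
 *		.stop		= mypmu_stop,
 *		.read		= mypmu_read,		// update event->count
 *	};
 *
 *	ret = perf_pmu_register(&mypmu, "mypmu", -1);	// -1: allocate type id
 */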

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE		= 0x1,
};

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04

#ifdef CONFIG_CGROUP_PERF
/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info		*info;	/* timing info, one per cpu */
};
#endif

struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	struct list_head		group_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	struct hlist_node		hlist_entry;
	int				nr_siblings;
	int				group_flags;
	struct perf_event		*group_leader;
	struct pmu			*pmu;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	int				mmap_locked;
	struct user_struct		*mmap_user;
	struct ring_buffer		*rb;
	struct list_head		rb_entry;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
	struct ftrace_event_call	*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops		ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp;	/* cgroup the event is attached to */
	int				cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

enum perf_event_context_type {
	task_context,
	cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	enum perf_event_context_type	type;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list's immutability, while holding
	 * both grants us write access to the list.
	 */
	struct mutex			mutex;

	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	int				nr_cgroups;	 /* cgroup events present */
	int				nr_branch_stack; /* branch_stack events present */
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;
	struct list_head		rotation_list;
	int				jiffies_interval;
	struct pmu			*unique_pmu;
	struct perf_cgroup		*cgrp;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	void				*addr;
	int				page;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				 int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t callback,
				 void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				     int src_cpu, int dst_cpu);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
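
/*
 * Example (illustrative sketch, not part of this header): in-kernel
 * users (watchdogs, KVM, ...) create counters without a file descriptor
 * via perf_event_create_kernel_counter() and read them with
 * perf_event_read_value(). Error handling abbreviated:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.pinned		= 1,
 *	};
 *	struct perf_event *event;
 *	u64 enabled, running, count;
 *
 *	event = perf_event_create_kernel_counter(&attr, smp_processor_id(),
 *						 NULL, NULL, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	...
 *	count = perf_event_read_value(event, &enabled, &running);
 *	perf_event_release_kernel(event);
 */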

struct perf_sample_data {
	u64				type;

	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				addr;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	u64				period;
	struct perf_callchain_entry	*callchain;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	struct perf_regs_user		regs_user;
	u64				stack_user_size;
};

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE;
	data->regs_user.regs = NULL;
	data->stack_user_size = 0;
}
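
/*
 * Example (illustrative sketch, not part of this header): the usual
 * pattern in a PMU interrupt handler is to initialize the sample data,
 * then let perf_event_overflow() handle sample generation and
 * throttling; a non-zero return asks the PMU to stop the event:
 *
 *	struct perf_sample_data data;
 *	struct hw_perf_event *hwc = &event->hw;
 *
 *	perf_sample_data_init(&data, 0, hwc->last_period);
 *	if (perf_event_overflow(event, &data, regs))
 *		mypmu_stop(event, 0);	// hypothetical ->stop() helper
 */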

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	struct pt_regs hot_regs;

	if (static_key_false(&perf_swevent_enabled[event_id])) {
		if (!regs) {
			perf_fetch_caller_regs(&hot_regs);
			regs = &hot_regs;
		}
		__perf_sw_event(event_id, nr, regs, addr);
	}
}
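
/*
 * Example (illustrative sketch, not part of this header): thanks to the
 * static key, a disabled software event compiles down to a patched-out
 * branch, so call sites can stay in fast paths. The page-fault path,
 * for instance, emits:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * Passing regs == NULL makes perf_sw_event() snapshot the caller's
 * registers via perf_fetch_caller_regs().
 */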

extern struct static_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);

	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}
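
/*
 * Example (illustrative sketch, not part of this header): an
 * architecture implements perf_callchain_kernel() by walking its stack
 * frames and feeding each return address to perf_callchain_store(),
 * which silently drops entries beyond PERF_MAX_STACK_DEPTH. The frame
 * walking helpers below are hypothetical:
 *
 *	void perf_callchain_kernel(struct perf_callchain_entry *entry,
 *				   struct pt_regs *regs)
 *	{
 *		unsigned long fp = frame_pointer(regs);
 *
 *		perf_callchain_store(entry, instruction_pointer(regs));
 *		while (myarch_valid_frame(fp)) {
 *			perf_callchain_store(entry, myarch_frame_ip(fp));
 *			fp = myarch_next_frame(fp);
 *		}
 *	}
 */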

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

extern int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}
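
/*
 * Example (illustrative sketch, not part of this header): the syscall
 * side pairs these helpers with a capability check, e.g. when opening
 * an event that profiles the kernel:
 *
 *	if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
 *		return -EACCES;
 */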

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
				     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern int __perf_event_disable(void *info);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline void perf_event_print_debug(void)			{ }
static inline int perf_event_task_disable(void)			{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_comm(struct task_struct *tsk)		{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)	{ }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)	{ }
static inline int __perf_event_disable(void *info)			{ return -1; }
static inline void perf_event_task_tick(void)				{ }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)			{ }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
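
/*
 * Example (illustrative sketch, not part of this header): writing a raw
 * record into the ring buffer follows a begin/put/end sequence; the
 * handle pins the buffer between the two calls:
 *
 *	struct perf_output_handle handle;
 *	struct my_record rec = { ... };		// hypothetical payload
 *
 *	if (perf_output_begin(&handle, event, sizeof(rec)))
 *		return;				// buffer full
 *	perf_output_put(&handle, rec);
 *	perf_output_end(&handle);
 */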

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb __cpuinitdata =		\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
	unsigned long cpu = smp_processor_id();				\
	unsigned long flags;						\
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
		(void *)(unsigned long)cpu);				\
	local_irq_save(flags);						\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
		(void *)(unsigned long)cpu);				\
	local_irq_restore(flags);					\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
		(void *)(unsigned long)cpu);				\
	register_cpu_notifier(&fn##_nb);				\
} while (0)
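
/*
 * Example (illustrative sketch, not part of this header): a PMU driver
 * initializing after boot can use the macro so its notifier first sees
 * the current CPU as if it had just come online, and is then registered
 * for future hotplug transitions. The callback name is hypothetical:
 *
 *	perf_cpu_notifier(mypmu_cpu_notify);	// notifier_call-style fn
 */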


struct perf_pmu_events_attr {
	struct device_attribute attr;
	u64 id;
};

#define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
static struct perf_pmu_events_attr _var = {				\
	.attr = __ATTR(_name, 0444, _show, NULL),			\
	.id   = _id,							\
};
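
/*
 * Example (illustrative sketch, not part of this header): a PMU driver
 * exposes a named event in sysfs by pairing the macro with a show
 * routine; x86 does essentially this for its architectural events
 * (the show routine name is assumed here for illustration):
 *
 *	PMU_EVENT_ATTR(cpu-cycles, evattr_cpu_cycles,
 *		       PERF_COUNT_HW_CPU_CYCLES, events_sysfs_show);
 *
 * where events_sysfs_show() is a device_attribute show routine that
 * formats the event encoding for userspace.
 */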

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
	     struct device_attribute *attr,				\
	     char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
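
/*
 * Example (illustrative sketch, not part of this header): format
 * attributes tell userspace how perf_event_attr::config is laid out for
 * this PMU; x86 declares, for instance:
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *	PMU_FORMAT_ATTR(umask, "config:8-15");
 *
 * The resulting format_attr_event/format_attr_umask objects are placed
 * in the PMU's "format" sysfs attribute group.
 */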

#endif /* _LINUX_PERF_EVENT_H */