1
2
3
4
5
6
7
8
9
10
11
12
13
14#ifndef _LINUX_PERF_EVENT_H
15#define _LINUX_PERF_EVENT_H
16
17#include <linux/types.h>
18#include <linux/ioctl.h>
19#include <asm/byteorder.h>
20
21
22
23
24
25
26
27
/*
 * attr.type -- overall "class" of event to be counted.
 * User-visible ABI: values must not change.
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_RAW				= 4,

	PERF_TYPE_MAX,				/* non-ABI: count only */
};
37
38
39
40
41
42
/*
 * Generalized performance event event_id types (attr.config when
 * attr.type == PERF_TYPE_HARDWARE).  Most CPUs support these events;
 * they get mapped onto CPU-specific PMU counters by the arch code.
 */
enum perf_hw_id {
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,

	PERF_COUNT_HW_MAX,			/* non-ABI: count only */
};
57
58
59
60
61
62
63
64
/*
 * Generalized hardware cache events (PERF_TYPE_HW_CACHE).
 * attr.config is built from:  cache_id | (op_id << 8) | (result_id << 16)
 * -- presumably the standard encoding; verify against the event parsing
 * code before relying on it.
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,	/* L1 data cache */
	PERF_COUNT_HW_CACHE_L1I			= 1,	/* L1 instruction cache */
	PERF_COUNT_HW_CACHE_LL			= 2,	/* last-level cache */
	PERF_COUNT_HW_CACHE_DTLB		= 3,	/* data TLB */
	PERF_COUNT_HW_CACHE_ITLB		= 4,	/* instruction TLB */
	PERF_COUNT_HW_CACHE_BPU			= 5,	/* branch prediction unit */

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI: count only */
};
75
/* Cache operation selector for PERF_TYPE_HW_CACHE events. */
enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI: count only */
};
83
/* Cache operation result selector for PERF_TYPE_HW_CACHE events. */
enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI: count only */
};
90
91
92
93
94
95
96
/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events.  These events measure various
 * physical and software events of the kernel (attr.type == PERF_TYPE_SOFTWARE).
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,	/* minor faults */
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,	/* major faults */

	PERF_COUNT_SW_MAX,			/* non-ABI: count only */
};
108
109
110
111
112
/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets (PERF_RECORD_SAMPLE).
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,

	PERF_SAMPLE_MAX				= 1U << 11,	/* non-ABI */
};
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
/*
 * Bits that can be set in attr.read_format to request that reads on
 * the event file descriptor return extra fields (times, ids, or all
 * group members).
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
	PERF_FORMAT_ID				= 1U << 2,
	PERF_FORMAT_GROUP			= 1U << 3,

	PERF_FORMAT_MAX				= 1U << 4,	/* non-ABI */
};
157
#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */

/*
 * Hardware event_id to monitor via a performance monitoring event.
 * User-visible ABI: field order, widths, and the single 64-bit flag
 * word must not change; new fields may only be appended (with 'size'
 * used for versioning).
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 * (enum perf_type_id)
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for forward/backward compatibility.
	 */
	__u32			size;

	/*
	 * Type-specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;	/* used when the 'freq' bit below is set */
	};

	__u64			sample_type;	/* PERF_SAMPLE_* mask */
	__u64			read_format;	/* PERF_FORMAT_* mask */

	__u64			disabled       :  1, /* off by default          */
				inherit	       :  1, /* children inherit it     */
				pinned	       :  1, /* must always be on PMU   */
				exclusive      :  1, /* only group on PMU       */
				exclude_user   :  1, /* don't count user        */
				exclude_kernel :  1, /* ditto kernel            */
				exclude_hv     :  1, /* ditto hypervisor        */
				exclude_idle   :  1, /* don't count when idle   */
				mmap	       :  1, /* include mmap data       */
				comm	       :  1, /* include comm data       */
				freq	       :  1, /* use freq, not period    */
				inherit_stat   :  1, /* per task counts         */
				enable_on_exec :  1, /* next exec enables       */
				task	       :  1, /* trace fork/exit         */
				watermark      :  1, /* wakeup_watermark        */

				__reserved_1   : 49;

	union {
		__u32		wakeup_events;	  /* wakeup every n events */
		__u32		wakeup_watermark; /* bytes before wakeup   */
	};
	__u32			__reserved_2;

	__u64			__reserved_3;
};
214
215
216
217
/*
 * Ioctls that can be done on a perf event fd.
 *
 * NOTE: this header is exported to user space, so the ioctl argument
 * type must be the exported __u64, not the kernel-internal u64 (which
 * does not exist outside the kernel and breaks userspace compilation).
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
224
/* Flags for the ioctl arg: operate on the whole group, not one event. */
enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
228
229
230
231
/*
 * Structure of the page that can be mapped via mmap() on the event fd.
 * Holds control data for user-space self-monitoring reads plus the
 * ring-buffer head/tail positions.  User-visible ABI.
 */
struct perf_event_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Self-monitoring read support:
	 *  - 'lock' is a seqcount; user space rereads if it changed
	 *    (or is odd) across the read sequence,
	 *  - 'index' selects the hardware counter to read directly
	 *    (e.g. via rdpmc), with 'offset' added to the raw value.
	 * NOTE(review): exact user-space read protocol is documented in
	 * the kernel sources -- confirm before relying on details.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware event identifier */
	__s64	offset;			/* add to hardware event value */
	__u64	time_enabled;		/* time event active */
	__u64	time_running;		/* time event on cpu */

	/*
	 * Hole for future extensions; pads the header portion out so
	 * data_head/data_tail land at a stable, aligned offset.
	 */
	__u64	__reserved[123];	/* align to 1k */

	/*
	 * Ring-buffer control:
	 *  - data_head is advanced by the kernel as records are written;
	 *    it continuously increases and must be masked by user space
	 *    to find the offset within the buffer,
	 *  - data_tail should be written by user space after consuming
	 *    data, when the buffer is mapped writable, so the kernel
	 *    does not overwrite unread records.
	 */
	__u64   data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
};
284
/*
 * header.misc cpu-mode bits: which privilege level the sample/record
 * originated from.
 */
#define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)

/*
 * Every record in the ring buffer starts with this header; 'size'
 * includes the header itself and 'type' is a PERF_RECORD_* value.
 */
struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};
296
/*
 * Record types found in the mmap()ed ring buffer (header.type).
 * The fields present in each record depend on attr.sample_type and
 * attr.read_format; see the perf_event_open(2) ABI documentation for
 * the exact per-record layouts.
 */
enum perf_event_type {

	/*
	 * Describes an executable mmap(): lets user space correlate
	 * sampled instruction pointers back to file/offset.
	 * Emitted when attr.mmap is set.
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * Indicates that some records were dropped (ring buffer full);
	 * carries the number of lost records.
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * Task changed its comm (process name); emitted when attr.comm
	 * is set.
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * Task exited; emitted when attr.task is set.
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * Interrupt-rate throttling notifications for the event.
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * Task forked; emitted when attr.task is set.
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * Counter value read-out record; layout follows attr.read_format.
	 */
	PERF_RECORD_READ			= 8,

	/*
	 * An actual sample taken on counter overflow; the body contains
	 * the fields selected by attr.sample_type, in PERF_SAMPLE_* bit
	 * order (ip, tid, time, addr, id, stream_id, cpu, period, read
	 * values, callchain, raw data).
	 */
	PERF_RECORD_SAMPLE			= 9,

	PERF_RECORD_MAX,			/* non-ABI */
};
412
/*
 * Marker values interleaved into a sampled callchain to delimit which
 * context (hypervisor/kernel/user, host/guest) the following IPs belong
 * to.  Encoded as huge "negative" addresses so they cannot collide with
 * real instruction pointers.
 */
enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};
424
/* Flags for the sys_perf_event_open() 'flags' argument. */
#define PERF_FLAG_FD_NO_GROUP	(1U << 0)	/* ignore group_fd for grouping */
#define PERF_FLAG_FD_OUTPUT	(1U << 1)	/* redirect output to group_fd's buffer */
427
428#ifdef __KERNEL__
429
430
431
432
433#ifdef CONFIG_PERF_EVENTS
434# include <asm/perf_event.h>
435#endif
436
437#include <linux/list.h>
438#include <linux/mutex.h>
439#include <linux/rculist.h>
440#include <linux/rcupdate.h>
441#include <linux/spinlock.h>
442#include <linux/hrtimer.h>
443#include <linux/fs.h>
444#include <linux/pid_namespace.h>
445#include <linux/workqueue.h>
446#include <asm/atomic.h>
447
/* Maximum number of frames recorded per callchain. */
#define PERF_MAX_STACK_DEPTH		255

/* One sampled callchain: 'nr' valid entries in 'ip'. */
struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};
454
/* Opaque blob attached to a sample when PERF_SAMPLE_RAW is requested. */
struct perf_raw_record {
	u32				size;	/* bytes at 'data' */
	void				*data;
};
459
460struct task_struct;
461
462
463
464
/*
 * Per-event hardware details, embedded in struct perf_event.
 * The anonymous union holds either raw-PMU register bookkeeping
 * (hardware counters) or an hrtimer (software timer-based events).
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			unsigned long	config_base;
			unsigned long	event_base;
			int		idx;		/* counter index on the PMU */
		};
		struct { /* software */
			s64		remaining;
			struct hrtimer	hrtimer;
		};
	};
	atomic64_t			prev_count;	/* last raw counter value read */
	u64				sample_period;
	u64				last_period;
	atomic64_t			period_left;	/* ticks left until next overflow */
	u64				interrupts;

	/* state for the adaptive-frequency (attr.freq) algorithm */
	u64				freq_count;
	u64				freq_interrupts;
	u64				freq_stamp;
#endif
};
490
491struct perf_event;
492
493
494
495
/*
 * struct pmu - generic performance monitoring unit operations,
 * implemented per backend (hardware PMU drivers, software events, ...).
 */
struct pmu {
	int (*enable)			(struct perf_event *event);
	void (*disable)			(struct perf_event *event);
	void (*read)			(struct perf_event *event);
	void (*unthrottle)		(struct perf_event *event);
};
502
503
504
505
/*
 * enum perf_event_active_state - the states of an event:
 * negative values are error/off states, INACTIVE means scheduled out,
 * ACTIVE means currently counting on a CPU.
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};
512
513struct file;
514
/*
 * Kernel-side state for the mmap()ed ring buffer backing an event.
 * Freed via RCU (and via a workqueue when vmalloc-backed).
 */
struct perf_mmap_data {
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;	/* deferred vfree */
#endif
	int				data_order;
	int				nr_pages;	/* nr of data pages */
	int				writable;	/* is user space allowed to write data_tail? */
	int				nr_locked;	/* nr pages charged against mlock limit */

	atomic_t			poll;		/* POLL_ for wakeups */
	atomic_t			events;		/* event_id limit */

	atomic_long_t			head;		/* write position */
	atomic_long_t			done_head;	/* completed head */

	atomic_t			lock;		/* concurrent writes */
	atomic_t			wakeup;		/* needs a wakeup */
	atomic_t			lost;		/* nr records lost */

	long				watermark;	/* wakeup watermark */

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];	/* trailing page array */
};
540
/*
 * Single-linked list node for work deferred out of NMI/IRQ context;
 * 'func' is invoked later from a safe context.
 */
struct perf_pending_entry {
	struct perf_pending_entry *next;
	void (*func)(struct perf_pending_entry *);
};
545
546
547
548
/*
 * struct perf_event - performance event kernel representation:
 * one instance per counter opened via sys_perf_event_open() (plus
 * per-task inherited clones).
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	struct list_head		group_entry;	/* link in leader's group */
	struct list_head		event_entry;	/* link in ctx->event_list */
	struct list_head		sibling_list;	/* this leader's group members */
	int				nr_siblings;
	struct perf_event		*group_leader;	/* points to self if leader */
	struct perf_event		*output;	/* redirect records here (IOC_SET_OUTPUT) */
	const struct pmu		*pmu;		/* backend ops */

	enum perf_event_active_state	state;
	atomic64_t			count;		/* current event value */

	/*
	 * Accumulated wall-clock time this event was enabled /
	 * actually scheduled on a PMU; exported via
	 * PERF_FORMAT_TOTAL_TIME_{ENABLED,RUNNING} so user space can
	 * scale counts when events are multiplexed.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * Timestamps used to incrementally maintain the totals above
	 * as the event changes state.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	struct perf_event_attr		attr;		/* user-supplied configuration */
	struct hw_perf_event		hw;		/* PMU bookkeeping */

	struct perf_event_context	*ctx;		/* owning (task or cpu) context */
	struct file			*filp;		/* backing file of the event fd */

	/*
	 * Totals gathered from exited inherited children; folded into
	 * the parent's reported times.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Parent/child linkage for inherited events; child_mutex
	 * protects the child_list.
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;		/* cpu it is running on, -1 otherwise */
	int				cpu;		/* cpu it is bound to, -1 for any */

	struct list_head		owner_entry;	/* link in owner task's list */
	struct task_struct		*owner;

	/* mmap()ed ring buffer state */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	struct perf_mmap_data		*data;

	/* poll()/fasync support */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed (out-of-NMI) work: wakeups, signals, disabling */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct perf_pending_entry	pending;

	atomic_t			event_limit;	/* remaining IOC_REFRESH budget */

	void (*destroy)(struct perf_event *);		/* type-specific teardown */
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;		/* pid namespace for reported pids */
	u64				id;		/* globally unique event id */
#endif
};
638
639
640
641
642
643
/*
 * struct perf_event_context - a grouping of events (per task or per
 * cpu) that are scheduled onto the PMU together.
 */
struct perf_event_context {
	/*
	 * Protects the lists below; taken from IRQ/scheduler context.
	 */
	spinlock_t			lock;
	/*
	 * Protects the context against syscall-side mutation
	 * (attach/detach of events) without disabling interrupts.
	 */
	struct mutex			mutex;

	struct list_head		group_list;	/* group leaders */
	struct list_head		event_list;	/* all events */
	int				nr_events;
	int				nr_active;
	int				is_active;	/* currently scheduled in? */
	int				nr_stat;	/* events with inherit_stat */
	atomic_t			refcount;
	struct task_struct		*task;		/* NULL for per-cpu contexts */

	/*
	 * Context clock, advanced while the context is active;
	 * basis for the event time accounting.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * For inherited contexts: link to the parent context and
	 * generation counters used to detect when the two have
	 * diverged (for cheap context switching of equivalent contexts).
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;	/* held pinned, may not go away */
	struct rcu_head			rcu_head;
};
682
683
684
685
/*
 * struct perf_cpu_context - per-cpu event context: embeds the cpu-wide
 * context and tracks which task context is currently scheduled in.
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;		/* cpu-wide events */
	struct perf_event_context	*task_ctx;	/* current task's context, if any */
	int				active_oncpu;
	int				max_pertask;
	int				exclusive;	/* an exclusive event is on */

	/*
	 * Per interrupt-level recursion guard so perf code triggered
	 * from within perf code (task/softirq/hardirq/NMI) does not
	 * re-enter itself.
	 */
	int				recursion[4];
};
700
/*
 * Cursor for writing one record into an event's ring buffer; set up by
 * perf_output_begin() and torn down by perf_output_end().
 */
struct perf_output_handle {
	struct perf_event		*event;
	struct perf_mmap_data		*data;
	unsigned long			head;
	unsigned long			offset;
	int				nmi;		/* called from NMI context? */
	int				sample;		/* counts towards wakeup? */
	int				locked;
	unsigned long			flags;		/* saved irq flags */
};
711
#ifdef CONFIG_PERF_EVENTS

/*
 * Set by the arch code to the number of hardware counters available.
 */
extern int perf_max_events;

/* Bind an event to its backend; returns the pmu ops or an ERR_PTR. */
extern const struct pmu *hw_perf_event_init(struct perf_event *event);

/* Scheduler hooks: (de)schedule a task's events with the task. */
extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
extern void perf_event_task_sched_out(struct task_struct *task,
					struct task_struct *next, int cpu);
extern void perf_event_task_tick(struct task_struct *task, int cpu);
/* fork()/exit() hooks for inherited events. */
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
/* Deferred-work machinery (see struct perf_pending_entry). */
extern void set_perf_event_pending(void);
extern void perf_event_do_pending(void);
extern void perf_event_print_debug(void);
/* Global PMU disable/enable (nestable via __perf_* helpers). */
extern void __perf_disable(void);
extern bool __perf_enable(void);
extern void perf_disable(void);
extern void perf_enable(void);
/* prctl() support: (en|dis)able all events of the current task. */
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
/* Arch hook to schedule a whole group atomically on the PMU. */
extern int hw_perf_group_sched_in(struct perf_event *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx, int cpu);
/* Refresh the self-monitoring mmap page (struct perf_event_mmap_page). */
extern void perf_event_update_userpage(struct perf_event *event);
741
/*
 * In-kernel staging area for one sample; which fields are valid is
 * governed by 'type' (a PERF_SAMPLE_* mask).  Serialized into the ring
 * buffer by perf_output_sample().
 */
struct perf_sample_data {
	u64				type;	/* PERF_SAMPLE_* mask */

	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				addr;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	u64				period;
	struct perf_callchain_entry	*callchain;
	struct perf_raw_record		*raw;
};
762
/* Compute a sample record's header/size, then emit it. */
extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

/*
 * Called on counter overflow; returns non-zero when the event should
 * be disabled (e.g. event_limit reached).
 */
extern int perf_event_overflow(struct perf_event *event, int nmi,
				 struct perf_sample_data *data,
				 struct pt_regs *regs);
775
776
777
778
779static inline int is_software_event(struct perf_event *event)
780{
781 return (event->attr.type != PERF_TYPE_RAW) &&
782 (event->attr.type != PERF_TYPE_HARDWARE) &&
783 (event->attr.type != PERF_TYPE_HW_CACHE);
784}
785
/* Per-software-event enable counts; keyed by enum perf_sw_ids. */
extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
789
790static inline void
791perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
792{
793 if (atomic_read(&perf_swevent_enabled[event_id]))
794 __perf_sw_event(event_id, nr, nmi, regs, addr);
795}
796
/* Slow path for perf_event_mmap() below. */
extern void __perf_event_mmap(struct vm_area_struct *vma);
798
799static inline void perf_event_mmap(struct vm_area_struct *vma)
800{
801 if (vma->vm_flags & VM_EXEC)
802 __perf_event_mmap(vma);
803}
804
/* Emit PERF_RECORD_COMM / PERF_RECORD_FORK side-band records. */
extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Arch hook: capture the current callchain. */
extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);

/* sysctl knobs (kernel/sysctl.c). */
extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

extern void perf_event_init(void);
/* Entry point for tracepoint-sourced events (PERF_TYPE_TRACEPOINT). */
extern void perf_tp_event(int event_id, u64 addr, u64 count,
				 void *record, int entry_size);
817
/*
 * Default implementations of the sample-origin helpers; arch code may
 * provide more precise versions (e.g. detecting hypervisor mode) by
 * defining perf_misc_flags before including this header.
 */
#ifndef perf_misc_flags
#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_RECORD_MISC_USER : \
				 PERF_RECORD_MISC_KERNEL)
#define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif
823
/*
 * Reserve 'size' bytes in the event's ring buffer / commit the record /
 * copy payload bytes into the reserved space.
 */
extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size,
			     int nmi, int sample);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);
830#else
/*
 * !CONFIG_PERF_EVENTS: stub out the hooks called from core kernel code
 * so they compile away to nothing.
 */
static inline void
perf_event_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_event_task_sched_out(struct task_struct *task,
			    struct task_struct *next, int cpu)		{ }
static inline void
perf_event_task_tick(struct task_struct *task, int cpu)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_do_pending(void)				{ }
static inline void perf_event_print_debug(void)				{ }
static inline void perf_disable(void)					{ }
static inline void perf_enable(void)					{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi,
		     struct pt_regs *regs, u64 addr)			{ }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_comm(struct task_struct *tsk)		{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
857#endif
858
/* Copy a fixed-size object (by value) into the output record. */
#define perf_output_put(handle, x) \
	perf_output_copy((handle), &(x), sizeof(x))
861
862#endif
863#endif
864