/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE = 0,
	PERF_TYPE_SOFTWARE = 1,
	PERF_TYPE_TRACEPOINT = 2,
	PERF_TYPE_HW_CACHE = 3,
	PERF_TYPE_RAW = 4,
	PERF_TYPE_BREAKPOINT = 5,

	PERF_TYPE_MAX, /* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.event_id parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES = 0,
	PERF_COUNT_HW_INSTRUCTIONS = 1,
	PERF_COUNT_HW_CACHE_REFERENCES = 2,
	PERF_COUNT_HW_CACHE_MISSES = 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
	PERF_COUNT_HW_BRANCH_MISSES = 5,
	PERF_COUNT_HW_BUS_CYCLES = 6,
	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
	PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
	PERF_COUNT_HW_REF_CPU_CYCLES = 9,

	PERF_COUNT_HW_MAX, /* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D = 0,
	PERF_COUNT_HW_CACHE_L1I = 1,
	PERF_COUNT_HW_CACHE_LL = 2,
	PERF_COUNT_HW_CACHE_DTLB = 3,
	PERF_COUNT_HW_CACHE_ITLB = 4,
	PERF_COUNT_HW_CACHE_BPU = 5,
	PERF_COUNT_HW_CACHE_NODE = 6,

	PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ = 0,
	PERF_COUNT_HW_CACHE_OP_WRITE = 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,

	PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS = 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
};
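
/*
 * A cache event is selected by setting attr.type = PERF_TYPE_HW_CACHE and
 * packing the three enums above into attr.config:
 *
 *   attr.config = (perf_hw_cache_id) |
 *                 (perf_hw_cache_op_id << 8) |
 *                 (perf_hw_cache_op_result_id << 16);
 *
 * For example, L1 data-cache read misses:
 *
 *   attr.config = PERF_COUNT_HW_CACHE_L1D |
 *                 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *                 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */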

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK = 0,
	PERF_COUNT_SW_TASK_CLOCK = 1,
	PERF_COUNT_SW_PAGE_FAULTS = 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
	PERF_COUNT_SW_CPU_MIGRATIONS = 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
	PERF_COUNT_SW_EMULATION_FAULTS = 8,

	PERF_COUNT_SW_MAX, /* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP = 1U << 0,
	PERF_SAMPLE_TID = 1U << 1,
	PERF_SAMPLE_TIME = 1U << 2,
	PERF_SAMPLE_ADDR = 1U << 3,
	PERF_SAMPLE_READ = 1U << 4,
	PERF_SAMPLE_CALLCHAIN = 1U << 5,
	PERF_SAMPLE_ID = 1U << 6,
	PERF_SAMPLE_CPU = 1U << 7,
	PERF_SAMPLE_PERIOD = 1U << 8,
	PERF_SAMPLE_STREAM_ID = 1U << 9,
	PERF_SAMPLE_RAW = 1U << 10,
	PERF_SAMPLE_BRANCH_STACK = 1U << 11,

	PERF_SAMPLE_MAX = 1U << 12, /* non-ABI */
};

/*
 * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type {
	PERF_SAMPLE_BRANCH_USER = 1U << 0, /* user branches */
	PERF_SAMPLE_BRANCH_KERNEL = 1U << 1, /* kernel branches */
	PERF_SAMPLE_BRANCH_HV = 1U << 2, /* hypervisor branches */

	PERF_SAMPLE_BRANCH_ANY = 1U << 3, /* any branch types */
	PERF_SAMPLE_BRANCH_ANY_CALL = 1U << 4, /* any call branch */
	PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << 5, /* any return branch */
	PERF_SAMPLE_BRANCH_IND_CALL = 1U << 6, /* indirect calls */

	PERF_SAMPLE_BRANCH_MAX = 1U << 7, /* non-ABI */
};

#define PERF_SAMPLE_BRANCH_PLM_ALL \
	(PERF_SAMPLE_BRANCH_USER|\
	 PERF_SAMPLE_BRANCH_KERNEL|\
	 PERF_SAMPLE_BRANCH_HV)
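
/*
 * Illustrative use (user-space sketch): to sample the last taken branches
 * of user-space execution only, a profiler would set
 *
 *   attr.sample_type        |= PERF_SAMPLE_BRANCH_STACK;
 *   attr.branch_sample_type  = PERF_SAMPLE_BRANCH_ANY |
 *                              PERF_SAMPLE_BRANCH_USER;
 *
 * If none of the PLM bits is requested, the kernel falls back to the
 * event's own exclude_user/exclude_kernel/exclude_hv settings.
 */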

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64		value;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64		nr;
 *	  { u64		time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64		time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64		value;
 *	    { u64	id;           } && PERF_FORMAT_ID
 *	  }		cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
	PERF_FORMAT_ID = 1U << 2,
	PERF_FORMAT_GROUP = 1U << 3,

	PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
};

#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1 72 /* add: config2 */
#define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32 type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32 size;

	/*
	 * Type specific configuration information.
	 */
	__u64 config;

	union {
		__u64 sample_period;
		__u64 sample_freq;
	};

	__u64 sample_type;
	__u64 read_format;

	__u64 disabled : 1, /* off by default */
	      inherit : 1, /* children inherit it */
	      pinned : 1, /* must always be on PMU */
	      exclusive : 1, /* only group on PMU */
	      exclude_user : 1, /* don't count user */
	      exclude_kernel : 1, /* ditto kernel */
	      exclude_hv : 1, /* ditto hypervisor */
	      exclude_idle : 1, /* don't count when idle */
	      mmap : 1, /* include mmap data */
	      comm : 1, /* include comm data */
	      freq : 1, /* use freq, not period */
	      inherit_stat : 1, /* per task counts */
	      enable_on_exec : 1, /* next exec enables */
	      task : 1, /* trace fork/exit */
	      watermark : 1, /* wakeup_watermark */
	      /*
	       * precise_ip:
	       *
	       *  0 - SAMPLE_IP can have arbitrary skid
	       *  1 - SAMPLE_IP must have constant skid
	       *  2 - SAMPLE_IP requested to have 0 skid
	       *  3 - SAMPLE_IP must have 0 skid
	       *
	       *  See also PERF_RECORD_MISC_EXACT_IP
	       */
	      precise_ip : 2, /* skid constraint */
	      mmap_data : 1, /* non-exec mmap data */
	      sample_id_all : 1, /* sample_type all events */

	      exclude_host : 1, /* don't count in host */
	      exclude_guest : 1, /* don't count in guest */

	      __reserved_1 : 43;

	union {
		__u32 wakeup_events; /* wakeup every n events */
		__u32 wakeup_watermark; /* bytes before wakeup */
	};

	__u32 bp_type;
	union {
		__u64 bp_addr;
		__u64 config1; /* extension of config */
	};
	union {
		__u64 bp_len;
		__u64 config2; /* extension of config1 */
	};
	__u64 branch_sample_type; /* enum perf_branch_sample_type */
};
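
/*
 * Illustrative use (user-space sketch, not part of this header; uses the
 * raw syscall since libc typically provides no perf_event_open() wrapper):
 *
 *   struct perf_event_attr attr;
 *
 *   memset(&attr, 0, sizeof(attr));
 *   attr.size           = sizeof(attr);
 *   attr.type           = PERF_TYPE_HARDWARE;
 *   attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
 *   attr.disabled       = 1;   // enable later via ioctl()
 *   attr.exclude_kernel = 1;
 *
 *   // pid = 0, cpu = -1: measure the calling task on any CPU.
 *   int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */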

/* the u64 of bit-flags immediately following attr->read_format */
#define perf_flags(attr) (*(&(attr)->read_format + 1))

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP = 1U << 0,
};
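
/*
 * Illustrative use (user-space sketch): a counter opened with
 * attr.disabled = 1 is typically driven like this:
 *
 *   ioctl(fd, PERF_EVENT_IOC_RESET,  0);
 *   ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *   run_workload();                    // hypothetical helper
 *   ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *   read(fd, &value, sizeof(value));   // layout depends on read_format
 *
 * Passing PERF_IOC_FLAG_GROUP as the ioctl argument applies the operation
 * to the whole event group instead of a single event.
 */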

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32 version;		/* version number of this structure */
	__u32 compat_version;	/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq, time_mult, time_shift, idx, width;
	 *   u64 count, enabled, running;
	 *   u64 cyc, time_offset;
	 *   s64 pmc = 0;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *     barrier()
	 *
	 *     enabled = pc->time_enabled;
	 *     running = pc->time_running;
	 *
	 *     if (pc->cap_usr_time && enabled != running) {
	 *       cyc = rdtsc();
	 *       time_offset = pc->time_offset;
	 *       time_mult   = pc->time_mult;
	 *       time_shift  = pc->time_shift;
	 *     }
	 *
	 *     idx = pc->index;
	 *     count = pc->offset;
	 *     if (pc->cap_usr_rdpmc && idx) {
	 *       width = pc->pmc_width;
	 *       pmc = rdpmc(idx - 1);
	 *     }
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: the seq-counter is modelled as a low-level seqlock,
	 *       the barrier()s guarantee the consistency.
	 */
	__u32 lock;		/* seqlock for synchronization */
	__u32 index;		/* hardware event identifier */
	__s64 offset;		/* add to hardware event value */
	__u64 time_enabled;	/* time event active */
	__u64 time_running;	/* time event on cpu */
	union {
		__u64 capabilities;
		__u64 cap_usr_time : 1,
		      cap_usr_rdpmc : 1,
		      cap_____res : 62;
	};

	/*
	 * If cap_usr_rdpmc this field provides the bit-width of the value
	 * read using the rdpmc() or equivalent instruction. This can be used
	 * to sign extend the result like:
	 *
	 *   pmc <<= 64 - width;
	 *   pmc >>= 64 - width; // signed shift right
	 *   count += pmc;
	 */
	__u16 pmc_width;

	/*
	 * If cap_usr_time the below fields can be used to compute the time
	 * delta since time_enabled (in ns) using rdtsc or similar.
	 *
	 *   u64 quot, rem;
	 *   u64 delta;
	 *
	 *   quot = (cyc >> time_shift);
	 *   rem = cyc & ((1 << time_shift) - 1);
	 *   delta = time_offset + quot * time_mult +
	 *              ((rem * time_mult) >> time_shift);
	 *
	 * Where time_offset, time_mult, time_shift and cyc are read in the
	 * seqcount loop described above. This delta can then be added to
	 * enabled and possibly running (if idx), improving the scaling:
	 *
	 *   enabled += delta;
	 *   if (idx)
	 *     running += delta;
	 *
	 *   quot = count / running;
	 *   rem  = count % running;
	 *   count = quot * enabled + (rem * enabled) / running;
	 */
	__u16 time_shift;
	__u32 time_mult;
	__u64 time_offset;

	/*
	 * Hole for extension of the self monitor capabilities
	 */

	__u64 __reserved[120];	/* align to 1k */

	/*
	 * Control data for the mmap() data section.
	 *
	 * User-space reading the @data_head value should issue an rmb(),
	 * after reading this value.
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data. In this case
	 * the kernel will not over-write unread data.
	 */
	__u64 data_head;	/* head in the data section */
	__u64 data_tail;	/* user-space written tail */
};
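
/*
 * Illustrative use (user-space sketch): consuming the mmap()'ed data
 * section. "base" points at the data pages right after this control page,
 * "pg" is the mapped struct perf_event_mmap_page, and data_size is the
 * power-of-two size of the data area:
 *
 *   u64 head = pg->data_head;
 *   rmb();                             // order head read vs. data reads
 *   while (tail < head) {
 *       struct perf_event_header *hdr = base + (tail & (data_size - 1));
 *       process_record(hdr);           // hypothetical helper; a real
 *                                      // consumer must also handle records
 *                                      // wrapping at the buffer boundary
 *       tail += hdr->size;
 *   }
 *   pg->data_tail = tail;              // only meaningful for PROT_WRITE
 *                                      // mappings, see above
 */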

#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

struct perf_event_header {
	__u32 type;
	__u16 misc;
	__u16 size;
};

enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * have the sample_type selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID)
	 * described in PERF_RECORD_SAMPLE below, it will be stashed just
	 * after the perf_event_header and the fields already present for
	 * the existing fields, i.e. at the end of the payload. That way a
	 * newer perf.data file will be supported by older perf tools, with
	 * these new optional fields being ignored.
	 *
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_RECORD_MMAP = 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 * };
	 */
	PERF_RECORD_LOST = 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 * };
	 */
	PERF_RECORD_COMM = 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_EXIT = 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 * };
	 */
	PERF_RECORD_THROTTLE = 5,
	PERF_RECORD_UNTHROTTLE = 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_FORK = 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 * };
	 */
	PERF_RECORD_READ = 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u64				ip;	  // if PERF_SAMPLE_IP
	 *	u32				pid, tid; // if PERF_SAMPLE_TID
	 *	u64				time;     // if PERF_SAMPLE_TIME
	 *	u64				addr;     // if PERF_SAMPLE_ADDR
	 *	u64				id;	  // if PERF_SAMPLE_ID
	 *	u64				stream_id;// if PERF_SAMPLE_STREAM_ID
	 *	u32				cpu, res; // if PERF_SAMPLE_CPU
	 *	u64				period;   // if PERF_SAMPLE_PERIOD
	 *
	 *	struct read_format		v;	  // if PERF_SAMPLE_READ
	 *
	 *	u64				nr,       // if PERF_SAMPLE_CALLCHAIN
	 *	u64				ips[nr];  // if PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt to
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	u32				size;	  // if PERF_SAMPLE_RAW
	 *	char				data[size];// if PERF_SAMPLE_RAW
	 *
	 *	u64				nr;	  // if PERF_SAMPLE_BRANCH_STACK
	 *	struct perf_branch_entry[nr];	  // if PERF_SAMPLE_BRANCH_STACK
	 * };
	 */
	PERF_RECORD_SAMPLE = 9,

	PERF_RECORD_MAX, /* non-ABI */
};

#define PERF_MAX_STACK_DEPTH 127

enum perf_callchain_context {
	PERF_CONTEXT_HV = (__u64)-32,
	PERF_CONTEXT_KERNEL = (__u64)-128,
	PERF_CONTEXT_USER = (__u64)-512,

	PERF_CONTEXT_GUEST = (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
	PERF_CONTEXT_GUEST_USER = (__u64)-2560,

	PERF_CONTEXT_MAX = (__u64)-4095,
};
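
/*
 * The context values above are sentinels stored inline in the ips[] array
 * of a PERF_SAMPLE_CALLCHAIN record; each one marks which context the
 * addresses following it belong to, e.g.:
 *
 *   ips[] = { PERF_CONTEXT_KERNEL, k_ip0, k_ip1,
 *             PERF_CONTEXT_USER,   u_ip0, u_ip1, ... };
 *
 * They are chosen near the very top of the address space so that they do
 * not collide with real instruction pointers.
 */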

#define PERF_FLAG_FD_NO_GROUP	(1U << 0)
#define PERF_FLAG_FD_OUTPUT	(1U << 1)
#define PERF_FLAG_PID_CGROUP	(1U << 2) /* pid=cgroup id, per-cpu mode only */

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <linux/cgroup.h>
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int (*is_in_guest)(void);
	int (*is_user_mode)(void);
	unsigned long (*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64 nr;
	__u64 ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
	u32 size;
	void *data;
};

/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * support for mispred, predicted is optional. In case it
 * is not supported mispred = predicted = 0.
 */
struct perf_branch_entry {
	__u64 from;
	__u64 to;
	__u64 mispred:1,   /* target mispredicted */
	      predicted:1, /* target predicted */
	      reserved:62;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample
 * because of hardware limitations.
 */
struct perf_branch_stack {
	__u64 nr;
	struct perf_branch_entry entries[0];
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64 config;	/* register value */
	unsigned int reg; /* register address or index */
	int alloc;	/* extra register already allocated */
	int idx;	/* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64 config;
			u64 last_tag;
			unsigned long config_base;
			unsigned long event_base;
			int event_base_rdpmc;
			int idx;
			int last_cpu;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer hrtimer;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			struct arch_hw_breakpoint info;
			struct list_head bp_list;
			/*
			 * Crufty hack to avoid the chicken and egg
			 * situation described for breakpoint events in
			 * the comment near perf_event::ctx.
			 */
			struct task_struct *bp_target;
		};
#endif
	};
	int state;
	local64_t prev_count;
	u64 sample_period;
	u64 last_period;
	local64_t period_left;
	u64 interrupts_seq;
	u64 interrupts;

	u64 freq_time_stamp;
	u64 freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head entry;

	struct device *dev;
	const struct attribute_group **attr_groups;
	char *name;
	int type;

	int * __percpu pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	int task_ctx_nr;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for ensuring the PMU driver loaded is the correct one.
	 */
	void (*pmu_enable) (struct pmu *pmu); /* optional */
	void (*pmu_disable) (struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 * Should return -ENOENT when the @event doesn't match this PMU.
	 */
	int (*event_init) (struct perf_event *event);

#define PERF_EF_START	0x01 /* start the counter when adding    */
#define PERF_EF_RELOAD	0x02 /* reload the counter when starting */
#define PERF_EF_UPDATE	0x04 /* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside
	 * a transaction, see the ->*_txn() methods.
	 */
	int (*add) (struct perf_event *event, int flags);
	void (*del) (struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU. The PMI handler
	 * should stop the counter when perf_event_overflow() returns
	 * !0. ->start() will be used to continue.
	 */
	void (*start) (struct perf_event *event, int flags);
	void (*stop) (struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 */
	void (*read) (struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction, add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction, after this ->add() doesn't need to
	 * do schedulability tests.
	 */
	void (*start_txn) (struct pmu *pmu); /* optional */

	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 */
	int (*commit_txn) (struct pmu *pmu); /* optional */

	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 */
	void (*cancel_txn) (struct pmu *pmu); /* optional */

	/*
	 * Returns the value to advertise in perf_event_mmap_page::index
	 * for this event; zero disables user-space rdpmc for it.
	 */
	int (*event_idx) (struct perf_event *event);

	/*
	 * flush branch stack on context-switches (needed in cpu-wide mode)
	 */
	void (*flush_branch_stack) (void);
};
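
/*
 * Rough life cycle of an event on a PMU (a sketch of how the core calls
 * into these methods, not a normative list): perf_pmu_register() makes the
 * PMU known; for each event the core then calls ->event_init(), and while
 * the event is scheduled in, ->add()/->del() and ->start()/->stop() with
 * the PERF_EF_* flags above; ->read() refreshes the count on demand.
 */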

/**
 * enum perf_event_active_state - the states of a event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_ERROR = -2,
	PERF_EVENT_STATE_OFF = -1,
	PERF_EVENT_STATE_INACTIVE = 0,
	PERF_EVENT_STATE_ACTIVE = 1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

enum perf_group_flag {
	PERF_GROUP_SOFTWARE = 0x1,
};

#define SWEVENT_HLIST_BITS 8
#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head heads[SWEVENT_HLIST_SIZE];
	struct rcu_head rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04

#ifdef CONFIG_CGROUP_PERF
/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64 time;
	u64 timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state css;
	struct perf_cgroup_info *info; /* timing info, one per cpu */
};
#endif

struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	struct list_head group_entry;
	struct list_head event_entry;
	struct list_head sibling_list;
	struct hlist_node hlist_entry;
	int nr_siblings;
	int group_flags;
	struct perf_event *group_leader;
	struct pmu *pmu;

	enum perf_event_active_state state;
	unsigned int attach_state;
	local64_t count;
	atomic64_t child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64 total_time_enabled;
	u64 total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64 tstamp_enabled;
	u64 tstamp_running;
	u64 tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64 shadow_ctx_time;

	struct perf_event_attr attr;
	u16 header_size;
	u16 id_header_size;
	u16 read_size;
	struct hw_perf_event hw;

	struct perf_event_context *ctx;
	atomic_long_t refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t child_total_time_enabled;
	atomic64_t child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex child_mutex;
	struct list_head child_list;
	struct perf_event *parent;

	int oncpu;
	int cpu;

	struct list_head owner_entry;
	struct task_struct *owner;

	/* mmap bits */
	struct mutex mmap_mutex;
	atomic_t mmap_count;
	int mmap_locked;
	struct user_struct *mmap_user;
	struct ring_buffer *rb;
	struct list_head rb_entry;

	/* poll related */
	wait_queue_head_t waitq;
	struct fasync_struct *fasync;

	/* delayed work for NMIs and such */
	int pending_wakeup;
	int pending_kill;
	int pending_disable;
	struct irq_work pending;

	atomic_t event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head rcu_head;

	struct pid_namespace *ns;
	u64 id;

	perf_overflow_handler_t overflow_handler;
	void *overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
	struct ftrace_event_call *tp_event;
	struct event_filter *filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup *cgrp; /* cgroup the event is attached to */
	int cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

enum perf_event_context_type {
	task_context,
	cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu *pmu;
	enum perf_event_context_type type;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t lock;

	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list's consistency.
	 */
	struct mutex mutex;

	struct list_head pinned_groups;
	struct list_head flexible_groups;
	struct list_head event_list;
	int nr_events;
	int nr_active;
	int is_active;
	int nr_stat;
	int nr_freq;
	int rotate_disable;
	atomic_t refcount;
	struct task_struct *task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64 time;
	u64 timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context *parent_ctx;
	u64 parent_gen;
	u64 generation;
	int pin_count;
	int nr_cgroups;	/* cgroup events present */
	int nr_branch_stack; /* branch_stack events present */
	struct rcu_head rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS 4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context ctx;
	struct perf_event_context *task_ctx;
	int active_oncpu;
	int exclusive;
	struct list_head rotation_list;
	int jiffies_interval;
	struct pmu *active_pmu;
	struct perf_cgroup *cgrp;
};

struct perf_output_handle {
	struct perf_event *event;
	struct ring_buffer *rb;
	unsigned long wakeup;
	unsigned long size;
	void *addr;
	int page;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				 int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t callback,
				 void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				     int src_cpu, int dst_cpu);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);

struct perf_sample_data {
	u64 type;

	u64 ip;
	struct {
		u32 pid;
		u32 tid;
	} tid_entry;
	u64 time;
	u64 addr;
	u64 id;
	u64 stream_id;
	struct {
		u32 cpu;
		u32 reserved;
	} cpu_entry;
	u64 period;
	struct perf_callchain_entry *callchain;
	struct perf_raw_record *raw;
	struct perf_branch_stack *br_stack;
};

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw = NULL;
	data->br_stack = NULL;
	data->period = period;
}
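
/*
 * Typical use in a PMU interrupt handler (a sketch modelled on the x86
 * driver): fill in a perf_sample_data and let the core decide whether the
 * event must be stopped:
 *
 *   struct perf_sample_data data;
 *
 *   perf_sample_data_init(&data, 0, event->hw.last_period);
 *   if (perf_event_overflow(event, &data, regs))
 *       x86_pmu_stop(event, 0);     // driver-specific stop routine
 */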

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	struct pt_regs hot_regs;

	if (static_key_false(&perf_swevent_enabled[event_id])) {
		if (!regs) {
			perf_fetch_caller_regs(&hot_regs);
			regs = &hot_regs;
		}
		__perf_sw_event(event_id, nr, regs, addr);
	}
}
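
/*
 * Callers simply count occurrences in place; e.g. the page fault path
 * does, roughly:
 *
 *   perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * The static key keeps this a patched-out NOP until at least one software
 * event of that type actually exists.
 */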

extern struct static_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);

	if (static_key_false(&perf_sched_events.key))
		__perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}
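
/*
 * Architecture code implements perf_callchain_{user,kernel}() by walking
 * its stack format and feeding each return address to the helper above;
 * a sketch of the usual pattern (regs->ip shown x86-style):
 *
 *   void perf_callchain_kernel(struct perf_callchain_entry *entry,
 *                              struct pt_regs *regs)
 *   {
 *       perf_callchain_store(entry, regs->ip);
 *       // ... then unwind frame by frame, calling
 *       // perf_callchain_store() for every caller address.
 *   }
 */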

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

extern int perf_proc_update_handler(struct ctl_table *table, int write,
				    void __user *buffer, size_t *lenp,
				    loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
	return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
	return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs) instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern int __perf_event_disable(void *info);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task) { }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next) { }
static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
static inline void perf_event_print_debug(void) { }
static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
static inline void
perf_bp_event(struct perf_event *event, void *data) { }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma) { }
static inline void perf_event_comm(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx) { }
static inline void perf_event_enable(struct perf_event *event) { }
static inline void perf_event_disable(struct perf_event *event) { }
static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb __cpuinitdata =		\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
		(void *)(unsigned long)smp_processor_id());		\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
		(void *)(unsigned long)smp_processor_id());		\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
		(void *)(unsigned long)smp_processor_id());		\
	register_cpu_notifier(&fn##_nb);				\
} while (0)


#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
	     struct device_attribute *attr,				\
	     char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
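
/*
 * Illustrative use (a sketch following the x86 driver): a PMU driver
 * describes its config bitfield layout in sysfs via
 *
 *   PMU_FORMAT_ATTR(event, "config:0-7");
 *   PMU_FORMAT_ATTR(umask, "config:8-15");
 *
 * and then lists &format_attr_event.attr etc. in one of its
 * pmu::attr_groups entries.
 */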

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */