1
2
3
4
5
6
7
8
9
10
11
12
13
14#ifndef _LINUX_PERF_EVENT_H
15#define _LINUX_PERF_EVENT_H
16
17#include <linux/types.h>
18#include <linux/ioctl.h>
19#include <asm/byteorder.h>
20
21
22
23
24
25
26
27
/*
 * attr.type — the major class of the event; it also selects the
 * namespace in which attr.config is interpreted.
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_RAW				= 4,	/* raw, PMU-specific config */
	PERF_TYPE_BREAKPOINT			= 5,	/* hardware breakpoint */

	PERF_TYPE_MAX,				/* non-ABI sentinel */
};
38
39
40
41
42
43
/*
 * Generalized hardware event ids (attr.config values for
 * PERF_TYPE_HARDWARE); the kernel maps these onto the actual
 * CPU-specific counters.
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,

	PERF_COUNT_HW_MAX,			/* non-ABI sentinel */
};
58
59
60
61
62
63
64
65
/*
 * Generalized hardware cache events.
 *
 * A cache event is described by a (cache, op, result) triple combined
 * into attr.config for PERF_TYPE_HW_CACHE.
 * NOTE(review): the exact packing (id | op << 8 | result << 16) is part
 * of the perf ABI — confirm against the perf_event_open(2) man page.
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,	/* L1 data cache        */
	PERF_COUNT_HW_CACHE_L1I			= 1,	/* L1 instruction cache */
	PERF_COUNT_HW_CACHE_LL			= 2,	/* last-level cache     */
	PERF_COUNT_HW_CACHE_DTLB		= 3,	/* data TLB             */
	PERF_COUNT_HW_CACHE_ITLB		= 4,	/* instruction TLB      */
	PERF_COUNT_HW_CACHE_BPU			= 5,	/* branch prediction    */

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI sentinel */
};

/* What the cache was asked to do. */
enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI sentinel */
};

/* Whether the access was satisfied or missed. */
enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI sentinel */
};
91
92
93
94
95
96
97
/*
 * Special "software" events provided by the kernel, available even if
 * the hardware has no performance counters. These count various
 * kernel-visible events (and allow profiling on them as well).
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,	/* minor faults */
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,	/* major faults */
	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
	PERF_COUNT_SW_EMULATION_FAULTS		= 8,

	PERF_COUNT_SW_MAX,			/* non-ABI sentinel */
};
111
112
113
114
115
/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow (PERF_RECORD_SAMPLE) packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,

	PERF_SAMPLE_MAX = 1U << 11,		/* non-ABI sentinel */
};
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
/*
 * attr.read_format bits: select the layout of the data returned by
 * read() on a perf event fd (optional timing totals, event id, or all
 * group members at once).
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
	PERF_FORMAT_ID				= 1U << 2,
	PERF_FORMAT_GROUP			= 1U << 3,

	PERF_FORMAT_MAX = 1U << 4,		/* non-ABI sentinel */
};
160
/* sizeof the first published struct perf_event_attr */
#define PERF_ATTR_SIZE_VER0	64

/*
 * Event request passed to the perf_event_open() syscall: everything
 * user space needs to describe one event. attr.size allows the
 * structure to grow while staying forward/backward compatible.
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc. (enum perf_type_id).
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32			size;

	/*
	 * Type-specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;		/* sample every N counts ...    */
		__u64		sample_freq;		/* ... or at this rate, if freq */
	};

	__u64			sample_type;		/* PERF_SAMPLE_* bits  */
	__u64			read_format;		/* PERF_FORMAT_* bits  */

	__u64			disabled       :  1,	/* off by default        */
				inherit	       :  1,	/* children inherit it   */
				pinned	       :  1,	/* must always be on PMU */
				exclusive      :  1,	/* only group on PMU     */
				exclude_user   :  1,	/* don't count user      */
				exclude_kernel :  1,	/* ditto kernel          */
				exclude_hv     :  1,	/* ditto hypervisor      */
				exclude_idle   :  1,	/* don't count when idle */
				mmap	       :  1,	/* include mmap data     */
				comm	       :  1,	/* include comm data     */
				freq	       :  1,	/* use freq, not period  */
				inherit_stat   :  1,	/* per task counts       */
				enable_on_exec :  1,	/* next exec enables     */
				task	       :  1,	/* trace fork/exit       */
				watermark      :  1,	/* wakeup_watermark      */
				/*
				 * precise_ip — constraint on sample IP skid:
				 *  0 - SAMPLE_IP can have arbitrary skid
				 *  1 - SAMPLE_IP must have constant skid
				 *  2 - SAMPLE_IP requested to have 0 skid
				 *  3 - SAMPLE_IP must have 0 skid
				 */
				precise_ip     :  2,	/* skid constraint       */
				mmap_data      :  1,	/* non-exec mmap data    */
				sample_id_all  :  1,	/* sample_type all events */

				__reserved_1   : 45;	/* 19 used bits + 45 = 64 */

	union {
		__u32		wakeup_events;		/* wakeup every n events */
		__u32		wakeup_watermark;	/* bytes before wakeup   */
	};

	__u32			bp_type;		/* breakpoint type    */
	__u64			bp_addr;		/* breakpoint address */
	__u64			bp_len;			/* breakpoint length  */
};
231
232
233
234
/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)

/* 'arg' flag for the ioctls above: apply to the whole event group. */
enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
246
247
248
249
/*
 * Structure of the first page mapped via mmap() on a perf event fd;
 * it carries self-monitoring state plus the ring-buffer control words.
 */
struct perf_event_mmap_page {
	__u32	version;		/* version number of this structure      */
	__u32	compat_version;		/* lowest version this is compatible with */

	/*
	 * Bits needed to read the hw event counter in user-space.
	 * NOTE(review): 'lock' looks like a seqcount user space should
	 * check around reads of the fields below — confirm the exact
	 * protocol against the perf mmap ABI documentation.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware event identifier   */
	__s64	offset;			/* add to hardware event value */
	__u64	time_enabled;		/* time event active           */
	__u64	time_running;		/* time event on cpu           */

	/*
	 * Hole for extension of the self-monitoring capabilities
	 * (123 * 8 bytes, padding the header to 1k).
	 */
	__u64	__reserved[123];	/* align to 1k */

	/*
	 * Control data for the mmap()ed data section.
	 *
	 * data_head points at the write position in the data area; when
	 * the mapping is writable, user space writes data_tail to record
	 * how far it has consumed, so the kernel won't overwrite unread
	 * records.
	 */
	__u64   data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail  */
};
302
/*
 * perf_event_header::misc bits.
 *
 * The low three bits encode the CPU mode the sample was taken in:
 */
#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * The reported instruction pointer is the exact (skid-less)
 * instruction that caused the event; see attr.precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

/* Common prefix of every record in the mmap()ed ring buffer. */
struct perf_event_header {
	__u32	type;		/* enum perf_event_type                      */
	__u16	misc;		/* PERF_RECORD_MISC_* bits                   */
	__u16	size;		/* total record size, including this header  */
};
327
/*
 * Kinds of records that appear in the mmap()ed ring buffer. Every
 * record starts with a struct perf_event_header; the layouts sketched
 * below are part of the perf ABI (see the perf_event_open(2) man page).
 *
 * If attr.sample_id_all is set, non-SAMPLE records are additionally
 * followed by the identity fields (TID, TIME, ID, CPU, STREAM_ID)
 * selected in attr.sample_type.
 */
enum perf_event_type {

	/*
	 * Describes an executable mapping, so sampled IPs can be
	 * resolved back to binaries:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * Records that 'lost' events were dropped (e.g. buffer full):
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * Task command-name change:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *	char				comm[];
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * Task exit:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * Event (un)throttled by the sampling rate limiter:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * Task fork; same layout as PERF_RECORD_EXIT.
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * Counter read-out, formatted per attr.read_format:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *	struct read_format		values;
	 * };
	 */
	PERF_RECORD_READ			= 8,

	/*
	 * An overflow sample; its contents are selected by
	 * attr.sample_type (PERF_SAMPLE_* bits) and appear in a fixed
	 * ABI-defined order (IP, TID, TIME, ADDR, ID, STREAM_ID, CPU,
	 * PERIOD, read values, CALLCHAIN, RAW).
	 */
	PERF_RECORD_SAMPLE			= 9,

	PERF_RECORD_MAX,			/* non-ABI sentinel */
};
452
/*
 * Marker values interleaved into a PERF_SAMPLE_CALLCHAIN ip stream to
 * delimit its sections; chosen far outside any valid address range.
 */
enum perf_callchain_context {
	PERF_CONTEXT_HV				= (__u64)-32,
	PERF_CONTEXT_KERNEL			= (__u64)-128,
	PERF_CONTEXT_USER			= (__u64)-512,

	PERF_CONTEXT_GUEST			= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL		= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER			= (__u64)-2560,

	PERF_CONTEXT_MAX			= (__u64)-4095,
};

/* Flags for the perf_event_open() syscall 'flags' argument. */
#define PERF_FLAG_FD_NO_GROUP	(1U << 0)
#define PERF_FLAG_FD_OUTPUT	(1U << 1)
467
468#ifdef __KERNEL__
469
470
471
472
473#ifdef CONFIG_PERF_EVENTS
474# include <asm/perf_event.h>
475# include <asm/local64.h>
476#endif
477
/*
 * Callbacks a virtualization layer registers (see
 * perf_register_guest_info_callbacks) so samples taken while a guest
 * was running can be attributed to guest user/kernel mode and IP.
 */
struct perf_guest_info_callbacks {
	int (*is_in_guest) (void);
	int (*is_user_mode) (void);
	unsigned long (*get_guest_ip) (void);
};
483
484#ifdef CONFIG_HAVE_HW_BREAKPOINT
485#include <asm/hw_breakpoint.h>
486#endif
487
488#include <linux/list.h>
489#include <linux/mutex.h>
490#include <linux/rculist.h>
491#include <linux/rcupdate.h>
492#include <linux/spinlock.h>
493#include <linux/hrtimer.h>
494#include <linux/fs.h>
495#include <linux/pid_namespace.h>
496#include <linux/workqueue.h>
497#include <linux/ftrace.h>
498#include <linux/cpu.h>
499#include <linux/irq_work.h>
500#include <linux/jump_label_ref.h>
501#include <asm/atomic.h>
502#include <asm/local.h>
503
/* Maximum number of frames recorded per callchain. */
#define PERF_MAX_STACK_DEPTH		255

/* A captured callchain: 'nr' valid entries in 'ip'. */
struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

/* Opaque payload attached to a sample via PERF_SAMPLE_RAW. */
struct perf_raw_record {
	u32				size;
	void				*data;
};

/* One taken-branch record. */
struct perf_branch_entry {
	__u64				from;
	__u64				to;
	__u64				flags;
};

/* Variable-length run of branch records. */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};

struct task_struct;
528
529
530
531
/*
 * struct hw_perf_event - performance event hardware details.
 *
 * The anonymous union holds flavour-specific state: raw hardware
 * counters, hrtimer-driven software events, or hardware breakpoints.
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware counter */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		idx;		/* counter index on the PMU */
			int		last_cpu;
		};
		struct { /* software event */
			struct hrtimer	hrtimer;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* hardware breakpoint */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
			/*
			 * NOTE(review): appears to cache the breakpoint's
			 * target task separately from event->ctx — confirm
			 * against kernel/hw_breakpoint.c before relying on it.
			 */
			struct task_struct		*bp_target;
		};
#endif
	};
	int				state;		/* PERF_HES_* flags */
	local64_t			prev_count;	/* last observed raw count */
	u64				sample_period;
	u64				last_period;
	local64_t			period_left;	/* counts until next overflow */
	u64				interrupts;

	/* used by the frequency-adjustment (attr.freq) code: */
	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};
570
571
572
573
/*
 * hw_perf_event::state flags:
 */
#define PERF_HES_STOPPED	0x01	/* the counter is stopped       */
#define PERF_HES_UPTODATE	0x02	/* event->count up-to-date      */
#define PERF_HES_ARCH		0x04	/* arch-private state bit       */

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn.
 */
#define PERF_EVENT_TXN 0x1

/*
 * struct pmu - generic performance monitoring unit.
 */
struct pmu {
	struct list_head		entry;	/* on the global pmus list */

	struct device			*dev;
	char				*name;
	int				type;	/* perf_type_id or dynamic id */

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	int				task_ctx_nr;

	/*
	 * Fully disable/enable this PMU; can be used to protect from
	 * the PMI as well as for exclusive pmu operation.
	 */
	void (*pmu_enable)		(struct pmu *pmu);
	void (*pmu_disable)		(struct pmu *pmu);

	/*
	 * Try and initialize the event for this PMU.
	 * Should return -ENOENT when the @event doesn't match this PMU.
	 */
	int (*event_init)		(struct perf_event *event);

#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU; can be done inside
	 * a transaction, see the ->*_txn() methods.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU. The PMI handler
	 * should stop the counter when perf_event_overflow() returns
	 * !0. ->start() will be used to continue.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group event scheduling is treated as a transaction: add all
	 * group members and perform a single schedulability test; on
	 * failure, roll back the whole group.
	 *
	 * Start the transaction; after this, ->add() doesn't need to
	 * do schedulability tests.
	 */
	void (*start_txn)		(struct pmu *pmu);
	/*
	 * If ->add() was called during the transaction, ->commit_txn()
	 * must perform the schedulability test. On success the
	 * transaction is closed; on error it stays open until
	 * ->cancel_txn() rolls it back.
	 */
	int  (*commit_txn)		(struct pmu *pmu);
	/*
	 * Cancels the transaction; assumes ->del() is called for each
	 * successful ->add() made during the transaction.
	 */
	void (*cancel_txn)		(struct pmu *pmu);
};
658
659
660
661
/*
 * enum perf_event_active_state - the states of an event:
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_ERROR		= -2,	/* unrecoverable error       */
	PERF_EVENT_STATE_OFF		= -1,	/* disabled                  */
	PERF_EVENT_STATE_INACTIVE	=  0,	/* enabled, not on the PMU   */
	PERF_EVENT_STATE_ACTIVE		=  1,	/* counting on the PMU       */
};
668
struct file;

#define PERF_BUFFER_WRITABLE		0x01	/* user space may write data_tail */

/*
 * Ring buffer backing an mmap()ed event; reference counted and freed
 * via RCU once the last reference drops.
 */
struct perf_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;		/* deferred vfree      */
	int				page_order;	/* allocation order    */
#endif
	int				nr_pages;	/* nr of data pages    */
	int				writable;	/* are we writable     */

	atomic_t			poll;		/* POLL_ for wakeups   */

	local_t				head;		/* write position      */
	local_t				nest;		/* nested writers      */
	local_t				events;		/* event limit         */
	local_t				wakeup;		/* wakeup stamp        */
	local_t				lost;		/* nr records lost     */

	long				watermark;	/* wakeup watermark    */

	struct perf_event_mmap_page	*user_page;	/* control page        */
	void				*data_pages[0];	/* trailing page ptrs  */
};
696
struct perf_sample_data;

/* Callback invoked when an event overflows (args: event, nmi, data, regs). */
typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
					struct perf_sample_data *,
					struct pt_regs *regs);

/* perf_event::group_flags bits. */
enum perf_group_flag {
	PERF_GROUP_SOFTWARE = 0x1,
};

/* Hash table used to find software events quickly. */
#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;	/* deferred freeing */
};

/*
 * perf_event::attach_state bits — which structures the event is
 * currently linked into.
 */
#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04
718
719
720
721
/*
 * struct perf_event - kernel representation of one performance event:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	struct list_head		group_entry;	/* on group leader's list */
	struct list_head		event_entry;	/* on ctx->event_list     */
	struct list_head		sibling_list;	/* this leader's group    */
	struct hlist_node		hlist_entry;	/* swevent hash           */
	int				nr_siblings;
	int				group_flags;	/* PERF_GROUP_*           */
	struct perf_event		*group_leader;
	struct pmu			*pmu;

	enum perf_event_active_state	state;
	unsigned int			attach_state;	/* PERF_ATTACH_*          */
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event has
	 * been enabled (i.e. eligible to run, and the task has been
	 * scheduled in, if this is a per-task event) and running
	 * (scheduled onto the CPU), respectively.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * Timestamps used for computing total_time_enabled and
	 * total_time_running when the event is in INACTIVE or ACTIVE
	 * state, measured from an arbitrary point in time:
	 * tstamp_enabled: notional time the event was enabled
	 * tstamp_running: notional time the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, notional time it was
	 *	scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * Shadows the context timing so it can be used safely from
	 * NMI context; reflects context time as of the last schedule-in.
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;	/* precomputed record sizes */
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	struct file			*filp;

	/*
	 * Accumulated total time (ns) that children events have been
	 * enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	int				mmap_locked;
	struct user_struct		*mmap_user;
	struct perf_buffer		*buffer;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	perf_overflow_handler_t		overflow_handler;

#ifdef CONFIG_EVENT_TRACING
	struct ftrace_event_call	*tp_event;
	struct event_filter		*filter;
#endif

#endif /* CONFIG_PERF_EVENTS */
};
837
enum perf_event_context_type {
	task_context,
	cpu_context,
};

/*
 * struct perf_event_context - event context structure.
 *
 * Used as a container for task events and CPU events alike.
 */
struct perf_event_context {
	enum perf_event_context_type	type;
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events. Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
	struct rcu_head			rcu_head;
};
890
891
892
893
894
/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/*
 * struct perf_cpu_context - per cpu event context structure.
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;
	struct list_head		rotation_list;
	int				jiffies_interval;
	struct pmu			*active_pmu;
};

/*
 * State carried while producing one record into the ring buffer;
 * set up by perf_output_begin() and committed by perf_output_end().
 */
struct perf_output_handle {
	struct perf_event		*event;
	struct perf_buffer		*buffer;
	unsigned long			wakeup;
	unsigned long			size;
	void				*addr;
	int				page;
	int				nmi;
	int				sample;
};
920
921#ifdef CONFIG_PERF_EVENTS
922
/*
 * Kernel-internal perf API (real implementations; the !CONFIG_PERF_EVENTS
 * stubs live at the bottom of this header).
 */
extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
/* scheduler hooks; called via the inline wrappers further down */
extern void __perf_event_task_sched_in(struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
/* task lifetime hooks */
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
/* in-kernel counter creation (no fd involved) */
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				int cpu,
				struct task_struct *task,
				perf_overflow_handler_t callback);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);
948
/*
 * Decoded sample payload used with perf_prepare_sample() and
 * perf_output_sample(); which fields are meaningful is governed by
 * 'type' (PERF_SAMPLE_* bits).
 */
struct perf_sample_data {
	u64				type;

	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				addr;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;	/* pads cpu_entry to 64 bits */
	}				cpu_entry;
	u64				period;
	struct perf_callchain_entry	*callchain;
	struct perf_raw_record		*raw;
};
969
970static inline
971void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
972{
973 data->addr = addr;
974 data->raw = NULL;
975}
976
/* Output path: perf_prepare_sample() computes the record layout,
 * perf_output_sample() emits it into the ring buffer. */
extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event, int nmi,
				 struct perf_sample_data *data,
				 struct pt_regs *regs);

/* An event samples iff a period (sample_freq aliases it) was requested. */
static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event.
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->pmu->task_ctx_nr == perf_sw_context;
}

/* Per-software-event-id enable counts; keys the perf_sw_event() fast path. */
extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);

/* Weak default: architectures may provide a real register snapshot. */
#ifndef perf_arch_fetch_caller_regs
static inline void
perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the caller's registers. Must be inlined so the
 * CALLER_ADDR0 it passes down refers to the code that called us.
 * The snapshot starts zeroed; the arch hook fills in what it can.
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(*regs));

	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}
1026
/*
 * Fire a software event of @event_id, counting @nr occurrences.
 *
 * JUMP_LABEL(...) transfers control to 'have_event' only while the
 * enable count for this id is non-zero, so the common disabled case
 * reduces to a (patchable) jump plus return. When the caller has no
 * regs, a snapshot of its own registers is taken instead.
 */
static __always_inline void
perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
	struct pt_regs hot_regs;

	JUMP_LABEL(&perf_swevent_enabled[event_id], have_event);
	return;

have_event:
	if (!regs) {
		perf_fetch_caller_regs(&hot_regs);
		regs = &hot_regs;
	}
	__perf_sw_event(event_id, nr, nmi, regs, addr);
}
1042
/* Non-zero while any event needs the task scheduling hooks below. */
extern atomic_t perf_task_events;

/* Context-switch-in hook; cheap unless perf_task_events is set. */
static inline void perf_event_task_sched_in(struct task_struct *task)
{
	COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
}

/*
 * Context-switch-out hook; also counts one CONTEXT_SWITCHES software
 * event before handing off to the heavyweight path (if needed).
 */
static inline
void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
{
	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);

	COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
}
1057
/* Side-band record generation (MMAP/COMM/FORK) and guest callbacks. */
extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Scratch callchain buffer, one per CPU. */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

/* Architecture hooks that walk the user/kernel stack into @entry. */
extern void perf_callchain_user(struct perf_callchain_entry *entry,
				struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
				  struct pt_regs *regs);
1073
1074
1075static inline void
1076perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
1077{
1078 if (entry->nr < PERF_MAX_STACK_DEPTH)
1079 entry->ip[entry->nr++] = ip;
1080}
1081
/* Tunable limits/permissions for perf, adjustable at run time via sysctl. */
extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
1085
1086static inline bool perf_paranoid_tracepoint_raw(void)
1087{
1088 return sysctl_perf_event_paranoid > -1;
1089}
1090
1091static inline bool perf_paranoid_cpu(void)
1092{
1093 return sysctl_perf_event_paranoid > 0;
1094}
1095
1096static inline bool perf_paranoid_kernel(void)
1097{
1098 return sysctl_perf_event_paranoid > 1;
1099}
1100
extern void perf_event_init(void);
/* inject a tracepoint hit into perf */
extern void perf_tp_event(u64 addr, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx);
/* inject a breakpoint hit into perf */
extern void perf_bp_event(struct perf_event *event, void *data);

/*
 * Default decoding of the sampled machine state; architectures with
 * richer information (e.g. guest mode) override these macros.
 */
#ifndef perf_misc_flags
#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_RECORD_MISC_USER : \
				 PERF_RECORD_MISC_KERNEL)
#define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

/* Ring-buffer output: begin() reserves space, copy() fills it, end() commits. */
extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_event *event, unsigned int size,
			     int nmi, int sample);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);
/* swevent recursion guard, per context level */
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void);
1123extern void perf_event_task_tick(void);
1124#else
/*
 * !CONFIG_PERF_EVENTS: no-op stubs so callers need no #ifdefs.
 * Signatures must mirror the real declarations above.
 */
static inline void
perf_event_task_sched_in(struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *task,
			    struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline void perf_event_print_debug(void)				{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi,
		     struct pt_regs *regs, u64 addr)			{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
static inline void perf_event_comm(struct task_struct *tsk)		{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)		{ }
static inline void perf_event_task_tick(void)				{ }
1158#endif
1159
/* Emit one fixed-size, typed value into the ring buffer via @handle. */
#define perf_output_put(handle, x) \
	perf_output_copy((handle), &(x), sizeof(x))

/*
 * Register a CPU hotplug notifier and immediately replay the bring-up
 * notifications (UP_PREPARE, STARTING, ONLINE) for the CPU executing
 * the registration, so that CPU gets initialized too.
 */
#define perf_cpu_notifier(fn)					\
do {								\
	static struct notifier_block fn##_nb __cpuinitdata =	\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF }; \
	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,		\
		(void *)(unsigned long)smp_processor_id());	\
	fn(&fn##_nb, (unsigned long)CPU_STARTING,		\
		(void *)(unsigned long)smp_processor_id());	\
	fn(&fn##_nb, (unsigned long)CPU_ONLINE,			\
		(void *)(unsigned long)smp_processor_id());	\
	register_cpu_notifier(&fn##_nb);			\
} while (0)
1178
1179#endif
1180#endif
1181