/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Paul Mackerras
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

/*
 * Callbacks a hypervisor (e.g. KVM) registers so that samples taken while
 * a guest is running can be attributed to guest/host, user/kernel:
 */
struct perf_guest_info_callbacks {
        int (*is_in_guest)(void);
        int (*is_user_mode)(void);
        unsigned long (*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <asm/local.h>

struct perf_callchain_entry {
        __u64 nr;
        __u64 ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
        u32 size;
        void *data;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample
 * depending on branch filter used.
 */
struct perf_branch_stack {
        __u64 nr;
        struct perf_branch_entry entries[0];
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
        u64 config;             /* register value */
        unsigned int reg;       /* register address or index */
        int alloc;              /* extra register already allocated */
        int idx;                /* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
        union {
                struct { /* hardware */
                        u64 config;
                        u64 last_tag;
                        unsigned long config_base;
                        unsigned long event_base;
                        int event_base_rdpmc;
                        int idx;
                        int last_cpu;
                        int flags;

                        struct hw_perf_event_extra extra_reg;
                        struct hw_perf_event_extra branch_reg;
                };
                struct { /* software */
                        struct hrtimer hrtimer;
                };
                struct { /* tracepoint */
                        /* for tp_event->class */
                        struct list_head tp_list;
                };
                struct { /* intel_cqm */
                        int cqm_state;
                        u32 cqm_rmid;
                        struct list_head cqm_events_entry;
                        struct list_head cqm_groups_entry;
                        struct list_head cqm_group_entry;
                };
                struct { /* itrace */
                        int itrace_started;
                };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
                struct { /* breakpoint */
                        /*
                         * Crufty hack to avoid the chicken and egg
                         * problem where the breakpoint is in the middle
                         * of the ctx switch code.
                         */
                        struct arch_hw_breakpoint info;
                        struct list_head bp_list;
                };
#endif
        };
        /*
         * If the event is a per task event, this will point to the task in
         * question. See the comment in perf_event_alloc().
         */
        struct task_struct *target;

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED        0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE       0x02 /* event->count up-to-date */
#define PERF_HES_ARCH           0x04

        int state;

        /*
         * The last observed hardware counter value, updated with a
         * local64_cmpxchg() in x86_perf_event_update().
         */
        local64_t prev_count;

        /*
         * The period to start the next sample with.
         */
        u64 sample_period;

        /*
         * The period we started this sample with.
         */
        u64 last_period;

        /*
         * However much is left of the current period; note that this is
         * a full 64bit value and allows for generation of periods longer
         * than hardware might allow.
         */
        local64_t period_left;

        /*
         * State for throttling the event, see __perf_event_overflow() and
         * perf_adjust_freq_unthr_context().
         */
        u64 interrupts_seq;
        u64 interrupts;

        /*
         * State for freq target events, see __perf_event_overflow() and
         * perf_adjust_freq_unthr_context().
         */
        u64 freq_time_stamp;
        u64 freq_count_stamp;
#endif
};

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_PMU_TXN_ADD        0x1 /* txn to add/schedule event on PMU */
#define PERF_PMU_TXN_READ       0x2 /* txn to read event group from PMU */

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT       0x01
#define PERF_PMU_CAP_NO_NMI             0x02
#define PERF_PMU_CAP_AUX_NO_SG          0x04
#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF   0x08
#define PERF_PMU_CAP_EXCLUSIVE          0x10
#define PERF_PMU_CAP_ITRACE             0x20

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
        struct list_head entry;

        struct module *module;
        struct device *dev;
        const struct attribute_group **attr_groups;
        const char *name;
        int type;

        /*
         * various common per-pmu feature flags
         */
        int capabilities;

        int * __percpu pmu_disable_count;
        struct perf_cpu_context * __percpu pmu_cpu_context;
        atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
        int task_ctx_nr;
        int hrtimer_interval_ms;

        /*
         * Fully disable/enable this PMU, can be used to protect from the PMI
         * as well as for ensuring consistency wrt concurrent modification.
         */
        void (*pmu_enable) (struct pmu *pmu);
        void (*pmu_disable) (struct pmu *pmu);

        /*
         * Try and initialize the event for this PMU.
         * Should return -ENOENT when the @event doesn't match this PMU.
         */
        int (*event_init) (struct perf_event *event);

        /*
         * Notification that the event was mapped or unmapped.  Called
         * in the context of the mapping task.
         */
        void (*event_mapped) (struct perf_event *event);
        void (*event_unmapped) (struct perf_event *event);

        /*
         * Flags for ->add()/->del()/->start()/->stop(). There are
         * matching hw_perf_event::state flags.
         */
#define PERF_EF_START   0x01 /* start the counter when adding    */
#define PERF_EF_RELOAD  0x02 /* reload the counter when starting */
#define PERF_EF_UPDATE  0x04 /* update the counter when stopping */

        /*
         * Adds/Removes a counter to/from the PMU, can be done inside a
         * transaction, see the ->*_txn() methods.
         *
         * The add/del callbacks will reserve all hardware resources required
         * to service the event; this includes any counter constraint
         * scheduling etc.
         *
         * Called with IRQs disabled and the PMU disabled on the CPU the
         * event is on.
         *
         * ->add() called without PERF_EF_START should result in the same state
         * as ->add() followed by ->stop().
         *
         * ->del() must always PERF_EF_UPDATE stop an event. If it calls
         * ->stop() that must deal with already being stopped without
         * PERF_EF_UPDATE.
         */
        int (*add) (struct perf_event *event, int flags);
        void (*del) (struct perf_event *event, int flags);

        /*
         * Starts/Stops a counter present on the PMU.
         *
         * The PMI handler should stop the counter when perf_event_overflow()
         * returns !0. ->start() will be used to continue.
         *
         * Also used to change the sample period.
         *
         * Called with IRQs disabled and the PMU disabled on the CPU the
         * event is on.
         *
         * ->stop() with PERF_EF_UPDATE will read the counter and update
         * period/count values like ->read() would.
         *
         * ->start() with PERF_EF_RELOAD will reprogram the counter
         * value, and must be preceded by a ->stop() with PERF_EF_UPDATE.
         */
        void (*start) (struct perf_event *event, int flags);
        void (*stop) (struct perf_event *event, int flags);

        /*
         * Updates the counter value of the event.
         *
         * For sampling capable PMUs this will also update the software period
         * hw_perf_event::period_left field.
         */
        void (*read) (struct perf_event *event);

        /*
         * Group events scheduling is treated as a transaction: add group
         * events as a whole and perform one schedulability test. If the
         * test fails, roll back the whole group.
         *
         * Start the transaction; after this, ->add() doesn't need to do
         * schedulability tests. The txn_flags are the PERF_PMU_TXN_* values
         * defined above.
         *
         * Optional.
         */
        void (*start_txn) (struct pmu *pmu, unsigned int txn_flags);

        /*
         * If ->start_txn() disabled the ->add() schedulability test,
         * then ->commit_txn() is required to perform one. On success
         * the transaction is closed. On error the transaction is kept
         * open until ->cancel_txn() is called.
         *
         * Optional.
         */
        int (*commit_txn) (struct pmu *pmu);

        /*
         * Will cancel the transaction; assumes ->del() is called for
         * each successful ->add() during the transaction.
         *
         * Optional.
         */
        void (*cancel_txn) (struct pmu *pmu);

        /*
         * Will return the value for perf_event_mmap_page::index for this
         * event, if userspace RDPMC support is exposed.
         */
        int (*event_idx) (struct perf_event *event);

        /*
         * context-switches callback
         */
        void (*sched_task) (struct perf_event_context *ctx,
                            bool sched_in);

        /*
         * PMU specific data size
         */
        size_t task_ctx_size;

        /*
         * Return the count value for a counter.
         */
        u64 (*count) (struct perf_event *event);

        /*
         * Set up pmu-private data structures for an AUX area.
         */
        void *(*setup_aux) (int cpu, void **pages,
                            int nr_pages, bool overwrite);

        /*
         * Free pmu-private AUX data structures.
         */
        void (*free_aux) (void *aux);

        /*
         * Filter events for PMU-specific reasons.
         */
        int (*filter_match) (struct perf_event *event);
};
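
/*
 * Example (sketch): a minimal software PMU only needs ->event_init() and
 * the ->add()/->del()/->start()/->stop()/->read() callbacks before it can
 * be registered; the my_* names below are placeholders for a driver's own
 * implementations:
 *
 *      static struct pmu my_pmu = {
 *              .task_ctx_nr    = perf_sw_context,
 *              .event_init     = my_event_init,
 *              .add            = my_add,
 *              .del            = my_del,
 *              .start          = my_start,
 *              .stop           = my_stop,
 *              .read           = my_read,
 *      };
 *
 *      ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 *
 * where a type of -1 asks the core to allocate a dynamic PMU type id.
 */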

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
        PERF_EVENT_STATE_EXIT           = -3,
        PERF_EVENT_STATE_ERROR          = -2,
        PERF_EVENT_STATE_OFF            = -1,
        PERF_EVENT_STATE_INACTIVE       =  0,
        PERF_EVENT_STATE_ACTIVE         =  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
                                        struct perf_sample_data *,
                                        struct pt_regs *regs);

enum perf_group_flag {
        PERF_GROUP_SOFTWARE = 0x1,
};

#define SWEVENT_HLIST_BITS      8
#define SWEVENT_HLIST_SIZE      (1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
        struct hlist_head heads[SWEVENT_HLIST_SIZE];
        struct rcu_head rcu_head;
};

/* perf_event::attach_state flags */
#define PERF_ATTACH_CONTEXT     0x01
#define PERF_ATTACH_GROUP       0x02
#define PERF_ATTACH_TASK        0x04
#define PERF_ATTACH_TASK_DATA   0x08

struct perf_cgroup;
struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
        /*
         * entry onto perf_event_context::event_list;
         *   modifications require ctx->lock
         *   RCU safe iterations.
         */
        struct list_head event_entry;

        /*
         * Locked for modification by both ctx->mutex and ctx->lock; holding
         * either suffices for read. A group leader lists its siblings on
         * sibling_list; a sibling hangs off its leader via group_entry.
         */
        struct list_head group_entry;
        struct list_head sibling_list;

        /*
         * We need storage to track the entries in perf_pmu_migrate_context;
         * we cannot use the event_entry because of RCU and we want to keep
         * the group intact, which avoids us using the other two entries.
         */
        struct list_head migrate_entry;

        struct hlist_node hlist_entry;
        struct list_head active_entry;
        int nr_siblings;
        int group_flags;
        struct perf_event *group_leader;
        struct pmu *pmu;

        enum perf_event_active_state state;
        unsigned int attach_state;
        local64_t count;
        atomic64_t child_count;

        /*
         * These are the total time in nanoseconds that the event
         * has been enabled (i.e. eligible to run, and the task has
         * been scheduled in, if this is a per-task event)
         * and running (scheduled onto the CPU), respectively.
         *
         * They are computed from tstamp_enabled, tstamp_running and
         * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
         */
        u64 total_time_enabled;
        u64 total_time_running;

        /*
         * These are timestamps used for computing total_time_enabled
         * and total_time_running when the event is in INACTIVE or
         * ACTIVE state, measured in nanoseconds from an arbitrary point
         * in time.
         * tstamp_enabled: the notional time when the event was enabled
         * tstamp_running: the notional time when the event was scheduled on
         * tstamp_stopped: in INACTIVE state, the notional time when the
         *      event was scheduled off.
         */
        u64 tstamp_enabled;
        u64 tstamp_running;
        u64 tstamp_stopped;

        /*
         * timestamp shadows the actual context timing but it can
         * be safely used in NMI interrupt context. It reflects the
         * context time as it was when the event was last scheduled in.
         *
         * ctx_time already accounts for ctx->timestamp. Therefore to
         * compute ctx_time for a sample, simply add perf_clock().
         */
        u64 shadow_ctx_time;

        struct perf_event_attr attr;
        u16 header_size;
        u16 id_header_size;
        u16 read_size;
        struct hw_perf_event hw;

        struct perf_event_context *ctx;
        atomic_long_t refcount;

        /*
         * These accumulate total time (in nanoseconds) that children
         * events have been enabled and running, respectively.
         */
        atomic64_t child_total_time_enabled;
        atomic64_t child_total_time_running;

        /*
         * Protect attach/detach and child_list:
         */
        struct mutex child_mutex;
        struct list_head child_list;
        struct perf_event *parent;

        int oncpu;
        int cpu;

        struct list_head owner_entry;
        struct task_struct *owner;

        /* mmap bits */
        struct mutex mmap_mutex;
        atomic_t mmap_count;

        struct ring_buffer *rb;
        struct list_head rb_entry;
        unsigned long rcu_batches;
        int rcu_pending;

        /* poll related */
        wait_queue_head_t waitq;
        struct fasync_struct *fasync;

        /* delayed work for NMIs and such */
        int pending_wakeup;
        int pending_kill;
        int pending_disable;
        struct irq_work pending;

        atomic_t event_limit;

        void (*destroy)(struct perf_event *);
        struct rcu_head rcu_head;

        struct pid_namespace *ns;
        u64 id;

        u64 (*clock)(void);
        perf_overflow_handler_t overflow_handler;
        void *overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
        struct trace_event_call *tp_event;
        struct event_filter *filter;
#ifdef CONFIG_FUNCTION_TRACER
        struct ftrace_ops ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
        struct perf_cgroup *cgrp; /* cgroup event is attached to */
        int cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
        struct pmu *pmu;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
         */
        raw_spinlock_t lock;
        /*
         * Protect the list of events.  Locking either mutex or lock
         * is sufficient to ensure the list doesn't change; to change
         * the list you need to lock both the mutex and the lock.
         */
        struct mutex mutex;

        struct list_head active_ctx_list;
        struct list_head pinned_groups;
        struct list_head flexible_groups;
        struct list_head event_list;
        int nr_events;
        int nr_active;
        int is_active;
        int nr_stat;
        int nr_freq;
        int rotate_disable;
        atomic_t refcount;
        struct task_struct *task;

        /*
         * Context clock, runs when context enabled.
         */
        u64 time;
        u64 timestamp;

        /*
         * These fields let us detect when two contexts have both
         * been cloned (inherited) from a common ancestor.
         */
        struct perf_event_context *parent_ctx;
        u64 parent_gen;
        u64 generation;
        int pin_count;
        int nr_cgroups;         /* cgroup evts throttled */
        void *task_ctx_data;    /* pmu specific data */
        struct rcu_head rcu_head;

        struct delayed_work orphans_remove;
        bool orphans_remove_sched;
};

/*
 * Number of contexts where an event can trigger:
 *      task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS        4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
        struct perf_event_context ctx;
        struct perf_event_context *task_ctx;
        int active_oncpu;
        int exclusive;

        raw_spinlock_t hrtimer_lock;
        struct hrtimer hrtimer;
        ktime_t hrtimer_interval;
        unsigned int hrtimer_active;

        struct pmu *unique_pmu;
        struct perf_cgroup *cgrp;
};

struct perf_output_handle {
        struct perf_event *event;
        struct ring_buffer *rb;
        unsigned long wakeup;
        unsigned long size;
        union {
                void *addr;
                unsigned long head;
        };
        int page;
};

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
        u64 time;
        u64 timestamp;
};

struct perf_cgroup {
        struct cgroup_subsys_state css;
        struct perf_cgroup_info __percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
        return container_of(task_css_check(task, perf_event_cgrp_id,
                                           ctx ? lockdep_is_held(&ctx->lock)
                                               : true),
                            struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */

#ifdef CONFIG_PERF_EVENTS

extern void *perf_aux_output_begin(struct perf_output_handle *handle,
                                   struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
                                unsigned long size, bool truncated);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
                                unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
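
/*
 * Rough AUX buffer flow in a PMU driver's start/interrupt path (a sketch;
 * error handling is elided and "buf"/"size" stand in for driver-private
 * details):
 *
 *      void *buf = perf_aux_output_begin(&handle, event);
 *      if (buf) {
 *              (point the hardware at buf and let it write trace data)
 *              perf_aux_output_end(&handle, size, false);
 *      }
 */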

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
                                       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
                                        struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct perf_event *perf_event_get(unsigned int fd);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                 int cpu,
                                 struct task_struct *task,
                                 perf_overflow_handler_t callback,
                                 void *context);
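
/*
 * Sketch of in-kernel counter creation, in the style of users such as the
 * hardlockup watchdog ("wd_overflow" is a placeholder overflow callback):
 *
 *      struct perf_event_attr attr = {
 *              .type           = PERF_TYPE_HARDWARE,
 *              .config         = PERF_COUNT_HW_CPU_CYCLES,
 *              .size           = sizeof(attr),
 *              .sample_period  = period,
 *      };
 *      struct perf_event *evt;
 *
 *      evt = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *                                             wd_overflow, NULL);
 *      if (IS_ERR(evt))
 *              return PTR_ERR(evt);
 */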
extern void perf_pmu_migrate_context(struct pmu *pmu,
                                     int src_cpu, int dst_cpu);
extern u64 perf_event_read_local(struct perf_event *event);
extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);

struct perf_sample_data {
        /*
         * Fields set by perf_sample_data_init(), grouped so as to
         * minimize the cachelines touched.
         */
        u64 addr;
        struct perf_raw_record *raw;
        struct perf_branch_stack *br_stack;
        u64 period;
        u64 weight;
        u64 txn;
        union perf_mem_data_src data_src;

        /*
         * The other fields, optionally {set,used} by
         * perf_{prepare,output}_sample().
         */
        u64 type;
        u64 ip;
        struct {
                u32 pid;
                u32 tid;
        } tid_entry;
        u64 time;
        u64 id;
        u64 stream_id;
        struct {
                u32 cpu;
                u32 reserved;
        } cpu_entry;
        struct perf_callchain_entry *callchain;

        /*
         * regs_user may point to task_pt_regs or to regs_user_copy, depending
         * on arch details.
         */
        struct perf_regs regs_user;
        struct pt_regs regs_user_copy;

        struct perf_regs regs_intr;
        u64 stack_user_size;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
                     PERF_MEM_S(LVL, NA)  |\
                     PERF_MEM_S(SNOOP, NA)|\
                     PERF_MEM_S(LOCK, NA) |\
                     PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
                                         u64 addr, u64 period)
{
        /* remaining struct members initialized in perf_prepare_sample() */
        data->addr = addr;
        data->raw  = NULL;
        data->br_stack = NULL;
        data->period = period;
        data->weight = 0;
        data->data_src.val = PERF_MEM_NA;
        data->txn = 0;
}
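
/*
 * Typical use in a PMU's overflow/PMI handler (sketch):
 *
 *      struct perf_sample_data data;
 *      struct hw_perf_event *hwc = &event->hw;
 *
 *      perf_sample_data_init(&data, 0, hwc->last_period);
 *      if (perf_event_overflow(event, &data, regs))
 *              (stop the event via the pmu's ->stop() method)
 */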

extern void perf_output_sample(struct perf_output_handle *handle,
                               struct perf_event_header *header,
                               struct perf_sample_data *data,
                               struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
                                struct perf_sample_data *data,
                                struct perf_event *event,
                                struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
                               struct perf_sample_data *data,
                               struct pt_regs *regs);

extern void perf_event_output(struct perf_event *event,
                              struct perf_sample_data *data,
                              struct pt_regs *regs);

extern void
perf_event_header__init_id(struct perf_event_header *header,
                           struct perf_sample_data *data,
                           struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
                             struct perf_output_handle *handle,
                             struct perf_sample_data *sample);

extern void
perf_log_lost_samples(struct perf_event *event, u64 lost);

static inline bool is_sampling_event(struct perf_event *event)
{
        return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
        return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(*regs));

        perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
        if (static_key_false(&perf_swevent_enabled[event_id]))
                __perf_sw_event(event_id, nr, regs, addr);
}
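
/*
 * Callers count a software event in one line, e.g. the page-fault path
 * does:
 *
 *      perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */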

DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);

/*
 * 'Special' version for the scheduler; it hard assumes no recursion,
 * which is guaranteed by us not actually scheduling inside other swevents
 * because those disable preemption.
 */
static __always_inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
        if (static_key_false(&perf_swevent_enabled[event_id])) {
                struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

                perf_fetch_caller_regs(regs);
                ___perf_sw_event(event_id, nr, regs, addr);
        }
}

extern struct static_key_deferred perf_sched_events;

static __always_inline bool
perf_sw_migrate_enabled(void)
{
        if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
                return true;
        return false;
}

static inline void perf_event_task_migrate(struct task_struct *task)
{
        if (perf_sw_migrate_enabled())
                task->sched_migrated = 1;
}

static inline void perf_event_task_sched_in(struct task_struct *prev,
                                            struct task_struct *task)
{
        if (static_key_false(&perf_sched_events.key))
                __perf_event_task_sched_in(prev, task);

        if (perf_sw_migrate_enabled() && task->sched_migrated) {
                struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

                perf_fetch_caller_regs(regs);
                ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
                task->sched_migrated = 0;
        }
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
                                             struct task_struct *next)
{
        perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

        if (static_key_false(&perf_sched_events.key))
                __perf_event_task_sched_out(prev, next);
}

static inline u64 __perf_event_count(struct perf_event *event)
{
        return local64_read(&event->count) + atomic64_read(&event->child_count);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
        if (entry->nr < PERF_MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
}
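
/*
 * Arch code implements perf_callchain_kernel()/perf_callchain_user() by
 * walking its stack frames and feeding each recovered address into
 * perf_callchain_store(); entries past PERF_MAX_STACK_DEPTH are silently
 * dropped. A sketch (x86-flavoured, register names vary by arch):
 *
 *      void perf_callchain_kernel(struct perf_callchain_entry *entry,
 *                                 struct pt_regs *regs)
 *      {
 *              perf_callchain_store(entry, regs->ip);
 *              (walk the stack, calling perf_callchain_store() per frame)
 *      }
 */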

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

extern int perf_proc_update_handler(struct ctl_table *table, int write,
                                    void __user *buffer, size_t *lenp,
                                    loff_t *ppos);
extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
                                             void __user *buffer, size_t *lenp,
                                             loff_t *ppos);

/* Access checks against the perf_event_paranoid sysctl: */
static inline bool perf_paranoid_tracepoint_raw(void)
{
        return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
        return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
        return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
                          int entry_size, struct pt_regs *regs,
                          struct hlist_head *head, int rctx,
                          struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
        (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs) instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
        return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool needs_branch_stack(struct perf_event *event)
{
        return event->attr.branch_sample_type != 0;
}

static inline bool has_aux(struct perf_event *event)
{
        return event->pmu->setup_aux;
}

extern int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
                                     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
                                     unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern int __perf_event_disable(void *info);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
                      struct perf_event *event) { return NULL; }
static inline void
perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
                    bool truncated) { }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
                     unsigned long size) { return -EINVAL; }
static inline void *
perf_get_aux(struct perf_output_handle *handle) { return NULL; }
static inline void
perf_event_task_migrate(struct task_struct *task) { }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
                         struct task_struct *task) { }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
                          struct task_struct *next) { }
static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
static inline struct perf_event *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
        return ERR_PTR(-EINVAL);
}
static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; }
static inline void perf_event_print_debug(void) { }
static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
        return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
static inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { }
static inline void
perf_bp_event(struct perf_event *event, void *data) { }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma) { }
static inline void perf_event_exec(void) { }
static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx) { }
static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; }
static inline void perf_event_enable(struct perf_event *event) { }
static inline void perf_event_disable(struct perf_event *event) { }
static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
#endif /* CONFIG_PERF_EVENTS */

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
extern bool perf_event_can_stop_tick(void);
#else
static inline bool perf_event_can_stop_tick(void) { return true; }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void) { }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched/core.c.
 */
#define perf_cpu_notifier(fn)                                           \
do {                                                                    \
        static struct notifier_block fn##_nb =                          \
                { .notifier_call = fn, .priority = CPU_PRI_PERF };      \
        unsigned long cpu = smp_processor_id();                         \
        unsigned long flags;                                            \
                                                                        \
        cpu_notifier_register_begin();                                  \
        fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,                     \
           (void *)(unsigned long)cpu);                                 \
        local_irq_save(flags);                                          \
        fn(&fn##_nb, (unsigned long)CPU_STARTING,                       \
           (void *)(unsigned long)cpu);                                 \
        local_irq_restore(flags);                                       \
        fn(&fn##_nb, (unsigned long)CPU_ONLINE,                         \
           (void *)(unsigned long)cpu);                                 \
        __register_cpu_notifier(&fn##_nb);                              \
        cpu_notifier_register_done();                                   \
} while (0)
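
/*
 * Usage sketch: given a standard notifier callback,
 *
 *      static int my_cpu_notifier(struct notifier_block *nb,
 *                                 unsigned long action, void *hcpu);
 *
 * a driver invokes perf_cpu_notifier(my_cpu_notifier); which registers the
 * block and also replays UP_PREPARE/STARTING/ONLINE for the current CPU.
 */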

/*
 * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
 * callback for already online CPUs.
 */
#define __perf_cpu_notifier(fn)                                         \
do {                                                                    \
        static struct notifier_block fn##_nb =                          \
                { .notifier_call = fn, .priority = CPU_PRI_PERF };      \
                                                                        \
        __register_cpu_notifier(&fn##_nb);                              \
} while (0)

struct perf_pmu_events_attr {
        struct device_attribute attr;
        u64 id;
        const char *event_str;
};

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
                              char *page);

#define PMU_EVENT_ATTR(_name, _var, _id, _show)                         \
static struct perf_pmu_events_attr _var = {                             \
        .attr = __ATTR(_name, 0444, _show, NULL),                       \
        .id   = _id,                                                    \
};

#define PMU_EVENT_ATTR_STRING(_name, _var, _str)                        \
static struct perf_pmu_events_attr _var = {                             \
        .attr      = __ATTR(_name, 0444, perf_event_sysfs_show, NULL),  \
        .id        = 0,                                                 \
        .event_str = _str,                                              \
};

#define PMU_FORMAT_ATTR(_name, _format)                                 \
static ssize_t                                                          \
_name##_show(struct device *dev,                                        \
             struct device_attribute *attr,                             \
             char *page)                                                \
{                                                                       \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
        return sprintf(page, _format "\n");                             \
}                                                                       \
                                                                        \
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
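
/*
 * Example: x86 exposes its config bit layout via
 *
 *      PMU_FORMAT_ATTR(event, "config:0-7");
 *
 * which defines event_show() and format_attr_event for inclusion in the
 * PMU's format attribute group.
 */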

#endif /* _LINUX_PERF_EVENT_H */