linux/include/linux/perf_event.h
/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
        int                             (*is_in_guest)(void);
        int                             (*is_user_mode)(void);
        unsigned long                   (*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <asm/local.h>

struct perf_callchain_entry {
        __u64                           nr;
        __u64                           ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
        u32                             size;
        void                            *data;
};

/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * Support for mispred and predicted is optional; if it is not
 * supported, mispred = predicted = 0.
 */
struct perf_branch_entry {
        __u64   from;
        __u64   to;
        __u64   mispred:1,  /* target mispredicted */
                predicted:1,/* target predicted */
                reserved:62;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample.
 * Branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
        __u64                           nr;
        struct perf_branch_entry        entries[0];
};

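/*
 * A sketch (not part of this header) of walking a branch stack sample;
 * walk_branch_stack() is an illustrative name, and @br would point at
 * the perf_branch_stack of a decoded sample, with entries[0] being the
 * most recent branch:
 *
 *      static void walk_branch_stack(struct perf_branch_stack *br)
 *      {
 *              u64 i;
 *
 *              for (i = 0; i < br->nr; i++) {
 *                      struct perf_branch_entry *e = &br->entries[i];
 *
 *                      pr_debug("0x%llx -> 0x%llx%s\n", e->from, e->to,
 *                               e->mispred ? " (mispredicted)" : "");
 *              }
 *      }
 */
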
struct perf_regs_user {
        __u64           abi;
        struct pt_regs  *regs;
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
        u64             config; /* register value */
        unsigned int    reg;    /* register address or index */
        int             alloc;  /* extra register already allocated */
        int             idx;    /* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
        union {
                struct { /* hardware */
                        u64             config;
                        u64             last_tag;
                        unsigned long   config_base;
                        unsigned long   event_base;
                        int             event_base_rdpmc;
                        int             idx;
                        int             last_cpu;
                        int             flags;

                        struct hw_perf_event_extra extra_reg;
                        struct hw_perf_event_extra branch_reg;
                };
                struct { /* software */
                        struct hrtimer  hrtimer;
                };
                struct { /* tracepoint */
                        struct task_struct      *tp_target;
                        /* for tp_event->class */
                        struct list_head        tp_list;
                };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
                struct { /* breakpoint */
                        /*
                         * Crufty hack to avoid the chicken and egg
                         * problem hw_breakpoint has with context
                         * creation and event initialization.
                         */
                        struct task_struct              *bp_target;
                        struct arch_hw_breakpoint       info;
                        struct list_head                bp_list;
                };
#endif
        };
        int                             state;
        local64_t                       prev_count;
        u64                             sample_period;
        u64                             last_period;
        local64_t                       period_left;
        u64                             interrupts_seq;
        u64                             interrupts;

        u64                             freq_time_stamp;
        u64                             freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED        0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE       0x02 /* event->count up-to-date */
#define PERF_HES_ARCH           0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
        struct list_head                entry;

        struct device                   *dev;
        const struct attribute_group    **attr_groups;
        char                            *name;
        int                             type;

        int * __percpu                  pmu_disable_count;
        struct perf_cpu_context * __percpu pmu_cpu_context;
        int                             task_ctx_nr;

        /*
         * Fully disable/enable this PMU; can be used to protect from the PMI
         * as well as for lazy/batch writing of the MSRs.
         */
        void (*pmu_enable)              (struct pmu *pmu); /* optional */
        void (*pmu_disable)             (struct pmu *pmu); /* optional */

        /*
         * Try and initialize the event for this PMU.
         * Should return -ENOENT when the @event doesn't match this PMU.
         */
        int (*event_init)               (struct perf_event *event);

#define PERF_EF_START   0x01            /* start the counter when adding    */
#define PERF_EF_RELOAD  0x02            /* reload the counter when starting */
#define PERF_EF_UPDATE  0x04            /* update the counter when stopping */

        /*
         * Adds/Removes a counter to/from the PMU; can be done inside
         * a transaction, see the ->*_txn() methods.
         */
        int  (*add)                     (struct perf_event *event, int flags);
        void (*del)                     (struct perf_event *event, int flags);

        /*
         * Starts/Stops a counter present on the PMU. The PMI handler
         * should stop the counter when perf_event_overflow() returns
         * !0. ->start() will be used to continue.
         */
        void (*start)                   (struct perf_event *event, int flags);
        void (*stop)                    (struct perf_event *event, int flags);

        /*
         * Updates the counter value of the event.
         */
        void (*read)                    (struct perf_event *event);

        /*
         * Group event scheduling is treated as a transaction: add the
         * group's events as a whole and perform one schedulability test.
         * If the test fails, roll back the whole group.
         *
         * Start the transaction; after this, ->add() doesn't need to
         * do schedulability tests.
         */
        void (*start_txn)               (struct pmu *pmu); /* optional */
        /*
         * If ->start_txn() disabled the ->add() schedulability test
         * then ->commit_txn() is required to perform one. On success
         * the transaction is closed. On error the transaction is kept
         * open until ->cancel_txn() is called.
         */
        int  (*commit_txn)              (struct pmu *pmu); /* optional */
        /*
         * Will cancel the transaction; assumes ->del() is called
         * for each successful ->add() during the transaction.
         */
        void (*cancel_txn)              (struct pmu *pmu); /* optional */

        /*
         * Will return the value for perf_event_mmap_page::index for this event;
         * if no implementation is provided it will default to: event->hw.idx + 1.
         */
        int (*event_idx)                (struct perf_event *event); /* optional */

        /*
         * flush branch stack on context-switches (needed in cpu-wide mode)
         */
        void (*flush_branch_stack)      (void);
};

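/*
 * A minimal sketch of the ->*_txn() protocol for a hypothetical driver
 * (my_pmu_cpuc, my_pmu_cpu_ctx and my_pmu_schedule_test() are
 * illustrative names, not part of this API). ->add() skips its
 * schedulability test while PERF_EVENT_TXN is set; ->commit_txn()
 * then performs a single test for the whole group:
 *
 *      static void my_pmu_start_txn(struct pmu *pmu)
 *      {
 *              struct my_pmu_cpuc *cpuc = this_cpu_ptr(&my_pmu_cpu_ctx);
 *
 *              perf_pmu_disable(pmu);
 *              cpuc->txn_flags |= PERF_EVENT_TXN;
 *      }
 *
 *      static int my_pmu_commit_txn(struct pmu *pmu)
 *      {
 *              struct my_pmu_cpuc *cpuc = this_cpu_ptr(&my_pmu_cpu_ctx);
 *
 *              if (my_pmu_schedule_test(cpuc))
 *                      return -EAGAIN;
 *              cpuc->txn_flags &= ~PERF_EVENT_TXN;
 *              perf_pmu_enable(pmu);
 *              return 0;
 *      }
 *
 *      static void my_pmu_cancel_txn(struct pmu *pmu)
 *      {
 *              struct my_pmu_cpuc *cpuc = this_cpu_ptr(&my_pmu_cpu_ctx);
 *
 *              cpuc->txn_flags &= ~PERF_EVENT_TXN;
 *              perf_pmu_enable(pmu);
 *      }
 *
 * A failed ->commit_txn() leaves the transaction open until
 * ->cancel_txn() is called; the core then ->del()s each event it had
 * successfully ->add()ed.
 */
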
/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
        PERF_EVENT_STATE_ERROR          = -2,
        PERF_EVENT_STATE_OFF            = -1,
        PERF_EVENT_STATE_INACTIVE       =  0,
        PERF_EVENT_STATE_ACTIVE         =  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
                                        struct perf_sample_data *,
                                        struct pt_regs *regs);

enum perf_group_flag {
        PERF_GROUP_SOFTWARE             = 0x1,
};

#define SWEVENT_HLIST_BITS              8
#define SWEVENT_HLIST_SIZE              (1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
        struct hlist_head               heads[SWEVENT_HLIST_SIZE];
        struct rcu_head                 rcu_head;
};

#define PERF_ATTACH_CONTEXT     0x01
#define PERF_ATTACH_GROUP       0x02
#define PERF_ATTACH_TASK        0x04

struct perf_cgroup;
struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
        struct list_head                group_entry;
        struct list_head                event_entry;
        struct list_head                sibling_list;
        struct hlist_node               hlist_entry;
        int                             nr_siblings;
        int                             group_flags;
        struct perf_event               *group_leader;
        struct pmu                      *pmu;

        enum perf_event_active_state    state;
        unsigned int                    attach_state;
        local64_t                       count;
        atomic64_t                      child_count;

        /*
         * These are the total time in nanoseconds that the event
         * has been enabled (i.e. eligible to run, and the task has
         * been scheduled in, if this is a per-task event)
         * and running (scheduled onto the CPU), respectively.
         *
         * They are computed from tstamp_enabled, tstamp_running and
         * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
         */
        u64                             total_time_enabled;
        u64                             total_time_running;

        /*
         * These are timestamps used for computing total_time_enabled
         * and total_time_running when the event is in INACTIVE or
         * ACTIVE state, measured in nanoseconds from an arbitrary point
         * in time.
         * tstamp_enabled: the notional time when the event was enabled
         * tstamp_running: the notional time when the event was scheduled on
         * tstamp_stopped: in INACTIVE state, the notional time when the
         *      event was scheduled off.
         */
        u64                             tstamp_enabled;
        u64                             tstamp_running;
        u64                             tstamp_stopped;

        /*
         * timestamp shadows the actual context timing but it can
         * be safely used in NMI interrupt context. It reflects the
         * context time as it was when the event was last scheduled in.
         *
         * ctx_time already accounts for ctx->timestamp. Therefore to
         * compute ctx_time for a sample, simply add perf_clock().
         */
        u64                             shadow_ctx_time;

        struct perf_event_attr          attr;
        u16                             header_size;
        u16                             id_header_size;
        u16                             read_size;
        struct hw_perf_event            hw;

        struct perf_event_context       *ctx;
        atomic_long_t                   refcount;

        /*
         * These accumulate total time (in nanoseconds) that child
         * events have been enabled and running, respectively.
         */
        atomic64_t                      child_total_time_enabled;
        atomic64_t                      child_total_time_running;

        /*
         * Protect attach/detach and child_list:
         */
        struct mutex                    child_mutex;
        struct list_head                child_list;
        struct perf_event               *parent;

        int                             oncpu;
        int                             cpu;

        struct list_head                owner_entry;
        struct task_struct              *owner;

        /* mmap bits */
        struct mutex                    mmap_mutex;
        atomic_t                        mmap_count;

        struct ring_buffer              *rb;
        struct list_head                rb_entry;

        /* poll related */
        wait_queue_head_t               waitq;
        struct fasync_struct            *fasync;

        /* delayed work for NMIs and such */
        int                             pending_wakeup;
        int                             pending_kill;
        int                             pending_disable;
        struct irq_work                 pending;

        atomic_t                        event_limit;

        void (*destroy)(struct perf_event *);
        struct rcu_head                 rcu_head;

        struct pid_namespace            *ns;
        u64                             id;

        perf_overflow_handler_t         overflow_handler;
        void                            *overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
        struct ftrace_event_call        *tp_event;
        struct event_filter             *filter;
#ifdef CONFIG_FUNCTION_TRACER
        struct ftrace_ops               ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
        struct perf_cgroup              *cgrp; /* cgroup the event is attached to */
        int                             cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

enum perf_event_context_type {
        task_context,
        cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
        struct pmu                      *pmu;
        enum perf_event_context_type    type;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
         */
        raw_spinlock_t                  lock;
        /*
         * Protect the list of events.  Locking either mutex or lock
         * is sufficient to ensure the list doesn't change; to change
         * the list you need to lock both the mutex and the spinlock.
         */
        struct mutex                    mutex;

        struct list_head                pinned_groups;
        struct list_head                flexible_groups;
        struct list_head                event_list;
        int                             nr_events;
        int                             nr_active;
        int                             is_active;
        int                             nr_stat;
        int                             nr_freq;
        int                             rotate_disable;
        atomic_t                        refcount;
        struct task_struct              *task;

        /*
         * Context clock, runs when context enabled.
         */
        u64                             time;
        u64                             timestamp;

        /*
         * These fields let us detect when two contexts have both
         * been cloned (inherited) from a common ancestor.
         */
        struct perf_event_context       *parent_ctx;
        u64                             parent_gen;
        u64                             generation;
        int                             pin_count;
        int                             nr_cgroups;      /* cgroup evts */
        int                             nr_branch_stack; /* branch_stack evt */
        struct rcu_head                 rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *      task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS        4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
        struct perf_event_context       ctx;
        struct perf_event_context       *task_ctx;
        int                             active_oncpu;
        int                             exclusive;
        struct list_head                rotation_list;
        int                             jiffies_interval;
        struct pmu                      *unique_pmu;
        struct perf_cgroup              *cgrp;
};

struct perf_output_handle {
        struct perf_event               *event;
        struct ring_buffer              *rb;
        unsigned long                   wakeup;
        unsigned long                   size;
        void                            *addr;
        int                             page;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
                                       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
                                        struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                int cpu,
                                struct task_struct *task,
                                perf_overflow_handler_t callback,
                                void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
                                int src_cpu, int dst_cpu);
extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);

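/*
 * A minimal sketch (illustrative only, not part of this header) of how
 * a kernel user might create a CPU-bound counter with
 * perf_event_create_kernel_counter(); my_overflow() and
 * my_create_counter() are hypothetical names, and the overflow callback
 * typically runs from the PMU interrupt:
 *
 *      static void my_overflow(struct perf_event *event,
 *                              struct perf_sample_data *data,
 *                              struct pt_regs *regs)
 *      {
 *              pr_debug("cycles counter overflowed\n");
 *      }
 *
 *      static struct perf_event *my_create_counter(int cpu)
 *      {
 *              struct perf_event_attr attr = {
 *                      .type           = PERF_TYPE_HARDWARE,
 *                      .config         = PERF_COUNT_HW_CPU_CYCLES,
 *                      .size           = sizeof(attr),
 *                      .sample_period  = 1000000,
 *              };
 *
 *              return perf_event_create_kernel_counter(&attr, cpu, NULL,
 *                                                      my_overflow, NULL);
 *      }
 *
 * The returned pointer is IS_ERR() encoded on failure; a successful
 * counter is read with perf_event_read_value() and torn down with
 * perf_event_release_kernel().
 */
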
struct perf_sample_data {
        u64                             type;

        u64                             ip;
        struct {
                u32     pid;
                u32     tid;
        }                               tid_entry;
        u64                             time;
        u64                             addr;
        u64                             id;
        u64                             stream_id;
        struct {
                u32     cpu;
                u32     reserved;
        }                               cpu_entry;
        u64                             period;
        union  perf_mem_data_src        data_src;
        struct perf_callchain_entry     *callchain;
        struct perf_raw_record          *raw;
        struct perf_branch_stack        *br_stack;
        struct perf_regs_user           regs_user;
        u64                             stack_user_size;
        u64                             weight;
};

static inline void perf_sample_data_init(struct perf_sample_data *data,
                                         u64 addr, u64 period)
{
        /* remaining struct members initialized in perf_prepare_sample() */
        data->addr = addr;
        data->raw  = NULL;
        data->br_stack = NULL;
        data->period = period;
        data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE;
        data->regs_user.regs = NULL;
        data->stack_user_size = 0;
        data->weight = 0;
        data->data_src.val = 0;
}

extern void perf_output_sample(struct perf_output_handle *handle,
                               struct perf_event_header *header,
                               struct perf_sample_data *data,
                               struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
                                struct perf_sample_data *data,
                                struct perf_event *event,
                                struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
                                 struct perf_sample_data *data,
                                 struct pt_regs *regs);

static inline bool is_sampling_event(struct perf_event *event)
{
        return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
        return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(*regs));

        perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
        struct pt_regs hot_regs;

        if (static_key_false(&perf_swevent_enabled[event_id])) {
                if (!regs) {
                        perf_fetch_caller_regs(&hot_regs);
                        regs = &hot_regs;
                }
                __perf_sw_event(event_id, nr, regs, addr);
        }
}

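/*
 * Example (a sketch, mirroring how core kernel code counts software
 * events): a fault path can account a page fault against any active
 * software events with:
 *
 *      perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * The static_key check above keeps this effectively a patched-out NOP
 * until at least one software event of that type exists.
 */
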
extern struct static_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
                                            struct task_struct *task)
{
        if (static_key_false(&perf_sched_events.key))
                __perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
                                             struct task_struct *next)
{
        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);

        if (static_key_false(&perf_sched_events.key))
                __perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
        if (entry->nr < PERF_MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
}

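/*
 * A sketch of how an architecture's perf_callchain_kernel() is expected
 * to use perf_callchain_store(); the frame-walking helpers below
 * (frame_pointer_of, frame_is_valid, return_address_of, next_frame) are
 * hypothetical placeholders, not real API:
 *
 *      void perf_callchain_kernel(struct perf_callchain_entry *entry,
 *                                 struct pt_regs *regs)
 *      {
 *              unsigned long fp = frame_pointer_of(regs);
 *
 *              perf_callchain_store(entry, instruction_pointer(regs));
 *              while (frame_is_valid(fp)) {
 *                      perf_callchain_store(entry, return_address_of(fp));
 *                      fp = next_frame(fp);
 *              }
 *      }
 *
 * perf_callchain_store() silently drops entries beyond
 * PERF_MAX_STACK_DEPTH, so the walker needs no bound of its own.
 */
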
extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

extern int perf_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
        return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
        return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
        return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
                          int entry_size, struct pt_regs *regs,
                          struct hlist_head *head, int rctx,
                          struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
                (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs) instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
        return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

extern int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
                             const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
                                     unsigned int len);
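/*
 * A sketch of the output path, as used by the core: reserve space,
 * copy the record, then commit. The record layout here is only
 * illustrative:
 *
 *      struct perf_output_handle handle;
 *      struct perf_event_header header = {
 *              .type = PERF_RECORD_SAMPLE,
 *              .size = sizeof(header) + sizeof(u64),
 *      };
 *      u64 payload = 42;
 *
 *      if (perf_output_begin(&handle, event, header.size))
 *              return;
 *      perf_output_put(&handle, header);
 *      perf_output_put(&handle, payload);
 *      perf_output_end(&handle);
 *
 * perf_output_begin() returns non-zero when no space could be reserved,
 * in which case nothing must be written and perf_output_end() must not
 * be called.
 */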
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern int __perf_event_disable(void *info);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *prev,
                         struct task_struct *task)                      { }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
                          struct task_struct *next)                     { }
static inline int perf_event_init_task(struct task_struct *child)       { return 0; }
static inline void perf_event_exit_task(struct task_struct *child)      { }
static inline void perf_event_free_task(struct task_struct *task)       { }
static inline void perf_event_delayed_put(struct task_struct *task)     { }
static inline void perf_event_print_debug(void)                         { }
static inline int perf_event_task_disable(void)                         { return -EINVAL; }
static inline int perf_event_task_enable(void)                          { return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
        return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)     { }
static inline void
perf_bp_event(struct perf_event *event, void *data)                     { }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)                           { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)                           { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)          { }
static inline void perf_event_comm(struct task_struct *tsk)             { }
static inline void perf_event_fork(struct task_struct *tsk)             { }
static inline void perf_event_init(void)                                { }
static inline int  perf_swevent_get_recursion_context(void)             { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)         { }
static inline void perf_event_enable(struct perf_event *event)          { }
static inline void perf_event_disable(struct perf_event *event)         { }
static inline int __perf_event_disable(void *info)                      { return -1; }
static inline void perf_event_task_tick(void)                           { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
extern bool perf_event_can_stop_tick(void);
#else
static inline bool perf_event_can_stop_tick(void)                       { return true; }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)                       { }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)                                           \
do {                                                                    \
        static struct notifier_block fn##_nb __cpuinitdata =            \
                { .notifier_call = fn, .priority = CPU_PRI_PERF };      \
        unsigned long cpu = smp_processor_id();                         \
        unsigned long flags;                                            \
        fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,                     \
                (void *)(unsigned long)cpu);                            \
        local_irq_save(flags);                                          \
        fn(&fn##_nb, (unsigned long)CPU_STARTING,                       \
                (void *)(unsigned long)cpu);                            \
        local_irq_restore(flags);                                       \
        fn(&fn##_nb, (unsigned long)CPU_ONLINE,                         \
                (void *)(unsigned long)cpu);                            \
        register_cpu_notifier(&fn##_nb);                                \
} while (0)

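/*
 * Typical use of perf_cpu_notifier() from a PMU driver's init path; the
 * callback and helper names below are illustrative. The macro defines
 * the notifier_block, replays CPU_UP_PREPARE/CPU_STARTING/CPU_ONLINE
 * for the current CPU, and then registers the notifier:
 *
 *      static int my_pmu_cpu_notify(struct notifier_block *nb,
 *                                   unsigned long action, void *hcpu)
 *      {
 *              switch (action & ~CPU_TASKS_FROZEN) {
 *              case CPU_STARTING:
 *                      my_pmu_setup_cpu((long)hcpu);
 *                      break;
 *              }
 *              return NOTIFY_OK;
 *      }
 *
 * and from the driver's init function:
 *
 *      perf_cpu_notifier(my_pmu_cpu_notify);
 */
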
struct perf_pmu_events_attr {
        struct device_attribute attr;
        u64 id;
        const char *event_str;
};

#define PMU_EVENT_ATTR(_name, _var, _id, _show)                         \
static struct perf_pmu_events_attr _var = {                             \
        .attr = __ATTR(_name, 0444, _show, NULL),                       \
        .id   =  _id,                                                   \
};

#define PMU_FORMAT_ATTR(_name, _format)                                 \
static ssize_t                                                          \
_name##_show(struct device *dev,                                        \
                               struct device_attribute *attr,           \
                               char *page)                              \
{                                                                       \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
        return sprintf(page, _format "\n");                             \
}                                                                       \
                                                                        \
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)

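/*
 * Example of how a PMU driver can describe itself in sysfs with these
 * macros (attribute names and bit ranges below are illustrative); the
 * resulting groups are hooked up via pmu::attr_groups:
 *
 *      PMU_FORMAT_ATTR(event, "config:0-7");
 *
 *      static struct attribute *my_pmu_format_attrs[] = {
 *              &format_attr_event.attr,
 *              NULL,
 *      };
 *
 *      static struct attribute_group my_pmu_format_group = {
 *              .name  = "format",
 *              .attrs = my_pmu_format_attrs,
 *      };
 *
 * PMU_EVENT_ATTR() works the same way for named events, pairing each
 * entry with a _show() routine that prints the event encoding.
 */
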
#endif /* _LINUX_PERF_EVENT_H */