linux/include/linux/perf_event.h
   1/*
   2 * Performance events:
   3 *
   4 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
   5 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
   6 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
   7 *
   8 * Data type definitions, declarations, prototypes.
   9 *
  10 *    Started by: Thomas Gleixner and Ingo Molnar
  11 *
  12 * For licencing details see kernel-base/COPYING
  13 */
  14#ifndef _LINUX_PERF_EVENT_H
  15#define _LINUX_PERF_EVENT_H
  16
  17#include <uapi/linux/perf_event.h>
  18
  19/*
  20 * Kernel-internal data types and definitions:
  21 */
  22
  23#ifdef CONFIG_PERF_EVENTS
  24# include <asm/perf_event.h>
  25# include <asm/local64.h>
  26#endif
  27
  28struct perf_guest_info_callbacks {
  29        int                             (*is_in_guest)(void);
  30        int                             (*is_user_mode)(void);
  31        unsigned long                   (*get_guest_ip)(void);
  32};
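/*
 * Illustrative sketch (not part of the kernel source): a hypervisor module
 * would typically fill this structure with its own state queries and hand it
 * to perf_register_guest_info_callbacks(), declared further down.  The
 * my_*() helpers below are hypothetical:
 *
 *	static int my_is_in_guest(void)        { return my_vcpu_loaded(); }
 *	static int my_is_user_mode(void)       { return my_guest_cpl() == 3; }
 *	static unsigned long my_get_guest_ip(void) { return my_guest_rip(); }
 *
 *	static struct perf_guest_info_callbacks my_guest_cbs = {
 *		.is_in_guest	= my_is_in_guest,
 *		.is_user_mode	= my_is_user_mode,
 *		.get_guest_ip	= my_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&my_guest_cbs);
 */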
  33
  34#ifdef CONFIG_HAVE_HW_BREAKPOINT
  35#include <asm/hw_breakpoint.h>
  36#endif
  37
  38#include <linux/list.h>
  39#include <linux/mutex.h>
  40#include <linux/rculist.h>
  41#include <linux/rcupdate.h>
  42#include <linux/spinlock.h>
  43#include <linux/hrtimer.h>
  44#include <linux/fs.h>
  45#include <linux/pid_namespace.h>
  46#include <linux/workqueue.h>
  47#include <linux/ftrace.h>
  48#include <linux/cpu.h>
  49#include <linux/irq_work.h>
  50#include <linux/static_key.h>
  51#include <linux/jump_label_ratelimit.h>
  52#include <linux/atomic.h>
  53#include <linux/sysfs.h>
  54#include <linux/perf_regs.h>
  55#include <linux/workqueue.h>
  56#include <linux/cgroup.h>
  57#include <asm/local.h>
  58
  59struct perf_callchain_entry {
  60        __u64                           nr;
  61        __u64                           ip[PERF_MAX_STACK_DEPTH];
  62};
  63
  64struct perf_raw_record {
  65        u32                             size;
  66        void                            *data;
  67};
  68
  69/*
  70 * branch stack layout:
  71 *  nr: number of taken branches stored in entries[]
  72 *
  73 * Note that nr can vary from sample to sample.
  74 * Branches (to, from) are stored from most recent
  75 * to least recent, i.e., entries[0] contains the most
  76 * recent branch.
  77 */
  78struct perf_branch_stack {
  79        __u64                           nr;
  80        struct perf_branch_entry        entries[0];
  81};
  82
  83struct task_struct;
  84
  85/*
  86 * extra PMU register associated with an event
  87 */
  88struct hw_perf_event_extra {
  89        u64             config; /* register value */
  90        unsigned int    reg;    /* register address or index */
  91        int             alloc;  /* extra register already allocated */
  92        int             idx;    /* index in shared_regs->regs[] */
  93};
  94
  95/**
  96 * struct hw_perf_event - performance event hardware details:
  97 */
  98struct hw_perf_event {
  99#ifdef CONFIG_PERF_EVENTS
 100        union {
 101                struct { /* hardware */
 102                        u64             config;
 103                        u64             last_tag;
 104                        unsigned long   config_base;
 105                        unsigned long   event_base;
 106                        int             event_base_rdpmc;
 107                        int             idx;
 108                        int             last_cpu;
 109                        int             flags;
 110
 111                        struct hw_perf_event_extra extra_reg;
 112                        struct hw_perf_event_extra branch_reg;
 113                };
 114                struct { /* software */
 115                        struct hrtimer  hrtimer;
 116                };
 117                struct { /* tracepoint */
 118                        /* for tp_event->class */
 119                        struct list_head        tp_list;
 120                };
 121                struct { /* intel_cqm */
 122                        int                     cqm_state;
 123                        u32                     cqm_rmid;
 124                        int                     is_group_event;
 125                        struct list_head        cqm_events_entry;
 126                        struct list_head        cqm_groups_entry;
 127                        struct list_head        cqm_group_entry;
 128                };
 129                struct { /* itrace */
 130                        int                     itrace_started;
 131                };
 132                struct { /* amd_power */
 133                        u64     pwr_acc;
 134                        u64     ptsc;
 135                };
 136#ifdef CONFIG_HAVE_HW_BREAKPOINT
 137                struct { /* breakpoint */
 138                        /*
 139                         * Crufty hack to avoid the chicken and egg
 140                         * problem hw_breakpoint has with context
 141                         * creation and event initialization.
 142                         */
 143                        struct arch_hw_breakpoint       info;
 144                        struct list_head                bp_list;
 145                };
 146#endif
 147        };
 148        /*
 149         * If the event is a per task event, this will point to the task in
 150         * question. See the comment in perf_event_alloc().
 151         */
 152        struct task_struct              *target;
 153
 154/*
 155 * hw_perf_event::state flags; used to track the PERF_EF_* state.
 156 */
 157#define PERF_HES_STOPPED        0x01 /* the counter is stopped */
 158#define PERF_HES_UPTODATE       0x02 /* event->count up-to-date */
 159#define PERF_HES_ARCH           0x04
 160
 161        int                             state;
 162
 163        /*
 164         * The last observed hardware counter value, updated with a
 165         * local64_cmpxchg() such that pmu::read() can be called nested.
 166         */
 167        local64_t                       prev_count;
 168
 169        /*
 170         * The period to start the next sample with.
 171         */
 172        u64                             sample_period;
 173
 174        /*
 175         * The period we started this sample with.
 176         */
 177        u64                             last_period;
 178
 179        /*
 180         * However much is left of the current period; note that this is
 181         * a full 64bit value and allows for generation of periods longer
 182         * than hardware might allow.
 183         */
 184        local64_t                       period_left;
 185
 186        /*
 187         * State for throttling the event, see __perf_event_overflow() and
 188         * perf_adjust_freq_unthr_context().
 189         */
 190        u64                             interrupts_seq;
 191        u64                             interrupts;
 192
 193        /*
 194         * State for freq target events, see __perf_event_overflow() and
 195         * perf_adjust_freq_unthr_context().
 196         */
 197        u64                             freq_time_stamp;
 198        u64                             freq_count_stamp;
 199#endif
 200};
 201
 202struct perf_event;
 203
 204/*
 205 * Common implementation detail of pmu::{start,commit,cancel}_txn
 206 */
 207#define PERF_PMU_TXN_ADD  0x1           /* txn to add/schedule event on PMU */
 208#define PERF_PMU_TXN_READ 0x2           /* txn to read event group from PMU */
 209
 210/**
 211 * pmu::capabilities flags
 212 */
 213#define PERF_PMU_CAP_NO_INTERRUPT               0x01
 214#define PERF_PMU_CAP_NO_NMI                     0x02
 215#define PERF_PMU_CAP_AUX_NO_SG                  0x04
 216#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF           0x08
 217#define PERF_PMU_CAP_EXCLUSIVE                  0x10
 218#define PERF_PMU_CAP_ITRACE                     0x20
 219
 220/**
 221 * struct pmu - generic performance monitoring unit
 222 */
 223struct pmu {
 224        struct list_head                entry;
 225
 226        struct module                   *module;
 227        struct device                   *dev;
 228        const struct attribute_group    **attr_groups;
 229        const char                      *name;
 230        int                             type;
 231
 232        /*
 233         * various common per-pmu feature flags
 234         */
 235        int                             capabilities;
 236
 237        int * __percpu                  pmu_disable_count;
 238        struct perf_cpu_context * __percpu pmu_cpu_context;
 239        atomic_t                        exclusive_cnt; /* < 0: cpu; > 0: tsk */
 240        int                             task_ctx_nr;
 241        int                             hrtimer_interval_ms;
 242
 243        /*
 244         * Fully disable/enable this PMU, can be used to protect from the PMI
 245         * as well as for lazy/batch writing of the MSRs.
 246         */
 247        void (*pmu_enable)              (struct pmu *pmu); /* optional */
 248        void (*pmu_disable)             (struct pmu *pmu); /* optional */
 249
 250        /*
 251         * Try and initialize the event for this PMU.
 252         *
 253         * Returns:
 254         *  -ENOENT     -- @event is not for this PMU
 255         *
 256         *  -ENODEV     -- @event is for this PMU but PMU not present
 257         *  -EBUSY      -- @event is for this PMU but PMU temporarily unavailable
 258         *  -EINVAL     -- @event is for this PMU but @event is not valid
 259         *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
 260         *  -EACCES     -- @event is for this PMU, @event is valid, but no privileges
 261         *
 262         *  0           -- @event is for this PMU and valid
 263         *
 264         * Other error return values are allowed.
 265         */
 266        int (*event_init)               (struct perf_event *event);
 267
 268        /*
 269         * Notification that the event was mapped or unmapped.  Called
 270         * in the context of the mapping task.
 271         */
 272        void (*event_mapped)            (struct perf_event *event); /*optional*/
 273        void (*event_unmapped)          (struct perf_event *event); /*optional*/
 274
 275        /*
 276         * Flags for ->add()/->del()/->start()/->stop(). There are
 277         * matching hw_perf_event::state flags.
 278         */
 279#define PERF_EF_START   0x01            /* start the counter when adding    */
 280#define PERF_EF_RELOAD  0x02            /* reload the counter when starting */
 281#define PERF_EF_UPDATE  0x04            /* update the counter when stopping */
 282
 283        /*
 284         * Adds/Removes a counter to/from the PMU; this can be done inside a
 285         * transaction, see the ->*_txn() methods.
 286         *
 287         * The add/del callbacks will reserve all hardware resources required
 288         * to service the event, this includes any counter constraint
 289         * scheduling etc.
 290         *
 291         * Called with IRQs disabled and the PMU disabled on the CPU the event
 292         * is on.
 293         *
 294         * ->add() called without PERF_EF_START should result in the same state
 295         *  as ->add() followed by ->stop().
 296         *
 297         * ->del() must always stop the event as if PERF_EF_UPDATE had been
 298         *  passed. If it calls ->stop() to do so, that ->stop() must deal
 299         *  with already being stopped without PERF_EF_UPDATE.
 300         */
 301        int  (*add)                     (struct perf_event *event, int flags);
 302        void (*del)                     (struct perf_event *event, int flags);
 303
 304        /*
 305         * Starts/Stops a counter present on the PMU.
 306         *
 307         * The PMI handler should stop the counter when perf_event_overflow()
 308         * returns !0. ->start() will be used to continue.
 309         *
 310         * Also used to change the sample period.
 311         *
 312         * Called with IRQs disabled and the PMU disabled on the CPU the event
 313         * is on -- will be called from NMI context when the PMU generates
 314         * NMIs.
 315         *
 316         * ->stop() with PERF_EF_UPDATE will read the counter and update
 317         *  period/count values like ->read() would.
 318         *
 319         * ->start() with PERF_EF_RELOAD will reprogram the counter
 320         *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
 321         */
 322        void (*start)                   (struct perf_event *event, int flags);
 323        void (*stop)                    (struct perf_event *event, int flags);
 324
 325        /*
 326         * Updates the counter value of the event.
 327         *
 328         * For sampling capable PMUs this will also update the software period
 329         * hw_perf_event::period_left field.
 330         */
 331        void (*read)                    (struct perf_event *event);
 332
 333        /*
 334         * Group event scheduling is treated as a transaction: add the
 335         * group's events as a whole and perform one schedulability test.
 336         * If the test fails, roll back the whole group.
 337         *
 338         * Start the transaction, after this ->add() doesn't need to
 339         * do schedulability tests.
 340         *
 341         * Optional.
 342         */
 343        void (*start_txn)               (struct pmu *pmu, unsigned int txn_flags);
 344        /*
 345         * If ->start_txn() disabled the ->add() schedulability test
 346         * then ->commit_txn() is required to perform one. On success
 347         * the transaction is closed. On error the transaction is kept
 348         * open until ->cancel_txn() is called.
 349         *
 350         * Optional.
 351         */
 352        int  (*commit_txn)              (struct pmu *pmu);
 353        /*
 354         * Will cancel the transaction, assumes ->del() is called
 355         * for each successful ->add() during the transaction.
 356         *
 357         * Optional.
 358         */
 359        void (*cancel_txn)              (struct pmu *pmu);
 360
 361        /*
 362         * Will return the value for perf_event_mmap_page::index for this event;
 363         * if no implementation is provided it will default to: event->hw.idx + 1.
 364         */
 365        int (*event_idx)                (struct perf_event *event); /*optional */
 366
 367        /*
 368         * context-switches callback
 369         */
 370        void (*sched_task)              (struct perf_event_context *ctx,
 371                                        bool sched_in);
 372        /*
 373         * PMU specific data size
 374         */
 375        size_t                          task_ctx_size;
 376
 377
 378        /*
 379         * Return the count value for a counter.
 380         */
 381        u64 (*count)                    (struct perf_event *event); /*optional*/
 382
 383        /*
 384         * Set up pmu-private data structures for an AUX area
 385         */
 386        void *(*setup_aux)              (int cpu, void **pages,
 387                                         int nr_pages, bool overwrite);
 388                                        /* optional */
 389
 390        /*
 391         * Free pmu-private AUX data structures
 392         */
 393        void (*free_aux)                (void *aux); /* optional */
 394
 395        /*
 396         * Filter events for PMU-specific reasons.
 397         */
 398        int (*filter_match)             (struct perf_event *event); /* optional */
 399};
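/*
 * Minimal wiring sketch (illustrative only, not a real driver): a trivial
 * counting PMU implementing the mandatory callbacks above and registering
 * itself under a dynamically allocated type (-1).  All demo_*() names,
 * including the demo_hw_delta() read-out helper, are hypothetical.
 *
 *	static void demo_read(struct perf_event *event)
 *	{
 *		local64_add(demo_hw_delta(event), &event->count);
 *	}
 *
 *	static void demo_start(struct perf_event *event, int flags)
 *	{
 *		event->hw.state = 0;
 *	}
 *
 *	static void demo_stop(struct perf_event *event, int flags)
 *	{
 *		if (flags & PERF_EF_UPDATE)
 *			demo_read(event);
 *		event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 *	}
 *
 *	static int demo_add(struct perf_event *event, int flags)
 *	{
 *		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 *		if (flags & PERF_EF_START)
 *			demo_start(event, PERF_EF_RELOAD);
 *		return 0;
 *	}
 *
 *	static void demo_del(struct perf_event *event, int flags)
 *	{
 *		demo_stop(event, PERF_EF_UPDATE);
 *	}
 *
 *	static int demo_event_init(struct perf_event *event)
 *	{
 *		if (event->attr.type != event->pmu->type)
 *			return -ENOENT;
 *		return 0;
 *	}
 *
 *	static struct pmu demo_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,
 *		.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
 *		.event_init	= demo_event_init,
 *		.add		= demo_add,
 *		.del		= demo_del,
 *		.start		= demo_start,
 *		.stop		= demo_stop,
 *		.read		= demo_read,
 *	};
 *
 *	perf_pmu_register(&demo_pmu, "demo", -1);
 */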
 400
 401/**
 402 * enum perf_event_active_state - the states of an event
 403 */
 404enum perf_event_active_state {
 405        PERF_EVENT_STATE_DEAD           = -4,
 406        PERF_EVENT_STATE_EXIT           = -3,
 407        PERF_EVENT_STATE_ERROR          = -2,
 408        PERF_EVENT_STATE_OFF            = -1,
 409        PERF_EVENT_STATE_INACTIVE       =  0,
 410        PERF_EVENT_STATE_ACTIVE         =  1,
 411};
 412
 413struct file;
 414struct perf_sample_data;
 415
 416typedef void (*perf_overflow_handler_t)(struct perf_event *,
 417                                        struct perf_sample_data *,
 418                                        struct pt_regs *regs);
 419
 420enum perf_group_flag {
 421        PERF_GROUP_SOFTWARE             = 0x1,
 422};
 423
 424#define SWEVENT_HLIST_BITS              8
 425#define SWEVENT_HLIST_SIZE              (1 << SWEVENT_HLIST_BITS)
 426
 427struct swevent_hlist {
 428        struct hlist_head               heads[SWEVENT_HLIST_SIZE];
 429        struct rcu_head                 rcu_head;
 430};
 431
 432#define PERF_ATTACH_CONTEXT     0x01
 433#define PERF_ATTACH_GROUP       0x02
 434#define PERF_ATTACH_TASK        0x04
 435#define PERF_ATTACH_TASK_DATA   0x08
 436
 437struct perf_cgroup;
 438struct ring_buffer;
 439
 440/**
 441 * struct perf_event - performance event kernel representation:
 442 */
 443struct perf_event {
 444#ifdef CONFIG_PERF_EVENTS
 445        /*
 446         * entry onto perf_event_context::event_list;
 447         *   modifications require ctx->lock
 448         *   RCU safe iterations.
 449         */
 450        struct list_head                event_entry;
 451
 452        /*
 453         * XXX: group_entry and sibling_list should be mutually exclusive;
 454         * either you're a sibling in a group, or you're the group leader.
 455         * Rework the code to always use the same list element.
 456         *
 457         * Locked for modification by both ctx->mutex and ctx->lock; holding
 458         * either suffices for read.
 459         */
 460        struct list_head                group_entry;
 461        struct list_head                sibling_list;
 462
 463        /*
 464         * We need storage to track the entries in perf_pmu_migrate_context; we
 465         * cannot use the event_entry because of RCU and we want to keep the
 466         * group intact, which avoids us using the other two entries.
 467         */
 468        struct list_head                migrate_entry;
 469
 470        struct hlist_node               hlist_entry;
 471        struct list_head                active_entry;
 472        int                             nr_siblings;
 473        int                             group_flags;
 474        struct perf_event               *group_leader;
 475        struct pmu                      *pmu;
 476        void                            *pmu_private;
 477
 478        enum perf_event_active_state    state;
 479        unsigned int                    attach_state;
 480        local64_t                       count;
 481        atomic64_t                      child_count;
 482
 483        /*
 484         * These are the total time in nanoseconds that the event
 485         * has been enabled (i.e. eligible to run, and the task has
 486         * been scheduled in, if this is a per-task event)
 487         * and running (scheduled onto the CPU), respectively.
 488         *
 489         * They are computed from tstamp_enabled, tstamp_running and
 490         * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
 491         */
 492        u64                             total_time_enabled;
 493        u64                             total_time_running;
 494
 495        /*
 496         * These are timestamps used for computing total_time_enabled
 497         * and total_time_running when the event is in INACTIVE or
 498         * ACTIVE state, measured in nanoseconds from an arbitrary point
 499         * in time.
 500         * tstamp_enabled: the notional time when the event was enabled
 501         * tstamp_running: the notional time when the event was scheduled on
 502         * tstamp_stopped: in INACTIVE state, the notional time when the
 503         *      event was scheduled off.
 504         */
 505        u64                             tstamp_enabled;
 506        u64                             tstamp_running;
 507        u64                             tstamp_stopped;
 508
 509        /*
 510         * timestamp shadows the actual context timing but it can
 511         * be safely used in NMI interrupt context. It reflects the
 512         * context time as it was when the event was last scheduled in.
 513         *
 514         * ctx_time already accounts for ctx->timestamp. Therefore to
 515         * compute ctx_time for a sample, simply add perf_clock().
 516         */
 517        u64                             shadow_ctx_time;
 518
 519        struct perf_event_attr          attr;
 520        u16                             header_size;
 521        u16                             id_header_size;
 522        u16                             read_size;
 523        struct hw_perf_event            hw;
 524
 525        struct perf_event_context       *ctx;
 526        atomic_long_t                   refcount;
 527
 528        /*
 529         * These accumulate total time (in nanoseconds) that children
 530         * events have been enabled and running, respectively.
 531         */
 532        atomic64_t                      child_total_time_enabled;
 533        atomic64_t                      child_total_time_running;
 534
 535        /*
 536         * Protect attach/detach and child_list:
 537         */
 538        struct mutex                    child_mutex;
 539        struct list_head                child_list;
 540        struct perf_event               *parent;
 541
 542        int                             oncpu;
 543        int                             cpu;
 544
 545        struct list_head                owner_entry;
 546        struct task_struct              *owner;
 547
 548        /* mmap bits */
 549        struct mutex                    mmap_mutex;
 550        atomic_t                        mmap_count;
 551
 552        struct ring_buffer              *rb;
 553        struct list_head                rb_entry;
 554        unsigned long                   rcu_batches;
 555        int                             rcu_pending;
 556
 557        /* poll related */
 558        wait_queue_head_t               waitq;
 559        struct fasync_struct            *fasync;
 560
 561        /* delayed work for NMIs and such */
 562        int                             pending_wakeup;
 563        int                             pending_kill;
 564        int                             pending_disable;
 565        struct irq_work                 pending;
 566
 567        atomic_t                        event_limit;
 568
 569        void (*destroy)(struct perf_event *);
 570        struct rcu_head                 rcu_head;
 571
 572        struct pid_namespace            *ns;
 573        u64                             id;
 574
 575        u64                             (*clock)(void);
 576        perf_overflow_handler_t         overflow_handler;
 577        void                            *overflow_handler_context;
 578
 579#ifdef CONFIG_EVENT_TRACING
 580        struct trace_event_call         *tp_event;
 581        struct event_filter             *filter;
 582#ifdef CONFIG_FUNCTION_TRACER
 583        struct ftrace_ops               ftrace_ops;
 584#endif
 585#endif
 586
 587#ifdef CONFIG_CGROUP_PERF
 588        struct perf_cgroup              *cgrp; /* cgroup the event is attached to */
 589        int                             cgrp_defer_enabled;
 590#endif
 591
 592#endif /* CONFIG_PERF_EVENTS */
 593};
 594
 595/**
 596 * struct perf_event_context - event context structure
 597 *
 598 * Used as a container for task events and CPU events as well:
 599 */
 600struct perf_event_context {
 601        struct pmu                      *pmu;
 602        /*
 603         * Protect the states of the events in the list,
 604         * nr_active, and the list:
 605         */
 606        raw_spinlock_t                  lock;
 607        /*
 608         * Protect the list of events.  Locking either mutex or lock
 609         * is sufficient to ensure the list doesn't change; to change
 610         * the list you need to lock both the mutex and the spinlock.
 611         */
 612        struct mutex                    mutex;
 613
 614        struct list_head                active_ctx_list;
 615        struct list_head                pinned_groups;
 616        struct list_head                flexible_groups;
 617        struct list_head                event_list;
 618        int                             nr_events;
 619        int                             nr_active;
 620        int                             is_active;
 621        int                             nr_stat;
 622        int                             nr_freq;
 623        int                             rotate_disable;
 624        atomic_t                        refcount;
 625        struct task_struct              *task;
 626
 627        /*
 628         * Context clock, runs when context enabled.
 629         */
 630        u64                             time;
 631        u64                             timestamp;
 632
 633        /*
 634         * These fields let us detect when two contexts have both
 635         * been cloned (inherited) from a common ancestor.
 636         */
 637        struct perf_event_context       *parent_ctx;
 638        u64                             parent_gen;
 639        u64                             generation;
 640        int                             pin_count;
 641        int                             nr_cgroups;      /* cgroup evts */
 642        void                            *task_ctx_data; /* pmu specific data */
 643        struct rcu_head                 rcu_head;
 644};
 645
 646/*
 647 * Number of contexts where an event can trigger:
 648 *      task, softirq, hardirq, nmi.
 649 */
 650#define PERF_NR_CONTEXTS        4
 651
 652/**
 653 * struct perf_cpu_context - per cpu event context structure
 654 */
 655struct perf_cpu_context {
 656        struct perf_event_context       ctx;
 657        struct perf_event_context       *task_ctx;
 658        int                             active_oncpu;
 659        int                             exclusive;
 660
 661        raw_spinlock_t                  hrtimer_lock;
 662        struct hrtimer                  hrtimer;
 663        ktime_t                         hrtimer_interval;
 664        unsigned int                    hrtimer_active;
 665
 666        struct pmu                      *unique_pmu;
 667        struct perf_cgroup              *cgrp;
 668};
 669
 670struct perf_output_handle {
 671        struct perf_event               *event;
 672        struct ring_buffer              *rb;
 673        unsigned long                   wakeup;
 674        unsigned long                   size;
 675        union {
 676                void                    *addr;
 677                unsigned long           head;
 678        };
 679        int                             page;
 680};
 681
 682#ifdef CONFIG_CGROUP_PERF
 683
 684/*
 685 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 686 * This is a per-cpu dynamically allocated data structure.
 687 */
 688struct perf_cgroup_info {
 689        u64                             time;
 690        u64                             timestamp;
 691};
 692
 693struct perf_cgroup {
 694        struct cgroup_subsys_state      css;
 695        struct perf_cgroup_info __percpu *info;
 696};
 697
 698/*
 699 * Must ensure cgroup is pinned (css_get) before calling
 700 * this function. In other words, we cannot call this function
 701 * if there is no cgroup event for the current CPU context.
 702 */
 703static inline struct perf_cgroup *
 704perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
 705{
 706        return container_of(task_css_check(task, perf_event_cgrp_id,
 707                                           ctx ? lockdep_is_held(&ctx->lock)
 708                                               : true),
 709                            struct perf_cgroup, css);
 710}
 711#endif /* CONFIG_CGROUP_PERF */
 712
 713#ifdef CONFIG_PERF_EVENTS
 714
 715extern void *perf_aux_output_begin(struct perf_output_handle *handle,
 716                                   struct perf_event *event);
 717extern void perf_aux_output_end(struct perf_output_handle *handle,
 718                                unsigned long size, bool truncated);
 719extern int perf_aux_output_skip(struct perf_output_handle *handle,
 720                                unsigned long size);
 721extern void *perf_get_aux(struct perf_output_handle *handle);
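/*
 * Illustrative AUX flow (a sketch, not taken from a real driver): an
 * instruction-tracing PMU brackets each hardware run with these helpers,
 * typically from its ->start()/->stop() or interrupt path.  The demo_*()
 * hardware helpers are hypothetical; "event" is the AUX-capable event.
 *
 *	struct perf_output_handle handle;
 *	void *priv;
 *
 *	priv = perf_aux_output_begin(&handle, event);
 *	if (!priv)
 *		return;		// AUX area not mapped or no space left
 *	demo_hw_trace_enable(priv);
 *	//  ... hardware fills the AUX area ...
 *	demo_hw_trace_disable(priv);
 *	perf_aux_output_end(&handle, demo_bytes_collected(priv), false);
 */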
 722
 723extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
 724extern void perf_pmu_unregister(struct pmu *pmu);
 725
 726extern int perf_num_counters(void);
 727extern const char *perf_pmu_name(void);
 728extern void __perf_event_task_sched_in(struct task_struct *prev,
 729                                       struct task_struct *task);
 730extern void __perf_event_task_sched_out(struct task_struct *prev,
 731                                        struct task_struct *next);
 732extern int perf_event_init_task(struct task_struct *child);
 733extern void perf_event_exit_task(struct task_struct *child);
 734extern void perf_event_free_task(struct task_struct *task);
 735extern void perf_event_delayed_put(struct task_struct *task);
 736extern struct file *perf_event_get(unsigned int fd);
 737extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
 738extern void perf_event_print_debug(void);
 739extern void perf_pmu_disable(struct pmu *pmu);
 740extern void perf_pmu_enable(struct pmu *pmu);
 741extern void perf_sched_cb_dec(struct pmu *pmu);
 742extern void perf_sched_cb_inc(struct pmu *pmu);
 743extern int perf_event_task_disable(void);
 744extern int perf_event_task_enable(void);
 745extern int perf_event_refresh(struct perf_event *event, int refresh);
 746extern void perf_event_update_userpage(struct perf_event *event);
 747extern int perf_event_release_kernel(struct perf_event *event);
 748extern struct perf_event *
 749perf_event_create_kernel_counter(struct perf_event_attr *attr,
 750                                int cpu,
 751                                struct task_struct *task,
 752                                perf_overflow_handler_t callback,
 753                                void *context);
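/*
 * In-kernel counter sketch (assumptions flagged): counting CPU cycles on one
 * CPU from kernel code, roughly as the hard-lockup watchdog does.  The
 * demo_overflow_handler is a hypothetical perf_overflow_handler_t; pass NULL
 * for pure counting.
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.pinned		= 1,
 *		.disabled	= 1,
 *	};
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 demo_overflow_handler, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	perf_event_enable(event);
 *	//  ... later ...
 *	perf_event_release_kernel(event);
 */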
 754extern void perf_pmu_migrate_context(struct pmu *pmu,
 755                                int src_cpu, int dst_cpu);
 756extern u64 perf_event_read_local(struct perf_event *event);
 757extern u64 perf_event_read_value(struct perf_event *event,
 758                                 u64 *enabled, u64 *running);
 759
 760
 761struct perf_sample_data {
 762        /*
 763         * Fields set by perf_sample_data_init(), grouped so as to
 764         * minimize the cachelines touched.
 765         */
 766        u64                             addr;
 767        struct perf_raw_record          *raw;
 768        struct perf_branch_stack        *br_stack;
 769        u64                             period;
 770        u64                             weight;
 771        u64                             txn;
 772        union  perf_mem_data_src        data_src;
 773
 774        /*
 775         * The other fields, optionally {set,used} by
 776         * perf_{prepare,output}_sample().
 777         */
 778        u64                             type;
 779        u64                             ip;
 780        struct {
 781                u32     pid;
 782                u32     tid;
 783        }                               tid_entry;
 784        u64                             time;
 785        u64                             id;
 786        u64                             stream_id;
 787        struct {
 788                u32     cpu;
 789                u32     reserved;
 790        }                               cpu_entry;
 791        struct perf_callchain_entry     *callchain;
 792
 793        /*
 794         * regs_user may point to task_pt_regs or to regs_user_copy, depending
 795         * on arch details.
 796         */
 797        struct perf_regs                regs_user;
 798        struct pt_regs                  regs_user_copy;
 799
 800        struct perf_regs                regs_intr;
 801        u64                             stack_user_size;
 802} ____cacheline_aligned;
 803
 804/* default value for data source */
 805#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
 806                    PERF_MEM_S(LVL, NA)   |\
 807                    PERF_MEM_S(SNOOP, NA) |\
 808                    PERF_MEM_S(LOCK, NA)  |\
 809                    PERF_MEM_S(TLB, NA))
 810
 811static inline void perf_sample_data_init(struct perf_sample_data *data,
 812                                         u64 addr, u64 period)
 813{
 814        /* remaining struct members initialized in perf_prepare_sample() */
 815        data->addr = addr;
 816        data->raw  = NULL;
 817        data->br_stack = NULL;
 818        data->period = period;
 819        data->weight = 0;
 820        data->data_src.val = PERF_MEM_NA;
 821        data->txn = 0;
 822}
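/*
 * Typical overflow path (illustrative sketch): a PMU interrupt handler
 * initializes the on-stack sample data and hands it to the generic layer,
 * stopping the event when asked to.  demo_pmu_stop() stands in for the
 * driver's ->stop() implementation and is hypothetical.
 *
 *	struct perf_sample_data data;
 *	struct hw_perf_event *hwc = &event->hw;
 *
 *	perf_sample_data_init(&data, 0, hwc->last_period);
 *	if (perf_event_overflow(event, &data, regs))
 *		demo_pmu_stop(event, 0);
 */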
 823
 824extern void perf_output_sample(struct perf_output_handle *handle,
 825                               struct perf_event_header *header,
 826                               struct perf_sample_data *data,
 827                               struct perf_event *event);
 828extern void perf_prepare_sample(struct perf_event_header *header,
 829                                struct perf_sample_data *data,
 830                                struct perf_event *event,
 831                                struct pt_regs *regs);
 832
 833extern int perf_event_overflow(struct perf_event *event,
 834                                 struct perf_sample_data *data,
 835                                 struct pt_regs *regs);
 836
 837extern void perf_event_output(struct perf_event *event,
 838                                struct perf_sample_data *data,
 839                                struct pt_regs *regs);
 840
 841extern void
 842perf_event_header__init_id(struct perf_event_header *header,
 843                           struct perf_sample_data *data,
 844                           struct perf_event *event);
 845extern void
 846perf_event__output_id_sample(struct perf_event *event,
 847                             struct perf_output_handle *handle,
 848                             struct perf_sample_data *sample);
 849
 850extern void
 851perf_log_lost_samples(struct perf_event *event, u64 lost);
 852
 853static inline bool is_sampling_event(struct perf_event *event)
 854{
 855        return event->attr.sample_period != 0;
 856}
 857
 858/*
 859 * Return 1 for a software event, 0 for a hardware event
 860 */
 861static inline int is_software_event(struct perf_event *event)
 862{
 863        return event->pmu->task_ctx_nr == perf_sw_context;
 864}
 865
 866extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 867
 868extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
 869extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
 870
 871#ifndef perf_arch_fetch_caller_regs
 872static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
 873#endif
 874
 875/*
 876 * Take a snapshot of the regs. Skip ip and frame pointer to
 877 * the nth caller. We only need a few of the regs:
 878 * - ip for PERF_SAMPLE_IP
 879 * - cs for user_mode() tests
 880 * - bp for callchains
 881 * - eflags, for future purposes, just in case
 882 */
 883static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 884{
 885        memset(regs, 0, sizeof(*regs));
 886
 887        perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
 888}
 889
 890static __always_inline void
 891perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 892{
 893        if (static_key_false(&perf_swevent_enabled[event_id]))
 894                __perf_sw_event(event_id, nr, regs, addr);
 895}
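/*
 * Usage sketch: callers simply fire the event with a count and the faulting
 * address; for example, the architecture page-fault handlers do:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * The static key above keeps this a patched-out no-op until at least one
 * such software event has been created.
 */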
 896
 897DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
 898
 899/*
 900 * 'Special' version for the scheduler; it hard assumes no recursion,
 901 * which is guaranteed by us not actually scheduling inside other swevents
 902 * because those disable preemption.
 903 */
 904static __always_inline void
 905perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
 906{
 907        if (static_key_false(&perf_swevent_enabled[event_id])) {
 908                struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
 909
 910                perf_fetch_caller_regs(regs);
 911                ___perf_sw_event(event_id, nr, regs, addr);
 912        }
 913}
 914
 915extern struct static_key_false perf_sched_events;
 916
 917static __always_inline bool
 918perf_sw_migrate_enabled(void)
 919{
 920        if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
 921                return true;
 922        return false;
 923}
 924
 925static inline void perf_event_task_migrate(struct task_struct *task)
 926{
 927        if (perf_sw_migrate_enabled())
 928                task->sched_migrated = 1;
 929}
 930
 931static inline void perf_event_task_sched_in(struct task_struct *prev,
 932                                            struct task_struct *task)
 933{
 934        if (static_branch_unlikely(&perf_sched_events))
 935                __perf_event_task_sched_in(prev, task);
 936
 937        if (perf_sw_migrate_enabled() && task->sched_migrated) {
 938                struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
 939
 940                perf_fetch_caller_regs(regs);
 941                ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
 942                task->sched_migrated = 0;
 943        }
 944}
 945
 946static inline void perf_event_task_sched_out(struct task_struct *prev,
 947                                             struct task_struct *next)
 948{
 949        perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
 950
 951        if (static_branch_unlikely(&perf_sched_events))
 952                __perf_event_task_sched_out(prev, next);
 953}
 954
 955static inline u64 __perf_event_count(struct perf_event *event)
 956{
 957        return local64_read(&event->count) + atomic64_read(&event->child_count);
 958}
 959
 960extern void perf_event_mmap(struct vm_area_struct *vma);
 961extern struct perf_guest_info_callbacks *perf_guest_cbs;
 962extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
 963extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
 964
 965extern void perf_event_exec(void);
 966extern void perf_event_comm(struct task_struct *tsk, bool exec);
 967extern void perf_event_fork(struct task_struct *tsk);
 968
 969/* Callchains */
 970DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
 971
 972extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
 973extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
 974extern struct perf_callchain_entry *
 975get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
 976                   bool crosstask, bool add_mark);
 977extern int get_callchain_buffers(void);
 978extern void put_callchain_buffers(void);
 979
 980static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
 981{
 982        if (entry->nr < PERF_MAX_STACK_DEPTH) {
 983                entry->ip[entry->nr++] = ip;
 984                return 0;
 985        } else {
 986                return -1; /* no more room, stop walking the stack */
 987        }
 988}
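/*
 * Usage sketch: an architecture's callchain walker stores each return
 * address it finds and stops once the entry array is full.  Simplified from
 * typical arch code; demo_unwind_next() is a hypothetical unwinder step.
 *
 *	void perf_callchain_kernel(struct perf_callchain_entry *entry,
 *				   struct pt_regs *regs)
 *	{
 *		unsigned long ip = instruction_pointer(regs);
 *
 *		perf_callchain_store(entry, ip);
 *		while (demo_unwind_next(&ip))
 *			if (perf_callchain_store(entry, ip))
 *				break;
 *	}
 */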
 989
 990extern int sysctl_perf_event_paranoid;
 991extern int sysctl_perf_event_mlock;
 992extern int sysctl_perf_event_sample_rate;
 993extern int sysctl_perf_cpu_time_max_percent;
 994
 995extern void perf_sample_event_took(u64 sample_len_ns);
 996
 997extern int perf_proc_update_handler(struct ctl_table *table, int write,
 998                void __user *buffer, size_t *lenp,
 999                loff_t *ppos);
1000extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
1001                void __user *buffer, size_t *lenp,
1002                loff_t *ppos);
1003
1004
1005static inline bool perf_paranoid_tracepoint_raw(void)
1006{
1007        return sysctl_perf_event_paranoid > -1;
1008}
1009
1010static inline bool perf_paranoid_cpu(void)
1011{
1012        return sysctl_perf_event_paranoid > 0;
1013}
1014
1015static inline bool perf_paranoid_kernel(void)
1016{
1017        return sysctl_perf_event_paranoid > 1;
1018}
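/*
 * Usage sketch: roughly how the perf_event_open() syscall path applies these
 * checks (simplified; the real code lives in kernel/events/core.c):
 *
 *	if (!attr->exclude_kernel && perf_paranoid_kernel() &&
 *	    !capable(CAP_SYS_ADMIN))
 *		return -EACCES;
 */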
1019
1020extern void perf_event_init(void);
1021extern void perf_tp_event(u64 addr, u64 count, void *record,
1022                          int entry_size, struct pt_regs *regs,
1023                          struct hlist_head *head, int rctx,
1024                          struct task_struct *task);
1025extern void perf_bp_event(struct perf_event *event, void *data);
1026
1027#ifndef perf_misc_flags
1028# define perf_misc_flags(regs) \
1029                (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
1030# define perf_instruction_pointer(regs) instruction_pointer(regs)
1031#endif
1032
1033static inline bool has_branch_stack(struct perf_event *event)
1034{
1035        return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
1036}
1037
1038static inline bool needs_branch_stack(struct perf_event *event)
1039{
1040        return event->attr.branch_sample_type != 0;
1041}
1042
1043static inline bool has_aux(struct perf_event *event)
1044{
1045        return event->pmu->setup_aux;
1046}
1047
1048extern int perf_output_begin(struct perf_output_handle *handle,
1049                             struct perf_event *event, unsigned int size);
1050extern void perf_output_end(struct perf_output_handle *handle);
1051extern unsigned int perf_output_copy(struct perf_output_handle *handle,
1052                             const void *buf, unsigned int len);
1053extern unsigned int perf_output_skip(struct perf_output_handle *handle,
1054                                     unsigned int len);
1055extern int perf_swevent_get_recursion_context(void);
1056extern void perf_swevent_put_recursion_context(int rctx);
1057extern u64 perf_swevent_set_period(struct perf_event *event);
1058extern void perf_event_enable(struct perf_event *event);
1059extern void perf_event_disable(struct perf_event *event);
1060extern void perf_event_disable_local(struct perf_event *event);
1061extern void perf_event_task_tick(void);
1062#else /* !CONFIG_PERF_EVENTS: */
1063static inline void *
1064perf_aux_output_begin(struct perf_output_handle *handle,
1065                      struct perf_event *event)                         { return NULL; }
1066static inline void
1067perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
1068                    bool truncated)                                     { }
1069static inline int
1070perf_aux_output_skip(struct perf_output_handle *handle,
1071                     unsigned long size)                                { return -EINVAL; }
1072static inline void *
1073perf_get_aux(struct perf_output_handle *handle)                         { return NULL; }
1074static inline void
1075perf_event_task_migrate(struct task_struct *task)                       { }
1076static inline void
1077perf_event_task_sched_in(struct task_struct *prev,
1078                         struct task_struct *task)                      { }
1079static inline void
1080perf_event_task_sched_out(struct task_struct *prev,
1081                          struct task_struct *next)                     { }
1082static inline int perf_event_init_task(struct task_struct *child)       { return 0; }
1083static inline void perf_event_exit_task(struct task_struct *child)      { }
1084static inline void perf_event_free_task(struct task_struct *task)       { }
1085static inline void perf_event_delayed_put(struct task_struct *task)     { }
1086static inline struct file *perf_event_get(unsigned int fd)      { return ERR_PTR(-EINVAL); }
1087static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
1088{
1089        return ERR_PTR(-EINVAL);
1090}
1091static inline u64 perf_event_read_local(struct perf_event *event)       { return -EINVAL; }
1092static inline void perf_event_print_debug(void)                         { }
1093static inline int perf_event_task_disable(void)                         { return -EINVAL; }
1094static inline int perf_event_task_enable(void)                          { return -EINVAL; }
1095static inline int perf_event_refresh(struct perf_event *event, int refresh)
1096{
1097        return -EINVAL;
1098}
1099
1100static inline void
1101perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)     { }
1102static inline void
1103perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)                     { }
1104static inline void
1105perf_bp_event(struct perf_event *event, void *data)                     { }
1106
1107static inline int perf_register_guest_info_callbacks
1108(struct perf_guest_info_callbacks *callbacks)                           { return 0; }
1109static inline int perf_unregister_guest_info_callbacks
1110(struct perf_guest_info_callbacks *callbacks)                           { return 0; }
1111
1112static inline void perf_event_mmap(struct vm_area_struct *vma)          { }
1113static inline void perf_event_exec(void)                                { }
1114static inline void perf_event_comm(struct task_struct *tsk, bool exec)  { }
1115static inline void perf_event_fork(struct task_struct *tsk)             { }
1116static inline void perf_event_init(void)                                { }
1117static inline int  perf_swevent_get_recursion_context(void)             { return -1; }
1118static inline void perf_swevent_put_recursion_context(int rctx)         { }
1119static inline u64 perf_swevent_set_period(struct perf_event *event)     { return 0; }
1120static inline void perf_event_enable(struct perf_event *event)          { }
1121static inline void perf_event_disable(struct perf_event *event)         { }
1122static inline int __perf_event_disable(void *info)                      { return -1; }
1123static inline void perf_event_task_tick(void)                           { }
1124static inline int perf_event_release_kernel(struct perf_event *event)   { return 0; }
1125#endif
1126
1127#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
1128extern void perf_restore_debug_store(void);
1129#else
1130static inline void perf_restore_debug_store(void)                       { }
1131#endif
1132
1133#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
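/*
 * Output sketch (illustrative): side-band records are emitted with the
 * begin/put/end triplet and id-stamped via the helpers declared earlier.
 * Only a header-only record is shown; real records append their payload
 * with further perf_output_put() calls before the id sample.
 *
 *	struct perf_output_handle handle;
 *	struct perf_sample_data sample;
 *	struct perf_event_header header = {
 *		.type = PERF_RECORD_COMM,	// pick the record type emitted
 *		.size = sizeof(header),
 *	};
 *
 *	perf_event_header__init_id(&header, &sample, event);
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;
 *	perf_output_put(&handle, header);
 *	perf_event__output_id_sample(event, &handle, &sample);
 *	perf_output_end(&handle);
 */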
1134
1135/*
1136 * This has to have a higher priority than migration_notifier in sched/core.c.
1137 */
1138#define perf_cpu_notifier(fn)                                           \
1139do {                                                                    \
1140        static struct notifier_block fn##_nb =                          \
1141                { .notifier_call = fn, .priority = CPU_PRI_PERF };      \
1142        unsigned long cpu = smp_processor_id();                         \
1143        unsigned long flags;                                            \
1144                                                                        \
1145        cpu_notifier_register_begin();                                  \
1146        fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,                     \
1147                (void *)(unsigned long)cpu);                            \
1148        local_irq_save(flags);                                          \
1149        fn(&fn##_nb, (unsigned long)CPU_STARTING,                       \
1150                (void *)(unsigned long)cpu);                            \
1151        local_irq_restore(flags);                                       \
1152        fn(&fn##_nb, (unsigned long)CPU_ONLINE,                         \
1153                (void *)(unsigned long)cpu);                            \
1154        __register_cpu_notifier(&fn##_nb);                              \
1155        cpu_notifier_register_done();                                   \
1156} while (0)
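/*
 * Usage sketch: a PMU driver's init path hooks CPU hotplug like this.
 * demo_cpu_notifier() and demo_setup_this_cpu() are hypothetical.
 *
 *	static int demo_cpu_notifier(struct notifier_block *nb,
 *				     unsigned long action, void *hcpu)
 *	{
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_STARTING:
 *			demo_setup_this_cpu();
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	perf_cpu_notifier(demo_cpu_notifier);
 */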
1157
1158/*
1159 * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
1160 * callback for already online CPUs.
1161 */
1162#define __perf_cpu_notifier(fn)                                         \
1163do {                                                                    \
1164        static struct notifier_block fn##_nb =                          \
1165                { .notifier_call = fn, .priority = CPU_PRI_PERF };      \
1166                                                                        \
1167        __register_cpu_notifier(&fn##_nb);                              \
1168} while (0)
1169
1170struct perf_pmu_events_attr {
1171        struct device_attribute attr;
1172        u64 id;
1173        const char *event_str;
1174};
1175
1176ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
1177                              char *page);
1178
1179#define PMU_EVENT_ATTR(_name, _var, _id, _show)                         \
1180static struct perf_pmu_events_attr _var = {                             \
1181        .attr = __ATTR(_name, 0444, _show, NULL),                       \
1182        .id   =  _id,                                                   \
1183};
1184
1185#define PMU_EVENT_ATTR_STRING(_name, _var, _str)                            \
1186static struct perf_pmu_events_attr _var = {                                 \
1187        .attr           = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
1188        .id             = 0,                                                \
1189        .event_str      = _str,                                             \
1190};
1191
1192#define PMU_FORMAT_ATTR(_name, _format)                                 \
1193static ssize_t                                                          \
1194_name##_show(struct device *dev,                                        \
1195                               struct device_attribute *attr,           \
1196                               char *page)                              \
1197{                                                                       \
1198        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
1199        return sprintf(page, _format "\n");                             \
1200}                                                                       \
1201                                                                        \
1202static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
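/*
 * Usage sketch: drivers describe their config field layout and named events
 * with the macros above and hang the results off pmu::attr_groups before
 * calling perf_pmu_register().  The demo_* names are hypothetical:
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *	PMU_EVENT_ATTR_STRING(cycles, demo_attr_cycles, "event=0x01");
 *
 *	static struct attribute *demo_format_attrs[] = {
 *		&format_attr_event.attr,
 *		NULL,
 *	};
 *	static struct attribute_group demo_format_group = {
 *		.name	= "format",
 *		.attrs	= demo_format_attrs,
 *	};
 *
 *	static struct attribute *demo_event_attrs[] = {
 *		&demo_attr_cycles.attr.attr,
 *		NULL,
 *	};
 *	static struct attribute_group demo_events_group = {
 *		.name	= "events",
 *		.attrs	= demo_event_attrs,
 *	};
 *
 *	static const struct attribute_group *demo_attr_groups[] = {
 *		&demo_format_group,
 *		&demo_events_group,
 *		NULL,
 *	};
 */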
1203
1204#endif /* _LINUX_PERF_EVENT_H */
1205