linux/include/linux/perf_event.h
   1/*
   2 * Performance events:
   3 *
   4 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
   5 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
   6 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
   7 *
   8 * Data type definitions, declarations, prototypes.
   9 *
  10 *    Started by: Thomas Gleixner and Ingo Molnar
  11 *
  12 * For licencing details see kernel-base/COPYING
  13 */
  14#ifndef _LINUX_PERF_EVENT_H
  15#define _LINUX_PERF_EVENT_H
  16
  17#include <uapi/linux/perf_event.h>
  18#include <uapi/linux/bpf_perf_event.h>
  19
  20/*
  21 * Kernel-internal data types and definitions:
  22 */
  23
  24#ifdef CONFIG_PERF_EVENTS
  25# include <asm/perf_event.h>
  26# include <asm/local64.h>
  27#endif
  28
  29struct perf_guest_info_callbacks {
  30        int                             (*is_in_guest)(void);
  31        int                             (*is_user_mode)(void);
  32        unsigned long                   (*get_guest_ip)(void);
  33        void                            (*handle_intel_pt_intr)(void);
  34};
  35
  36#ifdef CONFIG_HAVE_HW_BREAKPOINT
  37#include <asm/hw_breakpoint.h>
  38#endif
  39
  40#include <linux/list.h>
  41#include <linux/mutex.h>
  42#include <linux/rculist.h>
  43#include <linux/rcupdate.h>
  44#include <linux/spinlock.h>
  45#include <linux/hrtimer.h>
  46#include <linux/fs.h>
  47#include <linux/pid_namespace.h>
  48#include <linux/workqueue.h>
  49#include <linux/ftrace.h>
  50#include <linux/cpu.h>
  51#include <linux/irq_work.h>
  52#include <linux/static_key.h>
  53#include <linux/jump_label_ratelimit.h>
  54#include <linux/atomic.h>
  55#include <linux/sysfs.h>
  56#include <linux/perf_regs.h>
  57#include <linux/cgroup.h>
  58#include <linux/refcount.h>
  59#include <linux/security.h>
  60#include <asm/local.h>
  61
  62struct perf_callchain_entry {
  63        __u64                           nr;
  64        __u64                           ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
  65};
  66
  67struct perf_callchain_entry_ctx {
  68        struct perf_callchain_entry *entry;
  69        u32                         max_stack;
  70        u32                         nr;
  71        short                       contexts;
  72        bool                        contexts_maxed;
  73};
  74
  75typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
  76                                     unsigned long off, unsigned long len);
  77
  78struct perf_raw_frag {
  79        union {
  80                struct perf_raw_frag    *next;
  81                unsigned long           pad;
  82        };
  83        perf_copy_f                     copy;
  84        void                            *data;
  85        u32                             size;
  86} __packed;
  87
  88struct perf_raw_record {
  89        struct perf_raw_frag            frag;
  90        u32                             size;
  91};
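
A raw sample (PERF_SAMPLE_RAW) is described to the core as a chain of perf_raw_frag fragments. Below is a minimal, hedged sketch of filling one in for a single contiguous buffer, similar to what the in-kernel BPF output path does; "buf" and "len" are placeholders, and perf_prepare_sample() later walks the chain, computes perf_raw_record::size and adds the trailing padding itself.

/* Illustrative sketch, not part of this header. */
static void example_init_raw(struct perf_raw_record *raw, void *buf, u32 len)
{
        raw->frag.next = NULL;  /* single fragment, no chain */
        raw->frag.copy = NULL;  /* NULL: the core copies ->data directly */
        raw->frag.data = buf;
        raw->frag.size = len;
}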
  92
  93/*
  94 * branch stack layout:
  95 *  nr: number of taken branches stored in entries[]
  96 *  hw_idx: The low level index of raw branch records
  97 *          for the most recent branch.
  98 *          -1ULL means invalid/unknown.
  99 *
  100 * Note that nr can vary from sample to sample.
  101 * Branches (to, from) are stored from most recent
  102 * to least recent, i.e., entries[0] contains the most
  103 * recent branch.
  104 * entries[] is an abstraction of raw branch records,
 105 * which may not be stored in age order in HW, e.g. Intel LBR.
 106 * The hw_idx is to expose the low level index of raw
 107 * branch record for the most recent branch aka entries[0].
 108 * The hw_idx index is between -1 (unknown) and max depth,
 109 * which can be retrieved in /sys/devices/cpu/caps/branches.
 110 * For the architectures whose raw branch records are
 111 * already stored in age order, the hw_idx should be 0.
 112 */
 113struct perf_branch_stack {
 114        __u64                           nr;
 115        __u64                           hw_idx;
 116        struct perf_branch_entry        entries[0];
 117};
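
Since entries[0] is always the most recent branch and nr can vary per sample, consumers simply walk the array front to back. A small illustrative sketch follows (the mispredict tally is made up; the real consumers are the sample-output code and the PMU drivers that fill the stack in).

/* Illustrative sketch, not part of this header. */
static u64 example_count_mispredicts(const struct perf_branch_stack *bs)
{
        u64 i, n = 0;

        /* entries[0] is the most recent branch; bs->nr varies per sample */
        for (i = 0; i < bs->nr; i++)
                n += bs->entries[i].mispred;

        return n;
}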
 118
 119struct task_struct;
 120
 121/*
 122 * extra PMU register associated with an event
 123 */
 124struct hw_perf_event_extra {
 125        u64             config; /* register value */
 126        unsigned int    reg;    /* register address or index */
 127        int             alloc;  /* extra register already allocated */
 128        int             idx;    /* index in shared_regs->regs[] */
 129};
 130
 131/**
 132 * struct hw_perf_event - performance event hardware details:
 133 */
 134struct hw_perf_event {
 135#ifdef CONFIG_PERF_EVENTS
 136        union {
 137                struct { /* hardware */
 138                        u64             config;
 139                        u64             last_tag;
 140                        unsigned long   config_base;
 141                        unsigned long   event_base;
 142                        int             event_base_rdpmc;
 143                        int             idx;
 144                        int             last_cpu;
 145                        int             flags;
 146
 147                        struct hw_perf_event_extra extra_reg;
 148                        struct hw_perf_event_extra branch_reg;
 149                };
 150                struct { /* software */
 151                        struct hrtimer  hrtimer;
 152                };
 153                struct { /* tracepoint */
 154                        /* for tp_event->class */
 155                        struct list_head        tp_list;
 156                };
 157                struct { /* amd_power */
 158                        u64     pwr_acc;
 159                        u64     ptsc;
 160                };
 161#ifdef CONFIG_HAVE_HW_BREAKPOINT
 162                struct { /* breakpoint */
 163                        /*
 164                         * Crufty hack to avoid the chicken and egg
 165                         * problem hw_breakpoint has with context
  166                         * creation and event initialization.
 167                         */
 168                        struct arch_hw_breakpoint       info;
 169                        struct list_head                bp_list;
 170                };
 171#endif
 172                struct { /* amd_iommu */
 173                        u8      iommu_bank;
 174                        u8      iommu_cntr;
 175                        u16     padding;
 176                        u64     conf;
 177                        u64     conf1;
 178                };
 179        };
 180        /*
 181         * If the event is a per task event, this will point to the task in
 182         * question. See the comment in perf_event_alloc().
 183         */
 184        struct task_struct              *target;
 185
 186        /*
 187         * PMU would store hardware filter configuration
 188         * here.
 189         */
 190        void                            *addr_filters;
 191
 192        /* Last sync'ed generation of filters */
 193        unsigned long                   addr_filters_gen;
 194
 195/*
 196 * hw_perf_event::state flags; used to track the PERF_EF_* state.
 197 */
 198#define PERF_HES_STOPPED        0x01 /* the counter is stopped */
 199#define PERF_HES_UPTODATE       0x02 /* event->count up-to-date */
 200#define PERF_HES_ARCH           0x04
 201
 202        int                             state;
 203
 204        /*
 205         * The last observed hardware counter value, updated with a
 206         * local64_cmpxchg() such that pmu::read() can be called nested.
 207         */
 208        local64_t                       prev_count;
 209
 210        /*
 211         * The period to start the next sample with.
 212         */
 213        u64                             sample_period;
 214
 215        /*
 216         * The period we started this sample with.
 217         */
 218        u64                             last_period;
 219
 220        /*
 221         * However much is left of the current period; note that this is
 222         * a full 64bit value and allows for generation of periods longer
 223         * than hardware might allow.
 224         */
 225        local64_t                       period_left;
 226
 227        /*
 228         * State for throttling the event, see __perf_event_overflow() and
 229         * perf_adjust_freq_unthr_context().
 230         */
 231        u64                             interrupts_seq;
 232        u64                             interrupts;
 233
 234        /*
 235         * State for freq target events, see __perf_event_overflow() and
 236         * perf_adjust_freq_unthr_context().
 237         */
 238        u64                             freq_time_stamp;
 239        u64                             freq_count_stamp;
 240#endif
 241};
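
The prev_count/period_left comments above describe the usual read/publish/fold pattern that lets pmu::read() be called nested (for instance when an NMI lands in the middle of an update) without double counting. A minimal sketch of that pattern, modelled loosely on the x86 event update; example_hw_read() is a hypothetical accessor for the raw hardware count.

/* Illustrative sketch, not part of this header. */
static void example_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev, now, delta;

        do {
                prev = local64_read(&hwc->prev_count);
                now  = example_hw_read();       /* hypothetical raw counter read */
        } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

        delta = now - prev;                     /* assumes an up-counting counter */
        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);
}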
 242
 243struct perf_event;
 244
 245/*
 246 * Common implementation detail of pmu::{start,commit,cancel}_txn
 247 */
 248#define PERF_PMU_TXN_ADD  0x1           /* txn to add/schedule event on PMU */
 249#define PERF_PMU_TXN_READ 0x2           /* txn to read event group from PMU */
 250
 251/**
 252 * pmu::capabilities flags
 253 */
 254#define PERF_PMU_CAP_NO_INTERRUPT               0x01
 255#define PERF_PMU_CAP_NO_NMI                     0x02
 256#define PERF_PMU_CAP_AUX_NO_SG                  0x04
 257#define PERF_PMU_CAP_EXTENDED_REGS              0x08
 258#define PERF_PMU_CAP_EXCLUSIVE                  0x10
 259#define PERF_PMU_CAP_ITRACE                     0x20
 260#define PERF_PMU_CAP_HETEROGENEOUS_CPUS         0x40
 261#define PERF_PMU_CAP_NO_EXCLUDE                 0x80
 262#define PERF_PMU_CAP_AUX_OUTPUT                 0x100
 263
 264struct perf_output_handle;
 265
 266/**
 267 * struct pmu - generic performance monitoring unit
 268 */
 269struct pmu {
 270        struct list_head                entry;
 271
 272        struct module                   *module;
 273        struct device                   *dev;
 274        const struct attribute_group    **attr_groups;
 275        const struct attribute_group    **attr_update;
 276        const char                      *name;
 277        int                             type;
 278
 279        /*
 280         * various common per-pmu feature flags
 281         */
 282        int                             capabilities;
 283
 284        int __percpu                    *pmu_disable_count;
 285        struct perf_cpu_context __percpu *pmu_cpu_context;
 286        atomic_t                        exclusive_cnt; /* < 0: cpu; > 0: tsk */
 287        int                             task_ctx_nr;
 288        int                             hrtimer_interval_ms;
 289
 290        /* number of address filters this PMU can do */
 291        unsigned int                    nr_addr_filters;
 292
 293        /*
  294         * Fully disable/enable this PMU; can be used to protect from the PMI
 295         * as well as for lazy/batch writing of the MSRs.
 296         */
 297        void (*pmu_enable)              (struct pmu *pmu); /* optional */
 298        void (*pmu_disable)             (struct pmu *pmu); /* optional */
 299
 300        /*
 301         * Try and initialize the event for this PMU.
 302         *
 303         * Returns:
 304         *  -ENOENT     -- @event is not for this PMU
 305         *
 306         *  -ENODEV     -- @event is for this PMU but PMU not present
 307         *  -EBUSY      -- @event is for this PMU but PMU temporarily unavailable
 308         *  -EINVAL     -- @event is for this PMU but @event is not valid
 309         *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
 310         *  -EACCES     -- @event is for this PMU, @event is valid, but no privileges
 311         *
 312         *  0           -- @event is for this PMU and valid
 313         *
 314         * Other error return values are allowed.
 315         */
 316        int (*event_init)               (struct perf_event *event);
 317
 318        /*
 319         * Notification that the event was mapped or unmapped.  Called
 320         * in the context of the mapping task.
 321         */
 322        void (*event_mapped)            (struct perf_event *event, struct mm_struct *mm); /* optional */
 323        void (*event_unmapped)          (struct perf_event *event, struct mm_struct *mm); /* optional */
 324
 325        /*
  326         * Flags for ->add()/->del()/->start()/->stop(). There are
 327         * matching hw_perf_event::state flags.
 328         */
 329#define PERF_EF_START   0x01            /* start the counter when adding    */
 330#define PERF_EF_RELOAD  0x02            /* reload the counter when starting */
 331#define PERF_EF_UPDATE  0x04            /* update the counter when stopping */
 332
 333        /*
  334         * Adds/Removes a counter to/from the PMU; can be done inside a
  335         * transaction, see the ->*_txn() methods.
 336         *
 337         * The add/del callbacks will reserve all hardware resources required
  338         * to service the event; this includes any counter constraint
  339         * scheduling, etc.
 340         *
 341         * Called with IRQs disabled and the PMU disabled on the CPU the event
 342         * is on.
 343         *
 344         * ->add() called without PERF_EF_START should result in the same state
 345         *  as ->add() followed by ->stop().
 346         *
 347         * ->del() must always PERF_EF_UPDATE stop an event. If it calls
 348         *  ->stop() that must deal with already being stopped without
 349         *  PERF_EF_UPDATE.
 350         */
 351        int  (*add)                     (struct perf_event *event, int flags);
 352        void (*del)                     (struct perf_event *event, int flags);
 353
 354        /*
 355         * Starts/Stops a counter present on the PMU.
 356         *
 357         * The PMI handler should stop the counter when perf_event_overflow()
 358         * returns !0. ->start() will be used to continue.
 359         *
 360         * Also used to change the sample period.
 361         *
 362         * Called with IRQs disabled and the PMU disabled on the CPU the event
  363         * is on -- will be called from NMI context when the PMU generates
 364         * NMIs.
 365         *
 366         * ->stop() with PERF_EF_UPDATE will read the counter and update
 367         *  period/count values like ->read() would.
 368         *
  369         * ->start() with PERF_EF_RELOAD will reprogram the counter
  370         *  value; it must be preceded by a ->stop() with PERF_EF_UPDATE.
 371         */
 372        void (*start)                   (struct perf_event *event, int flags);
 373        void (*stop)                    (struct perf_event *event, int flags);
 374
 375        /*
 376         * Updates the counter value of the event.
 377         *
 378         * For sampling capable PMUs this will also update the software period
 379         * hw_perf_event::period_left field.
 380         */
 381        void (*read)                    (struct perf_event *event);
 382
 383        /*
  384         * Group events scheduling is treated as a transaction: add
  385         * group events as a whole and perform one schedulability test.
  386         * If the test fails, roll back the whole group.
 387         *
 388         * Start the transaction, after this ->add() doesn't need to
 389         * do schedulability tests.
 390         *
 391         * Optional.
 392         */
 393        void (*start_txn)               (struct pmu *pmu, unsigned int txn_flags);
 394        /*
 395         * If ->start_txn() disabled the ->add() schedulability test
 396         * then ->commit_txn() is required to perform one. On success
 397         * the transaction is closed. On error the transaction is kept
 398         * open until ->cancel_txn() is called.
 399         *
 400         * Optional.
 401         */
 402        int  (*commit_txn)              (struct pmu *pmu);
 403        /*
 404         * Will cancel the transaction, assumes ->del() is called
 405         * for each successful ->add() during the transaction.
 406         *
 407         * Optional.
 408         */
 409        void (*cancel_txn)              (struct pmu *pmu);
 410
 411        /*
  412         * Will return the value for perf_event_mmap_page::index for this event;
  413         * if no implementation is provided it will default to: event->hw.idx + 1.
 414         */
  415        int (*event_idx)                (struct perf_event *event); /* optional */
 416
 417        /*
 418         * context-switches callback
 419         */
 420        void (*sched_task)              (struct perf_event_context *ctx,
 421                                        bool sched_in);
 422        /*
 423         * PMU specific data size
 424         */
 425        size_t                          task_ctx_size;
 426
 427        /*
 428         * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
 429         * can be synchronized using this function. See Intel LBR callstack support
 430         * implementation and Perf core context switch handling callbacks for usage
 431         * examples.
 432         */
 433        void (*swap_task_ctx)           (struct perf_event_context *prev,
 434                                         struct perf_event_context *next);
 435                                        /* optional */
 436
 437        /*
 438         * Set up pmu-private data structures for an AUX area
 439         */
 440        void *(*setup_aux)              (struct perf_event *event, void **pages,
 441                                         int nr_pages, bool overwrite);
 442                                        /* optional */
 443
 444        /*
 445         * Free pmu-private AUX data structures
 446         */
 447        void (*free_aux)                (void *aux); /* optional */
 448
 449        /*
 450         * Take a snapshot of the AUX buffer without touching the event
 451         * state, so that preempting ->start()/->stop() callbacks does
 452         * not interfere with their logic. Called in PMI context.
 453         *
 454         * Returns the size of AUX data copied to the output handle.
 455         *
 456         * Optional.
 457         */
 458        long (*snapshot_aux)            (struct perf_event *event,
 459                                         struct perf_output_handle *handle,
 460                                         unsigned long size);
 461
 462        /*
 463         * Validate address range filters: make sure the HW supports the
 464         * requested configuration and number of filters; return 0 if the
 465         * supplied filters are valid, -errno otherwise.
 466         *
 467         * Runs in the context of the ioctl()ing process and is not serialized
 468         * with the rest of the PMU callbacks.
 469         */
 470        int (*addr_filters_validate)    (struct list_head *filters);
 471                                        /* optional */
 472
 473        /*
 474         * Synchronize address range filter configuration:
 475         * translate hw-agnostic filters into hardware configuration in
 476         * event::hw::addr_filters.
 477         *
 478         * Runs as a part of filter sync sequence that is done in ->start()
 479         * callback by calling perf_event_addr_filters_sync().
 480         *
 481         * May (and should) traverse event::addr_filters::list, for which its
 482         * caller provides necessary serialization.
 483         */
 484        void (*addr_filters_sync)       (struct perf_event *event);
 485                                        /* optional */
 486
 487        /*
 488         * Check if event can be used for aux_output purposes for
 489         * events of this PMU.
 490         *
 491         * Runs from perf_event_open(). Should return 0 for "no match"
 492         * or non-zero for "match".
 493         */
 494        int (*aux_output_match)         (struct perf_event *event);
 495                                        /* optional */
 496
 497        /*
 498         * Filter events for PMU-specific reasons.
 499         */
 500        int (*filter_match)             (struct perf_event *event); /* optional */
 501
 502        /*
 503         * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
 504         */
 505        int (*check_period)             (struct perf_event *event, u64 value); /* optional */
 506};
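
The callback contract documented above (-ENOENT from ->event_init() when an event is not for this PMU, the PERF_EF_* flags mirroring the PERF_HES_* state bits, ->del() always performing a PERF_EF_UPDATE stop) is easiest to see end to end in a driver skeleton. The sketch below is illustrative only: example_hw_{start,stop,read}() are hypothetical hardware accessors, it reuses example_event_update() from the sketch after struct hw_perf_event, and a real driver would also handle counter allocation, group validation and sampling.

/* Illustrative driver skeleton only -- not part of this header. */
static int example_event_init(struct perf_event *event)
{
        if (event->attr.type != event->pmu->type)
                return -ENOENT;                 /* not ours; let the core try other PMUs */
        if (event->attr.sample_period)
                return -EOPNOTSUPP;             /* counting only in this sketch */
        return 0;
}

static void example_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        hwc->state = 0;
        local64_set(&hwc->prev_count, example_hw_read());      /* hypothetical */
        example_hw_start();                                     /* hypothetical */
}

static void example_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        if (!(hwc->state & PERF_HES_STOPPED)) {
                example_hw_stop();                              /* hypothetical */
                hwc->state |= PERF_HES_STOPPED;
        }

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                example_event_update(event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

static int example_add(struct perf_event *event, int flags)
{
        event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        if (flags & PERF_EF_START)
                example_start(event, PERF_EF_RELOAD);
        return 0;
}

static void example_del(struct perf_event *event, int flags)
{
        example_stop(event, PERF_EF_UPDATE);    /* ->del() must always update */
}

static struct pmu example_pmu = {
        .task_ctx_nr    = perf_invalid_context, /* no per-task events */
        .capabilities   = PERF_PMU_CAP_NO_INTERRUPT,
        .event_init     = example_event_init,
        .add            = example_add,
        .del            = example_del,
        .start          = example_start,
        .stop           = example_stop,
        .read           = example_event_update, /* see the sketch after struct hw_perf_event */
};
/* registered elsewhere via perf_pmu_register(&example_pmu, "example", -1) */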
 507
 508enum perf_addr_filter_action_t {
 509        PERF_ADDR_FILTER_ACTION_STOP = 0,
 510        PERF_ADDR_FILTER_ACTION_START,
 511        PERF_ADDR_FILTER_ACTION_FILTER,
 512};
 513
 514/**
 515 * struct perf_addr_filter - address range filter definition
 516 * @entry:      event's filter list linkage
 517 * @path:       object file's path for file-based filters
 518 * @offset:     filter range offset
 519 * @size:       filter range size (size==0 means single address trigger)
 520 * @action:     filter/start/stop
 521 *
 522 * This is a hardware-agnostic filter configuration as specified by the user.
 523 */
 524struct perf_addr_filter {
 525        struct list_head        entry;
 526        struct path             path;
 527        unsigned long           offset;
 528        unsigned long           size;
 529        enum perf_addr_filter_action_t  action;
 530};
 531
 532/**
 533 * struct perf_addr_filters_head - container for address range filters
 534 * @list:       list of filters for this event
 535 * @lock:       spinlock that serializes accesses to the @list and event's
 536 *              (and its children's) filter generations.
 537 * @nr_file_filters:    number of file-based filters
 538 *
 539 * A child event will use parent's @list (and therefore @lock), so they are
 540 * bundled together; see perf_event_addr_filters().
 541 */
 542struct perf_addr_filters_head {
 543        struct list_head        list;
 544        raw_spinlock_t          lock;
 545        unsigned int            nr_file_filters;
 546};
 547
 548struct perf_addr_filter_range {
 549        unsigned long           start;
 550        unsigned long           size;
 551};
 552
 553/**
 554 * enum perf_event_state - the states of an event:
 555 */
 556enum perf_event_state {
 557        PERF_EVENT_STATE_DEAD           = -4,
 558        PERF_EVENT_STATE_EXIT           = -3,
 559        PERF_EVENT_STATE_ERROR          = -2,
 560        PERF_EVENT_STATE_OFF            = -1,
 561        PERF_EVENT_STATE_INACTIVE       =  0,
 562        PERF_EVENT_STATE_ACTIVE         =  1,
 563};
 564
 565struct file;
 566struct perf_sample_data;
 567
 568typedef void (*perf_overflow_handler_t)(struct perf_event *,
 569                                        struct perf_sample_data *,
 570                                        struct pt_regs *regs);
 571
 572/*
  573 * Event capabilities. For event_caps and group_caps.
 574 *
 575 * PERF_EV_CAP_SOFTWARE: Is a software event.
 576 * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
 577 * from any CPU in the package where it is active.
 578 */
 579#define PERF_EV_CAP_SOFTWARE            BIT(0)
 580#define PERF_EV_CAP_READ_ACTIVE_PKG     BIT(1)
 581
 582#define SWEVENT_HLIST_BITS              8
 583#define SWEVENT_HLIST_SIZE              (1 << SWEVENT_HLIST_BITS)
 584
 585struct swevent_hlist {
 586        struct hlist_head               heads[SWEVENT_HLIST_SIZE];
 587        struct rcu_head                 rcu_head;
 588};
 589
 590#define PERF_ATTACH_CONTEXT     0x01
 591#define PERF_ATTACH_GROUP       0x02
 592#define PERF_ATTACH_TASK        0x04
 593#define PERF_ATTACH_TASK_DATA   0x08
 594#define PERF_ATTACH_ITRACE      0x10
 595
 596struct perf_cgroup;
 597struct perf_buffer;
 598
 599struct pmu_event_list {
 600        raw_spinlock_t          lock;
 601        struct list_head        list;
 602};
 603
 604#define for_each_sibling_event(sibling, event)                  \
 605        if ((event)->group_leader == (event))                   \
 606                list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
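
A hedged usage sketch: because of the guard above, the macro only iterates when handed the group leader, which is how PMU drivers typically walk a candidate group from ->event_init() to check it against their counter budget. Everything below is illustrative.

/* Illustrative sketch, not part of this header. */
static int example_group_size(struct perf_event *event)
{
        struct perf_event *leader = event->group_leader;
        struct perf_event *sibling;
        int n = (leader->pmu == event->pmu);    /* does the leader use a slot? */

        for_each_sibling_event(sibling, leader)
                if (sibling->pmu == event->pmu)
                        n++;

        return n;       /* compare against the PMU's counter budget */
}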
 607
 608/**
 609 * struct perf_event - performance event kernel representation:
 610 */
 611struct perf_event {
 612#ifdef CONFIG_PERF_EVENTS
 613        /*
 614         * entry onto perf_event_context::event_list;
 615         *   modifications require ctx->lock
 616         *   RCU safe iterations.
 617         */
 618        struct list_head                event_entry;
 619
 620        /*
 621         * Locked for modification by both ctx->mutex and ctx->lock; holding
  622         * either suffices for read.
 623         */
 624        struct list_head                sibling_list;
 625        struct list_head                active_list;
 626        /*
 627         * Node on the pinned or flexible tree located at the event context;
 628         */
 629        struct rb_node                  group_node;
 630        u64                             group_index;
 631        /*
 632         * We need storage to track the entries in perf_pmu_migrate_context; we
  633         * cannot use the event_entry because of RCU, and we want to keep the
  634         * group intact, which avoids using the other two entries.
 635         */
 636        struct list_head                migrate_entry;
 637
 638        struct hlist_node               hlist_entry;
 639        struct list_head                active_entry;
 640        int                             nr_siblings;
 641
 642        /* Not serialized. Only written during event initialization. */
 643        int                             event_caps;
 644        /* The cumulative AND of all event_caps for events in this group. */
 645        int                             group_caps;
 646
 647        struct perf_event               *group_leader;
 648        struct pmu                      *pmu;
 649        void                            *pmu_private;
 650
 651        enum perf_event_state           state;
 652        unsigned int                    attach_state;
 653        local64_t                       count;
 654        atomic64_t                      child_count;
 655
 656        /*
 657         * These are the total time in nanoseconds that the event
 658         * has been enabled (i.e. eligible to run, and the task has
 659         * been scheduled in, if this is a per-task event)
 660         * and running (scheduled onto the CPU), respectively.
 661         */
 662        u64                             total_time_enabled;
 663        u64                             total_time_running;
 664        u64                             tstamp;
 665
 666        /*
 667         * timestamp shadows the actual context timing but it can
 668         * be safely used in NMI interrupt context. It reflects the
 669         * context time as it was when the event was last scheduled in.
 670         *
 671         * ctx_time already accounts for ctx->timestamp. Therefore to
 672         * compute ctx_time for a sample, simply add perf_clock().
 673         */
 674        u64                             shadow_ctx_time;
 675
 676        struct perf_event_attr          attr;
 677        u16                             header_size;
 678        u16                             id_header_size;
 679        u16                             read_size;
 680        struct hw_perf_event            hw;
 681
 682        struct perf_event_context       *ctx;
 683        atomic_long_t                   refcount;
 684
 685        /*
 686         * These accumulate total time (in nanoseconds) that children
 687         * events have been enabled and running, respectively.
 688         */
 689        atomic64_t                      child_total_time_enabled;
 690        atomic64_t                      child_total_time_running;
 691
 692        /*
 693         * Protect attach/detach and child_list:
 694         */
 695        struct mutex                    child_mutex;
 696        struct list_head                child_list;
 697        struct perf_event               *parent;
 698
 699        int                             oncpu;
 700        int                             cpu;
 701
 702        struct list_head                owner_entry;
 703        struct task_struct              *owner;
 704
 705        /* mmap bits */
 706        struct mutex                    mmap_mutex;
 707        atomic_t                        mmap_count;
 708
 709        struct perf_buffer              *rb;
 710        struct list_head                rb_entry;
 711        unsigned long                   rcu_batches;
 712        int                             rcu_pending;
 713
 714        /* poll related */
 715        wait_queue_head_t               waitq;
 716        struct fasync_struct            *fasync;
 717
 718        /* delayed work for NMIs and such */
 719        int                             pending_wakeup;
 720        int                             pending_kill;
 721        int                             pending_disable;
 722        struct irq_work                 pending;
 723
 724        atomic_t                        event_limit;
 725
 726        /* address range filters */
 727        struct perf_addr_filters_head   addr_filters;
  728        /* vma address array for file-based filters */
 729        struct perf_addr_filter_range   *addr_filter_ranges;
 730        unsigned long                   addr_filters_gen;
 731
 732        /* for aux_output events */
 733        struct perf_event               *aux_event;
 734
 735        void (*destroy)(struct perf_event *);
 736        struct rcu_head                 rcu_head;
 737
 738        struct pid_namespace            *ns;
 739        u64                             id;
 740
 741        u64                             (*clock)(void);
 742        perf_overflow_handler_t         overflow_handler;
 743        void                            *overflow_handler_context;
 744#ifdef CONFIG_BPF_SYSCALL
 745        perf_overflow_handler_t         orig_overflow_handler;
 746        struct bpf_prog                 *prog;
 747#endif
 748
 749#ifdef CONFIG_EVENT_TRACING
 750        struct trace_event_call         *tp_event;
 751        struct event_filter             *filter;
 752#ifdef CONFIG_FUNCTION_TRACER
 753        struct ftrace_ops               ftrace_ops;
 754#endif
 755#endif
 756
 757#ifdef CONFIG_CGROUP_PERF
  758        struct perf_cgroup              *cgrp; /* cgroup the event is attached to */
 759#endif
 760
 761#ifdef CONFIG_SECURITY
 762        void *security;
 763#endif
 764        struct list_head                sb_list;
 765#endif /* CONFIG_PERF_EVENTS */
 766};
 767
 768
 769struct perf_event_groups {
 770        struct rb_root  tree;
 771        u64             index;
 772};
 773
 774/**
 775 * struct perf_event_context - event context structure
 776 *
 777 * Used as a container for task events and CPU events as well:
 778 */
 779struct perf_event_context {
 780        struct pmu                      *pmu;
 781        /*
 782         * Protect the states of the events in the list,
 783         * nr_active, and the list:
 784         */
 785        raw_spinlock_t                  lock;
 786        /*
 787         * Protect the list of events.  Locking either mutex or lock
 788         * is sufficient to ensure the list doesn't change; to change
 789         * the list you need to lock both the mutex and the spinlock.
 790         */
 791        struct mutex                    mutex;
 792
 793        struct list_head                active_ctx_list;
 794        struct perf_event_groups        pinned_groups;
 795        struct perf_event_groups        flexible_groups;
 796        struct list_head                event_list;
 797
 798        struct list_head                pinned_active;
 799        struct list_head                flexible_active;
 800
 801        int                             nr_events;
 802        int                             nr_active;
 803        int                             is_active;
 804        int                             nr_stat;
 805        int                             nr_freq;
 806        int                             rotate_disable;
 807        /*
  808         * Set when nr_events != nr_active, but tolerant of events that need
  809         * not be active due to scheduling constraints, such as cgroup events.
 810         */
 811        int                             rotate_necessary;
 812        refcount_t                      refcount;
 813        struct task_struct              *task;
 814
 815        /*
 816         * Context clock, runs when context enabled.
 817         */
 818        u64                             time;
 819        u64                             timestamp;
 820
 821        /*
 822         * These fields let us detect when two contexts have both
 823         * been cloned (inherited) from a common ancestor.
 824         */
 825        struct perf_event_context       *parent_ctx;
 826        u64                             parent_gen;
 827        u64                             generation;
 828        int                             pin_count;
 829#ifdef CONFIG_CGROUP_PERF
 830        int                             nr_cgroups;      /* cgroup evts */
 831#endif
 832        void                            *task_ctx_data; /* pmu specific data */
 833        struct rcu_head                 rcu_head;
 834};
 835
 836/*
 837 * Number of contexts where an event can trigger:
 838 *      task, softirq, hardirq, nmi.
 839 */
 840#define PERF_NR_CONTEXTS        4
 841
 842/**
  843 * struct perf_cpu_context - per-CPU event context structure
 844 */
 845struct perf_cpu_context {
 846        struct perf_event_context       ctx;
 847        struct perf_event_context       *task_ctx;
 848        int                             active_oncpu;
 849        int                             exclusive;
 850
 851        raw_spinlock_t                  hrtimer_lock;
 852        struct hrtimer                  hrtimer;
 853        ktime_t                         hrtimer_interval;
 854        unsigned int                    hrtimer_active;
 855
 856#ifdef CONFIG_CGROUP_PERF
 857        struct perf_cgroup              *cgrp;
 858        struct list_head                cgrp_cpuctx_entry;
 859#endif
 860
 861        struct list_head                sched_cb_entry;
 862        int                             sched_cb_usage;
 863
 864        int                             online;
 865        /*
 866         * Per-CPU storage for iterators used in visit_groups_merge. The default
 867         * storage is of size 2 to hold the CPU and any CPU event iterators.
 868         */
 869        int                             heap_size;
 870        struct perf_event               **heap;
 871        struct perf_event               *heap_default[2];
 872};
 873
 874struct perf_output_handle {
 875        struct perf_event               *event;
 876        struct perf_buffer              *rb;
 877        unsigned long                   wakeup;
 878        unsigned long                   size;
 879        u64                             aux_flags;
 880        union {
 881                void                    *addr;
 882                unsigned long           head;
 883        };
 884        int                             page;
 885};
 886
 887struct bpf_perf_event_data_kern {
 888        bpf_user_pt_regs_t *regs;
 889        struct perf_sample_data *data;
 890        struct perf_event *event;
 891};
 892
 893#ifdef CONFIG_CGROUP_PERF
 894
 895/*
 896 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 897 * This is a per-cpu dynamically allocated data structure.
 898 */
 899struct perf_cgroup_info {
 900        u64                             time;
 901        u64                             timestamp;
 902};
 903
 904struct perf_cgroup {
 905        struct cgroup_subsys_state      css;
 906        struct perf_cgroup_info __percpu *info;
 907};
 908
 909/*
 910 * Must ensure cgroup is pinned (css_get) before calling
 911 * this function. In other words, we cannot call this function
 912 * if there is no cgroup event for the current CPU context.
 913 */
 914static inline struct perf_cgroup *
 915perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
 916{
 917        return container_of(task_css_check(task, perf_event_cgrp_id,
 918                                           ctx ? lockdep_is_held(&ctx->lock)
 919                                               : true),
 920                            struct perf_cgroup, css);
 921}
 922#endif /* CONFIG_CGROUP_PERF */
 923
 924#ifdef CONFIG_PERF_EVENTS
 925
 926extern void *perf_aux_output_begin(struct perf_output_handle *handle,
 927                                   struct perf_event *event);
 928extern void perf_aux_output_end(struct perf_output_handle *handle,
 929                                unsigned long size);
 930extern int perf_aux_output_skip(struct perf_output_handle *handle,
 931                                unsigned long size);
 932extern void *perf_get_aux(struct perf_output_handle *handle);
 933extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
 934extern void perf_event_itrace_started(struct perf_event *event);
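
A hedged sketch of how an AUX-capable PMU hands collected data to the core: perf_aux_output_begin() returns the private structure that pmu::setup_aux() created (or NULL if there is no AUX buffer or no room), handle.size tells the driver how much space it has, and perf_aux_output_end() commits what was written. example_hw_drain() is hypothetical; real drivers such as Intel PT follow this begin/end pattern.

/* Illustrative sketch, not part of this header. */
static void example_aux_interrupt(struct perf_event *event)
{
        struct perf_output_handle handle;
        unsigned long bytes;
        void *priv;                             /* what pmu::setup_aux() returned */

        priv = perf_aux_output_begin(&handle, event);
        if (!priv)
                return;                         /* no AUX buffer, or it is full */

        /* hypothetical: let the hardware fill the pages described by priv,
         * bounded by the space the core granted in handle.size */
        bytes = example_hw_drain(priv, handle.size);

        perf_aux_output_end(&handle, bytes);
}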
 935
 936extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
 937extern void perf_pmu_unregister(struct pmu *pmu);
 938
 939extern int perf_num_counters(void);
 940extern const char *perf_pmu_name(void);
 941extern void __perf_event_task_sched_in(struct task_struct *prev,
 942                                       struct task_struct *task);
 943extern void __perf_event_task_sched_out(struct task_struct *prev,
 944                                        struct task_struct *next);
 945extern int perf_event_init_task(struct task_struct *child);
 946extern void perf_event_exit_task(struct task_struct *child);
 947extern void perf_event_free_task(struct task_struct *task);
 948extern void perf_event_delayed_put(struct task_struct *task);
 949extern struct file *perf_event_get(unsigned int fd);
 950extern const struct perf_event *perf_get_event(struct file *file);
 951extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
 952extern void perf_event_print_debug(void);
 953extern void perf_pmu_disable(struct pmu *pmu);
 954extern void perf_pmu_enable(struct pmu *pmu);
 955extern void perf_sched_cb_dec(struct pmu *pmu);
 956extern void perf_sched_cb_inc(struct pmu *pmu);
 957extern int perf_event_task_disable(void);
 958extern int perf_event_task_enable(void);
 959
 960extern void perf_pmu_resched(struct pmu *pmu);
 961
 962extern int perf_event_refresh(struct perf_event *event, int refresh);
 963extern void perf_event_update_userpage(struct perf_event *event);
 964extern int perf_event_release_kernel(struct perf_event *event);
 965extern struct perf_event *
 966perf_event_create_kernel_counter(struct perf_event_attr *attr,
 967                                int cpu,
 968                                struct task_struct *task,
 969                                perf_overflow_handler_t callback,
 970                                void *context);
 971extern void perf_pmu_migrate_context(struct pmu *pmu,
 972                                int src_cpu, int dst_cpu);
 973int perf_event_read_local(struct perf_event *event, u64 *value,
 974                          u64 *enabled, u64 *running);
 975extern u64 perf_event_read_value(struct perf_event *event,
 976                                 u64 *enabled, u64 *running);
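
An illustrative in-kernel user of this interface, loosely following the hard-lockup watchdog: build a perf_event_attr, create a counter bound to a CPU (or a task) with an overflow callback, and read it back later with perf_event_read_value(). The attr values and names here are only an example.

/* Illustrative sketch, not part of this header. */
static void example_overflow(struct perf_event *event,
                             struct perf_sample_data *data,
                             struct pt_regs *regs)
{
        /* runs from the event's PMU interrupt */
}

static struct perf_event *example_create_cycle_counter(int cpu)
{
        struct perf_event_attr attr = {
                .type           = PERF_TYPE_HARDWARE,
                .config         = PERF_COUNT_HW_CPU_CYCLES,
                .size           = sizeof(attr),
                .pinned         = 1,
                .sample_period  = 1000000,      /* callback roughly every 1M cycles */
        };

        return perf_event_create_kernel_counter(&attr, cpu, NULL,
                                                example_overflow, NULL);
}

/* later, e.g. from process context:
 *      u64 enabled, running;
 *      u64 count = perf_event_read_value(event, &enabled, &running);
 */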
 977
 978
 979struct perf_sample_data {
 980        /*
  981         * Fields set by perf_sample_data_init(), grouped so as to
 982         * minimize the cachelines touched.
 983         */
 984        u64                             addr;
 985        struct perf_raw_record          *raw;
 986        struct perf_branch_stack        *br_stack;
 987        u64                             period;
 988        u64                             weight;
 989        u64                             txn;
 990        union  perf_mem_data_src        data_src;
 991
 992        /*
 993         * The other fields, optionally {set,used} by
 994         * perf_{prepare,output}_sample().
 995         */
 996        u64                             type;
 997        u64                             ip;
 998        struct {
 999                u32     pid;
1000                u32     tid;
1001        }                               tid_entry;
1002        u64                             time;
1003        u64                             id;
1004        u64                             stream_id;
1005        struct {
1006                u32     cpu;
1007                u32     reserved;
1008        }                               cpu_entry;
1009        struct perf_callchain_entry     *callchain;
1010        u64                             aux_size;
1011
1012        /*
1013         * regs_user may point to task_pt_regs or to regs_user_copy, depending
1014         * on arch details.
1015         */
1016        struct perf_regs                regs_user;
1017        struct pt_regs                  regs_user_copy;
1018
1019        struct perf_regs                regs_intr;
1020        u64                             stack_user_size;
1021
1022        u64                             phys_addr;
1023        u64                             cgroup;
1024} ____cacheline_aligned;
1025
1026/* default value for data source */
1027#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
1028                    PERF_MEM_S(LVL, NA)   |\
1029                    PERF_MEM_S(SNOOP, NA) |\
1030                    PERF_MEM_S(LOCK, NA)  |\
1031                    PERF_MEM_S(TLB, NA))
1032
1033static inline void perf_sample_data_init(struct perf_sample_data *data,
1034                                         u64 addr, u64 period)
1035{
1036        /* remaining struct members initialized in perf_prepare_sample() */
1037        data->addr = addr;
1038        data->raw  = NULL;
1039        data->br_stack = NULL;
1040        data->period = period;
1041        data->weight = 0;
1042        data->data_src.val = PERF_MEM_NA;
1043        data->txn = 0;
1044}
1045
1046extern void perf_output_sample(struct perf_output_handle *handle,
1047                               struct perf_event_header *header,
1048                               struct perf_sample_data *data,
1049                               struct perf_event *event);
1050extern void perf_prepare_sample(struct perf_event_header *header,
1051                                struct perf_sample_data *data,
1052                                struct perf_event *event,
1053                                struct pt_regs *regs);
1054
1055extern int perf_event_overflow(struct perf_event *event,
1056                                 struct perf_sample_data *data,
1057                                 struct pt_regs *regs);
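
The pairing of perf_sample_data_init() with perf_event_overflow() is the core of every sampling PMU's interrupt handler; a non-zero return from perf_event_overflow() asks the driver to stop the counter (throttling, or the event_limit was reached). A hedged sketch, with example_hw_stop() as a hypothetical stand-in for the driver's own stop path:

/* Illustrative sketch, not part of this header. */
static void example_handle_overflow(struct perf_event *event, struct pt_regs *regs)
{
        struct perf_sample_data data;

        perf_sample_data_init(&data, 0, event->hw.last_period);

        if (perf_event_overflow(event, &data, regs))
                example_hw_stop();              /* hypothetical */
}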
1058
1059extern void perf_event_output_forward(struct perf_event *event,
1060                                     struct perf_sample_data *data,
1061                                     struct pt_regs *regs);
1062extern void perf_event_output_backward(struct perf_event *event,
1063                                       struct perf_sample_data *data,
1064                                       struct pt_regs *regs);
1065extern int perf_event_output(struct perf_event *event,
1066                             struct perf_sample_data *data,
1067                             struct pt_regs *regs);
1068
1069static inline bool
1070is_default_overflow_handler(struct perf_event *event)
1071{
1072        if (likely(event->overflow_handler == perf_event_output_forward))
1073                return true;
1074        if (unlikely(event->overflow_handler == perf_event_output_backward))
1075                return true;
1076        return false;
1077}
1078
1079extern void
1080perf_event_header__init_id(struct perf_event_header *header,
1081                           struct perf_sample_data *data,
1082                           struct perf_event *event);
1083extern void
1084perf_event__output_id_sample(struct perf_event *event,
1085                             struct perf_output_handle *handle,
1086                             struct perf_sample_data *sample);
1087
1088extern void
1089perf_log_lost_samples(struct perf_event *event, u64 lost);
1090
1091static inline bool event_has_any_exclude_flag(struct perf_event *event)
1092{
1093        struct perf_event_attr *attr = &event->attr;
1094
1095        return attr->exclude_idle || attr->exclude_user ||
1096               attr->exclude_kernel || attr->exclude_hv ||
1097               attr->exclude_guest || attr->exclude_host;
1098}
1099
1100static inline bool is_sampling_event(struct perf_event *event)
1101{
1102        return event->attr.sample_period != 0;
1103}
1104
1105/*
1106 * Return 1 for a software event, 0 for a hardware event
1107 */
1108static inline int is_software_event(struct perf_event *event)
1109{
1110        return event->event_caps & PERF_EV_CAP_SOFTWARE;
1111}
1112
1113/*
1114 * Return 1 for event in sw context, 0 for event in hw context
1115 */
1116static inline int in_software_context(struct perf_event *event)
1117{
1118        return event->ctx->pmu->task_ctx_nr == perf_sw_context;
1119}
1120
1121static inline int is_exclusive_pmu(struct pmu *pmu)
1122{
1123        return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
1124}
1125
1126extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
1127
1128extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
1129extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
1130
1131#ifndef perf_arch_fetch_caller_regs
1132static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
1133#endif
1134
1135/*
1136 * When generating a perf sample in-line, instead of from an interrupt /
1137 * exception, we lack a pt_regs. This is typically used from software events
1138 * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
1139 *
1140 * We typically don't need a full set, but (for x86) do require:
1141 * - ip for PERF_SAMPLE_IP
1142 * - cs for user_mode() tests
1143 * - sp for PERF_SAMPLE_CALLCHAIN
1144 * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
1145 *
1146 * NOTE: assumes @regs is otherwise already 0 filled; this is important for
1147 * things like PERF_SAMPLE_REGS_INTR.
1148 */
1149static inline void perf_fetch_caller_regs(struct pt_regs *regs)
1150{
1151        perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
1152}
1153
1154static __always_inline void
1155perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
1156{
1157        if (static_key_false(&perf_swevent_enabled[event_id]))
1158                __perf_sw_event(event_id, nr, regs, addr);
1159}
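
Typical usage, in the style of the architecture page-fault handlers; the static key above makes the call essentially free when no such event exists. A minimal sketch:

/* Illustrative sketch, not part of this header. */
static void example_count_page_fault(struct pt_regs *regs, unsigned long address)
{
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
}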
1160
1161DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
1162
1163/*
 1164 * 'Special' version for the scheduler; it hard assumes no recursion,
1165 * which is guaranteed by us not actually scheduling inside other swevents
1166 * because those disable preemption.
1167 */
1168static __always_inline void
1169perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
1170{
1171        if (static_key_false(&perf_swevent_enabled[event_id])) {
1172                struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
1173
1174                perf_fetch_caller_regs(regs);
1175                ___perf_sw_event(event_id, nr, regs, addr);
1176        }
1177}
1178
1179extern struct static_key_false perf_sched_events;
1180
1181static __always_inline bool
1182perf_sw_migrate_enabled(void)
1183{
1184        if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
1185                return true;
1186        return false;
1187}
1188
1189static inline void perf_event_task_migrate(struct task_struct *task)
1190{
1191        if (perf_sw_migrate_enabled())
1192                task->sched_migrated = 1;
1193}
1194
1195static inline void perf_event_task_sched_in(struct task_struct *prev,
1196                                            struct task_struct *task)
1197{
1198        if (static_branch_unlikely(&perf_sched_events))
1199                __perf_event_task_sched_in(prev, task);
1200
1201        if (perf_sw_migrate_enabled() && task->sched_migrated) {
1202                struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
1203
1204                perf_fetch_caller_regs(regs);
1205                ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
1206                task->sched_migrated = 0;
1207        }
1208}
1209
1210static inline void perf_event_task_sched_out(struct task_struct *prev,
1211                                             struct task_struct *next)
1212{
1213        perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
1214
1215        if (static_branch_unlikely(&perf_sched_events))
1216                __perf_event_task_sched_out(prev, next);
1217}
1218
1219extern void perf_event_mmap(struct vm_area_struct *vma);
1220
1221extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
1222                               bool unregister, const char *sym);
1223extern void perf_event_bpf_event(struct bpf_prog *prog,
1224                                 enum perf_bpf_event_type type,
1225                                 u16 flags);
1226
1227extern struct perf_guest_info_callbacks *perf_guest_cbs;
1228extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1229extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1230
1231extern void perf_event_exec(void);
1232extern void perf_event_comm(struct task_struct *tsk, bool exec);
1233extern void perf_event_namespaces(struct task_struct *tsk);
1234extern void perf_event_fork(struct task_struct *tsk);
1235
1236/* Callchains */
1237DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
1238
1239extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
1240extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
1241extern struct perf_callchain_entry *
1242get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
1243                   u32 max_stack, bool crosstask, bool add_mark);
1244extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
1245extern int get_callchain_buffers(int max_stack);
1246extern void put_callchain_buffers(void);
1247
1248extern int sysctl_perf_event_max_stack;
1249extern int sysctl_perf_event_max_contexts_per_stack;
1250
1251static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
1252{
1253        if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
1254                struct perf_callchain_entry *entry = ctx->entry;
1255                entry->ip[entry->nr++] = ip;
1256                ++ctx->contexts;
1257                return 0;
1258        } else {
1259                ctx->contexts_maxed = true;
1260                return -1; /* no more room, stop walking the stack */
1261        }
1262}
1263
1264static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
1265{
1266        if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
1267                struct perf_callchain_entry *entry = ctx->entry;
1268                entry->ip[entry->nr++] = ip;
1269                ++ctx->nr;
1270                return 0;
1271        } else {
1272                return -1; /* no more room, stop walking the stack */
1273        }
1274}
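
A hedged sketch of the shape an architecture's kernel callchain walker usually takes: store the current instruction pointer, then keep unwinding until perf_callchain_store() reports that the entry is full. example_next_frame() is a hypothetical unwinder.

/* Illustrative sketch of an arch implementation, not part of this header. */
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                           struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        if (perf_callchain_store(entry, pc))
                return;                         /* no room at all */

        while (example_next_frame(&pc))         /* hypothetical unwinder */
                if (perf_callchain_store(entry, pc))
                        break;                  /* entry full, stop walking */
}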
1275
1276extern int sysctl_perf_event_paranoid;
1277extern int sysctl_perf_event_mlock;
1278extern int sysctl_perf_event_sample_rate;
1279extern int sysctl_perf_cpu_time_max_percent;
1280
1281extern void perf_sample_event_took(u64 sample_len_ns);
1282
1283extern int perf_proc_update_handler(struct ctl_table *table, int write,
1284                void __user *buffer, size_t *lenp,
1285                loff_t *ppos);
1286extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
1287                void __user *buffer, size_t *lenp,
1288                loff_t *ppos);
1289
1290int perf_event_max_stack_handler(struct ctl_table *table, int write,
1291                                 void __user *buffer, size_t *lenp, loff_t *ppos);
1292
1293/* Access to perf_event_open(2) syscall. */
1294#define PERF_SECURITY_OPEN              0
1295
1296/* Finer grained perf_event_open(2) access control. */
1297#define PERF_SECURITY_CPU               1
1298#define PERF_SECURITY_KERNEL            2
1299#define PERF_SECURITY_TRACEPOINT        3
1300
1301static inline int perf_is_paranoid(void)
1302{
1303        return sysctl_perf_event_paranoid > -1;
1304}
1305
1306static inline int perf_allow_kernel(struct perf_event_attr *attr)
1307{
1308        if (sysctl_perf_event_paranoid > 1 && !capable(CAP_SYS_ADMIN))
1309                return -EACCES;
1310
1311        return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
1312}
1313
1314static inline int perf_allow_cpu(struct perf_event_attr *attr)
1315{
1316        if (sysctl_perf_event_paranoid > 0 && !capable(CAP_SYS_ADMIN))
1317                return -EACCES;
1318
1319        return security_perf_event_open(attr, PERF_SECURITY_CPU);
1320}
1321
1322static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
1323{
1324        if (sysctl_perf_event_paranoid > -1 && !capable(CAP_SYS_ADMIN))
1325                return -EPERM;
1326
1327        return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
1328}
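
A hedged sketch of how a PMU whose events always observe kernel state can gate ->event_init() on these helpers, in the same way the core gates !exclude_kernel events:

/* Illustrative sketch, not part of this header. */
static int example_priv_event_init(struct perf_event *event)
{
        int ret;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        ret = perf_allow_kernel(&event->attr);  /* -EACCES or an LSM error if denied */
        if (ret)
                return ret;

        return 0;
}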
1329
1330extern void perf_event_init(void);
1331extern void perf_tp_event(u16 event_type, u64 count, void *record,
1332                          int entry_size, struct pt_regs *regs,
1333                          struct hlist_head *head, int rctx,
1334                          struct task_struct *task);
1335extern void perf_bp_event(struct perf_event *event, void *data);
1336
1337#ifndef perf_misc_flags
1338# define perf_misc_flags(regs) \
1339                (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
1340# define perf_instruction_pointer(regs) instruction_pointer(regs)
1341#endif
1342#ifndef perf_arch_bpf_user_pt_regs
1343# define perf_arch_bpf_user_pt_regs(regs) regs
1344#endif
1345
1346static inline bool has_branch_stack(struct perf_event *event)
1347{
1348        return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
1349}
1350
1351static inline bool needs_branch_stack(struct perf_event *event)
1352{
1353        return event->attr.branch_sample_type != 0;
1354}
1355
1356static inline bool has_aux(struct perf_event *event)
1357{
1358        return event->pmu->setup_aux;
1359}
1360
1361static inline bool is_write_backward(struct perf_event *event)
1362{
1363        return !!event->attr.write_backward;
1364}
1365
1366static inline bool has_addr_filter(struct perf_event *event)
1367{
1368        return event->pmu->nr_addr_filters;
1369}
1370
1371/*
1372 * An inherited event uses parent's filters
1373 */
1374static inline struct perf_addr_filters_head *
1375perf_event_addr_filters(struct perf_event *event)
1376{
1377        struct perf_addr_filters_head *ifh = &event->addr_filters;
1378
1379        if (event->parent)
1380                ifh = &event->parent->addr_filters;
1381
1382        return ifh;
1383}
1384
1385extern void perf_event_addr_filters_sync(struct perf_event *event);
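
A hedged sketch of a pmu::addr_filters_sync() implementation: walk the hw-agnostic filter list returned by perf_event_addr_filters() (the parent's list for inherited events) together with the addr_filter_ranges the core resolved, and load them into whatever event->hw.addr_filters points at. struct example_hw_filter is made up; the Intel PT driver is a real instance of this pattern. The caller, perf_event_addr_filters_sync() invoked from ->start(), holds the filters lock and so serializes the walk.

/* Illustrative sketch, not part of this header. */
struct example_hw_filter {
        unsigned long   start, end;             /* hypothetical range registers */
        bool            stop;
};

static void example_addr_filters_sync(struct perf_event *event)
{
        struct perf_addr_filters_head *head = perf_event_addr_filters(event);
        struct perf_addr_filter_range *range = event->addr_filter_ranges;
        struct example_hw_filter *hwf = event->hw.addr_filters;
        struct perf_addr_filter *filter;
        int i = 0;

        if (!hwf)
                return;

        list_for_each_entry(filter, &head->list, entry) {
                hwf[i].start = range[i].start;
                hwf[i].end   = range[i].start + range[i].size - 1;
                hwf[i].stop  = filter->action == PERF_ADDR_FILTER_ACTION_STOP;
                i++;
        }
}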
1386
1387extern int perf_output_begin(struct perf_output_handle *handle,
1388                             struct perf_event *event, unsigned int size);
1389extern int perf_output_begin_forward(struct perf_output_handle *handle,
1390                                    struct perf_event *event,
1391                                    unsigned int size);
1392extern int perf_output_begin_backward(struct perf_output_handle *handle,
1393                                      struct perf_event *event,
1394                                      unsigned int size);
1395
1396extern void perf_output_end(struct perf_output_handle *handle);
1397extern unsigned int perf_output_copy(struct perf_output_handle *handle,
1398                             const void *buf, unsigned int len);
1399extern unsigned int perf_output_skip(struct perf_output_handle *handle,
1400                                     unsigned int len);
1401extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
1402                                 struct perf_output_handle *handle,
1403                                 unsigned long from, unsigned long to);
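
A hedged sketch of emitting a small custom record with the begin/copy/end API, in the style of the core's side-band records; the record type and layout here are made up, and a real record would normally also carry the ID sample emitted via perf_event__output_id_sample().

/* Illustrative sketch, not part of this header. */
#define EXAMPLE_RECORD_TYPE     0x1000          /* made-up record type */

static void example_emit_record(struct perf_event *event, u64 payload)
{
        struct perf_output_handle handle;
        struct perf_event_header header = {
                .type = EXAMPLE_RECORD_TYPE,
                .size = sizeof(header) + sizeof(payload),
        };

        if (perf_output_begin(&handle, event, header.size))
                return;                         /* ring buffer unavailable or full */

        perf_output_copy(&handle, &header, sizeof(header));
        perf_output_copy(&handle, &payload, sizeof(payload));
        perf_output_end(&handle);
}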
1404extern int perf_swevent_get_recursion_context(void);
1405extern void perf_swevent_put_recursion_context(int rctx);
1406extern u64 perf_swevent_set_period(struct perf_event *event);
1407extern void perf_event_enable(struct perf_event *event);
1408extern void perf_event_disable(struct perf_event *event);
1409extern void perf_event_disable_local(struct perf_event *event);
1410extern void perf_event_disable_inatomic(struct perf_event *event);
1411extern void perf_event_task_tick(void);
1412extern int perf_event_account_interrupt(struct perf_event *event);
1413extern int perf_event_period(struct perf_event *event, u64 value);
1414extern u64 perf_event_pause(struct perf_event *event, bool reset);
1415#else /* !CONFIG_PERF_EVENTS: */
1416static inline void *
1417perf_aux_output_begin(struct perf_output_handle *handle,
1418                      struct perf_event *event)                         { return NULL; }
1419static inline void
1420perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
1421                                                                        { }
1422static inline int
1423perf_aux_output_skip(struct perf_output_handle *handle,
1424                     unsigned long size)                                { return -EINVAL; }
1425static inline void *
1426perf_get_aux(struct perf_output_handle *handle)                         { return NULL; }
1427static inline void
1428perf_event_task_migrate(struct task_struct *task)                       { }
1429static inline void
1430perf_event_task_sched_in(struct task_struct *prev,
1431                         struct task_struct *task)                      { }
1432static inline void
1433perf_event_task_sched_out(struct task_struct *prev,
1434                          struct task_struct *next)                     { }
1435static inline int perf_event_init_task(struct task_struct *child)       { return 0; }
1436static inline void perf_event_exit_task(struct task_struct *child)      { }
1437static inline void perf_event_free_task(struct task_struct *task)       { }
1438static inline void perf_event_delayed_put(struct task_struct *task)     { }
1439static inline struct file *perf_event_get(unsigned int fd)      { return ERR_PTR(-EINVAL); }
1440static inline const struct perf_event *perf_get_event(struct file *file)
1441{
1442        return ERR_PTR(-EINVAL);
1443}
1444static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
1445{
1446        return ERR_PTR(-EINVAL);
1447}
1448static inline int perf_event_read_local(struct perf_event *event, u64 *value,
1449                                        u64 *enabled, u64 *running)
1450{
1451        return -EINVAL;
1452}
1453static inline void perf_event_print_debug(void)                         { }
1454static inline int perf_event_task_disable(void)                         { return -EINVAL; }
1455static inline int perf_event_task_enable(void)                          { return -EINVAL; }
1456static inline int perf_event_refresh(struct perf_event *event, int refresh)
1457{
1458        return -EINVAL;
1459}
1460
1461static inline void
1462perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)     { }
1463static inline void
1464perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)                     { }
1465static inline void
1466perf_bp_event(struct perf_event *event, void *data)                     { }
1467
1468static inline int perf_register_guest_info_callbacks
1469(struct perf_guest_info_callbacks *callbacks)                           { return 0; }
1470static inline int perf_unregister_guest_info_callbacks
1471(struct perf_guest_info_callbacks *callbacks)                           { return 0; }
1472
1473static inline void perf_event_mmap(struct vm_area_struct *vma)          { }
1474
1475typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
1476static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
1477                                      bool unregister, const char *sym) { }
1478static inline void perf_event_bpf_event(struct bpf_prog *prog,
1479                                        enum perf_bpf_event_type type,
1480                                        u16 flags)                      { }
1481static inline void perf_event_exec(void)                                { }
1482static inline void perf_event_comm(struct task_struct *tsk, bool exec)  { }
1483static inline void perf_event_namespaces(struct task_struct *tsk)       { }
1484static inline void perf_event_fork(struct task_struct *tsk)             { }
1485static inline void perf_event_init(void)                                { }
1486static inline int  perf_swevent_get_recursion_context(void)             { return -1; }
1487static inline void perf_swevent_put_recursion_context(int rctx)         { }
1488static inline u64 perf_swevent_set_period(struct perf_event *event)     { return 0; }
1489static inline void perf_event_enable(struct perf_event *event)          { }
1490static inline void perf_event_disable(struct perf_event *event)         { }
1491static inline int __perf_event_disable(void *info)                      { return -1; }
1492static inline void perf_event_task_tick(void)                           { }
1493static inline int perf_event_release_kernel(struct perf_event *event)   { return 0; }
1494static inline int perf_event_period(struct perf_event *event, u64 value)
1495{
1496        return -EINVAL;
1497}
1498static inline u64 perf_event_pause(struct perf_event *event, bool reset)
1499{
1500        return 0;
1501}
1502#endif
1503
1504#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
1505extern void perf_restore_debug_store(void);
1506#else
1507static inline void perf_restore_debug_store(void)                       { }
1508#endif
1509
1510static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
1511{
1512        return frag->pad < sizeof(u64);
1513}
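/*
 * Illustrative sketch, not part of the original header: "next" and "pad"
 * overlay each other, and a chained fragment stores a kernel pointer there
 * (always >= sizeof(u64)), while a terminating fragment leaves it NULL (the
 * core may later reuse the field for the sub-u64 alignment pad), so the
 * comparison above needs no extra flag.  Building a two-fragment raw record
 * might look roughly like this, with "meta"/"payload" and their sizes being
 * placeholders; the core later walks the chain with perf_raw_frag_last()
 * when sizing the record:
 *
 *        struct perf_raw_frag trailer = {
 *                .data = payload,
 *                .size = payload_size,
 *        };
 *        struct perf_raw_record raw = {
 *                .frag = {
 *                        .next = &trailer,
 *                        .data = meta,
 *                        .size = meta_size,
 *                },
 *        };
 */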
1514
1515#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
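/*
 * Illustrative sketch, not part of the original header: a record is usually
 * emitted by reserving space, copying fixed-size pieces with
 * perf_output_put(), and closing the handle.  "event" and "lost" are assumed
 * locals; the layout shown is that of a PERF_RECORD_LOST record.
 *
 *        struct perf_output_handle handle;
 *        struct perf_event_header header = {
 *                .type = PERF_RECORD_LOST,
 *                .misc = 0,
 *                .size = sizeof(header) + 2 * sizeof(u64),
 *        };
 *
 *        if (perf_output_begin(&handle, event, header.size))
 *                return;
 *
 *        perf_output_put(&handle, header);
 *        perf_output_put(&handle, event->id);
 *        perf_output_put(&handle, lost);
 *        perf_output_end(&handle);
 */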
1516
1517struct perf_pmu_events_attr {
1518        struct device_attribute attr;
1519        u64 id;
1520        const char *event_str;
1521};
1522
1523struct perf_pmu_events_ht_attr {
1524        struct device_attribute                 attr;
1525        u64                                     id;
1526        const char                              *event_str_ht;
1527        const char                              *event_str_noht;
1528};
1529
1530ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
1531                              char *page);
1532
1533#define PMU_EVENT_ATTR(_name, _var, _id, _show)                         \
1534static struct perf_pmu_events_attr _var = {                             \
1535        .attr = __ATTR(_name, 0444, _show, NULL),                       \
1536        .id   =  _id,                                                   \
1537};
1538
1539#define PMU_EVENT_ATTR_STRING(_name, _var, _str)                            \
1540static struct perf_pmu_events_attr _var = {                                 \
1541        .attr           = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
1542        .id             = 0,                                                \
1543        .event_str      = _str,                                             \
1544};
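/*
 * Illustrative sketch, not part of the original header: a PMU driver can use
 * the macro above to publish a named event string that
 * perf_event_sysfs_show() will render.  The "my_pmu_*" identifiers are
 * placeholders:
 *
 *        PMU_EVENT_ATTR_STRING(cycles, my_pmu_attr_cycles, "event=0x3c");
 *
 *        static struct attribute *my_pmu_events_attrs[] = {
 *                &my_pmu_attr_cycles.attr.attr,
 *                NULL,
 *        };
 *
 *        static const struct attribute_group my_pmu_events_group = {
 *                .name  = "events",
 *                .attrs = my_pmu_events_attrs,
 *        };
 */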
1545
1546#define PMU_FORMAT_ATTR(_name, _format)                                 \
1547static ssize_t                                                          \
1548_name##_show(struct device *dev,                                        \
1549                               struct device_attribute *attr,           \
1550                               char *page)                              \
1551{                                                                       \
1552        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
1553        return sprintf(page, _format "\n");                             \
1554}                                                                       \
1555                                                                        \
1556static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
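/*
 * Illustrative sketch, not part of the original header: the matching sysfs
 * "format" directory is typically built the same way; format_attr_event is
 * the device_attribute the macro above generates when _name is "event",
 * and the other identifiers are placeholders:
 *
 *        PMU_FORMAT_ATTR(event, "config:0-7");
 *
 *        static struct attribute *my_pmu_format_attrs[] = {
 *                &format_attr_event.attr,
 *                NULL,
 *        };
 *
 *        static const struct attribute_group my_pmu_format_group = {
 *                .name  = "format",
 *                .attrs = my_pmu_format_attrs,
 *        };
 */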
1557
1558/* Performance counter hotplug functions */
1559#ifdef CONFIG_PERF_EVENTS
1560int perf_event_init_cpu(unsigned int cpu);
1561int perf_event_exit_cpu(unsigned int cpu);
1562#else
1563#define perf_event_init_cpu     NULL
1564#define perf_event_exit_cpu     NULL
1565#endif
1566
1567extern void __weak arch_perf_update_userpage(struct perf_event *event,
1568                                             struct perf_event_mmap_page *userpg,
1569                                             u64 now);
1570
1571#endif /* _LINUX_PERF_EVENT_H */
1572