linux/include/linux/perf_event.h
<<
>>
Prefs
   1/*
   2 * Performance events:
   3 *
   4 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
   5 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
   6 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
   7 *
   8 * Data type definitions, declarations, prototypes.
   9 *
  10 *    Started by: Thomas Gleixner and Ingo Molnar
  11 *
  12 * For licencing details see kernel-base/COPYING
  13 */
  14#ifndef _LINUX_PERF_EVENT_H
  15#define _LINUX_PERF_EVENT_H
  16
  17#include <uapi/linux/perf_event.h>
  18#include <uapi/linux/bpf_perf_event.h>
  19
  20/*
  21 * Kernel-internal data types and definitions:
  22 */
  23
  24#ifdef CONFIG_PERF_EVENTS
  25# include <asm/perf_event.h>
  26# include <asm/local64.h>
  27#endif
  28
  29struct perf_guest_info_callbacks {
  30        int                             (*is_in_guest)(void);
  31        int                             (*is_user_mode)(void);
  32        unsigned long                   (*get_guest_ip)(void);
  33        void                            (*handle_intel_pt_intr)(void);
  34};
  35
  36#ifdef CONFIG_HAVE_HW_BREAKPOINT
  37#include <asm/hw_breakpoint.h>
  38#endif
  39
  40#include <linux/list.h>
  41#include <linux/mutex.h>
  42#include <linux/rculist.h>
  43#include <linux/rcupdate.h>
  44#include <linux/spinlock.h>
  45#include <linux/hrtimer.h>
  46#include <linux/fs.h>
  47#include <linux/pid_namespace.h>
  48#include <linux/workqueue.h>
  49#include <linux/ftrace.h>
  50#include <linux/cpu.h>
  51#include <linux/irq_work.h>
  52#include <linux/static_key.h>
  53#include <linux/jump_label_ratelimit.h>
  54#include <linux/atomic.h>
  55#include <linux/sysfs.h>
  56#include <linux/perf_regs.h>
  57#include <linux/cgroup.h>
  58#include <linux/refcount.h>
  59#include <linux/security.h>
  60#include <asm/local.h>
  61
  62struct perf_callchain_entry {
  63        __u64                           nr;
  64        __u64                           ip[]; /* /proc/sys/kernel/perf_event_max_stack */
  65};
  66
  67struct perf_callchain_entry_ctx {
  68        struct perf_callchain_entry *entry;
  69        u32                         max_stack;
  70        u32                         nr;
  71        short                       contexts;
  72        bool                        contexts_maxed;
  73};
  74
  75typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
  76                                     unsigned long off, unsigned long len);
  77
  78struct perf_raw_frag {
  79        union {
  80                struct perf_raw_frag    *next;
  81                unsigned long           pad;
  82        };
  83        perf_copy_f                     copy;
  84        void                            *data;
  85        u32                             size;
  86} __packed;
  87
  88struct perf_raw_record {
  89        struct perf_raw_frag            frag;
  90        u32                             size;
  91};
  92
  93/*
  94 * branch stack layout:
  95 *  nr: number of taken branches stored in entries[]
  96 *  hw_idx: The low level index of raw branch records
  97 *          for the most recent branch.
  98 *          -1ULL means invalid/unknown.
  99 *
 100 * Note that nr can vary from sample to sample
 101 * branches (to, from) are stored from most recent
 102 * to least recent, i.e., entries[0] contains the most
 103 * recent branch.
 104 * The entries[] is an abstraction of raw branch records,
 105 * which may not be stored in age order in HW, e.g. Intel LBR.
 106 * The hw_idx is to expose the low level index of raw
 107 * branch record for the most recent branch aka entries[0].
 108 * The hw_idx index is between -1 (unknown) and max depth,
 109 * which can be retrieved in /sys/devices/cpu/caps/branches.
 110 * For the architectures whose raw branch records are
 111 * already stored in age order, the hw_idx should be 0.
 112 */
 113struct perf_branch_stack {
 114        __u64                           nr;
 115        __u64                           hw_idx;
 116        struct perf_branch_entry        entries[];
 117};
 118
 119struct task_struct;
 120
 121/*
 122 * extra PMU register associated with an event
 123 */
 124struct hw_perf_event_extra {
 125        u64             config; /* register value */
 126        unsigned int    reg;    /* register address or index */
 127        int             alloc;  /* extra register already allocated */
 128        int             idx;    /* index in shared_regs->regs[] */
 129};
 130
 131/**
 132 * struct hw_perf_event - performance event hardware details:
 133 */
 134struct hw_perf_event {
 135#ifdef CONFIG_PERF_EVENTS
 136        union {
 137                struct { /* hardware */
 138                        u64             config;
 139                        u64             last_tag;
 140                        unsigned long   config_base;
 141                        unsigned long   event_base;
 142                        int             event_base_rdpmc;
 143                        int             idx;
 144                        int             last_cpu;
 145                        int             flags;
 146
 147                        struct hw_perf_event_extra extra_reg;
 148                        struct hw_perf_event_extra branch_reg;
 149                };
 150                struct { /* software */
 151                        struct hrtimer  hrtimer;
 152                };
 153                struct { /* tracepoint */
 154                        /* for tp_event->class */
 155                        struct list_head        tp_list;
 156                };
 157                struct { /* amd_power */
 158                        u64     pwr_acc;
 159                        u64     ptsc;
 160                };
 161#ifdef CONFIG_HAVE_HW_BREAKPOINT
 162                struct { /* breakpoint */
 163                        /*
 164                         * Crufty hack to avoid the chicken and egg
 165                         * problem hw_breakpoint has with context
 166                         * creation and event initalization.
 167                         */
 168                        struct arch_hw_breakpoint       info;
 169                        struct list_head                bp_list;
 170                };
 171#endif
 172                struct { /* amd_iommu */
 173                        u8      iommu_bank;
 174                        u8      iommu_cntr;
 175                        u16     padding;
 176                        u64     conf;
 177                        u64     conf1;
 178                };
 179        };
 180        /*
 181         * If the event is a per task event, this will point to the task in
 182         * question. See the comment in perf_event_alloc().
 183         */
 184        struct task_struct              *target;
 185
 186        /*
 187         * PMU would store hardware filter configuration
 188         * here.
 189         */
 190        void                            *addr_filters;
 191
 192        /* Last sync'ed generation of filters */
 193        unsigned long                   addr_filters_gen;
 194
 195/*
 196 * hw_perf_event::state flags; used to track the PERF_EF_* state.
 197 */
 198#define PERF_HES_STOPPED        0x01 /* the counter is stopped */
 199#define PERF_HES_UPTODATE       0x02 /* event->count up-to-date */
 200#define PERF_HES_ARCH           0x04
 201
 202        int                             state;
 203
 204        /*
 205         * The last observed hardware counter value, updated with a
 206         * local64_cmpxchg() such that pmu::read() can be called nested.
 207         */
 208        local64_t                       prev_count;
 209
 210        /*
 211         * The period to start the next sample with.
 212         */
 213        u64                             sample_period;
 214
 215        /*
 216         * The period we started this sample with.
 217         */
 218        u64                             last_period;
 219
 220        /*
 221         * However much is left of the current period; note that this is
 222         * a full 64bit value and allows for generation of periods longer
 223         * than hardware might allow.
 224         */
 225        local64_t                       period_left;
 226
 227        /*
 228         * State for throttling the event, see __perf_event_overflow() and
 229         * perf_adjust_freq_unthr_context().
 230         */
 231        u64                             interrupts_seq;
 232        u64                             interrupts;
 233
 234        /*
 235         * State for freq target events, see __perf_event_overflow() and
 236         * perf_adjust_freq_unthr_context().
 237         */
 238        u64                             freq_time_stamp;
 239        u64                             freq_count_stamp;
 240#endif
 241};
 242
 243struct perf_event;
 244
 245/*
 246 * Common implementation detail of pmu::{start,commit,cancel}_txn
 247 */
 248#define PERF_PMU_TXN_ADD  0x1           /* txn to add/schedule event on PMU */
 249#define PERF_PMU_TXN_READ 0x2           /* txn to read event group from PMU */
 250
 251/**
 252 * pmu::capabilities flags
 253 */
 254#define PERF_PMU_CAP_NO_INTERRUPT               0x01
 255#define PERF_PMU_CAP_NO_NMI                     0x02
 256#define PERF_PMU_CAP_AUX_NO_SG                  0x04
 257#define PERF_PMU_CAP_EXTENDED_REGS              0x08
 258#define PERF_PMU_CAP_EXCLUSIVE                  0x10
 259#define PERF_PMU_CAP_ITRACE                     0x20
 260#define PERF_PMU_CAP_HETEROGENEOUS_CPUS         0x40
 261#define PERF_PMU_CAP_NO_EXCLUDE                 0x80
 262#define PERF_PMU_CAP_AUX_OUTPUT                 0x100
 263
 264struct perf_output_handle;
 265
 266/**
 267 * struct pmu - generic performance monitoring unit
 268 */
 269struct pmu {
 270        struct list_head                entry;
 271
 272        struct module                   *module;
 273        struct device                   *dev;
 274        const struct attribute_group    **attr_groups;
 275        const struct attribute_group    **attr_update;
 276        const char                      *name;
 277        int                             type;
 278
 279        /*
 280         * various common per-pmu feature flags
 281         */
 282        int                             capabilities;
 283
 284        int __percpu                    *pmu_disable_count;
 285        struct perf_cpu_context __percpu *pmu_cpu_context;
 286        atomic_t                        exclusive_cnt; /* < 0: cpu; > 0: tsk */
 287        int                             task_ctx_nr;
 288        int                             hrtimer_interval_ms;
 289
 290        /* number of address filters this PMU can do */
 291        unsigned int                    nr_addr_filters;
 292
 293        /*
 294         * Fully disable/enable this PMU, can be used to protect from the PMI
 295         * as well as for lazy/batch writing of the MSRs.
 296         */
 297        void (*pmu_enable)              (struct pmu *pmu); /* optional */
 298        void (*pmu_disable)             (struct pmu *pmu); /* optional */
 299
 300        /*
 301         * Try and initialize the event for this PMU.
 302         *
 303         * Returns:
 304         *  -ENOENT     -- @event is not for this PMU
 305         *
 306         *  -ENODEV     -- @event is for this PMU but PMU not present
 307         *  -EBUSY      -- @event is for this PMU but PMU temporarily unavailable
 308         *  -EINVAL     -- @event is for this PMU but @event is not valid
 309         *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
 310         *  -EACCES     -- @event is for this PMU, @event is valid, but no privileges
 311         *
 312         *  0           -- @event is for this PMU and valid
 313         *
 314         * Other error return values are allowed.
 315         */
 316        int (*event_init)               (struct perf_event *event);
 317
 318        /*
 319         * Notification that the event was mapped or unmapped.  Called
 320         * in the context of the mapping task.
 321         */
 322        void (*event_mapped)            (struct perf_event *event, struct mm_struct *mm); /* optional */
 323        void (*event_unmapped)          (struct perf_event *event, struct mm_struct *mm); /* optional */
 324
 325        /*
 326         * Flags for ->add()/->del()/ ->start()/->stop(). There are
 327         * matching hw_perf_event::state flags.
 328         */
 329#define PERF_EF_START   0x01            /* start the counter when adding    */
 330#define PERF_EF_RELOAD  0x02            /* reload the counter when starting */
 331#define PERF_EF_UPDATE  0x04            /* update the counter when stopping */
 332
 333        /*
 334         * Adds/Removes a counter to/from the PMU, can be done inside a
 335         * transaction, see the ->*_txn() methods.
 336         *
 337         * The add/del callbacks will reserve all hardware resources required
 338         * to service the event, this includes any counter constraint
 339         * scheduling etc.
 340         *
 341         * Called with IRQs disabled and the PMU disabled on the CPU the event
 342         * is on.
 343         *
 344         * ->add() called without PERF_EF_START should result in the same state
 345         *  as ->add() followed by ->stop().
 346         *
 347         * ->del() must always PERF_EF_UPDATE stop an event. If it calls
 348         *  ->stop() that must deal with already being stopped without
 349         *  PERF_EF_UPDATE.
 350         */
 351        int  (*add)                     (struct perf_event *event, int flags);
 352        void (*del)                     (struct perf_event *event, int flags);
 353
 354        /*
 355         * Starts/Stops a counter present on the PMU.
 356         *
 357         * The PMI handler should stop the counter when perf_event_overflow()
 358         * returns !0. ->start() will be used to continue.
 359         *
 360         * Also used to change the sample period.
 361         *
 362         * Called with IRQs disabled and the PMU disabled on the CPU the event
 363         * is on -- will be called from NMI context with the PMU generates
 364         * NMIs.
 365         *
 366         * ->stop() with PERF_EF_UPDATE will read the counter and update
 367         *  period/count values like ->read() would.
 368         *
 369         * ->start() with PERF_EF_RELOAD will reprogram the counter
 370         *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
 371         */
 372        void (*start)                   (struct perf_event *event, int flags);
 373        void (*stop)                    (struct perf_event *event, int flags);
 374
 375        /*
 376         * Updates the counter value of the event.
 377         *
 378         * For sampling capable PMUs this will also update the software period
 379         * hw_perf_event::period_left field.
 380         */
 381        void (*read)                    (struct perf_event *event);
 382
 383        /*
 384         * Group events scheduling is treated as a transaction, add
 385         * group events as a whole and perform one schedulability test.
 386         * If the test fails, roll back the whole group
 387         *
 388         * Start the transaction, after this ->add() doesn't need to
 389         * do schedulability tests.
 390         *
 391         * Optional.
 392         */
 393        void (*start_txn)               (struct pmu *pmu, unsigned int txn_flags);
 394        /*
 395         * If ->start_txn() disabled the ->add() schedulability test
 396         * then ->commit_txn() is required to perform one. On success
 397         * the transaction is closed. On error the transaction is kept
 398         * open until ->cancel_txn() is called.
 399         *
 400         * Optional.
 401         */
 402        int  (*commit_txn)              (struct pmu *pmu);
 403        /*
 404         * Will cancel the transaction, assumes ->del() is called
 405         * for each successful ->add() during the transaction.
 406         *
 407         * Optional.
 408         */
 409        void (*cancel_txn)              (struct pmu *pmu);
 410
 411        /*
 412         * Will return the value for perf_event_mmap_page::index for this event,
 413         * if no implementation is provided it will default to: event->hw.idx + 1.
 414         */
 415        int (*event_idx)                (struct perf_event *event); /*optional */
 416
 417        /*
 418         * context-switches callback
 419         */
 420        void (*sched_task)              (struct perf_event_context *ctx,
 421                                        bool sched_in);
 422
 423        /*
 424         * Kmem cache of PMU specific data
 425         */
 426        struct kmem_cache               *task_ctx_cache;
 427
 428        /*
 429         * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
 430         * can be synchronized using this function. See Intel LBR callstack support
 431         * implementation and Perf core context switch handling callbacks for usage
 432         * examples.
 433         */
 434        void (*swap_task_ctx)           (struct perf_event_context *prev,
 435                                         struct perf_event_context *next);
 436                                        /* optional */
 437
 438        /*
 439         * Set up pmu-private data structures for an AUX area
 440         */
 441        void *(*setup_aux)              (struct perf_event *event, void **pages,
 442                                         int nr_pages, bool overwrite);
 443                                        /* optional */
 444
 445        /*
 446         * Free pmu-private AUX data structures
 447         */
 448        void (*free_aux)                (void *aux); /* optional */
 449
 450        /*
 451         * Take a snapshot of the AUX buffer without touching the event
 452         * state, so that preempting ->start()/->stop() callbacks does
 453         * not interfere with their logic. Called in PMI context.
 454         *
 455         * Returns the size of AUX data copied to the output handle.
 456         *
 457         * Optional.
 458         */
 459        long (*snapshot_aux)            (struct perf_event *event,
 460                                         struct perf_output_handle *handle,
 461                                         unsigned long size);
 462
 463        /*
 464         * Validate address range filters: make sure the HW supports the
 465         * requested configuration and number of filters; return 0 if the
 466         * supplied filters are valid, -errno otherwise.
 467         *
 468         * Runs in the context of the ioctl()ing process and is not serialized
 469         * with the rest of the PMU callbacks.
 470         */
 471        int (*addr_filters_validate)    (struct list_head *filters);
 472                                        /* optional */
 473
 474        /*
 475         * Synchronize address range filter configuration:
 476         * translate hw-agnostic filters into hardware configuration in
 477         * event::hw::addr_filters.
 478         *
 479         * Runs as a part of filter sync sequence that is done in ->start()
 480         * callback by calling perf_event_addr_filters_sync().
 481         *
 482         * May (and should) traverse event::addr_filters::list, for which its
 483         * caller provides necessary serialization.
 484         */
 485        void (*addr_filters_sync)       (struct perf_event *event);
 486                                        /* optional */
 487
 488        /*
 489         * Check if event can be used for aux_output purposes for
 490         * events of this PMU.
 491         *
 492         * Runs from perf_event_open(). Should return 0 for "no match"
 493         * or non-zero for "match".
 494         */
 495        int (*aux_output_match)         (struct perf_event *event);
 496                                        /* optional */
 497
 498        /*
 499         * Filter events for PMU-specific reasons.
 500         */
 501        int (*filter_match)             (struct perf_event *event); /* optional */
 502
 503        /*
 504         * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
 505         */
 506        int (*check_period)             (struct perf_event *event, u64 value); /* optional */
 507};
 508
 509enum perf_addr_filter_action_t {
 510        PERF_ADDR_FILTER_ACTION_STOP = 0,
 511        PERF_ADDR_FILTER_ACTION_START,
 512        PERF_ADDR_FILTER_ACTION_FILTER,
 513};
 514
 515/**
 516 * struct perf_addr_filter - address range filter definition
 517 * @entry:      event's filter list linkage
 518 * @path:       object file's path for file-based filters
 519 * @offset:     filter range offset
 520 * @size:       filter range size (size==0 means single address trigger)
 521 * @action:     filter/start/stop
 522 *
 523 * This is a hardware-agnostic filter configuration as specified by the user.
 524 */
 525struct perf_addr_filter {
 526        struct list_head        entry;
 527        struct path             path;
 528        unsigned long           offset;
 529        unsigned long           size;
 530        enum perf_addr_filter_action_t  action;
 531};
 532
 533/**
 534 * struct perf_addr_filters_head - container for address range filters
 535 * @list:       list of filters for this event
 536 * @lock:       spinlock that serializes accesses to the @list and event's
 537 *              (and its children's) filter generations.
 538 * @nr_file_filters:    number of file-based filters
 539 *
 540 * A child event will use parent's @list (and therefore @lock), so they are
 541 * bundled together; see perf_event_addr_filters().
 542 */
 543struct perf_addr_filters_head {
 544        struct list_head        list;
 545        raw_spinlock_t          lock;
 546        unsigned int            nr_file_filters;
 547};
 548
 549struct perf_addr_filter_range {
 550        unsigned long           start;
 551        unsigned long           size;
 552};
 553
 554/**
 555 * enum perf_event_state - the states of an event:
 556 */
 557enum perf_event_state {
 558        PERF_EVENT_STATE_DEAD           = -4,
 559        PERF_EVENT_STATE_EXIT           = -3,
 560        PERF_EVENT_STATE_ERROR          = -2,
 561        PERF_EVENT_STATE_OFF            = -1,
 562        PERF_EVENT_STATE_INACTIVE       =  0,
 563        PERF_EVENT_STATE_ACTIVE         =  1,
 564};
 565
 566struct file;
 567struct perf_sample_data;
 568
 569typedef void (*perf_overflow_handler_t)(struct perf_event *,
 570                                        struct perf_sample_data *,
 571                                        struct pt_regs *regs);
 572
 573/*
 574 * Event capabilities. For event_caps and groups caps.
 575 *
 576 * PERF_EV_CAP_SOFTWARE: Is a software event.
 577 * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
 578 * from any CPU in the package where it is active.
 579 */
 580#define PERF_EV_CAP_SOFTWARE            BIT(0)
 581#define PERF_EV_CAP_READ_ACTIVE_PKG     BIT(1)
 582
 583#define SWEVENT_HLIST_BITS              8
 584#define SWEVENT_HLIST_SIZE              (1 << SWEVENT_HLIST_BITS)
 585
 586struct swevent_hlist {
 587        struct hlist_head               heads[SWEVENT_HLIST_SIZE];
 588        struct rcu_head                 rcu_head;
 589};
 590
 591#define PERF_ATTACH_CONTEXT     0x01
 592#define PERF_ATTACH_GROUP       0x02
 593#define PERF_ATTACH_TASK        0x04
 594#define PERF_ATTACH_TASK_DATA   0x08
 595#define PERF_ATTACH_ITRACE      0x10
 596
 597struct perf_cgroup;
 598struct perf_buffer;
 599
 600struct pmu_event_list {
 601        raw_spinlock_t          lock;
 602        struct list_head        list;
 603};
 604
 605#define for_each_sibling_event(sibling, event)                  \
 606        if ((event)->group_leader == (event))                   \
 607                list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
 608
 609/**
 610 * struct perf_event - performance event kernel representation:
 611 */
 612struct perf_event {
 613#ifdef CONFIG_PERF_EVENTS
 614        /*
 615         * entry onto perf_event_context::event_list;
 616         *   modifications require ctx->lock
 617         *   RCU safe iterations.
 618         */
 619        struct list_head                event_entry;
 620
 621        /*
 622         * Locked for modification by both ctx->mutex and ctx->lock; holding
 623         * either sufficies for read.
 624         */
 625        struct list_head                sibling_list;
 626        struct list_head                active_list;
 627        /*
 628         * Node on the pinned or flexible tree located at the event context;
 629         */
 630        struct rb_node                  group_node;
 631        u64                             group_index;
 632        /*
 633         * We need storage to track the entries in perf_pmu_migrate_context; we
 634         * cannot use the event_entry because of RCU and we want to keep the
 635         * group in tact which avoids us using the other two entries.
 636         */
 637        struct list_head                migrate_entry;
 638
 639        struct hlist_node               hlist_entry;
 640        struct list_head                active_entry;
 641        int                             nr_siblings;
 642
 643        /* Not serialized. Only written during event initialization. */
 644        int                             event_caps;
 645        /* The cumulative AND of all event_caps for events in this group. */
 646        int                             group_caps;
 647
 648        struct perf_event               *group_leader;
 649        struct pmu                      *pmu;
 650        void                            *pmu_private;
 651
 652        enum perf_event_state           state;
 653        unsigned int                    attach_state;
 654        local64_t                       count;
 655        atomic64_t                      child_count;
 656
 657        /*
 658         * These are the total time in nanoseconds that the event
 659         * has been enabled (i.e. eligible to run, and the task has
 660         * been scheduled in, if this is a per-task event)
 661         * and running (scheduled onto the CPU), respectively.
 662         */
 663        u64                             total_time_enabled;
 664        u64                             total_time_running;
 665        u64                             tstamp;
 666
 667        /*
 668         * timestamp shadows the actual context timing but it can
 669         * be safely used in NMI interrupt context. It reflects the
 670         * context time as it was when the event was last scheduled in.
 671         *
 672         * ctx_time already accounts for ctx->timestamp. Therefore to
 673         * compute ctx_time for a sample, simply add perf_clock().
 674         */
 675        u64                             shadow_ctx_time;
 676
 677        struct perf_event_attr          attr;
 678        u16                             header_size;
 679        u16                             id_header_size;
 680        u16                             read_size;
 681        struct hw_perf_event            hw;
 682
 683        struct perf_event_context       *ctx;
 684        atomic_long_t                   refcount;
 685
 686        /*
 687         * These accumulate total time (in nanoseconds) that children
 688         * events have been enabled and running, respectively.
 689         */
 690        atomic64_t                      child_total_time_enabled;
 691        atomic64_t                      child_total_time_running;
 692
 693        /*
 694         * Protect attach/detach and child_list:
 695         */
 696        struct mutex                    child_mutex;
 697        struct list_head                child_list;
 698        struct perf_event               *parent;
 699
 700        int                             oncpu;
 701        int                             cpu;
 702
 703        struct list_head                owner_entry;
 704        struct task_struct              *owner;
 705
 706        /* mmap bits */
 707        struct mutex                    mmap_mutex;
 708        atomic_t                        mmap_count;
 709
 710        struct perf_buffer              *rb;
 711        struct list_head                rb_entry;
 712        unsigned long                   rcu_batches;
 713        int                             rcu_pending;
 714
 715        /* poll related */
 716        wait_queue_head_t               waitq;
 717        struct fasync_struct            *fasync;
 718
 719        /* delayed work for NMIs and such */
 720        int                             pending_wakeup;
 721        int                             pending_kill;
 722        int                             pending_disable;
 723        struct irq_work                 pending;
 724
 725        atomic_t                        event_limit;
 726
 727        /* address range filters */
 728        struct perf_addr_filters_head   addr_filters;
 729        /* vma address array for file-based filders */
 730        struct perf_addr_filter_range   *addr_filter_ranges;
 731        unsigned long                   addr_filters_gen;
 732
 733        /* for aux_output events */
 734        struct perf_event               *aux_event;
 735
 736        void (*destroy)(struct perf_event *);
 737        struct rcu_head                 rcu_head;
 738
 739        struct pid_namespace            *ns;
 740        u64                             id;
 741
 742        u64                             (*clock)(void);
 743        perf_overflow_handler_t         overflow_handler;
 744        void                            *overflow_handler_context;
 745#ifdef CONFIG_BPF_SYSCALL
 746        perf_overflow_handler_t         orig_overflow_handler;
 747        struct bpf_prog                 *prog;
 748#endif
 749
 750#ifdef CONFIG_EVENT_TRACING
 751        struct trace_event_call         *tp_event;
 752        struct event_filter             *filter;
 753#ifdef CONFIG_FUNCTION_TRACER
 754        struct ftrace_ops               ftrace_ops;
 755#endif
 756#endif
 757
 758#ifdef CONFIG_CGROUP_PERF
 759        struct perf_cgroup              *cgrp; /* cgroup event is attach to */
 760#endif
 761
 762#ifdef CONFIG_SECURITY
 763        void *security;
 764#endif
 765        struct list_head                sb_list;
 766#endif /* CONFIG_PERF_EVENTS */
 767};
 768
 769
 770struct perf_event_groups {
 771        struct rb_root  tree;
 772        u64             index;
 773};
 774
 775/**
 776 * struct perf_event_context - event context structure
 777 *
 778 * Used as a container for task events and CPU events as well:
 779 */
 780struct perf_event_context {
 781        struct pmu                      *pmu;
 782        /*
 783         * Protect the states of the events in the list,
 784         * nr_active, and the list:
 785         */
 786        raw_spinlock_t                  lock;
 787        /*
 788         * Protect the list of events.  Locking either mutex or lock
 789         * is sufficient to ensure the list doesn't change; to change
 790         * the list you need to lock both the mutex and the spinlock.
 791         */
 792        struct mutex                    mutex;
 793
 794        struct list_head                active_ctx_list;
 795        struct perf_event_groups        pinned_groups;
 796        struct perf_event_groups        flexible_groups;
 797        struct list_head                event_list;
 798
 799        struct list_head                pinned_active;
 800        struct list_head                flexible_active;
 801
 802        int                             nr_events;
 803        int                             nr_active;
 804        int                             is_active;
 805        int                             nr_stat;
 806        int                             nr_freq;
 807        int                             rotate_disable;
 808        /*
 809         * Set when nr_events != nr_active, except tolerant to events not
 810         * necessary to be active due to scheduling constraints, such as cgroups.
 811         */
 812        int                             rotate_necessary;
 813        refcount_t                      refcount;
 814        struct task_struct              *task;
 815
 816        /*
 817         * Context clock, runs when context enabled.
 818         */
 819        u64                             time;
 820        u64                             timestamp;
 821
 822        /*
 823         * These fields let us detect when two contexts have both
 824         * been cloned (inherited) from a common ancestor.
 825         */
 826        struct perf_event_context       *parent_ctx;
 827        u64                             parent_gen;
 828        u64                             generation;
 829        int                             pin_count;
 830#ifdef CONFIG_CGROUP_PERF
 831        int                             nr_cgroups;      /* cgroup evts */
 832#endif
 833        void                            *task_ctx_data; /* pmu specific data */
 834        struct rcu_head                 rcu_head;
 835};
 836
 837/*
 838 * Number of contexts where an event can trigger:
 839 *      task, softirq, hardirq, nmi.
 840 */
 841#define PERF_NR_CONTEXTS        4
 842
 843/**
 844 * struct perf_event_cpu_context - per cpu event context structure
 845 */
 846struct perf_cpu_context {
 847        struct perf_event_context       ctx;
 848        struct perf_event_context       *task_ctx;
 849        int                             active_oncpu;
 850        int                             exclusive;
 851
 852        raw_spinlock_t                  hrtimer_lock;
 853        struct hrtimer                  hrtimer;
 854        ktime_t                         hrtimer_interval;
 855        unsigned int                    hrtimer_active;
 856
 857#ifdef CONFIG_CGROUP_PERF
 858        struct perf_cgroup              *cgrp;
 859        struct list_head                cgrp_cpuctx_entry;
 860#endif
 861
 862        struct list_head                sched_cb_entry;
 863        int                             sched_cb_usage;
 864
 865        int                             online;
 866        /*
 867         * Per-CPU storage for iterators used in visit_groups_merge. The default
 868         * storage is of size 2 to hold the CPU and any CPU event iterators.
 869         */
 870        int                             heap_size;
 871        struct perf_event               **heap;
 872        struct perf_event               *heap_default[2];
 873};
 874
 875struct perf_output_handle {
 876        struct perf_event               *event;
 877        struct perf_buffer              *rb;
 878        unsigned long                   wakeup;
 879        unsigned long                   size;
 880        u64                             aux_flags;
 881        union {
 882                void                    *addr;
 883                unsigned long           head;
 884        };
 885        int                             page;
 886};
 887
 888struct bpf_perf_event_data_kern {
 889        bpf_user_pt_regs_t *regs;
 890        struct perf_sample_data *data;
 891        struct perf_event *event;
 892};
 893
 894#ifdef CONFIG_CGROUP_PERF
 895
 896/*
 897 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 898 * This is a per-cpu dynamically allocated data structure.
 899 */
 900struct perf_cgroup_info {
 901        u64                             time;
 902        u64                             timestamp;
 903};
 904
 905struct perf_cgroup {
 906        struct cgroup_subsys_state      css;
 907        struct perf_cgroup_info __percpu *info;
 908};
 909
 910/*
 911 * Must ensure cgroup is pinned (css_get) before calling
 912 * this function. In other words, we cannot call this function
 913 * if there is no cgroup event for the current CPU context.
 914 */
 915static inline struct perf_cgroup *
 916perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
 917{
 918        return container_of(task_css_check(task, perf_event_cgrp_id,
 919                                           ctx ? lockdep_is_held(&ctx->lock)
 920                                               : true),
 921                            struct perf_cgroup, css);
 922}
 923#endif /* CONFIG_CGROUP_PERF */
 924
 925#ifdef CONFIG_PERF_EVENTS
 926
 927extern void *perf_aux_output_begin(struct perf_output_handle *handle,
 928                                   struct perf_event *event);
 929extern void perf_aux_output_end(struct perf_output_handle *handle,
 930                                unsigned long size);
 931extern int perf_aux_output_skip(struct perf_output_handle *handle,
 932                                unsigned long size);
 933extern void *perf_get_aux(struct perf_output_handle *handle);
 934extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
 935extern void perf_event_itrace_started(struct perf_event *event);
 936
 937extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
 938extern void perf_pmu_unregister(struct pmu *pmu);
 939
 940extern int perf_num_counters(void);
 941extern const char *perf_pmu_name(void);
 942extern void __perf_event_task_sched_in(struct task_struct *prev,
 943                                       struct task_struct *task);
 944extern void __perf_event_task_sched_out(struct task_struct *prev,
 945                                        struct task_struct *next);
 946extern int perf_event_init_task(struct task_struct *child);
 947extern void perf_event_exit_task(struct task_struct *child);
 948extern void perf_event_free_task(struct task_struct *task);
 949extern void perf_event_delayed_put(struct task_struct *task);
 950extern struct file *perf_event_get(unsigned int fd);
 951extern const struct perf_event *perf_get_event(struct file *file);
 952extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
 953extern void perf_event_print_debug(void);
 954extern void perf_pmu_disable(struct pmu *pmu);
 955extern void perf_pmu_enable(struct pmu *pmu);
 956extern void perf_sched_cb_dec(struct pmu *pmu);
 957extern void perf_sched_cb_inc(struct pmu *pmu);
 958extern int perf_event_task_disable(void);
 959extern int perf_event_task_enable(void);
 960
 961extern void perf_pmu_resched(struct pmu *pmu);
 962
 963extern int perf_event_refresh(struct perf_event *event, int refresh);
 964extern void perf_event_update_userpage(struct perf_event *event);
 965extern int perf_event_release_kernel(struct perf_event *event);
 966extern struct perf_event *
 967perf_event_create_kernel_counter(struct perf_event_attr *attr,
 968                                int cpu,
 969                                struct task_struct *task,
 970                                perf_overflow_handler_t callback,
 971                                void *context);
 972extern void perf_pmu_migrate_context(struct pmu *pmu,
 973                                int src_cpu, int dst_cpu);
 974int perf_event_read_local(struct perf_event *event, u64 *value,
 975                          u64 *enabled, u64 *running);
 976extern u64 perf_event_read_value(struct perf_event *event,
 977                                 u64 *enabled, u64 *running);
 978
 979
 980struct perf_sample_data {
 981        /*
 982         * Fields set by perf_sample_data_init(), group so as to
 983         * minimize the cachelines touched.
 984         */
 985        u64                             addr;
 986        struct perf_raw_record          *raw;
 987        struct perf_branch_stack        *br_stack;
 988        u64                             period;
 989        u64                             weight;
 990        u64                             txn;
 991        union  perf_mem_data_src        data_src;
 992
 993        /*
 994         * The other fields, optionally {set,used} by
 995         * perf_{prepare,output}_sample().
 996         */
 997        u64                             type;
 998        u64                             ip;
 999        struct {
1000                u32     pid;
1001                u32     tid;
1002        }                               tid_entry;
1003        u64                             time;
1004        u64                             id;
1005        u64                             stream_id;
1006        struct {
1007                u32     cpu;
1008                u32     reserved;
1009        }                               cpu_entry;
1010        struct perf_callchain_entry     *callchain;
1011        u64                             aux_size;
1012
1013        /*
1014         * regs_user may point to task_pt_regs or to regs_user_copy, depending
1015         * on arch details.
1016         */
1017        struct perf_regs                regs_user;
1018        struct pt_regs                  regs_user_copy;
1019
1020        struct perf_regs                regs_intr;
1021        u64                             stack_user_size;
1022
1023        u64                             phys_addr;
1024        u64                             cgroup;
1025} ____cacheline_aligned;
1026
1027/* default value for data source */
1028#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
1029                    PERF_MEM_S(LVL, NA)   |\
1030                    PERF_MEM_S(SNOOP, NA) |\
1031                    PERF_MEM_S(LOCK, NA)  |\
1032                    PERF_MEM_S(TLB, NA))
1033
1034static inline void perf_sample_data_init(struct perf_sample_data *data,
1035                                         u64 addr, u64 period)
1036{
1037        /* remaining struct members initialized in perf_prepare_sample() */
1038        data->addr = addr;
1039        data->raw  = NULL;
1040        data->br_stack = NULL;
1041        data->period = period;
1042        data->weight = 0;
1043        data->data_src.val = PERF_MEM_NA;
1044        data->txn = 0;
1045}
1046
1047extern void perf_output_sample(struct perf_output_handle *handle,
1048                               struct perf_event_header *header,
1049                               struct perf_sample_data *data,
1050                               struct perf_event *event);
1051extern void perf_prepare_sample(struct perf_event_header *header,
1052                                struct perf_sample_data *data,
1053                                struct perf_event *event,
1054                                struct pt_regs *regs);
1055
1056extern int perf_event_overflow(struct perf_event *event,
1057                                 struct perf_sample_data *data,
1058                                 struct pt_regs *regs);
1059
1060extern void perf_event_output_forward(struct perf_event *event,
1061                                     struct perf_sample_data *data,
1062                                     struct pt_regs *regs);
1063extern void perf_event_output_backward(struct perf_event *event,
1064                                       struct perf_sample_data *data,
1065                                       struct pt_regs *regs);
1066extern int perf_event_output(struct perf_event *event,
1067                             struct perf_sample_data *data,
1068                             struct pt_regs *regs);
1069
1070static inline bool
1071is_default_overflow_handler(struct perf_event *event)
1072{
1073        if (likely(event->overflow_handler == perf_event_output_forward))
1074                return true;
1075        if (unlikely(event->overflow_handler == perf_event_output_backward))
1076                return true;
1077        return false;
1078}
1079
1080extern void
1081perf_event_header__init_id(struct perf_event_header *header,
1082                           struct perf_sample_data *data,
1083                           struct perf_event *event);
1084extern void
1085perf_event__output_id_sample(struct perf_event *event,
1086                             struct perf_output_handle *handle,
1087                             struct perf_sample_data *sample);
1088
1089extern void
1090perf_log_lost_samples(struct perf_event *event, u64 lost);
1091
1092static inline bool event_has_any_exclude_flag(struct perf_event *event)
1093{
1094        struct perf_event_attr *attr = &event->attr;
1095
1096        return attr->exclude_idle || attr->exclude_user ||
1097               attr->exclude_kernel || attr->exclude_hv ||
1098               attr->exclude_guest || attr->exclude_host;
1099}
1100
1101static inline bool is_sampling_event(struct perf_event *event)
1102{
1103        return event->attr.sample_period != 0;
1104}
1105
1106/*
1107 * Return 1 for a software event, 0 for a hardware event
1108 */
1109static inline int is_software_event(struct perf_event *event)
1110{
1111        return event->event_caps & PERF_EV_CAP_SOFTWARE;
1112}
1113
1114/*
1115 * Return 1 for event in sw context, 0 for event in hw context
1116 */
1117static inline int in_software_context(struct perf_event *event)
1118{
1119        return event->ctx->pmu->task_ctx_nr == perf_sw_context;
1120}
1121
1122static inline int is_exclusive_pmu(struct pmu *pmu)
1123{
1124        return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
1125}
1126
1127extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
1128
1129extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
1130extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
1131
1132#ifndef perf_arch_fetch_caller_regs
1133static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
1134#endif
1135
1136/*
1137 * When generating a perf sample in-line, instead of from an interrupt /
1138 * exception, we lack a pt_regs. This is typically used from software events
1139 * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
1140 *
1141 * We typically don't need a full set, but (for x86) do require:
1142 * - ip for PERF_SAMPLE_IP
1143 * - cs for user_mode() tests
1144 * - sp for PERF_SAMPLE_CALLCHAIN
1145 * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
1146 *
1147 * NOTE: assumes @regs is otherwise already 0 filled; this is important for
1148 * things like PERF_SAMPLE_REGS_INTR.
1149 */
1150static inline void perf_fetch_caller_regs(struct pt_regs *regs)
1151{
1152        perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
1153}
1154
1155static __always_inline void
1156perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
1157{
1158        if (static_key_false(&perf_swevent_enabled[event_id]))
1159                __perf_sw_event(event_id, nr, regs, addr);
1160}
1161
1162DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
1163
1164/*
1165 * 'Special' version for the scheduler, it hard assumes no recursion,
1166 * which is guaranteed by us not actually scheduling inside other swevents
1167 * because those disable preemption.
1168 */
1169static __always_inline void
1170perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
1171{
1172        if (static_key_false(&perf_swevent_enabled[event_id])) {
1173                struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
1174
1175                perf_fetch_caller_regs(regs);
1176                ___perf_sw_event(event_id, nr, regs, addr);
1177        }
1178}
1179
1180extern struct static_key_false perf_sched_events;
1181
1182static __always_inline bool
1183perf_sw_migrate_enabled(void)
1184{
1185        if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
1186                return true;
1187        return false;
1188}
1189
1190static inline void perf_event_task_migrate(struct task_struct *task)
1191{
1192        if (perf_sw_migrate_enabled())
1193                task->sched_migrated = 1;
1194}
1195
1196static inline void perf_event_task_sched_in(struct task_struct *prev,
1197                                            struct task_struct *task)
1198{
1199        if (static_branch_unlikely(&perf_sched_events))
1200                __perf_event_task_sched_in(prev, task);
1201
1202        if (perf_sw_migrate_enabled() && task->sched_migrated) {
1203                struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
1204
1205                perf_fetch_caller_regs(regs);
1206                ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
1207                task->sched_migrated = 0;
1208        }
1209}
1210
1211static inline void perf_event_task_sched_out(struct task_struct *prev,
1212                                             struct task_struct *next)
1213{
1214        perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
1215
1216        if (static_branch_unlikely(&perf_sched_events))
1217                __perf_event_task_sched_out(prev, next);
1218}
1219
1220extern void perf_event_mmap(struct vm_area_struct *vma);
1221
1222extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
1223                               bool unregister, const char *sym);
1224extern void perf_event_bpf_event(struct bpf_prog *prog,
1225                                 enum perf_bpf_event_type type,
1226                                 u16 flags);
1227
1228extern struct perf_guest_info_callbacks *perf_guest_cbs;
1229extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1230extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
1231
1232extern void perf_event_exec(void);
1233extern void perf_event_comm(struct task_struct *tsk, bool exec);
1234extern void perf_event_namespaces(struct task_struct *tsk);
1235extern void perf_event_fork(struct task_struct *tsk);
1236extern void perf_event_text_poke(const void *addr,
1237                                 const void *old_bytes, size_t old_len,
1238                                 const void *new_bytes, size_t new_len);
1239
1240/* Callchains */
1241DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
1242
1243extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
1244extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
1245extern struct perf_callchain_entry *
1246get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
1247                   u32 max_stack, bool crosstask, bool add_mark);
1248extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
1249extern int get_callchain_buffers(int max_stack);
1250extern void put_callchain_buffers(void);
1251extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
1252extern void put_callchain_entry(int rctx);
1253
1254extern int sysctl_perf_event_max_stack;
1255extern int sysctl_perf_event_max_contexts_per_stack;
1256
1257static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
1258{
1259        if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
1260                struct perf_callchain_entry *entry = ctx->entry;
1261                entry->ip[entry->nr++] = ip;
1262                ++ctx->contexts;
1263                return 0;
1264        } else {
1265                ctx->contexts_maxed = true;
1266                return -1; /* no more room, stop walking the stack */
1267        }
1268}
1269
1270static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
1271{
1272        if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
1273                struct perf_callchain_entry *entry = ctx->entry;
1274                entry->ip[entry->nr++] = ip;
1275                ++ctx->nr;
1276                return 0;
1277        } else {
1278                return -1; /* no more room, stop walking the stack */
1279        }
1280}
1281
1282extern int sysctl_perf_event_paranoid;
1283extern int sysctl_perf_event_mlock;
1284extern int sysctl_perf_event_sample_rate;
1285extern int sysctl_perf_cpu_time_max_percent;
1286
1287extern void perf_sample_event_took(u64 sample_len_ns);
1288
1289int perf_proc_update_handler(struct ctl_table *table, int write,
1290                void *buffer, size_t *lenp, loff_t *ppos);
1291int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
1292                void *buffer, size_t *lenp, loff_t *ppos);
1293int perf_event_max_stack_handler(struct ctl_table *table, int write,
1294                void *buffer, size_t *lenp, loff_t *ppos);
1295
1296/* Access to perf_event_open(2) syscall. */
1297#define PERF_SECURITY_OPEN              0
1298
1299/* Finer grained perf_event_open(2) access control. */
1300#define PERF_SECURITY_CPU               1
1301#define PERF_SECURITY_KERNEL            2
1302#define PERF_SECURITY_TRACEPOINT        3
1303
1304static inline int perf_is_paranoid(void)
1305{
1306        return sysctl_perf_event_paranoid > -1;
1307}
1308
1309static inline int perf_allow_kernel(struct perf_event_attr *attr)
1310{
1311        if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
1312                return -EACCES;
1313
1314        return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
1315}
1316
1317static inline int perf_allow_cpu(struct perf_event_attr *attr)
1318{
1319        if (sysctl_perf_event_paranoid > 0 && !perfmon_capable())
1320                return -EACCES;
1321
1322        return security_perf_event_open(attr, PERF_SECURITY_CPU);
1323}
1324
1325static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
1326{
1327        if (sysctl_perf_event_paranoid > -1 && !perfmon_capable())
1328                return -EPERM;
1329
1330        return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
1331}
1332
1333extern void perf_event_init(void);
1334extern void perf_tp_event(u16 event_type, u64 count, void *record,
1335                          int entry_size, struct pt_regs *regs,
1336                          struct hlist_head *head, int rctx,
1337                          struct task_struct *task);
1338extern void perf_bp_event(struct perf_event *event, void *data);
1339
1340#ifndef perf_misc_flags
1341# define perf_misc_flags(regs) \
1342                (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
1343# define perf_instruction_pointer(regs) instruction_pointer(regs)
1344#endif
1345#ifndef perf_arch_bpf_user_pt_regs
1346# define perf_arch_bpf_user_pt_regs(regs) regs
1347#endif
1348
1349static inline bool has_branch_stack(struct perf_event *event)
1350{
1351        return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
1352}
1353
1354static inline bool needs_branch_stack(struct perf_event *event)
1355{
1356        return event->attr.branch_sample_type != 0;
1357}
1358
1359static inline bool has_aux(struct perf_event *event)
1360{
1361        return event->pmu->setup_aux;
1362}
1363
1364static inline bool is_write_backward(struct perf_event *event)
1365{
1366        return !!event->attr.write_backward;
1367}
1368
1369static inline bool has_addr_filter(struct perf_event *event)
1370{
1371        return event->pmu->nr_addr_filters;
1372}
1373
1374/*
1375 * An inherited event uses parent's filters
1376 */
1377static inline struct perf_addr_filters_head *
1378perf_event_addr_filters(struct perf_event *event)
1379{
1380        struct perf_addr_filters_head *ifh = &event->addr_filters;
1381
1382        if (event->parent)
1383                ifh = &event->parent->addr_filters;
1384
1385        return ifh;
1386}
1387
1388extern void perf_event_addr_filters_sync(struct perf_event *event);
1389
1390extern int perf_output_begin(struct perf_output_handle *handle,
1391                             struct perf_event *event, unsigned int size);
1392extern int perf_output_begin_forward(struct perf_output_handle *handle,
1393                                    struct perf_event *event,
1394                                    unsigned int size);
1395extern int perf_output_begin_backward(struct perf_output_handle *handle,
1396                                      struct perf_event *event,
1397                                      unsigned int size);
1398
1399extern void perf_output_end(struct perf_output_handle *handle);
1400extern unsigned int perf_output_copy(struct perf_output_handle *handle,
1401                             const void *buf, unsigned int len);
1402extern unsigned int perf_output_skip(struct perf_output_handle *handle,
1403                                     unsigned int len);
1404extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
1405                                 struct perf_output_handle *handle,
1406                                 unsigned long from, unsigned long to);
1407extern int perf_swevent_get_recursion_context(void);
1408extern void perf_swevent_put_recursion_context(int rctx);
1409extern u64 perf_swevent_set_period(struct perf_event *event);
1410extern void perf_event_enable(struct perf_event *event);
1411extern void perf_event_disable(struct perf_event *event);
1412extern void perf_event_disable_local(struct perf_event *event);
1413extern void perf_event_disable_inatomic(struct perf_event *event);
1414extern void perf_event_task_tick(void);
1415extern int perf_event_account_interrupt(struct perf_event *event);
1416extern int perf_event_period(struct perf_event *event, u64 value);
1417extern u64 perf_event_pause(struct perf_event *event, bool reset);
1418#else /* !CONFIG_PERF_EVENTS: */
1419static inline void *
1420perf_aux_output_begin(struct perf_output_handle *handle,
1421                      struct perf_event *event)                         { return NULL; }
1422static inline void
1423perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
1424                                                                        { }
1425static inline int
1426perf_aux_output_skip(struct perf_output_handle *handle,
1427                     unsigned long size)                                { return -EINVAL; }
1428static inline void *
1429perf_get_aux(struct perf_output_handle *handle)                         { return NULL; }
1430static inline void
1431perf_event_task_migrate(struct task_struct *task)                       { }
1432static inline void
1433perf_event_task_sched_in(struct task_struct *prev,
1434                         struct task_struct *task)                      { }
1435static inline void
1436perf_event_task_sched_out(struct task_struct *prev,
1437                          struct task_struct *next)                     { }
1438static inline int perf_event_init_task(struct task_struct *child)       { return 0; }
1439static inline void perf_event_exit_task(struct task_struct *child)      { }
1440static inline void perf_event_free_task(struct task_struct *task)       { }
1441static inline void perf_event_delayed_put(struct task_struct *task)     { }
1442static inline struct file *perf_event_get(unsigned int fd)      { return ERR_PTR(-EINVAL); }
1443static inline const struct perf_event *perf_get_event(struct file *file)
1444{
1445        return ERR_PTR(-EINVAL);
1446}
1447static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
1448{
1449        return ERR_PTR(-EINVAL);
1450}
1451static inline int perf_event_read_local(struct perf_event *event, u64 *value,
1452                                        u64 *enabled, u64 *running)
1453{
1454        return -EINVAL;
1455}
1456static inline void perf_event_print_debug(void)                         { }
1457static inline int perf_event_task_disable(void)                         { return -EINVAL; }
1458static inline int perf_event_task_enable(void)                          { return -EINVAL; }
1459static inline int perf_event_refresh(struct perf_event *event, int refresh)
1460{
1461        return -EINVAL;
1462}
1463
1464static inline void
1465perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)     { }
1466static inline void
1467perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)                     { }
1468static inline void
1469perf_bp_event(struct perf_event *event, void *data)                     { }
1470
1471static inline int perf_register_guest_info_callbacks
1472(struct perf_guest_info_callbacks *callbacks)                           { return 0; }
1473static inline int perf_unregister_guest_info_callbacks
1474(struct perf_guest_info_callbacks *callbacks)                           { return 0; }
1475
1476static inline void perf_event_mmap(struct vm_area_struct *vma)          { }
1477
1478typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
1479static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
1480                                      bool unregister, const char *sym) { }
1481static inline void perf_event_bpf_event(struct bpf_prog *prog,
1482                                        enum perf_bpf_event_type type,
1483                                        u16 flags)                      { }
1484static inline void perf_event_exec(void)                                { }
1485static inline void perf_event_comm(struct task_struct *tsk, bool exec)  { }
1486static inline void perf_event_namespaces(struct task_struct *tsk)       { }
1487static inline void perf_event_fork(struct task_struct *tsk)             { }
1488static inline void perf_event_text_poke(const void *addr,
1489                                        const void *old_bytes,
1490                                        size_t old_len,
1491                                        const void *new_bytes,
1492                                        size_t new_len)                 { }
1493static inline void perf_event_init(void)                                { }
1494static inline int  perf_swevent_get_recursion_context(void)             { return -1; }
1495static inline void perf_swevent_put_recursion_context(int rctx)         { }
1496static inline u64 perf_swevent_set_period(struct perf_event *event)     { return 0; }
1497static inline void perf_event_enable(struct perf_event *event)          { }
1498static inline void perf_event_disable(struct perf_event *event)         { }
1499static inline int __perf_event_disable(void *info)                      { return -1; }
1500static inline void perf_event_task_tick(void)                           { }
1501static inline int perf_event_release_kernel(struct perf_event *event)   { return 0; }
1502static inline int perf_event_period(struct perf_event *event, u64 value)
1503{
1504        return -EINVAL;
1505}
1506static inline u64 perf_event_pause(struct perf_event *event, bool reset)
1507{
1508        return 0;
1509}
1510#endif
1511
1512#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
1513extern void perf_restore_debug_store(void);
1514#else
1515static inline void perf_restore_debug_store(void)                       { }
1516#endif
1517
1518static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
1519{
1520        return frag->pad < sizeof(u64);
1521}
1522
1523#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
1524
1525struct perf_pmu_events_attr {
1526        struct device_attribute attr;
1527        u64 id;
1528        const char *event_str;
1529};
1530
1531struct perf_pmu_events_ht_attr {
1532        struct device_attribute                 attr;
1533        u64                                     id;
1534        const char                              *event_str_ht;
1535        const char                              *event_str_noht;
1536};
1537
1538ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
1539                              char *page);
1540
1541#define PMU_EVENT_ATTR(_name, _var, _id, _show)                         \
1542static struct perf_pmu_events_attr _var = {                             \
1543        .attr = __ATTR(_name, 0444, _show, NULL),                       \
1544        .id   =  _id,                                                   \
1545};
1546
1547#define PMU_EVENT_ATTR_STRING(_name, _var, _str)                            \
1548static struct perf_pmu_events_attr _var = {                                 \
1549        .attr           = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
1550        .id             = 0,                                                \
1551        .event_str      = _str,                                             \
1552};
1553
1554#define PMU_FORMAT_ATTR(_name, _format)                                 \
1555static ssize_t                                                          \
1556_name##_show(struct device *dev,                                        \
1557                               struct device_attribute *attr,           \
1558                               char *page)                              \
1559{                                                                       \
1560        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
1561        return sprintf(page, _format "\n");                             \
1562}                                                                       \
1563                                                                        \
1564static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
1565
1566/* Performance counter hotplug functions */
1567#ifdef CONFIG_PERF_EVENTS
1568int perf_event_init_cpu(unsigned int cpu);
1569int perf_event_exit_cpu(unsigned int cpu);
1570#else
1571#define perf_event_init_cpu     NULL
1572#define perf_event_exit_cpu     NULL
1573#endif
1574
1575extern void __weak arch_perf_update_userpage(struct perf_event *event,
1576                                             struct perf_event_mmap_page *userpg,
1577                                             u64 now);
1578
1579#endif /* _LINUX_PERF_EVENT_H */
1580