linux/arch/x86/kernel/cpu/perf_event.h
/*
 * Performance events x86 architecture header
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

/* Debug aid, normally compiled out: trace every wrmsrl() via trace_printk(). */
#if 0
#undef wrmsrl
#define wrmsrl(msr, val)                                                \
do {                                                                    \
        unsigned int _msr = (msr);                                      \
        u64 _val = (val);                                               \
        trace_printk("wrmsrl(%x, %Lx)\n", (unsigned int)(_msr),         \
                        (unsigned long long)(_val));                    \
        native_write_msr((_msr), (u32)(_val), (u32)(_val >> 32));       \
} while (0)
#endif

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slots in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
        EXTRA_REG_NONE  = -1,   /* not used */

        EXTRA_REG_RSP_0 = 0,    /* offcore_response_0 */
        EXTRA_REG_RSP_1 = 1,    /* offcore_response_1 */
        EXTRA_REG_LBR   = 2,    /* lbr_select */
        EXTRA_REG_LDLAT = 3,    /* ld_lat_threshold */
        EXTRA_REG_FE    = 4,    /* fe_* */

        EXTRA_REG_MAX           /* number of entries needed */
};

struct event_constraint {
        union {
                unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
                u64             idxmsk64;
        };
        u64     code;
        u64     cmask;
        int     weight;
        int     overlap;
        int     flags;
};
/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT       0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST          0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW      0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_COMMITTED        0x0008 /* event passed commit_txn */
#define PERF_X86_EVENT_PEBS_LD_HSW      0x0010 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW      0x0020 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL             0x0040 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC          0x0080 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED    0x0100 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT        0x0200 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD      0x0400 /* use PEBS auto-reload */
#define PERF_X86_EVENT_FREERUNNING      0x0800 /* use freerunning PEBS */

struct amd_nb {
        int nb_id;  /* NorthBridge id */
        int refcnt; /* reference count */
        struct perf_event *owners[X86_PMC_IDX_MAX];
        struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS         8

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 */
#define PEBS_FREERUNNING_FLAGS \
        (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
        PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
        PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
        PERF_SAMPLE_TRANSACTION)
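
/*
 * Illustrative sketch (not an in-tree helper): an event can use
 * free-running PEBS only if every bit it samples is covered by
 * PEBS_FREERUNNING_FLAGS, i.e. nothing in its sample_type needs the PMI.
 */
#if 0
static bool example_pebs_is_freerunning(struct perf_event *event)
{
        return !(event->attr.sample_type & ~PEBS_FREERUNNING_FLAGS);
}
#endif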

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64-bit fields.
 */
struct debug_store {
        u64     bts_buffer_base;
        u64     bts_index;
        u64     bts_absolute_maximum;
        u64     bts_interrupt_threshold;
        u64     pebs_buffer_base;
        u64     pebs_index;
        u64     pebs_absolute_maximum;
        u64     pebs_interrupt_threshold;
        u64     pebs_event_reset[MAX_PEBS_EVENTS];
};
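
/*
 * Sketch of how the PEBS fields relate (hypothetical buffer sizes, not
 * the in-tree DS setup code): hardware appends records at pebs_index
 * and raises a PMI once pebs_interrupt_threshold is reached.
 */
#if 0
static void example_init_pebs_buffer(struct debug_store *ds, void *buf,
                                     u64 size, u64 record_size)
{
        ds->pebs_buffer_base         = (u64)(unsigned long)buf;
        ds->pebs_index               = ds->pebs_buffer_base;
        ds->pebs_absolute_maximum    = ds->pebs_buffer_base + size;
        /* PMI after every record, as precise sampling requires */
        ds->pebs_interrupt_threshold = ds->pebs_buffer_base + record_size;
}
#endif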

/*
 * Per register state.
 */
struct er_account {
        raw_spinlock_t          lock;   /* per-core: protect structure */
        u64                     config; /* extra MSR config */
        u64                     reg;    /* extra MSR number */
        atomic_t                ref;    /* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
        struct er_account       regs[EXTRA_REG_MAX];
        int                     refcnt;         /* per-core: #HT threads */
        unsigned                core_id;        /* per-core: core id */
};

enum intel_excl_state_type {
        INTEL_EXCL_UNUSED    = 0, /* counter is unused */
        INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
        INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
        enum intel_excl_state_type state[X86_PMC_IDX_MAX];
        bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
        raw_spinlock_t  lock;

        struct intel_excl_states states[2];

        union {
                u16     has_exclusive[2];
                u32     exclusive_present;
        };

        int             refcnt;         /* per-core: #HT threads */
        unsigned        core_id;        /* per-core: core id */
};

#define MAX_LBR_ENTRIES         32

enum {
        X86_PERF_KFREE_SHARED = 0,
        X86_PERF_KFREE_EXCL   = 1,
        X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
        /*
         * Generic x86 PMC bits
         */
        struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
        unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long           running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int                     enabled;

        int                     n_events; /* the # of events in the below arrays */
        int                     n_added;  /* the # last events in the below arrays;
                                             they've never been enabled yet */
        int                     n_txn;    /* the # last events in the below arrays;
                                             added in the current transaction */
        int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
        u64                     tags[X86_PMC_IDX_MAX];

        struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
        struct event_constraint *event_constraint[X86_PMC_IDX_MAX];

        int                     n_excl; /* the number of exclusive events */

        unsigned int            txn_flags;
        int                     is_fake;

        /*
         * Intel DebugStore bits
         */
        struct debug_store      *ds;
        u64                     pebs_enabled;

        /*
         * Intel LBR bits
         */
        int                             lbr_users;
        void                            *lbr_context;
        struct perf_branch_stack        lbr_stack;
        struct perf_branch_entry        lbr_entries[MAX_LBR_ENTRIES];
        struct er_account               *lbr_sel;
        u64                             br_sel;

        /*
         * Intel host/guest exclude bits
         */
        u64                             intel_ctrl_guest_mask;
        u64                             intel_ctrl_host_mask;
        struct perf_guest_switch_msr    guest_switch_msrs[X86_PMC_IDX_MAX];

        /*
         * Intel checkpoint mask
         */
        u64                             intel_cp_status;

        /*
         * manage shared (per-core, per-cpu) registers
         * used on Intel NHM/WSM/SNB
         */
        struct intel_shared_regs        *shared_regs;
        /*
         * manage exclusive counter access between hyperthreads
         */
        struct event_constraint *constraint_list; /* in enable order */
        struct intel_excl_cntrs         *excl_cntrs;
        int excl_thread_id; /* 0 or 1 */

        /*
         * AMD specific bits
         */
        struct amd_nb                   *amd_nb;
        /* Inverted mask of bits to clear in the perf_ctr ctrl registers */
        u64                             perf_ctr_virt_mask;

        void                            *kfree_on_online[X86_PERF_KFREE_MAX];
};

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
        { .idxmsk64 = (n) },            \
        .code = (c),                    \
        .cmask = (m),                   \
        .weight = (w),                  \
        .overlap = (o),                 \
        .flags = f,                     \
}

#define EVENT_CONSTRAINT(c, n, m)       \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
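
/*
 * Usage sketch with made-up values (real tables live in the model
 * specific .c files): constrain event code 0x3c to the four general
 * purpose counters, i.e. index mask bits 0-3.
 */
#if 0
static struct event_constraint example_constraint =
        EVENT_CONSTRAINT(0x3c, 0xf, ARCH_PERFMON_EVENTSEL_EVENT);
#endif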

#define INTEL_EXCLEVT_CONSTRAINT(c, n)  \
        __EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
                           0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and their counter masks must be kept to a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)       \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)    \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * Filter mask used to validate fixed counter events.
 * The following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)    \
        EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
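
/*
 * Example in the style of the model tables: fixed counter n occupies
 * index mask bit 32+n, so pinning the architectural INST_RETIRED.ANY
 * event (0x00c0) to fixed counter 0 sets bit 32.
 */
#if 0
static struct event_constraint example_fixed0 =
        FIXED_EVENT_CONSTRAINT(0x00c0, 0); /* idxmsk64 = 1ULL << 32 */
#endif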

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)   \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)     \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n) \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)      \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)      \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)     \
        EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, \
                          PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, \
                          PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, \
                          PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)

/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c) \
        for ((e) = (c); (e)->weight != -1; (e)++)
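
/*
 * Iteration sketch (a simplified shape of x86_get_event_constraints()):
 * walk a constraint table until the end marker, returning the first
 * entry whose masked code matches the event's config.
 */
#if 0
static struct event_constraint *
example_find_constraint(struct event_constraint *table, u64 config)
{
        struct event_constraint *c;

        for_each_event_constraint(c, table) {
                if ((config & c->cmask) == c->code)
                        return c;
        }
        return NULL;
}
#endif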

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between the PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_events.
 */
struct extra_reg {
        unsigned int            event;
        unsigned int            msr;
        u64                     config_mask;
        u64                     valid_mask;
        int                     idx;  /* per_xxx->regs[] reg index */
        bool                    extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {      \
        .event = (e),                   \
        .msr = (ms),                    \
        .config_mask = (m),             \
        .valid_mask = (vm),             \
        .idx = EXTRA_REG_##i,           \
        .extra_msr_access = true,       \
        }

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)      \
        EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
        EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
                        ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
        INTEL_UEVENT_EXTRA_REG(c, \
                               MSR_PEBS_LD_LAT_THRESHOLD, \
                               0xffff, \
                               LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)
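
/*
 * Table sketch modelled on the Nehalem offcore setup: route the
 * OFFCORE_RESPONSE event (code 0xb7, umask 0x01) through
 * MSR_OFFCORE_RSP_0 and slot it at EXTRA_REG_RSP_0.
 */
#if 0
static struct extra_reg example_extra_regs[] = {
        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
        EVENT_EXTRA_END
};
#endif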

union perf_capabilities {
        struct {
                u64     lbr_format:6;
                u64     pebs_trap:1;
                u64     pebs_arch_reg:1;
                u64     pebs_format:4;
                u64     smm_freeze:1;
                /*
                 * PMU supports separate counter range for writing
                 * values > 32 bits.
                 */
                u64     full_width_write:1;
        };
        u64     capabilities;
};

struct x86_pmu_quirk {
        struct x86_pmu_quirk *next;
        void (*func)(void);
};

union x86_pmu_config {
        struct {
                u64 event:8,
                    umask:8,
                    usr:1,
                    os:1,
                    edge:1,
                    pc:1,
                    interrupt:1,
                    __reserved1:1,
                    en:1,
                    inv:1,
                    cmask:8,
                    event2:4,
                    __reserved2:4,
                    go:1,
                    ho:1;
        } bits;
        u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
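
/*
 * Usage sketch: build a raw config with designated initializers rather
 * than open-coded shifts; unnamed fields default to zero.
 */
#if 0
static u64 example_config(void)
{
        /* event 0xc0, umask 0x01, counting in both user and kernel mode */
        return X86_CONFIG(.event = 0xc0, .umask = 0x01, .usr = 1, .os = 1);
}
#endif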

enum {
        x86_lbr_exclusive_lbr,
        x86_lbr_exclusive_bts,
        x86_lbr_exclusive_pt,
        x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
        /*
         * Generic x86 PMC bits
         */
        const char      *name;
        int             version;
        int             (*handle_irq)(struct pt_regs *);
        void            (*disable_all)(void);
        void            (*enable_all)(int added);
        void            (*enable)(struct perf_event *);
        void            (*disable)(struct perf_event *);
        int             (*hw_config)(struct perf_event *event);
        int             (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
        unsigned        eventsel;
        unsigned        perfctr;
        int             (*addr_offset)(int index, bool eventsel);
        int             (*rdpmc_index)(int index);
        u64             (*event_map)(int);
        int             max_events;
        int             num_counters;
        int             num_counters_fixed;
        int             cntval_bits;
        u64             cntval_mask;
        union {
                unsigned long events_maskl;
                unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
        };
        int             events_mask_len;
        int             apic;
        u64             max_period;
        struct event_constraint *
                        (*get_event_constraints)(struct cpu_hw_events *cpuc,
                                                 int idx,
                                                 struct perf_event *event);

        void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                                 struct perf_event *event);

        void            (*start_scheduling)(struct cpu_hw_events *cpuc);

        void            (*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

        void            (*stop_scheduling)(struct cpu_hw_events *cpuc);

        struct event_constraint *event_constraints;
        struct x86_pmu_quirk *quirks;
        int             perfctr_second_write;
        bool            late_ack;
        unsigned        (*limit_period)(struct perf_event *event, unsigned l);

        /*
         * sysfs attrs
         */
        int             attr_rdpmc_broken;
        int             attr_rdpmc;
        struct attribute **format_attrs;
        struct attribute **event_attrs;

        ssize_t         (*events_sysfs_show)(char *page, u64 config);
        struct attribute **cpu_events;

        /*
         * CPU Hotplug hooks
         */
        int             (*cpu_prepare)(int cpu);
        void            (*cpu_starting)(int cpu);
        void            (*cpu_dying)(int cpu);
        void            (*cpu_dead)(int cpu);

        void            (*check_microcode)(void);
        void            (*sched_task)(struct perf_event_context *ctx,
                                      bool sched_in);

        /*
         * Intel Arch Perfmon v2+
         */
        u64                     intel_ctrl;
        union perf_capabilities intel_cap;

        /*
         * Intel DebugStore bits
         */
        unsigned int    bts             :1,
                        bts_active      :1,
                        pebs            :1,
                        pebs_active     :1,
                        pebs_broken     :1;
        int             pebs_record_size;
        void            (*drain_pebs)(struct pt_regs *regs);
        struct event_constraint *pebs_constraints;
        void            (*pebs_aliases)(struct perf_event *event);
        int             max_pebs_events;
        unsigned long   free_running_flags;

        /*
         * Intel LBR
         */
        unsigned long   lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
        int             lbr_nr;                    /* hardware stack size */
        u64             lbr_sel_mask;              /* LBR_SELECT valid bits */
        const int       *lbr_sel_map;              /* lbr_select mappings */
        bool            lbr_double_abort;          /* duplicated lbr aborts */

        /*
         * Intel PT/LBR/BTS are exclusive
         */
        atomic_t        lbr_exclusive[x86_lbr_exclusive_max];

        /*
         * Extra registers for events
         */
        struct extra_reg *extra_regs;
        unsigned int flags;

        /*
         * Intel host/guest support (KVM)
         */
        struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};

struct x86_perf_task_context {
        u64 lbr_from[MAX_LBR_ENTRIES];
        u64 lbr_to[MAX_LBR_ENTRIES];
        u64 lbr_info[MAX_LBR_ENTRIES];
        int tos;
        int lbr_callstack_users;
        int lbr_stack_state;
};

#define x86_add_quirk(func_)                                            \
do {                                                                    \
        static struct x86_pmu_quirk __quirk __initdata = {              \
                .func = func_,                                          \
        };                                                              \
        __quirk.next = x86_pmu.quirks;                                  \
        x86_pmu.quirks = &__quirk;                                      \
} while (0)
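
/*
 * Usage sketch with a hypothetical quirk: x86_add_quirk() must run from
 * __init context, since the quirk node is __initdata and the list is
 * only walked once during PMU initialization.
 */
#if 0
static void __init example_quirk(void)
{
        /* e.g. adjust x86_pmu.event_constraints for a broken stepping */
}

static void __init example_pmu_setup(void)
{
        x86_add_quirk(example_quirk);
}
#endif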

/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING    0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1        0x2 /* has 2 equivalent offcore_rsp regs   */
#define PMU_FL_EXCL_CNTRS       0x4 /* has exclusive counter requirements  */
#define PMU_FL_EXCL_ENABLED     0x8 /* exclusive counter active */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)                                          \
static struct perf_pmu_events_attr EVENT_VAR(_id) = {                   \
        .attr           = __ATTR(_name, 0444, events_sysfs_show, NULL), \
        .id             = PERF_COUNT_HW_##_id,                          \
        .event_str      = NULL,                                         \
};

#define EVENT_ATTR_STR(_name, v, str)                                   \
static struct perf_pmu_events_attr event_attr_##v = {                   \
        .attr           = __ATTR(_name, 0444, events_sysfs_show, NULL), \
        .id             = 0,                                            \
        .event_str      = str,                                          \
};
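
/*
 * Usage sketch modelled on the in-tree mem-loads attribute: publish a
 * named event string via sysfs and reference it through EVENT_PTR().
 */
#if 0
EVENT_ATTR_STR(mem-loads, mem_ld_example, "event=0x0b,umask=0x10,ldlat=3");

static struct attribute *example_events_attrs[] = {
        EVENT_PTR(mem_ld_example),
        NULL,
};
#endif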

extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
        return  x86_pmu.lbr_sel_map &&
                x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per-model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
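
/*
 * Lookup sketch: resolve a generalized cache event triple with the C()
 * shorthand; a result of 0 means unsupported, -1 means nonsensical.
 */
#if 0
static u64 example_l1d_read_access_id(void)
{
        return hw_cache_event_ids[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)];
}
#endif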

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
        return x86_pmu.eventsel + (x86_pmu.addr_offset ?
                                   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
        return x86_pmu.perfctr + (x86_pmu.addr_offset ?
                                  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
        return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
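
/*
 * Address sketch: without an addr_offset callback the MSRs are
 * contiguous from the base, so counter 2 programs eventsel base + 2 and
 * reads counter base + 2.
 */
#if 0
static void example_program_counter(int index, u64 config)
{
        wrmsrl(x86_pmu_config_addr(index), config); /* event select  */
        wrmsrl(x86_pmu_event_addr(index), 0);       /* counter value */
}
#endif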

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
                                          u64 enable_mask)
{
        u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

        if (hwc->extra_reg.reg)
                wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
        wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
                        int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
        return ip > PAGE_OFFSET;
#else
        /* On 64-bit, kernel addresses occupy the upper half: sign bit set. */
        return (long)ip < 0;
#endif
}
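
/*
 * Usage sketch: kernel_ip() is how sample post-processing decides the
 * privilege level to report for an instruction pointer.
 */
#if 0
static u16 example_misc_flags(unsigned long ip)
{
        return kernel_ip(ip) ? PERF_RECORD_MISC_KERNEL : PERF_RECORD_MISC_USER;
}
#endif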

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically, segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
        regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
        if (regs->flags & X86_VM_MASK)
                regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
        regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

struct attribute **merge_attr(struct attribute **a, struct attribute **b);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
        return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_has_bts(struct perf_event *event)
{
        if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
            !event->attr.freq && event->hw.sample_period == 1)
                return true;

        return false;
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_enable(struct perf_event *event);

void intel_pmu_lbr_disable(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
                          char *page);

static inline int is_ht_workaround_enabled(void)
{
        return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
        return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
        return NULL;
}

static inline int is_ht_workaround_enabled(void)
{
        return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */