linux/arch/x86/events/perf_event.h
/*
 * Performance events x86 architecture header
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

#include <asm/intel_ds.h>

/* To enable MSR tracing please use the generic trace points. */

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slots in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
        EXTRA_REG_NONE  = -1,   /* not used */

        EXTRA_REG_RSP_0 = 0,    /* offcore_response_0 */
        EXTRA_REG_RSP_1 = 1,    /* offcore_response_1 */
        EXTRA_REG_LBR   = 2,    /* lbr_select */
        EXTRA_REG_LDLAT = 3,    /* ld_lat_threshold */
        EXTRA_REG_FE    = 4,    /* fe_* */

        EXTRA_REG_MAX           /* number of entries needed */
};

struct event_constraint {
        union {
                unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
                u64             idxmsk64;
        };
        u64     code;
        u64     cmask;
        int     weight;
        int     overlap;
        int     flags;
};
/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT       0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST          0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW      0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_COMMITTED        0x0008 /* event passed commit_txn */
#define PERF_X86_EVENT_PEBS_LD_HSW      0x0010 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW      0x0020 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL             0x0040 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC          0x0080 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED    0x0100 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT        0x0200 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD      0x0400 /* use PEBS auto-reload */
#define PERF_X86_EVENT_LARGE_PEBS       0x0800 /* use large PEBS */


struct amd_nb {
        int nb_id;  /* NorthBridge id */
        int refcnt; /* reference count */
        struct perf_event *owners[X86_PMC_IDX_MAX];
        struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define PEBS_COUNTER_MASK       ((1ULL << MAX_PEBS_EVENTS) - 1)

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 * REGS_USER can be handled for events limited to ring 3.
 */
#define LARGE_PEBS_FLAGS \
        (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
        PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
        PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
        PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
        PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
        PERF_SAMPLE_PERIOD)

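/*
 * Illustration (not part of this header): an event whose sample_type is a
 * subset of LARGE_PEBS_FLAGS, e.g.
 *
 *   attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD;
 *
 * may qualify for the multi-record PEBS buffer (subject to the other
 * large-PEBS conditions, such as a fixed sample period), taking one PMI per
 * buffer fill rather than one per sample.
 */
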
#define PEBS_REGS \
        (PERF_REG_X86_AX | \
         PERF_REG_X86_BX | \
         PERF_REG_X86_CX | \
         PERF_REG_X86_DX | \
         PERF_REG_X86_DI | \
         PERF_REG_X86_SI | \
         PERF_REG_X86_SP | \
         PERF_REG_X86_BP | \
         PERF_REG_X86_IP | \
         PERF_REG_X86_FLAGS | \
         PERF_REG_X86_R8 | \
         PERF_REG_X86_R9 | \
         PERF_REG_X86_R10 | \
         PERF_REG_X86_R11 | \
         PERF_REG_X86_R12 | \
         PERF_REG_X86_R13 | \
         PERF_REG_X86_R14 | \
         PERF_REG_X86_R15)

/*
 * Per register state.
 */
struct er_account {
        raw_spinlock_t      lock;       /* per-core: protect structure */
        u64                 config;     /* extra MSR config */
        u64                 reg;        /* extra MSR number */
        atomic_t            ref;        /* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
        struct er_account       regs[EXTRA_REG_MAX];
        int                     refcnt;         /* per-core: #HT threads */
        unsigned                core_id;        /* per-core: core id */
};

enum intel_excl_state_type {
        INTEL_EXCL_UNUSED    = 0, /* counter is unused */
        INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
        INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
        enum intel_excl_state_type state[X86_PMC_IDX_MAX];
        bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
        raw_spinlock_t  lock;

        struct intel_excl_states states[2];

        union {
                u16     has_exclusive[2];
                u32     exclusive_present;
        };

        int             refcnt;         /* per-core: #HT threads */
        unsigned        core_id;        /* per-core: core id */
};

struct x86_perf_task_context;
#define MAX_LBR_ENTRIES         32

enum {
        X86_PERF_KFREE_SHARED = 0,
        X86_PERF_KFREE_EXCL   = 1,
        X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
        /*
         * Generic x86 PMC bits
         */
        struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
        unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long           running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int                     enabled;

        int                     n_events; /* the # of events in the below arrays */
        int                     n_added;  /* the # last events in the below arrays;
                                             they've never been enabled yet */
        int                     n_txn;    /* the # last events in the below arrays;
                                             added in the current transaction */
        int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
        u64                     tags[X86_PMC_IDX_MAX];

        struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
        struct event_constraint *event_constraint[X86_PMC_IDX_MAX];

        int                     n_excl; /* the number of exclusive events */

        unsigned int            txn_flags;
        int                     is_fake;

        /*
         * Intel DebugStore bits
         */
        struct debug_store      *ds;
        void                    *ds_pebs_vaddr;
        void                    *ds_bts_vaddr;
        u64                     pebs_enabled;
        int                     n_pebs;
        int                     n_large_pebs;

        /*
         * Intel LBR bits
         */
        int                             lbr_users;
        struct perf_branch_stack        lbr_stack;
        struct perf_branch_entry        lbr_entries[MAX_LBR_ENTRIES];
        struct er_account               *lbr_sel;
        u64                             br_sel;
        struct x86_perf_task_context    *last_task_ctx;
        int                             last_log_id;

        /*
         * Intel host/guest exclude bits
         */
        u64                             intel_ctrl_guest_mask;
        u64                             intel_ctrl_host_mask;
        struct perf_guest_switch_msr    guest_switch_msrs[X86_PMC_IDX_MAX];

        /*
         * Intel checkpoint mask
         */
        u64                             intel_cp_status;

        /*
         * manage shared (per-core, per-cpu) registers
         * used on Intel NHM/WSM/SNB
         */
        struct intel_shared_regs        *shared_regs;
        /*
         * manage exclusive counter access between hyperthreads
         */
        struct event_constraint *constraint_list; /* in enable order */
        struct intel_excl_cntrs         *excl_cntrs;
        int excl_thread_id; /* 0 or 1 */

        /*
         * AMD specific bits
         */
        struct amd_nb                   *amd_nb;
        /* Inverted mask of bits to clear in the perf_ctr ctrl registers */
        u64                             perf_ctr_virt_mask;

        void                            *kfree_on_online[X86_PERF_KFREE_MAX];
};

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
        { .idxmsk64 = (n) },            \
        .code = (c),                    \
        .cmask = (m),                   \
        .weight = (w),                  \
        .overlap = (o),                 \
        .flags = f,                     \
}

#define EVENT_CONSTRAINT(c, n, m)       \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)  \
        __EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
                           0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may fail to schedule the events then. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!) which
 * will increase scheduling cycles for an over-committed system
 * dramatically.  The number of such EVENT_CONSTRAINT_OVERLAP() macros
 * and its counter masks must be kept at a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)       \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)    \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

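/*
 * Illustration (not part of this header), assuming a hypothetical event
 * code 0x12 that can only run on general-purpose counters 0 and 1: the
 * second argument is the counter bitmask, so a model-specific table
 * could contain
 *
 *   static struct event_constraint intel_example_event_constraints[] = {
 *           INTEL_EVENT_CONSTRAINT(0x12, 0x3),
 *           EVENT_CONSTRAINT_END
 *   };
 */
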
/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 *  The other filters are supported by fixed counters.
 *  The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)    \
        EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)

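/*
 * For reference (mirroring how the Intel model tables in this subsystem
 * use the macro): fixed counter n occupies bit 32+n of the counter
 * bitmask, e.g.
 *
 *   FIXED_EVENT_CONSTRAINT(0x00c0, 0)   INST_RETIRED.ANY
 *   FIXED_EVENT_CONSTRAINT(0x003c, 1)   CPU_CLK_UNHALTED.CORE
 *   FIXED_EVENT_CONSTRAINT(0x0300, 2)   CPU_CLK_UNHALTED.REF
 */
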
/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)   \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)       \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)     \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n) \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)      \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)      \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)     \
        EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, \
                          PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, \
                          PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, \
                          PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)


/*
 * We define the end marker as having a weight of -1
 * to enable blacklisting of events using a counter bitmask
 * of zero and thus a weight of zero.
 * The end marker has a weight that cannot possibly be
 * obtained from counting the bits in the bitmask.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c) \
        for ((e) = (c); (e)->weight != -1; (e)++)

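/*
 * Illustration (not part of this header): walking a constraint table up to
 * its EVENT_CONSTRAINT_END marker, with hypothetical locals c/constraints,
 * in the style of the generic constraint lookup:
 *
 *   struct event_constraint *c;
 *
 *   for_each_event_constraint(c, constraints) {
 *           if ((event->hw.config & c->cmask) == c->code)
 *                   return c;
 *   }
 */
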
/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in cpu_hw_events.
 */
struct extra_reg {
        unsigned int            event;
        unsigned int            msr;
        u64                     config_mask;
        u64                     valid_mask;
        int                     idx;  /* per_xxx->regs[] reg index */
        bool                    extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {      \
        .event = (e),                   \
        .msr = (ms),                    \
        .config_mask = (m),             \
        .valid_mask = (vm),             \
        .idx = EXTRA_REG_##i,           \
        .extra_msr_access = true,       \
        }

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)      \
        EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
        EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
                        ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
        INTEL_UEVENT_EXTRA_REG(c, \
                               MSR_PEBS_LD_LAT_THRESHOLD, \
                               0xffff, \
                               LDLAT)

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)

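/*
 * Illustration (not part of this header), patterned on the Nehalem tables
 * elsewhere in this subsystem: OFFCORE_RESPONSE (event 0xb7, umask 0x01)
 * carries its request/response bits in MSR_OFFCORE_RSP_0, so a model's
 * extra_regs table can be declared as
 *
 *   static struct extra_reg intel_example_extra_regs[] = {
 *           INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
 *           EVENT_EXTRA_END
 *   };
 */
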
union perf_capabilities {
        struct {
                u64     lbr_format:6;
                u64     pebs_trap:1;
                u64     pebs_arch_reg:1;
                u64     pebs_format:4;
                u64     smm_freeze:1;
                /*
                 * PMU supports separate counter range for writing
                 * values > 32bit.
                 */
                u64     full_width_write:1;
        };
        u64     capabilities;
};

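/*
 * Illustration (not part of this header): the union is filled from
 * MSR_IA32_PERF_CAPABILITIES during PMU init, after which the bitfields
 * can be tested directly, e.g.
 *
 *   u64 capabilities;
 *
 *   rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
 *   x86_pmu.intel_cap.capabilities = capabilities;
 *   if (x86_pmu.intel_cap.full_width_write)
 *           ...enable the full-width counter write path...
 */
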
struct x86_pmu_quirk {
        struct x86_pmu_quirk *next;
        void (*func)(void);
};

union x86_pmu_config {
        struct {
                u64 event:8,
                    umask:8,
                    usr:1,
                    os:1,
                    edge:1,
                    pc:1,
                    interrupt:1,
                    __reserved1:1,
                    en:1,
                    inv:1,
                    cmask:8,
                    event2:4,
                    __reserved2:4,
                    go:1,
                    ho:1;
        } bits;
        u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value

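/*
 * Illustration (not part of this header): X86_CONFIG() assembles a raw
 * event-select value from named bitfields, e.g. for the architectural
 * INST_RETIRED.ANY_P encoding (event 0xc0, umask 0x00):
 *
 *   u64 config = X86_CONFIG(.event=0xc0, .umask=0x00, .inv=0, .cmask=0);
 */
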
enum {
        x86_lbr_exclusive_lbr,
        x86_lbr_exclusive_bts,
        x86_lbr_exclusive_pt,
        x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
        /*
         * Generic x86 PMC bits
         */
        const char      *name;
        int             version;
        int             (*handle_irq)(struct pt_regs *);
        void            (*disable_all)(void);
        void            (*enable_all)(int added);
        void            (*enable)(struct perf_event *);
        void            (*disable)(struct perf_event *);
        void            (*add)(struct perf_event *);
        void            (*del)(struct perf_event *);
        void            (*read)(struct perf_event *event);
        int             (*hw_config)(struct perf_event *event);
        int             (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
        unsigned        eventsel;
        unsigned        perfctr;
        int             (*addr_offset)(int index, bool eventsel);
        int             (*rdpmc_index)(int index);
        u64             (*event_map)(int);
        int             max_events;
        int             num_counters;
        int             num_counters_fixed;
        int             cntval_bits;
        u64             cntval_mask;
        union {
                        unsigned long events_maskl;
                        unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
        };
        int             events_mask_len;
        int             apic;
        u64             max_period;
        struct event_constraint *
                        (*get_event_constraints)(struct cpu_hw_events *cpuc,
                                                 int idx,
                                                 struct perf_event *event);

        void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                                 struct perf_event *event);

        void            (*start_scheduling)(struct cpu_hw_events *cpuc);

        void            (*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

        void            (*stop_scheduling)(struct cpu_hw_events *cpuc);

        struct event_constraint *event_constraints;
        struct x86_pmu_quirk *quirks;
        int             perfctr_second_write;
        u64             (*limit_period)(struct perf_event *event, u64 l);

        /* PMI handler bits */
        unsigned int    late_ack                :1,
                        counter_freezing        :1;
        /*
         * sysfs attrs
         */
        int             attr_rdpmc_broken;
        int             attr_rdpmc;
        struct attribute **format_attrs;
        struct attribute **event_attrs;
        struct attribute **caps_attrs;

        ssize_t         (*events_sysfs_show)(char *page, u64 config);
        struct attribute **cpu_events;

        unsigned long   attr_freeze_on_smi;
        struct attribute **attrs;

        /*
         * CPU Hotplug hooks
         */
        int             (*cpu_prepare)(int cpu);
        void            (*cpu_starting)(int cpu);
        void            (*cpu_dying)(int cpu);
        void            (*cpu_dead)(int cpu);

        void            (*check_microcode)(void);
        void            (*sched_task)(struct perf_event_context *ctx,
                                      bool sched_in);

        /*
         * Intel Arch Perfmon v2+
         */
        u64                     intel_ctrl;
        union perf_capabilities intel_cap;

        /*
         * Intel DebugStore bits
         */
        unsigned int    bts             :1,
                        bts_active      :1,
                        pebs            :1,
                        pebs_active     :1,
                        pebs_broken     :1,
                        pebs_prec_dist  :1,
                        pebs_no_tlb     :1;
        int             pebs_record_size;
        int             pebs_buffer_size;
        void            (*drain_pebs)(struct pt_regs *regs);
        struct event_constraint *pebs_constraints;
        void            (*pebs_aliases)(struct perf_event *event);
        int             max_pebs_events;
        unsigned long   large_pebs_flags;

        /*
         * Intel LBR
         */
        unsigned long   lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
        int             lbr_nr;                    /* hardware stack size */
        u64             lbr_sel_mask;              /* LBR_SELECT valid bits */
        const int       *lbr_sel_map;              /* lbr_select mappings */
        bool            lbr_double_abort;          /* duplicated lbr aborts */
        bool            lbr_pt_coexist;            /* (LBR|BTS) may coexist with PT */

        /*
         * Intel PT/LBR/BTS are exclusive
         */
        atomic_t        lbr_exclusive[x86_lbr_exclusive_max];

        /*
         * AMD bits
         */
        unsigned int    amd_nb_constraints : 1;

        /*
         * Extra registers for events
         */
        struct extra_reg *extra_regs;
        unsigned int flags;

        /*
         * Intel host/guest support (KVM)
         */
        struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};

struct x86_perf_task_context {
        u64 lbr_from[MAX_LBR_ENTRIES];
        u64 lbr_to[MAX_LBR_ENTRIES];
        u64 lbr_info[MAX_LBR_ENTRIES];
        int tos;
        int valid_lbrs;
        int lbr_callstack_users;
        int lbr_stack_state;
        int log_id;
};

#define x86_add_quirk(func_)                                            \
do {                                                                    \
        static struct x86_pmu_quirk __quirk __initdata = {              \
                .func = func_,                                          \
        };                                                              \
        __quirk.next = x86_pmu.quirks;                                  \
        x86_pmu.quirks = &__quirk;                                      \
} while (0)

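/*
 * Illustration (not part of this header), with a hypothetical quirk
 * function: quirks are prepended to the x86_pmu.quirks list and run once
 * from the init path, e.g.
 *
 *   static __init void example_model_quirk(void)
 *   {
 *           ...model-specific fixup...
 *   }
 *
 *   x86_add_quirk(example_model_quirk);
 */
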
/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING    0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1        0x2 /* has 2 equivalent offcore_rsp regs   */
#define PMU_FL_EXCL_CNTRS       0x4 /* has exclusive counter requirements  */
#define PMU_FL_EXCL_ENABLED     0x8 /* exclusive counter active */
#define PMU_FL_PEBS_ALL         0x10 /* all events are valid PEBS events */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)                                          \
static struct perf_pmu_events_attr EVENT_VAR(_id) = {                   \
        .attr           = __ATTR(_name, 0444, events_sysfs_show, NULL), \
        .id             = PERF_COUNT_HW_##_id,                          \
        .event_str      = NULL,                                         \
};

#define EVENT_ATTR_STR(_name, v, str)                                   \
static struct perf_pmu_events_attr event_attr_##v = {                   \
        .attr           = __ATTR(_name, 0444, events_sysfs_show, NULL), \
        .id             = 0,                                            \
        .event_str      = str,                                          \
};

#define EVENT_ATTR_STR_HT(_name, v, noht, ht)                           \
static struct perf_pmu_events_ht_attr event_attr_##v = {                \
        .attr           = __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
        .id             = 0,                                            \
        .event_str_noht = noht,                                         \
        .event_str_ht   = ht,                                           \
}

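/*
 * Illustration (not part of this header), patterned on the Intel event
 * lists elsewhere in this subsystem: EVENT_ATTR_STR() publishes a named
 * event encoding through sysfs, e.g.
 *
 *   EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 *
 * which makes "mem-loads" visible under the PMU's events directory.
 */
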
extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
        return  x86_pmu.lbr_sel_map &&
                x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
        return x86_pmu.eventsel + (x86_pmu.addr_offset ?
                                   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
        return x86_pmu.perfctr + (x86_pmu.addr_offset ?
                                  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
        return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}

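/*
 * Illustration (not part of this header): with no addr_offset hook the
 * eventsel/perfctr MSRs are contiguous, so on a PMU whose bases are the
 * architectural MSR_ARCH_PERFMON_EVENTSEL0 (0x186) and
 * MSR_ARCH_PERFMON_PERFCTR0 (0xc1):
 *
 *   x86_pmu_config_addr(2) == 0x186 + 2 == 0x188
 *   x86_pmu_event_addr(2)  == 0xc1  + 2 == 0xc3
 *
 * PMUs with non-contiguous counter banks (e.g. AMD Fam15h) install an
 * addr_offset hook to compute the spacing instead.
 */
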
int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

int x86_pmu_max_precise(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
                                          u64 enable_mask)
{
        u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

        if (hwc->extra_reg.reg)
                wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
        wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
                        int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
        return ip > PAGE_OFFSET;
#else
        return (long)ip < 0;
#endif
}

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address, nothing
 * much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
        regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
        if (regs->flags & X86_VM_MASK)
                regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
        regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

struct attribute **merge_attr(struct attribute **a, struct attribute **b);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
                          char *page);
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
                          char *page);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
        return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_has_bts(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        unsigned int hw_event, bts_event;

        if (event->attr.freq)
                return false;

        hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
        bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

        return hw_event == bts_event && hwc->sample_period == 1;
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_glm_pebs_event_constraints[];

extern struct event_constraint intel_glp_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_bdw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_add(struct perf_event *event);

void intel_pmu_pebs_del(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_pmu_auto_reload_read(struct perf_event *event);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

u64 lbr_from_signext_quirk_wr(u64 val);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_add(struct perf_event *event);

void intel_pmu_lbr_del(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_slm(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

void intel_pmu_lbr_init_knl(void);

void intel_pmu_pebs_data_source_nhm(void);

void intel_pmu_pebs_data_source_skl(bool pmem);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

static inline int is_ht_workaround_enabled(void)
{
        return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
        return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
        return NULL;
}

static inline int is_ht_workaround_enabled(void)
{
        return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */