linux/arch/x86/events/perf_event.h
/*
 * Performance events x86 architecture header
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>

/* To enable MSR tracing please use the generic trace points. */

/*
 *          |   NHM/WSM    |      SNB     |
 * register -------------------------------
 *          |  HT  | no HT |  HT  | no HT |
 *-----------------------------------------
 * offcore  | core | core  | cpu  | core  |
 * lbr_sel  | core | core  | cpu  | core  |
 * ld_lat   | cpu  | core  | cpu  | core  |
 *-----------------------------------------
 *
 * Given that there is a small number of shared regs,
 * we can pre-allocate their slot in the per-cpu
 * per-core reg tables.
 */
enum extra_reg_type {
        EXTRA_REG_NONE  = -1,   /* not used */

        EXTRA_REG_RSP_0 = 0,    /* offcore_response_0 */
        EXTRA_REG_RSP_1 = 1,    /* offcore_response_1 */
        EXTRA_REG_LBR   = 2,    /* lbr_select */
        EXTRA_REG_LDLAT = 3,    /* ld_lat_threshold */
        EXTRA_REG_FE    = 4,    /* fe_* */

        EXTRA_REG_MAX           /* number of entries needed */
};

struct event_constraint {
        union {
                unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
                u64             idxmsk64;
        };
        u64     code;
        u64     cmask;
        int     weight;
        int     overlap;
        int     flags;
};
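
/*
 * Illustrative example (not a constraint defined in this file): an
 * entry built with the EVENT_CONSTRAINT() macro below, e.g.
 *
 *  EVENT_CONSTRAINT(0xc0, 0x3, ARCH_PERFMON_EVENTSEL_EVENT)
 *
 * yields code = 0xc0, idxmsk64 = 0x3 (counters 0 and 1 only) and
 * weight = HWEIGHT(0x3) = 2; the scheduler places the events with the
 * smallest weight (the most constrained ones) first.
 */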
/*
 * struct hw_perf_event.flags flags
 */
#define PERF_X86_EVENT_PEBS_LDLAT       0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST          0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW      0x0004 /* haswell style datala, store */
#define PERF_X86_EVENT_COMMITTED        0x0008 /* event passed commit_txn */
#define PERF_X86_EVENT_PEBS_LD_HSW      0x0010 /* haswell style datala, load */
#define PERF_X86_EVENT_PEBS_NA_HSW      0x0020 /* haswell style datala, unknown */
#define PERF_X86_EVENT_EXCL             0x0040 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC          0x0080 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED    0x0100 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT        0x0200 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD      0x0400 /* use PEBS auto-reload */
#define PERF_X86_EVENT_FREERUNNING      0x0800 /* use freerunning PEBS */

struct amd_nb {
        int nb_id;  /* NorthBridge id */
        int refcnt; /* reference count */
        struct perf_event *owners[X86_PMC_IDX_MAX];
        struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS         8
#define PEBS_COUNTER_MASK       ((1ULL << MAX_PEBS_EVENTS) - 1)
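
/*
 * With MAX_PEBS_EVENTS == 8 this evaluates to (1ULL << 8) - 1 == 0xff,
 * i.e. one mask bit per potentially PEBS-capable counter.
 */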

/*
 * Flags PEBS can handle without a PMI.
 *
 * TID can only be handled by flushing at context switch.
 */
#define PEBS_FREERUNNING_FLAGS \
        (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
        PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
        PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
        PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR)
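
/*
 * Minimal sketch of the intended use (illustrative only; the live
 * check sits in the PEBS code and goes through
 * x86_pmu.free_running_flags):
 *
 *  if (!(event->attr.sample_type & ~PEBS_FREERUNNING_FLAGS))
 *          ... the event may use multi-record (freerunning) PEBS ...
 */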

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64-bit fields.
 */
struct debug_store {
        u64     bts_buffer_base;
        u64     bts_index;
        u64     bts_absolute_maximum;
        u64     bts_interrupt_threshold;
        u64     pebs_buffer_base;
        u64     pebs_index;
        u64     pebs_absolute_maximum;
        u64     pebs_interrupt_threshold;
        u64     pebs_event_reset[MAX_PEBS_EVENTS];
};
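
/*
 * The PEBS fields follow the usual producer ring layout. Assumed
 * relationships (a sketch, not definitions from this file):
 *
 *  pebs_index starts at pebs_buffer_base and advances by
 *  x86_pmu.pebs_record_size per record;
 *  pebs_absolute_maximum = pebs_buffer_base + buffer size;
 *  crossing pebs_interrupt_threshold raises the PEBS PMI.
 */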

/*
 * Per register state.
 */
struct er_account {
        raw_spinlock_t      lock;       /* per-core: protect structure */
        u64                 config;     /* extra MSR config */
        u64                 reg;        /* extra MSR number */
        atomic_t            ref;        /* reference count */
};

/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
        struct er_account       regs[EXTRA_REG_MAX];
        int                     refcnt;         /* per-core: #HT threads */
        unsigned                core_id;        /* per-core: core id */
};

enum intel_excl_state_type {
        INTEL_EXCL_UNUSED    = 0, /* counter is unused */
        INTEL_EXCL_SHARED    = 1, /* counter can be used by both threads */
        INTEL_EXCL_EXCLUSIVE = 2, /* counter can be used by one thread only */
};

struct intel_excl_states {
        enum intel_excl_state_type state[X86_PMC_IDX_MAX];
        bool sched_started; /* true if scheduling has started */
};

struct intel_excl_cntrs {
        raw_spinlock_t  lock;

        struct intel_excl_states states[2];

        union {
                u16     has_exclusive[2];
                u32     exclusive_present;
        };

        int             refcnt;         /* per-core: #HT threads */
        unsigned        core_id;        /* per-core: core id */
};

#define MAX_LBR_ENTRIES         32

enum {
        X86_PERF_KFREE_SHARED = 0,
        X86_PERF_KFREE_EXCL   = 1,
        X86_PERF_KFREE_MAX
};

struct cpu_hw_events {
        /*
         * Generic x86 PMC bits
         */
        struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
        unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long           running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int                     enabled;

        int                     n_events; /* the # of events in the below arrays */
        int                     n_added;  /* the # last events in the below arrays;
                                             they've never been enabled yet */
        int                     n_txn;    /* the # last events in the below arrays;
                                             added in the current transaction */
        int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
        u64                     tags[X86_PMC_IDX_MAX];

        struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
        struct event_constraint *event_constraint[X86_PMC_IDX_MAX];

        int                     n_excl; /* the number of exclusive events */

        unsigned int            txn_flags;
        int                     is_fake;

        /*
         * Intel DebugStore bits
         */
        struct debug_store      *ds;
        u64                     pebs_enabled;
        int                     n_pebs;
        int                     n_large_pebs;

        /*
         * Intel LBR bits
         */
        int                             lbr_users;
        struct perf_branch_stack        lbr_stack;
        struct perf_branch_entry        lbr_entries[MAX_LBR_ENTRIES];
        struct er_account               *lbr_sel;
        u64                             br_sel;

        /*
         * Intel host/guest exclude bits
         */
        u64                             intel_ctrl_guest_mask;
        u64                             intel_ctrl_host_mask;
        struct perf_guest_switch_msr    guest_switch_msrs[X86_PMC_IDX_MAX];

        /*
         * Intel checkpoint mask
         */
        u64                             intel_cp_status;

        /*
         * manage shared (per-core, per-cpu) registers
         * used on Intel NHM/WSM/SNB
         */
        struct intel_shared_regs        *shared_regs;
        /*
         * manage exclusive counter access between hyperthreads
         */
        struct event_constraint *constraint_list; /* in enable order */
        struct intel_excl_cntrs         *excl_cntrs;
        int excl_thread_id; /* 0 or 1 */

        /*
         * AMD specific bits
         */
        struct amd_nb                   *amd_nb;
        /* Inverted mask of bits to clear in the perf_ctr ctrl registers */
        u64                             perf_ctr_virt_mask;

        void                            *kfree_on_online[X86_PERF_KFREE_MAX];
};

#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
        { .idxmsk64 = (n) },            \
        .code = (c),                    \
        .cmask = (m),                   \
        .weight = (w),                  \
        .overlap = (o),                 \
        .flags = f,                     \
}

#define EVENT_CONSTRAINT(c, n, m)       \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)

#define INTEL_EXCLEVT_CONSTRAINT(c, n)  \
        __EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
                           0, PERF_X86_EVENT_EXCL)

/*
 * The overlap flag marks event constraints with overlapping counter
 * masks. This is the case if the counter mask of such an event is not
 * a subset of any other counter mask of a constraint with an equal or
 * higher weight, e.g.:
 *
 *  c_overlaps = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
 *  c_another1 = EVENT_CONSTRAINT(0, 0x07, 0);
 *  c_another2 = EVENT_CONSTRAINT(0, 0x38, 0);
 *
 * The event scheduler may not select the correct counter in the first
 * cycle because it needs to know which subsequent events will be
 * scheduled. It may then fail to schedule the events. So we set the
 * overlap flag for such constraints to give the scheduler a hint which
 * events to select for counter rescheduling.
 *
 * Care must be taken as the rescheduling algorithm is O(n!), which
 * will dramatically increase scheduling cycles on an over-committed
 * system. The number of such EVENT_CONSTRAINT_OVERLAP() macros and
 * their counter masks must therefore be kept to a minimum.
 */
#define EVENT_CONSTRAINT_OVERLAP(c, n, m)       \
        __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)    \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * Filter mask used to validate fixed counter events.
 * The following filters disqualify an event from the fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 *  - in_tx
 *  - in_tx_checkpointed
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_FLAGS (X86_RAW_EVENT_MASK|HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)
#define FIXED_EVENT_CONSTRAINT(c, n)    \
        EVENT_CONSTRAINT(c, (1ULL << (32+n)), FIXED_EVENT_FLAGS)
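
/*
 * Example (as used in the Intel constraint tables):
 *
 *  FIXED_EVENT_CONSTRAINT(0x00c0, 0)
 *
 * pins INST_RETIRED.ANY to fixed counter 0; the (1ULL << (32+n)) index
 * bit reflects that the fixed counters start at X86_PMC_IDX_FIXED (32)
 * in the generic counter index space.
 */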

/*
 * Constraint on the Event code + UMask
 */
#define INTEL_UEVENT_CONSTRAINT(c, n)   \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

/* Constraint on specific umask bit only + event */
#define INTEL_UBIT_EVENT_CONSTRAINT(c, n)       \
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|(c))

/* Like UEVENT_CONSTRAINT, but match flags too */
#define INTEL_FLAGS_UEVENT_CONSTRAINT(c, n)     \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

#define INTEL_EXCLUEVT_CONSTRAINT(c, n) \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)

#define INTEL_PLD_CONSTRAINT(c, n)      \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                           HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)

#define INTEL_PST_CONSTRAINT(c, n)      \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)

/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)

/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n)     \
        EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS)

/* Check flags and event code, and set the HSW store flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

/* Check flags and event code, and set the HSW load flag */
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, \
                          PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW store flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, \
                          PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW load flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)

#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, \
                          PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)

/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)

/*
 * We define the end marker as having a weight of -1: counting the bits
 * of any real counter bitmask can never yield a negative weight, so -1
 * unambiguously terminates a constraint table. This keeps weight zero
 * (an empty counter bitmask) available for blacklisting events.
 */
#define EVENT_CONSTRAINT_END { .weight = -1 }

/*
 * Check for end marker with weight == -1
 */
#define for_each_event_constraint(e, c) \
        for ((e) = (c); (e)->weight != -1; (e)++)
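
/*
 * Typical iteration (a sketch; essentially what
 * x86_get_event_constraints() does):
 *
 *  struct event_constraint *c;
 *
 *  for_each_event_constraint(c, x86_pmu.event_constraints) {
 *          if ((event->hw.config & c->cmask) == c->code)
 *                  return c;
 *  }
 */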

/*
 * Extra registers for specific events.
 *
 * Some events need large masks and require external MSRs.
 * Those extra MSRs end up being shared for all events on
 * a PMU and sometimes between the PMUs of sibling HT threads.
 * In either case, the kernel needs to handle conflicting
 * accesses to those extra, shared, regs. The data structure
 * to manage those registers is stored in struct cpu_hw_events.
 */
struct extra_reg {
        unsigned int            event;
        unsigned int            msr;
        u64                     config_mask;
        u64                     valid_mask;
        int                     idx;  /* per_xxx->regs[] reg index */
        bool                    extra_msr_access;
};

#define EVENT_EXTRA_REG(e, ms, m, vm, i) {      \
        .event = (e),                   \
        .msr = (ms),                    \
        .config_mask = (m),             \
        .valid_mask = (vm),             \
        .idx = EXTRA_REG_##i,           \
        .extra_msr_access = true,       \
        }

#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)      \
        EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx)

#define INTEL_UEVENT_EXTRA_REG(event, msr, vm, idx) \
        EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT | \
                        ARCH_PERFMON_EVENTSEL_UMASK, vm, idx)

#define INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(c) \
        INTEL_UEVENT_EXTRA_REG(c, \
                               MSR_PEBS_LD_LAT_THRESHOLD, \
                               0xffff, \
                               LDLAT)
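
/*
 * Example (Nehalem-style; the valid_mask is model specific):
 *
 *  INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0)
 *
 * routes event 0x01b7 (OFFCORE_RESPONSE_0) to the shared
 * MSR_OFFCORE_RSP_0 register, with valid_mask limiting the response
 * bits user space may request.
 */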

#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0)

union perf_capabilities {
        struct {
                u64     lbr_format:6;
                u64     pebs_trap:1;
                u64     pebs_arch_reg:1;
                u64     pebs_format:4;
                u64     smm_freeze:1;
                /*
                 * PMU supports separate counter range for writing
                 * values > 32bit.
                 */
                u64     full_width_write:1;
        };
        u64     capabilities;
};
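
/*
 * Mirrors IA32_PERF_CAPABILITIES; filled in roughly like this when the
 * CPU advertises PDCM (a sketch of the init path):
 *
 *  u64 capabilities;
 *
 *  rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
 *  x86_pmu.intel_cap.capabilities = capabilities;
 */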

struct x86_pmu_quirk {
        struct x86_pmu_quirk *next;
        void (*func)(void);
};

union x86_pmu_config {
        struct {
                u64 event:8,
                    umask:8,
                    usr:1,
                    os:1,
                    edge:1,
                    pc:1,
                    interrupt:1,
                    __reserved1:1,
                    en:1,
                    inv:1,
                    cmask:8,
                    event2:4,
                    __reserved2:4,
                    go:1,
                    ho:1;
        } bits;
        u64 value;
};

#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
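
/*
 * Example (borrowed from the PEBS alias logic; values illustrative):
 *
 *  X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16)
 *
 * builds a raw config word with designated initializers instead of
 * open-coded shifts and masks.
 */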

enum {
        x86_lbr_exclusive_lbr,
        x86_lbr_exclusive_bts,
        x86_lbr_exclusive_pt,
        x86_lbr_exclusive_max,
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
        /*
         * Generic x86 PMC bits
         */
        const char      *name;
        int             version;
        int             (*handle_irq)(struct pt_regs *);
        void            (*disable_all)(void);
        void            (*enable_all)(int added);
        void            (*enable)(struct perf_event *);
        void            (*disable)(struct perf_event *);
        void            (*add)(struct perf_event *);
        void            (*del)(struct perf_event *);
        int             (*hw_config)(struct perf_event *event);
        int             (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
        unsigned        eventsel;
        unsigned        perfctr;
        int             (*addr_offset)(int index, bool eventsel);
        int             (*rdpmc_index)(int index);
        u64             (*event_map)(int);
        int             max_events;
        int             num_counters;
        int             num_counters_fixed;
        int             cntval_bits;
        u64             cntval_mask;
        union {
                        unsigned long events_maskl;
                        unsigned long events_mask[BITS_TO_LONGS(ARCH_PERFMON_EVENTS_COUNT)];
        };
        int             events_mask_len;
        int             apic;
        u64             max_period;
        struct event_constraint *
                        (*get_event_constraints)(struct cpu_hw_events *cpuc,
                                                 int idx,
                                                 struct perf_event *event);

        void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                                 struct perf_event *event);

        void            (*start_scheduling)(struct cpu_hw_events *cpuc);

        void            (*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);

        void            (*stop_scheduling)(struct cpu_hw_events *cpuc);

        struct event_constraint *event_constraints;
        struct x86_pmu_quirk *quirks;
        int             perfctr_second_write;
        bool            late_ack;
        unsigned        (*limit_period)(struct perf_event *event, unsigned l);

        /*
         * sysfs attrs
         */
        int             attr_rdpmc_broken;
        int             attr_rdpmc;
        struct attribute **format_attrs;
        struct attribute **event_attrs;
        struct attribute **caps_attrs;

        ssize_t         (*events_sysfs_show)(char *page, u64 config);
        struct attribute **cpu_events;

        unsigned long   attr_freeze_on_smi;
        struct attribute **attrs;

        /*
         * CPU Hotplug hooks
         */
        int             (*cpu_prepare)(int cpu);
        void            (*cpu_starting)(int cpu);
        void            (*cpu_dying)(int cpu);
        void            (*cpu_dead)(int cpu);

        void            (*check_microcode)(void);
        void            (*sched_task)(struct perf_event_context *ctx,
                                      bool sched_in);

        /*
         * Intel Arch Perfmon v2+
         */
        u64                     intel_ctrl;
        union perf_capabilities intel_cap;

        /*
         * Intel DebugStore bits
         */
        unsigned int    bts             :1,
                        bts_active      :1,
                        pebs            :1,
                        pebs_active     :1,
                        pebs_broken     :1,
                        pebs_prec_dist  :1,
                        pebs_no_tlb     :1;
        int             pebs_record_size;
        int             pebs_buffer_size;
        void            (*drain_pebs)(struct pt_regs *regs);
        struct event_constraint *pebs_constraints;
        void            (*pebs_aliases)(struct perf_event *event);
        int             max_pebs_events;
        unsigned long   free_running_flags;

        /*
         * Intel LBR
         */
        unsigned long   lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
        int             lbr_nr;                    /* hardware stack size */
        u64             lbr_sel_mask;              /* LBR_SELECT valid bits */
        const int       *lbr_sel_map;              /* lbr_select mappings */
        bool            lbr_double_abort;          /* duplicated lbr aborts */
        bool            lbr_pt_coexist;            /* (LBR|BTS) may coexist with PT */

        /*
         * Intel PT/LBR/BTS are exclusive
         */
        atomic_t        lbr_exclusive[x86_lbr_exclusive_max];

        /*
         * AMD bits
         */
        unsigned int    amd_nb_constraints : 1;

        /*
         * Extra registers for events
         */
        struct extra_reg *extra_regs;
        unsigned int flags;

        /*
         * Intel host/guest support (KVM)
         */
        struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
};

struct x86_perf_task_context {
        u64 lbr_from[MAX_LBR_ENTRIES];
        u64 lbr_to[MAX_LBR_ENTRIES];
        u64 lbr_info[MAX_LBR_ENTRIES];
        int tos;
        int lbr_callstack_users;
        int lbr_stack_state;
};

#define x86_add_quirk(func_)                                            \
do {                                                                    \
        static struct x86_pmu_quirk __quirk __initdata = {              \
                .func = func_,                                          \
        };                                                              \
        __quirk.next = x86_pmu.quirks;                                  \
        x86_pmu.quirks = &__quirk;                                      \
} while (0)
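
/*
 * Usage sketch (quirks are registered from __init code), e.g.:
 *
 *  x86_add_quirk(intel_clovertown_quirk);
 *
 * Each node is pushed onto the x86_pmu.quirks list and its func() is
 * run once during PMU initialization.
 */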

/*
 * x86_pmu flags
 */
#define PMU_FL_NO_HT_SHARING    0x1 /* no hyper-threading resource sharing */
#define PMU_FL_HAS_RSP_1        0x2 /* has 2 equivalent offcore_rsp regs   */
#define PMU_FL_EXCL_CNTRS       0x4 /* has exclusive counter requirements  */
#define PMU_FL_EXCL_ENABLED     0x8 /* exclusive counter active */

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)                                          \
static struct perf_pmu_events_attr EVENT_VAR(_id) = {                   \
        .attr           = __ATTR(_name, 0444, events_sysfs_show, NULL), \
        .id             = PERF_COUNT_HW_##_id,                          \
        .event_str      = NULL,                                         \
};

#define EVENT_ATTR_STR(_name, v, str)                                   \
static struct perf_pmu_events_attr event_attr_##v = {                   \
        .attr           = __ATTR(_name, 0444, events_sysfs_show, NULL), \
        .id             = 0,                                            \
        .event_str      = str,                                          \
};

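/*
 * Example (as used for model-specific event strings):
 *
 *  EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 *
 * which shows up under the PMU's events directory in sysfs as
 * a "mem-loads" file.
 */
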
#define EVENT_ATTR_STR_HT(_name, v, noht, ht)                           \
static struct perf_pmu_events_ht_attr event_attr_##v = {                \
        .attr           = __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
        .id             = 0,                                            \
        .event_str_noht = noht,                                         \
        .event_str_ht   = ht,                                           \
}

extern struct x86_pmu x86_pmu __read_mostly;

static inline bool x86_pmu_has_lbr_callstack(void)
{
        return  x86_pmu.lbr_sel_map &&
                x86_pmu.lbr_sel_map[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT] > 0;
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per-model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

extern u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
extern u64 __read_mostly hw_cache_extra_regs
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];

u64 x86_perf_event_update(struct perf_event *event);

static inline unsigned int x86_pmu_config_addr(int index)
{
        return x86_pmu.eventsel + (x86_pmu.addr_offset ?
                                   x86_pmu.addr_offset(index, true) : index);
}

static inline unsigned int x86_pmu_event_addr(int index)
{
        return x86_pmu.perfctr + (x86_pmu.addr_offset ?
                                  x86_pmu.addr_offset(index, false) : index);
}

static inline int x86_pmu_rdpmc_index(int index)
{
        return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
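
/*
 * Worked example (assuming the AMD Fam15h scheme, where addr_offset()
 * spaces the registers two apart): x86_pmu_config_addr(1) returns
 * MSR_F15H_PERF_CTL + 2, the second event-select MSR. Without an
 * addr_offset() callback the MSRs are taken to be contiguous.
 */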

int x86_add_exclusive(unsigned int what);

void x86_del_exclusive(unsigned int what);

int x86_reserve_hardware(void);

void x86_release_hardware(void);

int x86_pmu_max_precise(void);

void hw_perf_lbr_event_destroy(struct perf_event *event);

int x86_setup_perfctr(struct perf_event *event);

int x86_pmu_hw_config(struct perf_event *event);

void x86_pmu_disable_all(void);

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
                                          u64 enable_mask)
{
        u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);

        if (hwc->extra_reg.reg)
                wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
        wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
}

void x86_pmu_enable_all(int added);

int perf_assign_events(struct event_constraint **constraints, int n,
                        int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);

void x86_pmu_stop(struct perf_event *event, int flags);

static inline void x86_pmu_disable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        wrmsrl(hwc->config_base, hwc->config);
}

void x86_pmu_enable_event(struct perf_event *event);

int x86_pmu_handle_irq(struct pt_regs *regs);

extern struct event_constraint emptyconstraint;

extern struct event_constraint unconstrained;

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
        return ip > PAGE_OFFSET;
#else
        return (long)ip < 0;
#endif
}
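
/*
 * E.g. on 64-bit a kernel text address such as 0xffffffff81000000 has
 * the sign bit set, so the (long) cast is negative; user addresses
 * compare false. 32-bit only has the PAGE_OFFSET split to go by.
 */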

/*
 * Not all PMUs provide the right context information to place the reported IP
 * into full context. Specifically, segment registers are typically not
 * supplied.
 *
 * Assuming the address is a linear address (it is for IBS), we fake the CS and
 * vm86 mode using the known zero-based code segment and 'fix up' the registers
 * to reflect this.
 *
 * Intel PEBS/LBR appear to typically provide the effective address; there is
 * not much we can do about that but pray and treat it like a linear address.
 */
static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
{
        regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
        if (regs->flags & X86_VM_MASK)
                regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
        regs->ip = ip;
}

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
ssize_t intel_event_sysfs_show(char *page, u64 config);

struct attribute **merge_attr(struct attribute **a, struct attribute **b);

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
                          char *page);
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
                          char *page);

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);

#else /* CONFIG_CPU_SUP_AMD */

static inline int amd_pmu_init(void)
{
        return 0;
}

#endif /* CONFIG_CPU_SUP_AMD */

#ifdef CONFIG_CPU_SUP_INTEL

static inline bool intel_pmu_has_bts(struct perf_event *event)
{
        if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
            !event->attr.freq && event->hw.sample_period == 1)
                return true;

        return false;
}

int intel_pmu_save_and_restart(struct perf_event *event);

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event);

struct intel_shared_regs *allocate_shared_regs(int cpu);

int intel_pmu_init(void);

void init_debug_store_on_cpu(int cpu);

void fini_debug_store_on_cpu(int cpu);

void release_ds_buffers(void);

void reserve_ds_buffers(void);

extern struct event_constraint bts_constraint;

void intel_pmu_enable_bts(u64 config);

void intel_pmu_disable_bts(void);

int intel_pmu_drain_bts_buffer(void);

extern struct event_constraint intel_core2_pebs_event_constraints[];

extern struct event_constraint intel_atom_pebs_event_constraints[];

extern struct event_constraint intel_slm_pebs_event_constraints[];

extern struct event_constraint intel_glm_pebs_event_constraints[];

extern struct event_constraint intel_glp_pebs_event_constraints[];

extern struct event_constraint intel_nehalem_pebs_event_constraints[];

extern struct event_constraint intel_westmere_pebs_event_constraints[];

extern struct event_constraint intel_snb_pebs_event_constraints[];

extern struct event_constraint intel_ivb_pebs_event_constraints[];

extern struct event_constraint intel_hsw_pebs_event_constraints[];

extern struct event_constraint intel_bdw_pebs_event_constraints[];

extern struct event_constraint intel_skl_pebs_event_constraints[];

struct event_constraint *intel_pebs_constraints(struct perf_event *event);

void intel_pmu_pebs_add(struct perf_event *event);

void intel_pmu_pebs_del(struct perf_event *event);

void intel_pmu_pebs_enable(struct perf_event *event);

void intel_pmu_pebs_disable(struct perf_event *event);

void intel_pmu_pebs_enable_all(void);

void intel_pmu_pebs_disable_all(void);

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);

void intel_ds_init(void);

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

u64 lbr_from_signext_quirk_wr(u64 val);

void intel_pmu_lbr_reset(void);

void intel_pmu_lbr_add(struct perf_event *event);

void intel_pmu_lbr_del(struct perf_event *event);

void intel_pmu_lbr_enable_all(bool pmi);

void intel_pmu_lbr_disable_all(void);

void intel_pmu_lbr_read(void);

void intel_pmu_lbr_init_core(void);

void intel_pmu_lbr_init_nhm(void);

void intel_pmu_lbr_init_atom(void);

void intel_pmu_lbr_init_slm(void);

void intel_pmu_lbr_init_snb(void);

void intel_pmu_lbr_init_hsw(void);

void intel_pmu_lbr_init_skl(void);

void intel_pmu_lbr_init_knl(void);

void intel_pmu_pebs_data_source_nhm(void);

void intel_pmu_pebs_data_source_skl(bool pmem);

int intel_pmu_setup_lbr_filter(struct perf_event *event);

void intel_pt_interrupt(void);

int intel_bts_interrupt(void);

void intel_bts_enable_local(void);

void intel_bts_disable_local(void);

int p4_pmu_init(void);

int p6_pmu_init(void);

int knc_pmu_init(void);

static inline int is_ht_workaround_enabled(void)
{
        return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
}

#else /* CONFIG_CPU_SUP_INTEL */

static inline void reserve_ds_buffers(void)
{
}

static inline void release_ds_buffers(void)
{
}

static inline int intel_pmu_init(void)
{
        return 0;
}

static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
{
        return NULL;
}

static inline int is_ht_workaround_enabled(void)
{
        return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */