linux/arch/x86/events/intel/core.c
   1/*
   2 * Per core/cpu state
   3 *
   4 * Used to coordinate shared registers between HT threads or
   5 * among events on a single PMU.
   6 */
   7
   8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   9
  10#include <linux/stddef.h>
  11#include <linux/types.h>
  12#include <linux/init.h>
  13#include <linux/slab.h>
  14#include <linux/export.h>
  15#include <linux/nmi.h>
  16
  17#include <asm/cpufeature.h>
  18#include <asm/hardirq.h>
  19#include <asm/intel-family.h>
  20#include <asm/apic.h>
  21
  22#include "../perf_event.h"
  23
  24/*
  25 * Intel PerfMon, used on Core and later.
  26 */
  27static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
  28{
  29        [PERF_COUNT_HW_CPU_CYCLES]              = 0x003c,
  30        [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
  31        [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x4f2e,
  32        [PERF_COUNT_HW_CACHE_MISSES]            = 0x412e,
  33        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c4,
  34        [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c5,
  35        [PERF_COUNT_HW_BUS_CYCLES]              = 0x013c,
  36        [PERF_COUNT_HW_REF_CPU_CYCLES]          = 0x0300, /* pseudo-encoding */
  37};
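     /*
      * Each entry above is a raw event encoding: unit mask in bits 15:8,
      * event select in bits 7:0. E.g. 0x412e is event 0x2e
      * (LONGEST_LAT_CACHE) with umask 0x41 (MISS), and 0x4f2e is the same
      * event with umask 0x4f (REFERENCE).
      */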
  38
  39static struct event_constraint intel_core_event_constraints[] __read_mostly =
  40{
  41        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
  42        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
  43        INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
  44        INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
  45        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
  46        INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
  47        EVENT_CONSTRAINT_END
  48};
  49
  50static struct event_constraint intel_core2_event_constraints[] __read_mostly =
  51{
  52        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  53        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  54        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
  55        INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
  56        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
  57        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
  58        INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
  59        INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
  60        INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
  61        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
  62        INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
  63        INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
  64        INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
  65        EVENT_CONSTRAINT_END
  66};
  67
  68static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
  69{
  70        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  71        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  72        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
  73        INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
  74        INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
  75        INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
  76        INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
  77        INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
  78        INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
  79        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
  80        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
  81        EVENT_CONSTRAINT_END
  82};
  83
  84static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
  85{
  86        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
  87        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
  88        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
  89        EVENT_EXTRA_END
  90};
  91
  92static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
  93{
  94        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  95        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  96        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
  97        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
  98        INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
  99        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
 100        INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
 101        EVENT_CONSTRAINT_END
 102};
 103
 104static struct event_constraint intel_snb_event_constraints[] __read_mostly =
 105{
 106        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 107        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 108        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 109        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
 110        INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
 111        INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 112        INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
 113        INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
 114        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 115        INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 116        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
 117        INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 118
 119        /*
 120         * When HT is off these events can only run on the bottom 4 counters
 121         * When HT is on, they are impacted by the HT bug and require EXCL access
 122         */
 123        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
 124        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
 125        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
 126        INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
 127
 128        EVENT_CONSTRAINT_END
 129};
 130
 131static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
 132{
 133        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 134        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 135        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 136        INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
  137        INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
 138        INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
 139        INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
 140        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
 141        INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
 142        INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
 143        INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 144        INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
 145        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 146
 147        /*
 148         * When HT is off these events can only run on the bottom 4 counters
 149         * When HT is on, they are impacted by the HT bug and require EXCL access
 150         */
 151        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
 152        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
 153        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
 154        INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
 155
 156        EVENT_CONSTRAINT_END
 157};
 158
 159static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
 160{
 161        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
 162        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
 163        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
 164        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
 165        EVENT_EXTRA_END
 166};
 167
 168static struct event_constraint intel_v1_event_constraints[] __read_mostly =
 169{
 170        EVENT_CONSTRAINT_END
 171};
 172
 173static struct event_constraint intel_gen_event_constraints[] __read_mostly =
 174{
 175        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 176        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 177        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 178        EVENT_CONSTRAINT_END
 179};
 180
 181static struct event_constraint intel_slm_event_constraints[] __read_mostly =
 182{
 183        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 184        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 185        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
 186        EVENT_CONSTRAINT_END
 187};
 188
 189static struct event_constraint intel_skl_event_constraints[] = {
 190        FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
 191        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
 192        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
 193        INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2),    /* INST_RETIRED.PREC_DIST */
 194
 195        /*
 196         * when HT is off, these can only run on the bottom 4 counters
 197         */
 198        INTEL_EVENT_CONSTRAINT(0xd0, 0xf),      /* MEM_INST_RETIRED.* */
 199        INTEL_EVENT_CONSTRAINT(0xd1, 0xf),      /* MEM_LOAD_RETIRED.* */
 200        INTEL_EVENT_CONSTRAINT(0xd2, 0xf),      /* MEM_LOAD_L3_HIT_RETIRED.* */
 201        INTEL_EVENT_CONSTRAINT(0xcd, 0xf),      /* MEM_TRANS_RETIRED.* */
 202        INTEL_EVENT_CONSTRAINT(0xc6, 0xf),      /* FRONTEND_RETIRED.* */
 203
 204        EVENT_CONSTRAINT_END
 205};
 206
 207static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
 208        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
 209        INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
 210        EVENT_EXTRA_END
 211};
 212
 213static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
 214        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
 215        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
 216        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
 217        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 218        EVENT_EXTRA_END
 219};
 220
 221static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
 222        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
 223        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
 224        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
 225        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 226        EVENT_EXTRA_END
 227};
 228
 229static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
 230        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
 231        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
 232        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 233        /*
  234         * Note: the low 8 bits of the eventsel code do not form a continuous
  235         * field; they contain some bits that #GP when set, so they are masked out.
 236         */
 237        INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
 238        EVENT_EXTRA_END
 239};
 240
 241EVENT_ATTR_STR(mem-loads,       mem_ld_nhm,     "event=0x0b,umask=0x10,ldlat=3");
 242EVENT_ATTR_STR(mem-loads,       mem_ld_snb,     "event=0xcd,umask=0x1,ldlat=3");
 243EVENT_ATTR_STR(mem-stores,      mem_st_snb,     "event=0xcd,umask=0x2");
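     /*
      * These strings are exported through sysfs as the mem-loads/mem-stores
      * events, which tools such as "perf mem" use to program the
      * model-specific load-latency (PEBS, gated by the ldlat threshold) and
      * store events.
      */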
 244
 245static struct attribute *nhm_mem_events_attrs[] = {
 246        EVENT_PTR(mem_ld_nhm),
 247        NULL,
 248};
 249
 250/*
 251 * topdown events for Intel Core CPUs.
 252 *
  253 * The events are all counted in slots; a slot is one issue opportunity
  254 * in the 4-wide pipeline. Some events are already reported in slots; for
  255 * cycle events we multiply by the pipeline width (4).
 256 *
 257 * With Hyper Threading on, topdown metrics are either summed or averaged
 258 * between the threads of a core: (count_t0 + count_t1).
 259 *
 260 * For the average case the metric is always scaled to pipeline width,
 261 * so we use factor 2 ((count_t0 + count_t1) / 2 * 4)
 262 */
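     /*
      * For reference, the level-1 topdown metrics can be derived from these
      * events roughly as:
      *
      *   frontend bound  = fetch_bubbles / total_slots
      *   bad speculation = (slots_issued - slots_retired + recovery_bubbles)
      *                     / total_slots
      *   retiring        = slots_retired / total_slots
      *   backend bound   = 1 - frontend bound - bad speculation - retiring
      */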
 263
 264EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
 265        "event=0x3c,umask=0x0",                 /* cpu_clk_unhalted.thread */
 266        "event=0x3c,umask=0x0,any=1");          /* cpu_clk_unhalted.thread_any */
 267EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
 268EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
 269        "event=0xe,umask=0x1");                 /* uops_issued.any */
 270EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
 271        "event=0xc2,umask=0x2");                /* uops_retired.retire_slots */
 272EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
 273        "event=0x9c,umask=0x1");                /* idq_uops_not_delivered_core */
 274EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
 275        "event=0xd,umask=0x3,cmask=1",          /* int_misc.recovery_cycles */
 276        "event=0xd,umask=0x3,cmask=1,any=1");   /* int_misc.recovery_cycles_any */
 277EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
 278        "4", "2");
 279
 280static struct attribute *snb_events_attrs[] = {
 281        EVENT_PTR(td_slots_issued),
 282        EVENT_PTR(td_slots_retired),
 283        EVENT_PTR(td_fetch_bubbles),
 284        EVENT_PTR(td_total_slots),
 285        EVENT_PTR(td_total_slots_scale),
 286        EVENT_PTR(td_recovery_bubbles),
 287        EVENT_PTR(td_recovery_bubbles_scale),
 288        NULL,
 289};
 290
 291static struct attribute *snb_mem_events_attrs[] = {
 292        EVENT_PTR(mem_ld_snb),
 293        EVENT_PTR(mem_st_snb),
 294        NULL,
 295};
 296
 297static struct event_constraint intel_hsw_event_constraints[] = {
 298        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 299        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 300        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 301        INTEL_UEVENT_CONSTRAINT(0x148, 0x4),    /* L1D_PEND_MISS.PENDING */
 302        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 303        INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 304        /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 305        INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
 306        /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
 307        INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
 308        /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
 309        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
 310
 311        /*
 312         * When HT is off these events can only run on the bottom 4 counters
 313         * When HT is on, they are impacted by the HT bug and require EXCL access
 314         */
 315        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
 316        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
 317        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
 318        INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
 319
 320        EVENT_CONSTRAINT_END
 321};
 322
 323static struct event_constraint intel_bdw_event_constraints[] = {
 324        FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
 325        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
 326        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
 327        INTEL_UEVENT_CONSTRAINT(0x148, 0x4),    /* L1D_PEND_MISS.PENDING */
 328        INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4),        /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
 329        /*
 330         * when HT is off, these can only run on the bottom 4 counters
 331         */
 332        INTEL_EVENT_CONSTRAINT(0xd0, 0xf),      /* MEM_INST_RETIRED.* */
 333        INTEL_EVENT_CONSTRAINT(0xd1, 0xf),      /* MEM_LOAD_RETIRED.* */
 334        INTEL_EVENT_CONSTRAINT(0xd2, 0xf),      /* MEM_LOAD_L3_HIT_RETIRED.* */
 335        INTEL_EVENT_CONSTRAINT(0xcd, 0xf),      /* MEM_TRANS_RETIRED.* */
 336        EVENT_CONSTRAINT_END
 337};
 338
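     /*
      * Resolve a generic PERF_COUNT_HW_* id to its raw event encoding, e.g.
      * intel_pmu_event_map(PERF_COUNT_HW_CACHE_MISSES) == 0x412e.
      */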
 339static u64 intel_pmu_event_map(int hw_event)
 340{
 341        return intel_perfmon_event_map[hw_event];
 342}
 343
 344/*
 345 * Notes on the events:
 346 * - data reads do not include code reads (comparable to earlier tables)
 347 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 348 * - remote node access includes remote memory, remote cache, remote mmio.
 349 * - prefetches are not included in the counts.
 350 * - icache miss does not include decoded icache
 351 */
 352
 353#define SKL_DEMAND_DATA_RD              BIT_ULL(0)
 354#define SKL_DEMAND_RFO                  BIT_ULL(1)
 355#define SKL_ANY_RESPONSE                BIT_ULL(16)
 356#define SKL_SUPPLIER_NONE               BIT_ULL(17)
 357#define SKL_L3_MISS_LOCAL_DRAM          BIT_ULL(26)
 358#define SKL_L3_MISS_REMOTE_HOP0_DRAM    BIT_ULL(27)
 359#define SKL_L3_MISS_REMOTE_HOP1_DRAM    BIT_ULL(28)
 360#define SKL_L3_MISS_REMOTE_HOP2P_DRAM   BIT_ULL(29)
 361#define SKL_L3_MISS                     (SKL_L3_MISS_LOCAL_DRAM| \
 362                                         SKL_L3_MISS_REMOTE_HOP0_DRAM| \
 363                                         SKL_L3_MISS_REMOTE_HOP1_DRAM| \
 364                                         SKL_L3_MISS_REMOTE_HOP2P_DRAM)
 365#define SKL_SPL_HIT                     BIT_ULL(30)
 366#define SKL_SNOOP_NONE                  BIT_ULL(31)
 367#define SKL_SNOOP_NOT_NEEDED            BIT_ULL(32)
 368#define SKL_SNOOP_MISS                  BIT_ULL(33)
 369#define SKL_SNOOP_HIT_NO_FWD            BIT_ULL(34)
 370#define SKL_SNOOP_HIT_WITH_FWD          BIT_ULL(35)
 371#define SKL_SNOOP_HITM                  BIT_ULL(36)
 372#define SKL_SNOOP_NON_DRAM              BIT_ULL(37)
 373#define SKL_ANY_SNOOP                   (SKL_SPL_HIT|SKL_SNOOP_NONE| \
 374                                         SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
 375                                         SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
 376                                         SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
 377#define SKL_DEMAND_READ                 SKL_DEMAND_DATA_RD
 378#define SKL_SNOOP_DRAM                  (SKL_SNOOP_NONE| \
 379                                         SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
 380                                         SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
 381                                         SKL_SNOOP_HITM|SKL_SPL_HIT)
 382#define SKL_DEMAND_WRITE                SKL_DEMAND_RFO
 383#define SKL_LLC_ACCESS                  SKL_ANY_RESPONSE
 384#define SKL_L3_MISS_REMOTE              (SKL_L3_MISS_REMOTE_HOP0_DRAM| \
 385                                         SKL_L3_MISS_REMOTE_HOP1_DRAM| \
 386                                         SKL_L3_MISS_REMOTE_HOP2P_DRAM)
 387
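     /*
      * These bits are OR'ed together to form the MSR_OFFCORE_RSP_* values in
      * skl_hw_cache_extra_regs below. For example, an LL read miss is encoded
      * as SKL_DEMAND_READ|SKL_L3_MISS|SKL_ANY_SNOOP|SKL_SUPPLIER_NONE: demand
      * data reads that missed the L3 and were filled from local or remote
      * DRAM (or reported no supplier), with any snoop result.
      */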
 388static __initconst const u64 skl_hw_cache_event_ids
 389                                [PERF_COUNT_HW_CACHE_MAX]
 390                                [PERF_COUNT_HW_CACHE_OP_MAX]
 391                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 392{
 393 [ C(L1D ) ] = {
 394        [ C(OP_READ) ] = {
 395                [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_INST_RETIRED.ALL_LOADS */
 396                [ C(RESULT_MISS)   ] = 0x151,   /* L1D.REPLACEMENT */
 397        },
 398        [ C(OP_WRITE) ] = {
 399                [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_INST_RETIRED.ALL_STORES */
 400                [ C(RESULT_MISS)   ] = 0x0,
 401        },
 402        [ C(OP_PREFETCH) ] = {
 403                [ C(RESULT_ACCESS) ] = 0x0,
 404                [ C(RESULT_MISS)   ] = 0x0,
 405        },
 406 },
 407 [ C(L1I ) ] = {
 408        [ C(OP_READ) ] = {
 409                [ C(RESULT_ACCESS) ] = 0x0,
 410                [ C(RESULT_MISS)   ] = 0x283,   /* ICACHE_64B.MISS */
 411        },
 412        [ C(OP_WRITE) ] = {
 413                [ C(RESULT_ACCESS) ] = -1,
 414                [ C(RESULT_MISS)   ] = -1,
 415        },
 416        [ C(OP_PREFETCH) ] = {
 417                [ C(RESULT_ACCESS) ] = 0x0,
 418                [ C(RESULT_MISS)   ] = 0x0,
 419        },
 420 },
 421 [ C(LL  ) ] = {
 422        [ C(OP_READ) ] = {
 423                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 424                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 425        },
 426        [ C(OP_WRITE) ] = {
 427                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 428                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 429        },
 430        [ C(OP_PREFETCH) ] = {
 431                [ C(RESULT_ACCESS) ] = 0x0,
 432                [ C(RESULT_MISS)   ] = 0x0,
 433        },
 434 },
 435 [ C(DTLB) ] = {
 436        [ C(OP_READ) ] = {
 437                [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_INST_RETIRED.ALL_LOADS */
 438                [ C(RESULT_MISS)   ] = 0xe08,   /* DTLB_LOAD_MISSES.WALK_COMPLETED */
 439        },
 440        [ C(OP_WRITE) ] = {
 441                [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_INST_RETIRED.ALL_STORES */
 442                [ C(RESULT_MISS)   ] = 0xe49,   /* DTLB_STORE_MISSES.WALK_COMPLETED */
 443        },
 444        [ C(OP_PREFETCH) ] = {
 445                [ C(RESULT_ACCESS) ] = 0x0,
 446                [ C(RESULT_MISS)   ] = 0x0,
 447        },
 448 },
 449 [ C(ITLB) ] = {
 450        [ C(OP_READ) ] = {
 451                [ C(RESULT_ACCESS) ] = 0x2085,  /* ITLB_MISSES.STLB_HIT */
 452                [ C(RESULT_MISS)   ] = 0xe85,   /* ITLB_MISSES.WALK_COMPLETED */
 453        },
 454        [ C(OP_WRITE) ] = {
 455                [ C(RESULT_ACCESS) ] = -1,
 456                [ C(RESULT_MISS)   ] = -1,
 457        },
 458        [ C(OP_PREFETCH) ] = {
 459                [ C(RESULT_ACCESS) ] = -1,
 460                [ C(RESULT_MISS)   ] = -1,
 461        },
 462 },
 463 [ C(BPU ) ] = {
 464        [ C(OP_READ) ] = {
 465                [ C(RESULT_ACCESS) ] = 0xc4,    /* BR_INST_RETIRED.ALL_BRANCHES */
 466                [ C(RESULT_MISS)   ] = 0xc5,    /* BR_MISP_RETIRED.ALL_BRANCHES */
 467        },
 468        [ C(OP_WRITE) ] = {
 469                [ C(RESULT_ACCESS) ] = -1,
 470                [ C(RESULT_MISS)   ] = -1,
 471        },
 472        [ C(OP_PREFETCH) ] = {
 473                [ C(RESULT_ACCESS) ] = -1,
 474                [ C(RESULT_MISS)   ] = -1,
 475        },
 476 },
 477 [ C(NODE) ] = {
 478        [ C(OP_READ) ] = {
 479                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 480                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 481        },
 482        [ C(OP_WRITE) ] = {
 483                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 484                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 485        },
 486        [ C(OP_PREFETCH) ] = {
 487                [ C(RESULT_ACCESS) ] = 0x0,
 488                [ C(RESULT_MISS)   ] = 0x0,
 489        },
 490 },
 491};
 492
 493static __initconst const u64 skl_hw_cache_extra_regs
 494                                [PERF_COUNT_HW_CACHE_MAX]
 495                                [PERF_COUNT_HW_CACHE_OP_MAX]
 496                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 497{
 498 [ C(LL  ) ] = {
 499        [ C(OP_READ) ] = {
 500                [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
 501                                       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
 502                [ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
 503                                       SKL_L3_MISS|SKL_ANY_SNOOP|
 504                                       SKL_SUPPLIER_NONE,
 505        },
 506        [ C(OP_WRITE) ] = {
 507                [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
 508                                       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
 509                [ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
 510                                       SKL_L3_MISS|SKL_ANY_SNOOP|
 511                                       SKL_SUPPLIER_NONE,
 512        },
 513        [ C(OP_PREFETCH) ] = {
 514                [ C(RESULT_ACCESS) ] = 0x0,
 515                [ C(RESULT_MISS)   ] = 0x0,
 516        },
 517 },
 518 [ C(NODE) ] = {
 519        [ C(OP_READ) ] = {
 520                [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
 521                                       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
 522                [ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
 523                                       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
 524        },
 525        [ C(OP_WRITE) ] = {
 526                [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
 527                                       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
 528                [ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
 529                                       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
 530        },
 531        [ C(OP_PREFETCH) ] = {
 532                [ C(RESULT_ACCESS) ] = 0x0,
 533                [ C(RESULT_MISS)   ] = 0x0,
 534        },
 535 },
 536};
 537
 538#define SNB_DMND_DATA_RD        (1ULL << 0)
 539#define SNB_DMND_RFO            (1ULL << 1)
 540#define SNB_DMND_IFETCH         (1ULL << 2)
 541#define SNB_DMND_WB             (1ULL << 3)
 542#define SNB_PF_DATA_RD          (1ULL << 4)
 543#define SNB_PF_RFO              (1ULL << 5)
 544#define SNB_PF_IFETCH           (1ULL << 6)
 545#define SNB_LLC_DATA_RD         (1ULL << 7)
 546#define SNB_LLC_RFO             (1ULL << 8)
 547#define SNB_LLC_IFETCH          (1ULL << 9)
 548#define SNB_BUS_LOCKS           (1ULL << 10)
 549#define SNB_STRM_ST             (1ULL << 11)
 550#define SNB_OTHER               (1ULL << 15)
 551#define SNB_RESP_ANY            (1ULL << 16)
 552#define SNB_NO_SUPP             (1ULL << 17)
 553#define SNB_LLC_HITM            (1ULL << 18)
 554#define SNB_LLC_HITE            (1ULL << 19)
 555#define SNB_LLC_HITS            (1ULL << 20)
 556#define SNB_LLC_HITF            (1ULL << 21)
 557#define SNB_LOCAL               (1ULL << 22)
 558#define SNB_REMOTE              (0xffULL << 23)
 559#define SNB_SNP_NONE            (1ULL << 31)
 560#define SNB_SNP_NOT_NEEDED      (1ULL << 32)
 561#define SNB_SNP_MISS            (1ULL << 33)
 562#define SNB_NO_FWD              (1ULL << 34)
 563#define SNB_SNP_FWD             (1ULL << 35)
 564#define SNB_HITM                (1ULL << 36)
 565#define SNB_NON_DRAM            (1ULL << 37)
 566
 567#define SNB_DMND_READ           (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
 568#define SNB_DMND_WRITE          (SNB_DMND_RFO|SNB_LLC_RFO)
 569#define SNB_DMND_PREFETCH       (SNB_PF_DATA_RD|SNB_PF_RFO)
 570
 571#define SNB_SNP_ANY             (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
 572                                 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
 573                                 SNB_HITM)
 574
 575#define SNB_DRAM_ANY            (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
 576#define SNB_DRAM_REMOTE         (SNB_REMOTE|SNB_SNP_ANY)
 577
 578#define SNB_L3_ACCESS           SNB_RESP_ANY
 579#define SNB_L3_MISS             (SNB_DRAM_ANY|SNB_NON_DRAM)
 580
 581static __initconst const u64 snb_hw_cache_extra_regs
 582                                [PERF_COUNT_HW_CACHE_MAX]
 583                                [PERF_COUNT_HW_CACHE_OP_MAX]
 584                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 585{
 586 [ C(LL  ) ] = {
 587        [ C(OP_READ) ] = {
 588                [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
 589                [ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
 590        },
 591        [ C(OP_WRITE) ] = {
 592                [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
 593                [ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
 594        },
 595        [ C(OP_PREFETCH) ] = {
 596                [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
 597                [ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
 598        },
 599 },
 600 [ C(NODE) ] = {
 601        [ C(OP_READ) ] = {
 602                [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
 603                [ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
 604        },
 605        [ C(OP_WRITE) ] = {
 606                [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
 607                [ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
 608        },
 609        [ C(OP_PREFETCH) ] = {
 610                [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
 611                [ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
 612        },
 613 },
 614};
 615
 616static __initconst const u64 snb_hw_cache_event_ids
 617                                [PERF_COUNT_HW_CACHE_MAX]
 618                                [PERF_COUNT_HW_CACHE_OP_MAX]
 619                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 620{
 621 [ C(L1D) ] = {
 622        [ C(OP_READ) ] = {
 623                [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS        */
 624                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT              */
 625        },
 626        [ C(OP_WRITE) ] = {
 627                [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES       */
 628                [ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT        */
 629        },
 630        [ C(OP_PREFETCH) ] = {
 631                [ C(RESULT_ACCESS) ] = 0x0,
 632                [ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS          */
 633        },
 634 },
 635 [ C(L1I ) ] = {
 636        [ C(OP_READ) ] = {
 637                [ C(RESULT_ACCESS) ] = 0x0,
 638                [ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
 639        },
 640        [ C(OP_WRITE) ] = {
 641                [ C(RESULT_ACCESS) ] = -1,
 642                [ C(RESULT_MISS)   ] = -1,
 643        },
 644        [ C(OP_PREFETCH) ] = {
 645                [ C(RESULT_ACCESS) ] = 0x0,
 646                [ C(RESULT_MISS)   ] = 0x0,
 647        },
 648 },
 649 [ C(LL  ) ] = {
 650        [ C(OP_READ) ] = {
 651                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
 652                [ C(RESULT_ACCESS) ] = 0x01b7,
 653                /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
 654                [ C(RESULT_MISS)   ] = 0x01b7,
 655        },
 656        [ C(OP_WRITE) ] = {
 657                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
 658                [ C(RESULT_ACCESS) ] = 0x01b7,
 659                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
 660                [ C(RESULT_MISS)   ] = 0x01b7,
 661        },
 662        [ C(OP_PREFETCH) ] = {
 663                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
 664                [ C(RESULT_ACCESS) ] = 0x01b7,
 665                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
 666                [ C(RESULT_MISS)   ] = 0x01b7,
 667        },
 668 },
 669 [ C(DTLB) ] = {
 670        [ C(OP_READ) ] = {
 671                [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
 672                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
 673        },
 674        [ C(OP_WRITE) ] = {
 675                [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
 676                [ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
 677        },
 678        [ C(OP_PREFETCH) ] = {
 679                [ C(RESULT_ACCESS) ] = 0x0,
 680                [ C(RESULT_MISS)   ] = 0x0,
 681        },
 682 },
 683 [ C(ITLB) ] = {
 684        [ C(OP_READ) ] = {
 685                [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT         */
 686                [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK    */
 687        },
 688        [ C(OP_WRITE) ] = {
 689                [ C(RESULT_ACCESS) ] = -1,
 690                [ C(RESULT_MISS)   ] = -1,
 691        },
 692        [ C(OP_PREFETCH) ] = {
 693                [ C(RESULT_ACCESS) ] = -1,
 694                [ C(RESULT_MISS)   ] = -1,
 695        },
 696 },
 697 [ C(BPU ) ] = {
 698        [ C(OP_READ) ] = {
 699                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
 700                [ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
 701        },
 702        [ C(OP_WRITE) ] = {
 703                [ C(RESULT_ACCESS) ] = -1,
 704                [ C(RESULT_MISS)   ] = -1,
 705        },
 706        [ C(OP_PREFETCH) ] = {
 707                [ C(RESULT_ACCESS) ] = -1,
 708                [ C(RESULT_MISS)   ] = -1,
 709        },
 710 },
 711 [ C(NODE) ] = {
 712        [ C(OP_READ) ] = {
 713                [ C(RESULT_ACCESS) ] = 0x01b7,
 714                [ C(RESULT_MISS)   ] = 0x01b7,
 715        },
 716        [ C(OP_WRITE) ] = {
 717                [ C(RESULT_ACCESS) ] = 0x01b7,
 718                [ C(RESULT_MISS)   ] = 0x01b7,
 719        },
 720        [ C(OP_PREFETCH) ] = {
 721                [ C(RESULT_ACCESS) ] = 0x01b7,
 722                [ C(RESULT_MISS)   ] = 0x01b7,
 723        },
 724 },
 725
 726};
 727
 728/*
 729 * Notes on the events:
 730 * - data reads do not include code reads (comparable to earlier tables)
 731 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 732 * - remote node access includes remote memory, remote cache, remote mmio.
 733 * - prefetches are not included in the counts because they are not
 734 *   reliably counted.
 735 */
 736
 737#define HSW_DEMAND_DATA_RD              BIT_ULL(0)
 738#define HSW_DEMAND_RFO                  BIT_ULL(1)
 739#define HSW_ANY_RESPONSE                BIT_ULL(16)
 740#define HSW_SUPPLIER_NONE               BIT_ULL(17)
 741#define HSW_L3_MISS_LOCAL_DRAM          BIT_ULL(22)
 742#define HSW_L3_MISS_REMOTE_HOP0         BIT_ULL(27)
 743#define HSW_L3_MISS_REMOTE_HOP1         BIT_ULL(28)
 744#define HSW_L3_MISS_REMOTE_HOP2P        BIT_ULL(29)
 745#define HSW_L3_MISS                     (HSW_L3_MISS_LOCAL_DRAM| \
 746                                         HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
 747                                         HSW_L3_MISS_REMOTE_HOP2P)
 748#define HSW_SNOOP_NONE                  BIT_ULL(31)
 749#define HSW_SNOOP_NOT_NEEDED            BIT_ULL(32)
 750#define HSW_SNOOP_MISS                  BIT_ULL(33)
 751#define HSW_SNOOP_HIT_NO_FWD            BIT_ULL(34)
 752#define HSW_SNOOP_HIT_WITH_FWD          BIT_ULL(35)
 753#define HSW_SNOOP_HITM                  BIT_ULL(36)
 754#define HSW_SNOOP_NON_DRAM              BIT_ULL(37)
 755#define HSW_ANY_SNOOP                   (HSW_SNOOP_NONE| \
 756                                         HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
 757                                         HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
 758                                         HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
 759#define HSW_SNOOP_DRAM                  (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
 760#define HSW_DEMAND_READ                 HSW_DEMAND_DATA_RD
 761#define HSW_DEMAND_WRITE                HSW_DEMAND_RFO
 762#define HSW_L3_MISS_REMOTE              (HSW_L3_MISS_REMOTE_HOP0|\
 763                                         HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
 764#define HSW_LLC_ACCESS                  HSW_ANY_RESPONSE
 765
 766#define BDW_L3_MISS_LOCAL               BIT(26)
 767#define BDW_L3_MISS                     (BDW_L3_MISS_LOCAL| \
 768                                         HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
 769                                         HSW_L3_MISS_REMOTE_HOP2P)
 770
 771
 772static __initconst const u64 hsw_hw_cache_event_ids
 773                                [PERF_COUNT_HW_CACHE_MAX]
 774                                [PERF_COUNT_HW_CACHE_OP_MAX]
 775                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 776{
 777 [ C(L1D ) ] = {
 778        [ C(OP_READ) ] = {
 779                [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_UOPS_RETIRED.ALL_LOADS */
 780                [ C(RESULT_MISS)   ] = 0x151,   /* L1D.REPLACEMENT */
 781        },
 782        [ C(OP_WRITE) ] = {
 783                [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_UOPS_RETIRED.ALL_STORES */
 784                [ C(RESULT_MISS)   ] = 0x0,
 785        },
 786        [ C(OP_PREFETCH) ] = {
 787                [ C(RESULT_ACCESS) ] = 0x0,
 788                [ C(RESULT_MISS)   ] = 0x0,
 789        },
 790 },
 791 [ C(L1I ) ] = {
 792        [ C(OP_READ) ] = {
 793                [ C(RESULT_ACCESS) ] = 0x0,
 794                [ C(RESULT_MISS)   ] = 0x280,   /* ICACHE.MISSES */
 795        },
 796        [ C(OP_WRITE) ] = {
 797                [ C(RESULT_ACCESS) ] = -1,
 798                [ C(RESULT_MISS)   ] = -1,
 799        },
 800        [ C(OP_PREFETCH) ] = {
 801                [ C(RESULT_ACCESS) ] = 0x0,
 802                [ C(RESULT_MISS)   ] = 0x0,
 803        },
 804 },
 805 [ C(LL  ) ] = {
 806        [ C(OP_READ) ] = {
 807                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 808                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 809        },
 810        [ C(OP_WRITE) ] = {
 811                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 812                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 813        },
 814        [ C(OP_PREFETCH) ] = {
 815                [ C(RESULT_ACCESS) ] = 0x0,
 816                [ C(RESULT_MISS)   ] = 0x0,
 817        },
 818 },
 819 [ C(DTLB) ] = {
 820        [ C(OP_READ) ] = {
 821                [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_UOPS_RETIRED.ALL_LOADS */
 822                [ C(RESULT_MISS)   ] = 0x108,   /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
 823        },
 824        [ C(OP_WRITE) ] = {
 825                [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_UOPS_RETIRED.ALL_STORES */
 826                [ C(RESULT_MISS)   ] = 0x149,   /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
 827        },
 828        [ C(OP_PREFETCH) ] = {
 829                [ C(RESULT_ACCESS) ] = 0x0,
 830                [ C(RESULT_MISS)   ] = 0x0,
 831        },
 832 },
 833 [ C(ITLB) ] = {
 834        [ C(OP_READ) ] = {
 835                [ C(RESULT_ACCESS) ] = 0x6085,  /* ITLB_MISSES.STLB_HIT */
 836                [ C(RESULT_MISS)   ] = 0x185,   /* ITLB_MISSES.MISS_CAUSES_A_WALK */
 837        },
 838        [ C(OP_WRITE) ] = {
 839                [ C(RESULT_ACCESS) ] = -1,
 840                [ C(RESULT_MISS)   ] = -1,
 841        },
 842        [ C(OP_PREFETCH) ] = {
 843                [ C(RESULT_ACCESS) ] = -1,
 844                [ C(RESULT_MISS)   ] = -1,
 845        },
 846 },
 847 [ C(BPU ) ] = {
 848        [ C(OP_READ) ] = {
 849                [ C(RESULT_ACCESS) ] = 0xc4,    /* BR_INST_RETIRED.ALL_BRANCHES */
 850                [ C(RESULT_MISS)   ] = 0xc5,    /* BR_MISP_RETIRED.ALL_BRANCHES */
 851        },
 852        [ C(OP_WRITE) ] = {
 853                [ C(RESULT_ACCESS) ] = -1,
 854                [ C(RESULT_MISS)   ] = -1,
 855        },
 856        [ C(OP_PREFETCH) ] = {
 857                [ C(RESULT_ACCESS) ] = -1,
 858                [ C(RESULT_MISS)   ] = -1,
 859        },
 860 },
 861 [ C(NODE) ] = {
 862        [ C(OP_READ) ] = {
 863                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 864                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 865        },
 866        [ C(OP_WRITE) ] = {
 867                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 868                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 869        },
 870        [ C(OP_PREFETCH) ] = {
 871                [ C(RESULT_ACCESS) ] = 0x0,
 872                [ C(RESULT_MISS)   ] = 0x0,
 873        },
 874 },
 875};
 876
 877static __initconst const u64 hsw_hw_cache_extra_regs
 878                                [PERF_COUNT_HW_CACHE_MAX]
 879                                [PERF_COUNT_HW_CACHE_OP_MAX]
 880                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 881{
 882 [ C(LL  ) ] = {
 883        [ C(OP_READ) ] = {
 884                [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
 885                                       HSW_LLC_ACCESS,
 886                [ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
 887                                       HSW_L3_MISS|HSW_ANY_SNOOP,
 888        },
 889        [ C(OP_WRITE) ] = {
 890                [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
 891                                       HSW_LLC_ACCESS,
 892                [ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
 893                                       HSW_L3_MISS|HSW_ANY_SNOOP,
 894        },
 895        [ C(OP_PREFETCH) ] = {
 896                [ C(RESULT_ACCESS) ] = 0x0,
 897                [ C(RESULT_MISS)   ] = 0x0,
 898        },
 899 },
 900 [ C(NODE) ] = {
 901        [ C(OP_READ) ] = {
 902                [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
 903                                       HSW_L3_MISS_LOCAL_DRAM|
 904                                       HSW_SNOOP_DRAM,
 905                [ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
 906                                       HSW_L3_MISS_REMOTE|
 907                                       HSW_SNOOP_DRAM,
 908        },
 909        [ C(OP_WRITE) ] = {
 910                [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
 911                                       HSW_L3_MISS_LOCAL_DRAM|
 912                                       HSW_SNOOP_DRAM,
 913                [ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
 914                                       HSW_L3_MISS_REMOTE|
 915                                       HSW_SNOOP_DRAM,
 916        },
 917        [ C(OP_PREFETCH) ] = {
 918                [ C(RESULT_ACCESS) ] = 0x0,
 919                [ C(RESULT_MISS)   ] = 0x0,
 920        },
 921 },
 922};
 923
 924static __initconst const u64 westmere_hw_cache_event_ids
 925                                [PERF_COUNT_HW_CACHE_MAX]
 926                                [PERF_COUNT_HW_CACHE_OP_MAX]
 927                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 928{
 929 [ C(L1D) ] = {
 930        [ C(OP_READ) ] = {
 931                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
 932                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
 933        },
 934        [ C(OP_WRITE) ] = {
  935                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
 936                [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
 937        },
 938        [ C(OP_PREFETCH) ] = {
 939                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
 940                [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
 941        },
 942 },
 943 [ C(L1I ) ] = {
 944        [ C(OP_READ) ] = {
 945                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
 946                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
 947        },
 948        [ C(OP_WRITE) ] = {
 949                [ C(RESULT_ACCESS) ] = -1,
 950                [ C(RESULT_MISS)   ] = -1,
 951        },
 952        [ C(OP_PREFETCH) ] = {
 953                [ C(RESULT_ACCESS) ] = 0x0,
 954                [ C(RESULT_MISS)   ] = 0x0,
 955        },
 956 },
 957 [ C(LL  ) ] = {
 958        [ C(OP_READ) ] = {
 959                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
 960                [ C(RESULT_ACCESS) ] = 0x01b7,
 961                /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
 962                [ C(RESULT_MISS)   ] = 0x01b7,
 963        },
 964        /*
 965         * Use RFO, not WRITEBACK, because a write miss would typically occur
 966         * on RFO.
 967         */
 968        [ C(OP_WRITE) ] = {
 969                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
 970                [ C(RESULT_ACCESS) ] = 0x01b7,
 971                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
 972                [ C(RESULT_MISS)   ] = 0x01b7,
 973        },
 974        [ C(OP_PREFETCH) ] = {
 975                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
 976                [ C(RESULT_ACCESS) ] = 0x01b7,
 977                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
 978                [ C(RESULT_MISS)   ] = 0x01b7,
 979        },
 980 },
 981 [ C(DTLB) ] = {
 982        [ C(OP_READ) ] = {
 983                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
 984                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
 985        },
 986        [ C(OP_WRITE) ] = {
  987                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
 988                [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
 989        },
 990        [ C(OP_PREFETCH) ] = {
 991                [ C(RESULT_ACCESS) ] = 0x0,
 992                [ C(RESULT_MISS)   ] = 0x0,
 993        },
 994 },
 995 [ C(ITLB) ] = {
 996        [ C(OP_READ) ] = {
 997                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
 998                [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
 999        },
1000        [ C(OP_WRITE) ] = {
1001                [ C(RESULT_ACCESS) ] = -1,
1002                [ C(RESULT_MISS)   ] = -1,
1003        },
1004        [ C(OP_PREFETCH) ] = {
1005                [ C(RESULT_ACCESS) ] = -1,
1006                [ C(RESULT_MISS)   ] = -1,
1007        },
1008 },
1009 [ C(BPU ) ] = {
1010        [ C(OP_READ) ] = {
1011                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1012                [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
1013        },
1014        [ C(OP_WRITE) ] = {
1015                [ C(RESULT_ACCESS) ] = -1,
1016                [ C(RESULT_MISS)   ] = -1,
1017        },
1018        [ C(OP_PREFETCH) ] = {
1019                [ C(RESULT_ACCESS) ] = -1,
1020                [ C(RESULT_MISS)   ] = -1,
1021        },
1022 },
1023 [ C(NODE) ] = {
1024        [ C(OP_READ) ] = {
1025                [ C(RESULT_ACCESS) ] = 0x01b7,
1026                [ C(RESULT_MISS)   ] = 0x01b7,
1027        },
1028        [ C(OP_WRITE) ] = {
1029                [ C(RESULT_ACCESS) ] = 0x01b7,
1030                [ C(RESULT_MISS)   ] = 0x01b7,
1031        },
1032        [ C(OP_PREFETCH) ] = {
1033                [ C(RESULT_ACCESS) ] = 0x01b7,
1034                [ C(RESULT_MISS)   ] = 0x01b7,
1035        },
1036 },
1037};
1038
1039/*
1040 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
1041 * See IA32 SDM Vol 3B 30.6.1.3
1042 */
1043
1044#define NHM_DMND_DATA_RD        (1 << 0)
1045#define NHM_DMND_RFO            (1 << 1)
1046#define NHM_DMND_IFETCH         (1 << 2)
1047#define NHM_DMND_WB             (1 << 3)
1048#define NHM_PF_DATA_RD          (1 << 4)
1049#define NHM_PF_DATA_RFO         (1 << 5)
1050#define NHM_PF_IFETCH           (1 << 6)
1051#define NHM_OFFCORE_OTHER       (1 << 7)
1052#define NHM_UNCORE_HIT          (1 << 8)
1053#define NHM_OTHER_CORE_HIT_SNP  (1 << 9)
1054#define NHM_OTHER_CORE_HITM     (1 << 10)
1055                                /* reserved */
1056#define NHM_REMOTE_CACHE_FWD    (1 << 12)
1057#define NHM_REMOTE_DRAM         (1 << 13)
1058#define NHM_LOCAL_DRAM          (1 << 14)
1059#define NHM_NON_DRAM            (1 << 15)
1060
1061#define NHM_LOCAL               (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
1062#define NHM_REMOTE              (NHM_REMOTE_DRAM)
1063
1064#define NHM_DMND_READ           (NHM_DMND_DATA_RD)
1065#define NHM_DMND_WRITE          (NHM_DMND_RFO|NHM_DMND_WB)
1066#define NHM_DMND_PREFETCH       (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
1067
1068#define NHM_L3_HIT      (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
1069#define NHM_L3_MISS     (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
1070#define NHM_L3_ACCESS   (NHM_L3_HIT|NHM_L3_MISS)
1071
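     /*
      * The tables below combine these request and response bits, e.g. an LL
      * prefetch miss is NHM_DMND_PREFETCH|NHM_L3_MISS: data prefetches that
      * missed the L3 and were satisfied by local/remote DRAM, a remote cache
      * forward, or a non-DRAM source.
      */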
1072static __initconst const u64 nehalem_hw_cache_extra_regs
1073                                [PERF_COUNT_HW_CACHE_MAX]
1074                                [PERF_COUNT_HW_CACHE_OP_MAX]
1075                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1076{
1077 [ C(LL  ) ] = {
1078        [ C(OP_READ) ] = {
1079                [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
1080                [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
1081        },
1082        [ C(OP_WRITE) ] = {
1083                [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
1084                [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
1085        },
1086        [ C(OP_PREFETCH) ] = {
1087                [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
1088                [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
1089        },
1090 },
1091 [ C(NODE) ] = {
1092        [ C(OP_READ) ] = {
1093                [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
1094                [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
1095        },
1096        [ C(OP_WRITE) ] = {
1097                [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
1098                [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
1099        },
1100        [ C(OP_PREFETCH) ] = {
1101                [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
1102                [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
1103        },
1104 },
1105};
1106
1107static __initconst const u64 nehalem_hw_cache_event_ids
1108                                [PERF_COUNT_HW_CACHE_MAX]
1109                                [PERF_COUNT_HW_CACHE_OP_MAX]
1110                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1111{
1112 [ C(L1D) ] = {
1113        [ C(OP_READ) ] = {
1114                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
1115                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
1116        },
1117        [ C(OP_WRITE) ] = {
 1118                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
1119                [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
1120        },
1121        [ C(OP_PREFETCH) ] = {
1122                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
1123                [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
1124        },
1125 },
1126 [ C(L1I ) ] = {
1127        [ C(OP_READ) ] = {
1128                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
1129                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
1130        },
1131        [ C(OP_WRITE) ] = {
1132                [ C(RESULT_ACCESS) ] = -1,
1133                [ C(RESULT_MISS)   ] = -1,
1134        },
1135        [ C(OP_PREFETCH) ] = {
1136                [ C(RESULT_ACCESS) ] = 0x0,
1137                [ C(RESULT_MISS)   ] = 0x0,
1138        },
1139 },
1140 [ C(LL  ) ] = {
1141        [ C(OP_READ) ] = {
1142                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1143                [ C(RESULT_ACCESS) ] = 0x01b7,
1144                /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1145                [ C(RESULT_MISS)   ] = 0x01b7,
1146        },
1147        /*
1148         * Use RFO, not WRITEBACK, because a write miss would typically occur
1149         * on RFO.
1150         */
1151        [ C(OP_WRITE) ] = {
1152                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1153                [ C(RESULT_ACCESS) ] = 0x01b7,
1154                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1155                [ C(RESULT_MISS)   ] = 0x01b7,
1156        },
1157        [ C(OP_PREFETCH) ] = {
1158                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1159                [ C(RESULT_ACCESS) ] = 0x01b7,
1160                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1161                [ C(RESULT_MISS)   ] = 0x01b7,
1162        },
1163 },
1164 [ C(DTLB) ] = {
1165        [ C(OP_READ) ] = {
1166                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
1167                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
1168        },
1169        [ C(OP_WRITE) ] = {
1170                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
1171                [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
1172        },
1173        [ C(OP_PREFETCH) ] = {
1174                [ C(RESULT_ACCESS) ] = 0x0,
1175                [ C(RESULT_MISS)   ] = 0x0,
1176        },
1177 },
1178 [ C(ITLB) ] = {
1179        [ C(OP_READ) ] = {
1180                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
1181                [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
1182        },
1183        [ C(OP_WRITE) ] = {
1184                [ C(RESULT_ACCESS) ] = -1,
1185                [ C(RESULT_MISS)   ] = -1,
1186        },
1187        [ C(OP_PREFETCH) ] = {
1188                [ C(RESULT_ACCESS) ] = -1,
1189                [ C(RESULT_MISS)   ] = -1,
1190        },
1191 },
1192 [ C(BPU ) ] = {
1193        [ C(OP_READ) ] = {
1194                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1195                [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
1196        },
1197        [ C(OP_WRITE) ] = {
1198                [ C(RESULT_ACCESS) ] = -1,
1199                [ C(RESULT_MISS)   ] = -1,
1200        },
1201        [ C(OP_PREFETCH) ] = {
1202                [ C(RESULT_ACCESS) ] = -1,
1203                [ C(RESULT_MISS)   ] = -1,
1204        },
1205 },
1206 [ C(NODE) ] = {
1207        [ C(OP_READ) ] = {
1208                [ C(RESULT_ACCESS) ] = 0x01b7,
1209                [ C(RESULT_MISS)   ] = 0x01b7,
1210        },
1211        [ C(OP_WRITE) ] = {
1212                [ C(RESULT_ACCESS) ] = 0x01b7,
1213                [ C(RESULT_MISS)   ] = 0x01b7,
1214        },
1215        [ C(OP_PREFETCH) ] = {
1216                [ C(RESULT_ACCESS) ] = 0x01b7,
1217                [ C(RESULT_MISS)   ] = 0x01b7,
1218        },
1219 },
1220};
1221
1222static __initconst const u64 core2_hw_cache_event_ids
1223                                [PERF_COUNT_HW_CACHE_MAX]
1224                                [PERF_COUNT_HW_CACHE_OP_MAX]
1225                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1226{
1227 [ C(L1D) ] = {
1228        [ C(OP_READ) ] = {
1229                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
1230                [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
1231        },
1232        [ C(OP_WRITE) ] = {
1233                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
1234                [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
1235        },
1236        [ C(OP_PREFETCH) ] = {
1237                [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
1238                [ C(RESULT_MISS)   ] = 0,
1239        },
1240 },
1241 [ C(L1I ) ] = {
1242        [ C(OP_READ) ] = {
1243                [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
1244                [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
1245        },
1246        [ C(OP_WRITE) ] = {
1247                [ C(RESULT_ACCESS) ] = -1,
1248                [ C(RESULT_MISS)   ] = -1,
1249        },
1250        [ C(OP_PREFETCH) ] = {
1251                [ C(RESULT_ACCESS) ] = 0,
1252                [ C(RESULT_MISS)   ] = 0,
1253        },
1254 },
1255 [ C(LL  ) ] = {
1256        [ C(OP_READ) ] = {
1257                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
1258                [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
1259        },
1260        [ C(OP_WRITE) ] = {
1261                [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
1262                [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
1263        },
1264        [ C(OP_PREFETCH) ] = {
1265                [ C(RESULT_ACCESS) ] = 0,
1266                [ C(RESULT_MISS)   ] = 0,
1267        },
1268 },
1269 [ C(DTLB) ] = {
1270        [ C(OP_READ) ] = {
1271                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
1272                [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
1273        },
1274        [ C(OP_WRITE) ] = {
1275                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
1276                [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
1277        },
1278        [ C(OP_PREFETCH) ] = {
1279                [ C(RESULT_ACCESS) ] = 0,
1280                [ C(RESULT_MISS)   ] = 0,
1281        },
1282 },
1283 [ C(ITLB) ] = {
1284        [ C(OP_READ) ] = {
1285                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
1286                [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
1287        },
1288        [ C(OP_WRITE) ] = {
1289                [ C(RESULT_ACCESS) ] = -1,
1290                [ C(RESULT_MISS)   ] = -1,
1291        },
1292        [ C(OP_PREFETCH) ] = {
1293                [ C(RESULT_ACCESS) ] = -1,
1294                [ C(RESULT_MISS)   ] = -1,
1295        },
1296 },
1297 [ C(BPU ) ] = {
1298        [ C(OP_READ) ] = {
1299                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
1300                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
1301        },
1302        [ C(OP_WRITE) ] = {
1303                [ C(RESULT_ACCESS) ] = -1,
1304                [ C(RESULT_MISS)   ] = -1,
1305        },
1306        [ C(OP_PREFETCH) ] = {
1307                [ C(RESULT_ACCESS) ] = -1,
1308                [ C(RESULT_MISS)   ] = -1,
1309        },
1310 },
1311};
1312
1313static __initconst const u64 atom_hw_cache_event_ids
1314                                [PERF_COUNT_HW_CACHE_MAX]
1315                                [PERF_COUNT_HW_CACHE_OP_MAX]
1316                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1317{
1318 [ C(L1D) ] = {
1319        [ C(OP_READ) ] = {
1320                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
1321                [ C(RESULT_MISS)   ] = 0,
1322        },
1323        [ C(OP_WRITE) ] = {
1324                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
1325                [ C(RESULT_MISS)   ] = 0,
1326        },
1327        [ C(OP_PREFETCH) ] = {
1328                [ C(RESULT_ACCESS) ] = 0x0,
1329                [ C(RESULT_MISS)   ] = 0,
1330        },
1331 },
1332 [ C(L1I ) ] = {
1333        [ C(OP_READ) ] = {
1334                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
1335                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
1336        },
1337        [ C(OP_WRITE) ] = {
1338                [ C(RESULT_ACCESS) ] = -1,
1339                [ C(RESULT_MISS)   ] = -1,
1340        },
1341        [ C(OP_PREFETCH) ] = {
1342                [ C(RESULT_ACCESS) ] = 0,
1343                [ C(RESULT_MISS)   ] = 0,
1344        },
1345 },
1346 [ C(LL  ) ] = {
1347        [ C(OP_READ) ] = {
1348                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
1349                [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
1350        },
1351        [ C(OP_WRITE) ] = {
1352                [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
1353                [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
1354        },
1355        [ C(OP_PREFETCH) ] = {
1356                [ C(RESULT_ACCESS) ] = 0,
1357                [ C(RESULT_MISS)   ] = 0,
1358        },
1359 },
1360 [ C(DTLB) ] = {
1361        [ C(OP_READ) ] = {
1362                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
1363                [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
1364        },
1365        [ C(OP_WRITE) ] = {
1366                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
1367                [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
1368        },
1369        [ C(OP_PREFETCH) ] = {
1370                [ C(RESULT_ACCESS) ] = 0,
1371                [ C(RESULT_MISS)   ] = 0,
1372        },
1373 },
1374 [ C(ITLB) ] = {
1375        [ C(OP_READ) ] = {
1376                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
1377                [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
1378        },
1379        [ C(OP_WRITE) ] = {
1380                [ C(RESULT_ACCESS) ] = -1,
1381                [ C(RESULT_MISS)   ] = -1,
1382        },
1383        [ C(OP_PREFETCH) ] = {
1384                [ C(RESULT_ACCESS) ] = -1,
1385                [ C(RESULT_MISS)   ] = -1,
1386        },
1387 },
1388 [ C(BPU ) ] = {
1389        [ C(OP_READ) ] = {
1390                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
1391                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
1392        },
1393        [ C(OP_WRITE) ] = {
1394                [ C(RESULT_ACCESS) ] = -1,
1395                [ C(RESULT_MISS)   ] = -1,
1396        },
1397        [ C(OP_PREFETCH) ] = {
1398                [ C(RESULT_ACCESS) ] = -1,
1399                [ C(RESULT_MISS)   ] = -1,
1400        },
1401 },
1402};
1403
1404EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1405EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
1406/* no_alloc_cycles.not_delivered */
1407EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1408               "event=0xca,umask=0x50");
1409EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1410/* uops_retired.all */
1411EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1412               "event=0xc2,umask=0x10");
1413/* uops_retired.all */
1414EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1415               "event=0xc2,umask=0x10");
1416
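/*
 * The topdown-* attributes above feed perf's top-down metric calculation.
 * Total slots are derived from unhalted cycles scaled by the issue width,
 * presumably the reason the .scale attribute is "2" here (Silvermont is
 * 2-wide) and "3" for the Goldmont variants further down.
 */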
1417static struct attribute *slm_events_attrs[] = {
1418        EVENT_PTR(td_total_slots_slm),
1419        EVENT_PTR(td_total_slots_scale_slm),
1420        EVENT_PTR(td_fetch_bubbles_slm),
1421        EVENT_PTR(td_fetch_bubbles_scale_slm),
1422        EVENT_PTR(td_slots_issued_slm),
1423        EVENT_PTR(td_slots_retired_slm),
1424        NULL
1425};
1426
1427static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1428{
1429        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1430        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1431        INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1432        EVENT_EXTRA_END
1433};
1434
1435#define SLM_DMND_READ           SNB_DMND_DATA_RD
1436#define SLM_DMND_WRITE          SNB_DMND_RFO
1437#define SLM_DMND_PREFETCH       (SNB_PF_DATA_RD|SNB_PF_RFO)
1438
1439#define SLM_SNP_ANY             (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1440#define SLM_LLC_ACCESS          SNB_RESP_ANY
1441#define SLM_LLC_MISS            (SLM_SNP_ANY|SNB_NON_DRAM)
1442
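/*
 * The table below supplies the MSR_OFFCORE_RSP_x payload for the 0x01b7/0x02b7
 * OFFCORE_RESPONSE events referenced from slm_hw_cache_event_ids; the SLM_*
 * bits above merely compose those request/response selections.
 */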
1443static __initconst const u64 slm_hw_cache_extra_regs
1444                                [PERF_COUNT_HW_CACHE_MAX]
1445                                [PERF_COUNT_HW_CACHE_OP_MAX]
1446                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1447{
1448 [ C(LL  ) ] = {
1449        [ C(OP_READ) ] = {
1450                [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1451                [ C(RESULT_MISS)   ] = 0,
1452        },
1453        [ C(OP_WRITE) ] = {
1454                [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1455                [ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1456        },
1457        [ C(OP_PREFETCH) ] = {
1458                [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1459                [ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1460        },
1461 },
1462};
1463
1464static __initconst const u64 slm_hw_cache_event_ids
1465                                [PERF_COUNT_HW_CACHE_MAX]
1466                                [PERF_COUNT_HW_CACHE_OP_MAX]
1467                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1468{
1469 [ C(L1D) ] = {
1470        [ C(OP_READ) ] = {
1471                [ C(RESULT_ACCESS) ] = 0,
1472                [ C(RESULT_MISS)   ] = 0x0104, /* LD_DCU_MISS */
1473        },
1474        [ C(OP_WRITE) ] = {
1475                [ C(RESULT_ACCESS) ] = 0,
1476                [ C(RESULT_MISS)   ] = 0,
1477        },
1478        [ C(OP_PREFETCH) ] = {
1479                [ C(RESULT_ACCESS) ] = 0,
1480                [ C(RESULT_MISS)   ] = 0,
1481        },
1482 },
1483 [ C(L1I ) ] = {
1484        [ C(OP_READ) ] = {
1485                [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1486                [ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
1487        },
1488        [ C(OP_WRITE) ] = {
1489                [ C(RESULT_ACCESS) ] = -1,
1490                [ C(RESULT_MISS)   ] = -1,
1491        },
1492        [ C(OP_PREFETCH) ] = {
1493                [ C(RESULT_ACCESS) ] = 0,
1494                [ C(RESULT_MISS)   ] = 0,
1495        },
1496 },
1497 [ C(LL  ) ] = {
1498        [ C(OP_READ) ] = {
1499                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1500                [ C(RESULT_ACCESS) ] = 0x01b7,
1501                [ C(RESULT_MISS)   ] = 0,
1502        },
1503        [ C(OP_WRITE) ] = {
1504                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1505                [ C(RESULT_ACCESS) ] = 0x01b7,
1506                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1507                [ C(RESULT_MISS)   ] = 0x01b7,
1508        },
1509        [ C(OP_PREFETCH) ] = {
1510                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1511                [ C(RESULT_ACCESS) ] = 0x01b7,
1512                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1513                [ C(RESULT_MISS)   ] = 0x01b7,
1514        },
1515 },
1516 [ C(DTLB) ] = {
1517        [ C(OP_READ) ] = {
1518                [ C(RESULT_ACCESS) ] = 0,
1519                [ C(RESULT_MISS)   ] = 0x0804, /* LD_DTLB_MISS */
1520        },
1521        [ C(OP_WRITE) ] = {
1522                [ C(RESULT_ACCESS) ] = 0,
1523                [ C(RESULT_MISS)   ] = 0,
1524        },
1525        [ C(OP_PREFETCH) ] = {
1526                [ C(RESULT_ACCESS) ] = 0,
1527                [ C(RESULT_MISS)   ] = 0,
1528        },
1529 },
1530 [ C(ITLB) ] = {
1531        [ C(OP_READ) ] = {
1532                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1533                [ C(RESULT_MISS)   ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1534        },
1535        [ C(OP_WRITE) ] = {
1536                [ C(RESULT_ACCESS) ] = -1,
1537                [ C(RESULT_MISS)   ] = -1,
1538        },
1539        [ C(OP_PREFETCH) ] = {
1540                [ C(RESULT_ACCESS) ] = -1,
1541                [ C(RESULT_MISS)   ] = -1,
1542        },
1543 },
1544 [ C(BPU ) ] = {
1545        [ C(OP_READ) ] = {
1546                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1547                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1548        },
1549        [ C(OP_WRITE) ] = {
1550                [ C(RESULT_ACCESS) ] = -1,
1551                [ C(RESULT_MISS)   ] = -1,
1552        },
1553        [ C(OP_PREFETCH) ] = {
1554                [ C(RESULT_ACCESS) ] = -1,
1555                [ C(RESULT_MISS)   ] = -1,
1556        },
1557 },
1558};
1559
1560EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1561EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
1562/* UOPS_NOT_DELIVERED.ANY */
1563EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1564/* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1565EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1566/* UOPS_RETIRED.ANY */
1567EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1568/* UOPS_ISSUED.ANY */
1569EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1570
1571static struct attribute *glm_events_attrs[] = {
1572        EVENT_PTR(td_total_slots_glm),
1573        EVENT_PTR(td_total_slots_scale_glm),
1574        EVENT_PTR(td_fetch_bubbles_glm),
1575        EVENT_PTR(td_recovery_bubbles_glm),
1576        EVENT_PTR(td_slots_issued_glm),
1577        EVENT_PTR(td_slots_retired_glm),
1578        NULL
1579};
1580
1581static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1582        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1583        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1584        INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1585        EVENT_EXTRA_END
1586};
1587
1588#define GLM_DEMAND_DATA_RD              BIT_ULL(0)
1589#define GLM_DEMAND_RFO                  BIT_ULL(1)
1590#define GLM_ANY_RESPONSE                BIT_ULL(16)
1591#define GLM_SNP_NONE_OR_MISS            BIT_ULL(33)
1592#define GLM_DEMAND_READ                 GLM_DEMAND_DATA_RD
1593#define GLM_DEMAND_WRITE                GLM_DEMAND_RFO
1594#define GLM_DEMAND_PREFETCH             (SNB_PF_DATA_RD|SNB_PF_RFO)
1595#define GLM_LLC_ACCESS                  GLM_ANY_RESPONSE
1596#define GLM_SNP_ANY                     (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1597#define GLM_LLC_MISS                    (GLM_SNP_ANY|SNB_NON_DRAM)
1598
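/*
 * Goldmont keeps the same structure as Silvermont: the L1D/DTLB rows count
 * MEM_UOPS_RETIRED.ALL_LOADS/ALL_STORES, and the LL rows are OFFCORE_RESPONSE
 * (0x1b7) events whose MSR payload comes from glm_hw_cache_extra_regs below.
 */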
1599static __initconst const u64 glm_hw_cache_event_ids
1600                                [PERF_COUNT_HW_CACHE_MAX]
1601                                [PERF_COUNT_HW_CACHE_OP_MAX]
1602                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1603        [C(L1D)] = {
1604                [C(OP_READ)] = {
1605                        [C(RESULT_ACCESS)]      = 0x81d0,       /* MEM_UOPS_RETIRED.ALL_LOADS */
1606                        [C(RESULT_MISS)]        = 0x0,
1607                },
1608                [C(OP_WRITE)] = {
1609                        [C(RESULT_ACCESS)]      = 0x82d0,       /* MEM_UOPS_RETIRED.ALL_STORES */
1610                        [C(RESULT_MISS)]        = 0x0,
1611                },
1612                [C(OP_PREFETCH)] = {
1613                        [C(RESULT_ACCESS)]      = 0x0,
1614                        [C(RESULT_MISS)]        = 0x0,
1615                },
1616        },
1617        [C(L1I)] = {
1618                [C(OP_READ)] = {
1619                        [C(RESULT_ACCESS)]      = 0x0380,       /* ICACHE.ACCESSES */
1620                        [C(RESULT_MISS)]        = 0x0280,       /* ICACHE.MISSES */
1621                },
1622                [C(OP_WRITE)] = {
1623                        [C(RESULT_ACCESS)]      = -1,
1624                        [C(RESULT_MISS)]        = -1,
1625                },
1626                [C(OP_PREFETCH)] = {
1627                        [C(RESULT_ACCESS)]      = 0x0,
1628                        [C(RESULT_MISS)]        = 0x0,
1629                },
1630        },
1631        [C(LL)] = {
1632                [C(OP_READ)] = {
1633                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1634                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1635                },
1636                [C(OP_WRITE)] = {
1637                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1638                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1639                },
1640                [C(OP_PREFETCH)] = {
1641                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1642                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1643                },
1644        },
1645        [C(DTLB)] = {
1646                [C(OP_READ)] = {
1647                        [C(RESULT_ACCESS)]      = 0x81d0,       /* MEM_UOPS_RETIRED.ALL_LOADS */
1648                        [C(RESULT_MISS)]        = 0x0,
1649                },
1650                [C(OP_WRITE)] = {
1651                        [C(RESULT_ACCESS)]      = 0x82d0,       /* MEM_UOPS_RETIRED.ALL_STORES */
1652                        [C(RESULT_MISS)]        = 0x0,
1653                },
1654                [C(OP_PREFETCH)] = {
1655                        [C(RESULT_ACCESS)]      = 0x0,
1656                        [C(RESULT_MISS)]        = 0x0,
1657                },
1658        },
1659        [C(ITLB)] = {
1660                [C(OP_READ)] = {
1661                        [C(RESULT_ACCESS)]      = 0x00c0,       /* INST_RETIRED.ANY_P */
1662                        [C(RESULT_MISS)]        = 0x0481,       /* ITLB.MISS */
1663                },
1664                [C(OP_WRITE)] = {
1665                        [C(RESULT_ACCESS)]      = -1,
1666                        [C(RESULT_MISS)]        = -1,
1667                },
1668                [C(OP_PREFETCH)] = {
1669                        [C(RESULT_ACCESS)]      = -1,
1670                        [C(RESULT_MISS)]        = -1,
1671                },
1672        },
1673        [C(BPU)] = {
1674                [C(OP_READ)] = {
1675                        [C(RESULT_ACCESS)]      = 0x00c4,       /* BR_INST_RETIRED.ALL_BRANCHES */
1676                        [C(RESULT_MISS)]        = 0x00c5,       /* BR_MISP_RETIRED.ALL_BRANCHES */
1677                },
1678                [C(OP_WRITE)] = {
1679                        [C(RESULT_ACCESS)]      = -1,
1680                        [C(RESULT_MISS)]        = -1,
1681                },
1682                [C(OP_PREFETCH)] = {
1683                        [C(RESULT_ACCESS)]      = -1,
1684                        [C(RESULT_MISS)]        = -1,
1685                },
1686        },
1687};
1688
1689static __initconst const u64 glm_hw_cache_extra_regs
1690                                [PERF_COUNT_HW_CACHE_MAX]
1691                                [PERF_COUNT_HW_CACHE_OP_MAX]
1692                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1693        [C(LL)] = {
1694                [C(OP_READ)] = {
1695                        [C(RESULT_ACCESS)]      = GLM_DEMAND_READ|
1696                                                  GLM_LLC_ACCESS,
1697                        [C(RESULT_MISS)]        = GLM_DEMAND_READ|
1698                                                  GLM_LLC_MISS,
1699                },
1700                [C(OP_WRITE)] = {
1701                        [C(RESULT_ACCESS)]      = GLM_DEMAND_WRITE|
1702                                                  GLM_LLC_ACCESS,
1703                        [C(RESULT_MISS)]        = GLM_DEMAND_WRITE|
1704                                                  GLM_LLC_MISS,
1705                },
1706                [C(OP_PREFETCH)] = {
1707                        [C(RESULT_ACCESS)]      = GLM_DEMAND_PREFETCH|
1708                                                  GLM_LLC_ACCESS,
1709                        [C(RESULT_MISS)]        = GLM_DEMAND_PREFETCH|
1710                                                  GLM_LLC_MISS,
1711                },
1712        },
1713};
1714
1715static __initconst const u64 glp_hw_cache_event_ids
1716                                [PERF_COUNT_HW_CACHE_MAX]
1717                                [PERF_COUNT_HW_CACHE_OP_MAX]
1718                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1719        [C(L1D)] = {
1720                [C(OP_READ)] = {
1721                        [C(RESULT_ACCESS)]      = 0x81d0,       /* MEM_UOPS_RETIRED.ALL_LOADS */
1722                        [C(RESULT_MISS)]        = 0x0,
1723                },
1724                [C(OP_WRITE)] = {
1725                        [C(RESULT_ACCESS)]      = 0x82d0,       /* MEM_UOPS_RETIRED.ALL_STORES */
1726                        [C(RESULT_MISS)]        = 0x0,
1727                },
1728                [C(OP_PREFETCH)] = {
1729                        [C(RESULT_ACCESS)]      = 0x0,
1730                        [C(RESULT_MISS)]        = 0x0,
1731                },
1732        },
1733        [C(L1I)] = {
1734                [C(OP_READ)] = {
1735                        [C(RESULT_ACCESS)]      = 0x0380,       /* ICACHE.ACCESSES */
1736                        [C(RESULT_MISS)]        = 0x0280,       /* ICACHE.MISSES */
1737                },
1738                [C(OP_WRITE)] = {
1739                        [C(RESULT_ACCESS)]      = -1,
1740                        [C(RESULT_MISS)]        = -1,
1741                },
1742                [C(OP_PREFETCH)] = {
1743                        [C(RESULT_ACCESS)]      = 0x0,
1744                        [C(RESULT_MISS)]        = 0x0,
1745                },
1746        },
1747        [C(LL)] = {
1748                [C(OP_READ)] = {
1749                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1750                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1751                },
1752                [C(OP_WRITE)] = {
1753                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1754                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1755                },
1756                [C(OP_PREFETCH)] = {
1757                        [C(RESULT_ACCESS)]      = 0x0,
1758                        [C(RESULT_MISS)]        = 0x0,
1759                },
1760        },
1761        [C(DTLB)] = {
1762                [C(OP_READ)] = {
1763                        [C(RESULT_ACCESS)]      = 0x81d0,       /* MEM_UOPS_RETIRED.ALL_LOADS */
1764                        [C(RESULT_MISS)]        = 0xe08,        /* DTLB_LOAD_MISSES.WALK_COMPLETED */
1765                },
1766                [C(OP_WRITE)] = {
1767                        [C(RESULT_ACCESS)]      = 0x82d0,       /* MEM_UOPS_RETIRED.ALL_STORES */
1768                        [C(RESULT_MISS)]        = 0xe49,        /* DTLB_STORE_MISSES.WALK_COMPLETED */
1769                },
1770                [C(OP_PREFETCH)] = {
1771                        [C(RESULT_ACCESS)]      = 0x0,
1772                        [C(RESULT_MISS)]        = 0x0,
1773                },
1774        },
1775        [C(ITLB)] = {
1776                [C(OP_READ)] = {
1777                        [C(RESULT_ACCESS)]      = 0x00c0,       /* INST_RETIRED.ANY_P */
1778                        [C(RESULT_MISS)]        = 0x0481,       /* ITLB.MISS */
1779                },
1780                [C(OP_WRITE)] = {
1781                        [C(RESULT_ACCESS)]      = -1,
1782                        [C(RESULT_MISS)]        = -1,
1783                },
1784                [C(OP_PREFETCH)] = {
1785                        [C(RESULT_ACCESS)]      = -1,
1786                        [C(RESULT_MISS)]        = -1,
1787                },
1788        },
1789        [C(BPU)] = {
1790                [C(OP_READ)] = {
1791                        [C(RESULT_ACCESS)]      = 0x00c4,       /* BR_INST_RETIRED.ALL_BRANCHES */
1792                        [C(RESULT_MISS)]        = 0x00c5,       /* BR_MISP_RETIRED.ALL_BRANCHES */
1793                },
1794                [C(OP_WRITE)] = {
1795                        [C(RESULT_ACCESS)]      = -1,
1796                        [C(RESULT_MISS)]        = -1,
1797                },
1798                [C(OP_PREFETCH)] = {
1799                        [C(RESULT_ACCESS)]      = -1,
1800                        [C(RESULT_MISS)]        = -1,
1801                },
1802        },
1803};
1804
1805static __initconst const u64 glp_hw_cache_extra_regs
1806                                [PERF_COUNT_HW_CACHE_MAX]
1807                                [PERF_COUNT_HW_CACHE_OP_MAX]
1808                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1809        [C(LL)] = {
1810                [C(OP_READ)] = {
1811                        [C(RESULT_ACCESS)]      = GLM_DEMAND_READ|
1812                                                  GLM_LLC_ACCESS,
1813                        [C(RESULT_MISS)]        = GLM_DEMAND_READ|
1814                                                  GLM_LLC_MISS,
1815                },
1816                [C(OP_WRITE)] = {
1817                        [C(RESULT_ACCESS)]      = GLM_DEMAND_WRITE|
1818                                                  GLM_LLC_ACCESS,
1819                        [C(RESULT_MISS)]        = GLM_DEMAND_WRITE|
1820                                                  GLM_LLC_MISS,
1821                },
1822                [C(OP_PREFETCH)] = {
1823                        [C(RESULT_ACCESS)]      = 0x0,
1824                        [C(RESULT_MISS)]        = 0x0,
1825                },
1826        },
1827};
1828
1829#define KNL_OT_L2_HITE          BIT_ULL(19) /* Other Tile L2 Hit */
1830#define KNL_OT_L2_HITF          BIT_ULL(20) /* Other Tile L2 Hit */
1831#define KNL_MCDRAM_LOCAL        BIT_ULL(21)
1832#define KNL_MCDRAM_FAR          BIT_ULL(22)
1833#define KNL_DDR_LOCAL           BIT_ULL(23)
1834#define KNL_DDR_FAR             BIT_ULL(24)
1835#define KNL_DRAM_ANY            (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
1836                                    KNL_DDR_LOCAL | KNL_DDR_FAR)
1837#define KNL_L2_READ             SLM_DMND_READ
1838#define KNL_L2_WRITE            SLM_DMND_WRITE
1839#define KNL_L2_PREFETCH         SLM_DMND_PREFETCH
1840#define KNL_L2_ACCESS           SLM_LLC_ACCESS
1841#define KNL_L2_MISS             (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
1842                                   KNL_DRAM_ANY | SNB_SNP_ANY | \
1843                                                  SNB_NON_DRAM)
1844
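/*
 * Knights Landing reuses the Silvermont demand-request bits; an LL "miss"
 * here is a request satisfied outside the tile's own L2: another tile's L2
 * (HITE/HITF), MCDRAM or DDR (local or far), plus the common snoop and
 * non-DRAM response bits.
 */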
1845static __initconst const u64 knl_hw_cache_extra_regs
1846                                [PERF_COUNT_HW_CACHE_MAX]
1847                                [PERF_COUNT_HW_CACHE_OP_MAX]
1848                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1849        [C(LL)] = {
1850                [C(OP_READ)] = {
1851                        [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
1852                        [C(RESULT_MISS)]   = 0,
1853                },
1854                [C(OP_WRITE)] = {
1855                        [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
1856                        [C(RESULT_MISS)]   = KNL_L2_WRITE | KNL_L2_MISS,
1857                },
1858                [C(OP_PREFETCH)] = {
1859                        [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
1860                        [C(RESULT_MISS)]   = KNL_L2_PREFETCH | KNL_L2_MISS,
1861                },
1862        },
1863};
1864
1865/*
1866 * Used from PMIs where the LBRs are already disabled.
1867 *
1868 * This function may be called consecutively; it must leave the PMU in the
1869 * disabled state when called consecutively.
1870 *
1871 * During consecutive calls, the same disable value is written to the related
1872 * registers, so the PMU state remains unchanged.
1873 *
1874 * intel_bts events don't coexist with intel PMU's BTS events because of
1875 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
1876 * disabled around intel PMU's event batching etc, only inside the PMI handler.
1877 */
1878static void __intel_pmu_disable_all(void)
1879{
1880        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1881
1882        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1883
1884        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1885                intel_pmu_disable_bts();
1886
1887        intel_pmu_pebs_disable_all();
1888}
1889
1890static void intel_pmu_disable_all(void)
1891{
1892        __intel_pmu_disable_all();
1893        intel_pmu_lbr_disable_all();
1894}
1895
1896static void __intel_pmu_enable_all(int added, bool pmi)
1897{
1898        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1899
1900        intel_pmu_pebs_enable_all();
1901        intel_pmu_lbr_enable_all(pmi);
1902        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
1903                        x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
1904
1905        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1906                struct perf_event *event =
1907                        cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
1908
1909                if (WARN_ON_ONCE(!event))
1910                        return;
1911
1912                intel_pmu_enable_bts(event->hw.config);
1913        }
1914}
1915
1916static void intel_pmu_enable_all(int added)
1917{
1918        __intel_pmu_enable_all(added, false);
1919}
1920
1921/*
1922 * Workaround for:
1923 *   Intel Errata AAK100 (model 26)
1924 *   Intel Errata AAP53  (model 30)
1925 *   Intel Errata BD53   (model 44)
1926 *
1927 * The official story:
1928 *   These chips need to be 'reset' when adding counters by programming the
1929 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
1930 *   in sequence on the same PMC or on different PMCs.
1931 *
1932 * In practice it appears some of these events do in fact count, and
1933 * we need to program all 4 events.
1934 */
1935static void intel_pmu_nhm_workaround(void)
1936{
1937        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1938        static const unsigned long nhm_magic[4] = {
1939                0x4300B5,
1940                0x4300D2,
1941                0x4300B1,
1942                0x4300B1
1943        };
1944        struct perf_event *event;
1945        int i;
1946
1947        /*
1948         * The errata requires the steps below:
1949         * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
1950         * 2) Configure 4 PERFEVTSELx with the magic events and clear
1951         *    the corresponding PMCx;
1952         * 3) Set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
1953         * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
1954         * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
1955         */
1956
1957        /*
1958         * The real steps we choose are a little different from above.
1959         * A) To reduce MSR operations, we don't run step 1) since those MSRs
1960         *    are already cleared before this function is called;
1961         * B) Call x86_perf_event_update to save PMCx before configuring
1962         *    PERFEVTSELx with the magic number;
1963         * C) For step 5), we clear PERFEVTSELx only when it is not
1964         *    currently in use.
1965         * D) Call x86_perf_event_set_period to restore PMCx;
1966         */
1967
1968        /* We always operate on 4 pairs of perf counters */
1969        for (i = 0; i < 4; i++) {
1970                event = cpuc->events[i];
1971                if (event)
1972                        x86_perf_event_update(event);
1973        }
1974
1975        for (i = 0; i < 4; i++) {
1976                wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
1977                wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
1978        }
1979
1980        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
1981        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
1982
1983        for (i = 0; i < 4; i++) {
1984                event = cpuc->events[i];
1985
1986                if (event) {
1987                        x86_perf_event_set_period(event);
1988                        __x86_pmu_enable_event(&event->hw,
1989                                        ARCH_PERFMON_EVENTSEL_ENABLE);
1990                } else
1991                        wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
1992        }
1993}
1994
1995static void intel_pmu_nhm_enable_all(int added)
1996{
1997        if (added)
1998                intel_pmu_nhm_workaround();
1999        intel_pmu_enable_all(added);
2000}
2001
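/*
 * Arch Perfmon v4 counter freezing: with FREEZE_PERFMON_ON_PMI set in
 * IA32_DEBUGCTL the counters freeze when a PMI is raised and resume once the
 * status bits are acked, which is what intel_pmu_handle_irq_v4() below
 * relies on.
 */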
2002static void enable_counter_freeze(void)
2003{
2004        update_debugctlmsr(get_debugctlmsr() |
2005                        DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
2006}
2007
2008static void disable_counter_freeze(void)
2009{
2010        update_debugctlmsr(get_debugctlmsr() &
2011                        ~DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
2012}
2013
2014static inline u64 intel_pmu_get_status(void)
2015{
2016        u64 status;
2017
2018        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
2019
2020        return status;
2021}
2022
2023static inline void intel_pmu_ack_status(u64 ack)
2024{
2025        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2026}
2027
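/*
 * Each fixed counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL:
 * bit 0 enables ring-0 counting, bit 1 ring-3 counting, bit 2 is the ANY
 * (any-thread) bit on v3 and up, and bit 3 enables PMI on overflow.
 * Disabling a fixed counter therefore just clears its nibble.
 */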
2028static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
2029{
2030        int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
2031        u64 ctrl_val, mask;
2032
2033        mask = 0xfULL << (idx * 4);
2034
2035        rdmsrl(hwc->config_base, ctrl_val);
2036        ctrl_val &= ~mask;
2037        wrmsrl(hwc->config_base, ctrl_val);
2038}
2039
2040static inline bool event_is_checkpointed(struct perf_event *event)
2041{
2042        return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2043}
2044
2045static void intel_pmu_disable_event(struct perf_event *event)
2046{
2047        struct hw_perf_event *hwc = &event->hw;
2048        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2049
2050        if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
2051                intel_pmu_disable_bts();
2052                intel_pmu_drain_bts_buffer();
2053                return;
2054        }
2055
2056        cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
2057        cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
2058        cpuc->intel_cp_status &= ~(1ull << hwc->idx);
2059
2060        if (unlikely(event->attr.precise_ip))
2061                intel_pmu_pebs_disable(event);
2062
2063        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
2064                intel_pmu_disable_fixed(hwc);
2065                return;
2066        }
2067
2068        x86_pmu_disable_event(event);
2069}
2070
2071static void intel_pmu_del_event(struct perf_event *event)
2072{
2073        if (needs_branch_stack(event))
2074                intel_pmu_lbr_del(event);
2075        if (event->attr.precise_ip)
2076                intel_pmu_pebs_del(event);
2077}
2078
2079static void intel_pmu_read_event(struct perf_event *event)
2080{
2081        if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2082                intel_pmu_auto_reload_read(event);
2083        else
2084                x86_perf_event_update(event);
2085}
2086
2087static void intel_pmu_enable_fixed(struct perf_event *event)
2088{
2089        struct hw_perf_event *hwc = &event->hw;
2090        int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
2091        u64 ctrl_val, mask, bits = 0;
2092
2093        /*
2094         * Enable IRQ generation (0x8), if not PEBS,
2095         * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
2096         * if requested:
2097         */
2098        if (!event->attr.precise_ip)
2099                bits |= 0x8;
2100        if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2101                bits |= 0x2;
2102        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2103                bits |= 0x1;
2104
2105        /*
2106         * ANY bit is supported in v3 and up
2107         */
2108        if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2109                bits |= 0x4;
2110
2111        bits <<= (idx * 4);
2112        mask = 0xfULL << (idx * 4);
2113
2114        rdmsrl(hwc->config_base, ctrl_val);
2115        ctrl_val &= ~mask;
2116        ctrl_val |= bits;
2117        wrmsrl(hwc->config_base, ctrl_val);
2118}
2119
2120static void intel_pmu_enable_event(struct perf_event *event)
2121{
2122        struct hw_perf_event *hwc = &event->hw;
2123        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2124
2125        if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
2126                if (!__this_cpu_read(cpu_hw_events.enabled))
2127                        return;
2128
2129                intel_pmu_enable_bts(hwc->config);
2130                return;
2131        }
2132
2133        if (event->attr.exclude_host)
2134                cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
2135        if (event->attr.exclude_guest)
2136                cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
2137
2138        if (unlikely(event_is_checkpointed(event)))
2139                cpuc->intel_cp_status |= (1ull << hwc->idx);
2140
2141        if (unlikely(event->attr.precise_ip))
2142                intel_pmu_pebs_enable(event);
2143
2144        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
2145                intel_pmu_enable_fixed(event);
2146                return;
2147        }
2148
2149        __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2150}
2151
2152static void intel_pmu_add_event(struct perf_event *event)
2153{
2154        if (event->attr.precise_ip)
2155                intel_pmu_pebs_add(event);
2156        if (needs_branch_stack(event))
2157                intel_pmu_lbr_add(event);
2158}
2159
2160/*
2161 * Save and restart an expired event. Called by NMI contexts,
2162 * so it has to be careful about preempting normal event ops:
2163 */
2164int intel_pmu_save_and_restart(struct perf_event *event)
2165{
2166        x86_perf_event_update(event);
2167        /*
2168         * For a checkpointed counter always reset back to 0.  This
2169         * avoids a situation where the counter overflows, aborts the
2170         * transaction and is then set back to shortly before the
2171         * overflow, and overflows and aborts again.
2172         */
2173        if (unlikely(event_is_checkpointed(event))) {
2174                /* No race with NMIs because the counter should not be armed */
2175                wrmsrl(event->hw.event_base, 0);
2176                local64_set(&event->hw.prev_count, 0);
2177        }
2178        return x86_perf_event_set_period(event);
2179}
2180
2181static void intel_pmu_reset(void)
2182{
2183        struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2184        unsigned long flags;
2185        int idx;
2186
2187        if (!x86_pmu.num_counters)
2188                return;
2189
2190        local_irq_save(flags);
2191
2192        pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
2193
2194        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
2195                wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
2196                wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
2197        }
2198        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
2199                wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
2200
2201        if (ds)
2202                ds->bts_index = ds->bts_buffer_base;
2203
2204        /* Ack all overflows and disable fixed counters */
2205        if (x86_pmu.version >= 2) {
2206                intel_pmu_ack_status(intel_pmu_get_status());
2207                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2208        }
2209
2210        /* Reset LBRs and LBR freezing */
2211        if (x86_pmu.lbr_nr) {
2212                update_debugctlmsr(get_debugctlmsr() &
2213                        ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
2214        }
2215
2216        local_irq_restore(flags);
2217}
2218
2219static int handle_pmi_common(struct pt_regs *regs, u64 status)
2220{
2221        struct perf_sample_data data;
2222        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2223        int bit;
2224        int handled = 0;
2225
2226        inc_irq_stat(apic_perf_irqs);
2227
2228        /*
2229         * Ignore a range of extra bits in status that do not indicate
2230         * overflow by themselves.
2231         */
2232        status &= ~(GLOBAL_STATUS_COND_CHG |
2233                    GLOBAL_STATUS_ASIF |
2234                    GLOBAL_STATUS_LBRS_FROZEN);
2235        if (!status)
2236                return 0;
2237        /*
2238         * In case multiple PEBS events are sampled at the same time,
2239         * it is possible to have GLOBAL_STATUS bit 62 set indicating
2240         * PEBS buffer overflow and also seeing at most 3 PEBS counters
2241         * having their bits set in the status register. This is a sign
2242         * that there was at least one PEBS record pending at the time
2243         * of the PMU interrupt. PEBS counters must only be processed
2244         * via the drain_pebs() calls and not via the regular sample
2245         * processing loop that follows later in this function; otherwise
2246         * phony regular samples may be generated in the sampling buffer
2247         * not marked with the EXACT tag. Another possibility is to have
2248         * one PEBS event and at least one non-PEBS event which overflows
2249         * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
2250         * not be set, yet the overflow status bit for the PEBS counter will
2251         * still be set on Skylake.
2252         *
2253         * To avoid this problem, we systematically ignore the PEBS-enabled
2254         * counters from the GLOBAL_STATUS mask and we always process PEBS
2255         * events via drain_pebs().
2256         */
2257        if (x86_pmu.flags & PMU_FL_PEBS_ALL)
2258                status &= ~cpuc->pebs_enabled;
2259        else
2260                status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
2261
2262        /*
2263         * PEBS overflow sets bit 62 in the global status register
2264         */
2265        if (__test_and_clear_bit(62, (unsigned long *)&status)) {
2266                handled++;
2267                x86_pmu.drain_pebs(regs);
2268                status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
2269        }
2270
2271        /*
2272         * Intel PT
2273         */
2274        if (__test_and_clear_bit(55, (unsigned long *)&status)) {
2275                handled++;
2276                intel_pt_interrupt();
2277        }
2278
2279        /*
2280         * Checkpointed counters can lead to 'spurious' PMIs because the
2281         * rollback caused by the PMI will have cleared the overflow status
2282         * bit. Therefore always force probe these counters.
2283         */
2284        status |= cpuc->intel_cp_status;
2285
2286        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2287                struct perf_event *event = cpuc->events[bit];
2288
2289                handled++;
2290
2291                if (!test_bit(bit, cpuc->active_mask))
2292                        continue;
2293
2294                if (!intel_pmu_save_and_restart(event))
2295                        continue;
2296
2297                perf_sample_data_init(&data, 0, event->hw.last_period);
2298
2299                if (has_branch_stack(event))
2300                        data.br_stack = &cpuc->lbr_stack;
2301
2302                if (perf_event_overflow(event, &data, regs))
2303                        x86_pmu_stop(event, 0);
2304        }
2305
2306        return handled;
2307}
2308
2309static bool disable_counter_freezing = true;
2310static int __init intel_perf_counter_freezing_setup(char *s)
2311{
2312        bool res;
2313
2314        if (kstrtobool(s, &res))
2315                return -EINVAL;
2316
2317        disable_counter_freezing = !res;
2318        return 1;
2319}
2320__setup("perf_v4_pmi=", intel_perf_counter_freezing_setup);
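/*
 * E.g. "perf_v4_pmi=1" on the command line opts back in to counter freezing
 * (disabled by default above); "perf_v4_pmi=0" keeps it disabled.
 */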
2321
2322/*
2323 * Simplified handler for Arch Perfmon v4:
2324 * - We rely on counter freezing/unfreezing to enable/disable the PMU.
2325 * This is done automatically on PMU ack.
2326 * - Ack the PMU only after the APIC.
2327 */
2328
2329static int intel_pmu_handle_irq_v4(struct pt_regs *regs)
2330{
2331        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2332        int handled = 0;
2333        bool bts = false;
2334        u64 status;
2335        int pmu_enabled = cpuc->enabled;
2336        int loops = 0;
2337
2338        /* PMU has been disabled because of counter freezing */
2339        cpuc->enabled = 0;
2340        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2341                bts = true;
2342                intel_bts_disable_local();
2343                handled = intel_pmu_drain_bts_buffer();
2344                handled += intel_bts_interrupt();
2345        }
2346        status = intel_pmu_get_status();
2347        if (!status)
2348                goto done;
2349again:
2350        intel_pmu_lbr_read();
2351        if (++loops > 100) {
2352                static bool warned;
2353
2354                if (!warned) {
2355                        WARN(1, "perfevents: irq loop stuck!\n");
2356                        perf_event_print_debug();
2357                        warned = true;
2358                }
2359                intel_pmu_reset();
2360                goto done;
2361        }
2362
2363
2364        handled += handle_pmi_common(regs, status);
2365done:
2366        /* Ack the PMI in the APIC */
2367        apic_write(APIC_LVTPC, APIC_DM_NMI);
2368
2369        /*
2370         * The counters start counting immediately once we ack the status.
2371         * Make the ack as close as possible to the IRET. This avoids bogus
2372         * freezing on Skylake CPUs.
2373         */
2374        if (status) {
2375                intel_pmu_ack_status(status);
2376        } else {
2377                /*
2378                 * The CPU may issue two PMIs very close to each other.
2379                 * When the PMI handler services the first one, the
2380                 * GLOBAL_STATUS is already updated to reflect both.
2381                 * When it IRETs, the second PMI is immediately
2382                 * handled and sees a clear status. In the meantime,
2383                 * there may be a third PMI, because the freezing bit
2384                 * hasn't been set again since the ack in the first PMI handler.
2385                 * Double check if there is more work to be done.
2386                 */
2387                status = intel_pmu_get_status();
2388                if (status)
2389                        goto again;
2390        }
2391
2392        if (bts)
2393                intel_bts_enable_local();
2394        cpuc->enabled = pmu_enabled;
2395        return handled;
2396}
2397
2398/*
2399 * This handler is triggered by the local APIC, so the APIC IRQ handling
2400 * rules apply:
2401 */
2402static int intel_pmu_handle_irq(struct pt_regs *regs)
2403{
2404        struct cpu_hw_events *cpuc;
2405        int loops;
2406        u64 status;
2407        int handled;
2408        int pmu_enabled;
2409
2410        cpuc = this_cpu_ptr(&cpu_hw_events);
2411
2412        /*
2413         * Save the PMU state.
2414         * It needs to be restored when leaving the handler.
2415         */
2416        pmu_enabled = cpuc->enabled;
2417        /*
2418         * No known reason to not always do late ACK,
2419         * but just in case do it opt-in.
2420         */
2421        if (!x86_pmu.late_ack)
2422                apic_write(APIC_LVTPC, APIC_DM_NMI);
2423        intel_bts_disable_local();
2424        cpuc->enabled = 0;
2425        __intel_pmu_disable_all();
2426        handled = intel_pmu_drain_bts_buffer();
2427        handled += intel_bts_interrupt();
2428        status = intel_pmu_get_status();
2429        if (!status)
2430                goto done;
2431
2432        loops = 0;
2433again:
2434        intel_pmu_lbr_read();
2435        intel_pmu_ack_status(status);
2436        if (++loops > 100) {
2437                static bool warned;
2438
2439                if (!warned) {
2440                        WARN(1, "perfevents: irq loop stuck!\n");
2441                        perf_event_print_debug();
2442                        warned = true;
2443                }
2444                intel_pmu_reset();
2445                goto done;
2446        }
2447
2448        handled += handle_pmi_common(regs, status);
2449
2450        /*
2451         * Repeat if there is more work to be done:
2452         */
2453        status = intel_pmu_get_status();
2454        if (status)
2455                goto again;
2456
2457done:
2458        /* Only restore PMU state when it's active. See x86_pmu_disable(). */
2459        cpuc->enabled = pmu_enabled;
2460        if (pmu_enabled)
2461                __intel_pmu_enable_all(0, true);
2462        intel_bts_enable_local();
2463
2464        /*
2465         * Only unmask the NMI after the overflow counters
2466         * have been reset. This avoids spurious NMIs on
2467         * Haswell CPUs.
2468         */
2469        if (x86_pmu.late_ack)
2470                apic_write(APIC_LVTPC, APIC_DM_NMI);
2471        return handled;
2472}
2473
2474static struct event_constraint *
2475intel_bts_constraints(struct perf_event *event)
2476{
2477        if (unlikely(intel_pmu_has_bts(event)))
2478                return &bts_constraint;
2479
2480        return NULL;
2481}
2482
2483static int intel_alt_er(int idx, u64 config)
2484{
2485        int alt_idx = idx;
2486
2487        if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
2488                return idx;
2489
2490        if (idx == EXTRA_REG_RSP_0)
2491                alt_idx = EXTRA_REG_RSP_1;
2492
2493        if (idx == EXTRA_REG_RSP_1)
2494                alt_idx = EXTRA_REG_RSP_0;
2495
2496        if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask)
2497                return idx;
2498
2499        return alt_idx;
2500}
2501
2502static void intel_fixup_er(struct perf_event *event, int idx)
2503{
2504        event->hw.extra_reg.idx = idx;
2505
2506        if (idx == EXTRA_REG_RSP_0) {
2507                event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2508                event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
2509                event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
2510        } else if (idx == EXTRA_REG_RSP_1) {
2511                event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2512                event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
2513                event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
2514        }
2515}
2516
2517/*
2518 * manage allocation of shared extra msr for certain events
2519 *
2520 * sharing can be:
2521 * per-cpu: to be shared between the various events on a single PMU
2522 * per-core: per-cpu + shared by HT threads
2523 */
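/*
 * Each shared extra MSR is tracked by an er_account (current config plus a
 * refcount): events programming the same value share it, while a conflicting
 * config is retried on the alternate RSP register via intel_alt_er() before
 * giving up with the empty constraint.
 */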
2524static struct event_constraint *
2525__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
2526                                   struct perf_event *event,
2527                                   struct hw_perf_event_extra *reg)
2528{
2529        struct event_constraint *c = &emptyconstraint;
2530        struct er_account *era;
2531        unsigned long flags;
2532        int idx = reg->idx;
2533
2534        /*
2535         * reg->alloc can be set due to existing state, so for fake cpuc we
2536         * need to ignore this, otherwise we might fail to allocate proper fake
2537         * state for this extra reg constraint. Also see the comment below.
2538         */
2539        if (reg->alloc && !cpuc->is_fake)
2540                return NULL; /* call x86_get_event_constraint() */
2541
2542again:
2543        era = &cpuc->shared_regs->regs[idx];
2544        /*
2545         * we use spin_lock_irqsave() to avoid lockdep issues when
2546         * passing a fake cpuc
2547         */
2548        raw_spin_lock_irqsave(&era->lock, flags);
2549
2550        if (!atomic_read(&era->ref) || era->config == reg->config) {
2551
2552                /*
2553                 * If its a fake cpuc -- as per validate_{group,event}() we
2554                 * shouldn't touch event state and we can avoid doing so
2555                 * since both will only call get_event_constraints() once
2556                 * on each event, this avoids the need for reg->alloc.
2557                 *
2558                 * Not doing the ER fixup will only result in era->reg being
2559                 * wrong, but since we won't actually try and program hardware
2560                 * this isn't a problem either.
2561                 */
2562                if (!cpuc->is_fake) {
2563                        if (idx != reg->idx)
2564                                intel_fixup_er(event, idx);
2565
2566                        /*
2567                         * x86_schedule_events() can call get_event_constraints()
2568                         * multiple times on events in the case of incremental
2569                         * scheduling. reg->alloc ensures we only do the ER
2570                         * allocation once.
2571                         */
2572                        reg->alloc = 1;
2573                }
2574
2575                /* lock in msr value */
2576                era->config = reg->config;
2577                era->reg = reg->reg;
2578
2579                /* one more user */
2580                atomic_inc(&era->ref);
2581
2582                /*
2583                 * need to call x86_get_event_constraint()
2584                 * to check if associated event has constraints
2585                 */
2586                c = NULL;
2587        } else {
2588                idx = intel_alt_er(idx, reg->config);
2589                if (idx != reg->idx) {
2590                        raw_spin_unlock_irqrestore(&era->lock, flags);
2591                        goto again;
2592                }
2593        }
2594        raw_spin_unlock_irqrestore(&era->lock, flags);
2595
2596        return c;
2597}
2598
2599static void
2600__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
2601                                   struct hw_perf_event_extra *reg)
2602{
2603        struct er_account *era;
2604
2605        /*
2606         * Only put the constraint if the extra reg was actually allocated. Also
2607         * takes care of events which do not use an extra shared reg.
2608         *
2609         * Also, if this is a fake cpuc we shouldn't touch any event state
2610         * (reg->alloc) and we don't care about leaving inconsistent cpuc state
2611         * either since it'll be thrown out.
2612         */
2613        if (!reg->alloc || cpuc->is_fake)
2614                return;
2615
2616        era = &cpuc->shared_regs->regs[reg->idx];
2617
2618        /* one fewer user */
2619        atomic_dec(&era->ref);
2620
2621        /* allocate again next time */
2622        reg->alloc = 0;
2623}
2624
2625static struct event_constraint *
2626intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
2627                              struct perf_event *event)
2628{
2629        struct event_constraint *c = NULL, *d;
2630        struct hw_perf_event_extra *xreg, *breg;
2631
2632        xreg = &event->hw.extra_reg;
2633        if (xreg->idx != EXTRA_REG_NONE) {
2634                c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
2635                if (c == &emptyconstraint)
2636                        return c;
2637        }
2638        breg = &event->hw.branch_reg;
2639        if (breg->idx != EXTRA_REG_NONE) {
2640                d = __intel_shared_reg_get_constraints(cpuc, event, breg);
2641                if (d == &emptyconstraint) {
2642                        __intel_shared_reg_put_constraints(cpuc, xreg);
2643                        c = d;
2644                }
2645        }
2646        return c;
2647}
2648
2649struct event_constraint *
2650x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2651                          struct perf_event *event)
2652{
2653        struct event_constraint *c;
2654
2655        if (x86_pmu.event_constraints) {
2656                for_each_event_constraint(c, x86_pmu.event_constraints) {
2657                        if ((event->hw.config & c->cmask) == c->code) {
2658                                event->hw.flags |= c->flags;
2659                                return c;
2660                        }
2661                }
2662        }
2663
2664        return &unconstrained;
2665}
2666
2667static struct event_constraint *
2668__intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2669                            struct perf_event *event)
2670{
2671        struct event_constraint *c;
2672
2673        c = intel_bts_constraints(event);
2674        if (c)
2675                return c;
2676
2677        c = intel_shared_regs_constraints(cpuc, event);
2678        if (c)
2679                return c;
2680
2681        c = intel_pebs_constraints(event);
2682        if (c)
2683                return c;
2684
2685        return x86_get_event_constraints(cpuc, idx, event);
2686}
2687
2688static void
2689intel_start_scheduling(struct cpu_hw_events *cpuc)
2690{
2691        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2692        struct intel_excl_states *xl;
2693        int tid = cpuc->excl_thread_id;
2694
2695        /*
2696         * nothing needed if in group validation mode
2697         */
2698        if (cpuc->is_fake || !is_ht_workaround_enabled())
2699                return;
2700
2701        /*
2702         * no exclusion needed
2703         */
2704        if (WARN_ON_ONCE(!excl_cntrs))
2705                return;
2706
2707        xl = &excl_cntrs->states[tid];
2708
2709        xl->sched_started = true;
2710        /*
2711         * lock shared state until we are done scheduling
2712         * in stop_event_scheduling()
2713         * makes scheduling appear as a transaction
2714         */
2715        raw_spin_lock(&excl_cntrs->lock);
2716}
2717
2718static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2719{
2720        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2721        struct event_constraint *c = cpuc->event_constraint[idx];
2722        struct intel_excl_states *xl;
2723        int tid = cpuc->excl_thread_id;
2724
2725        if (cpuc->is_fake || !is_ht_workaround_enabled())
2726                return;
2727
2728        if (WARN_ON_ONCE(!excl_cntrs))
2729                return;
2730
2731        if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
2732                return;
2733
2734        xl = &excl_cntrs->states[tid];
2735
2736        lockdep_assert_held(&excl_cntrs->lock);
2737
2738        if (c->flags & PERF_X86_EVENT_EXCL)
2739                xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
2740        else
2741                xl->state[cntr] = INTEL_EXCL_SHARED;
2742}
2743
2744static void
2745intel_stop_scheduling(struct cpu_hw_events *cpuc)
2746{
2747        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2748        struct intel_excl_states *xl;
2749        int tid = cpuc->excl_thread_id;
2750
2751        /*
2752         * nothing needed if in group validation mode
2753         */
2754        if (cpuc->is_fake || !is_ht_workaround_enabled())
2755                return;
2756        /*
2757         * no exclusion needed
2758         */
2759        if (WARN_ON_ONCE(!excl_cntrs))
2760                return;
2761
2762        xl = &excl_cntrs->states[tid];
2763
2764        xl->sched_started = false;
2765        /*
2766         * release shared state lock (acquired in intel_start_scheduling())
2767         */
2768        raw_spin_unlock(&excl_cntrs->lock);
2769}
2770
2771static struct event_constraint *
2772intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
2773                           int idx, struct event_constraint *c)
2774{
2775        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2776        struct intel_excl_states *xlo;
2777        int tid = cpuc->excl_thread_id;
2778        int is_excl, i;
2779
2780        /*
2781         * validating a group does not require
2782         * enforcing cross-thread exclusion
2783         */
2784        if (cpuc->is_fake || !is_ht_workaround_enabled())
2785                return c;
2786
2787        /*
2788         * no exclusion needed
2789         */
2790        if (WARN_ON_ONCE(!excl_cntrs))
2791                return c;
2792
2793        /*
2794         * because we modify the constraint, we need
2795         * to make a copy. Static constraints come
2796         * from static const tables.
2797         *
2798         * only needed when constraint has not yet
2799         * been cloned (marked dynamic)
2800         */
2801        if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
2802                struct event_constraint *cx;
2803
2804                /*
2805                 * grab pre-allocated constraint entry
2806                 */
2807                cx = &cpuc->constraint_list[idx];
2808
2809                /*
2810                 * initialize dynamic constraint
2811                 * with static constraint
2812                 */
2813                *cx = *c;
2814
2815                /*
2816                 * mark constraint as dynamic, so we
2817                 * can free it later on
2818                 */
2819                cx->flags |= PERF_X86_EVENT_DYNAMIC;
2820                c = cx;
2821        }
2822
2823        /*
2824         * From here on, the constraint is dynamic.
2825         * Either it was just allocated above, or it
2826         * was allocated during an earlier invocation
2827         * of this function.
2828         */
2829
2830        /*
2831         * state of sibling HT
2832         */
2833        xlo = &excl_cntrs->states[tid ^ 1];
2834
2835        /*
2836         * event requires exclusive counter access
2837         * across HT threads
2838         */
2839        is_excl = c->flags & PERF_X86_EVENT_EXCL;
2840        if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
2841                event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
2842                if (!cpuc->n_excl++)
2843                        WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
2844        }
2845
2846        /*
2847         * Modify static constraint with current dynamic
2848         * state of thread
2849         *
2850         * EXCLUSIVE: sibling counter measuring exclusive event
2851         * SHARED   : sibling counter measuring non-exclusive event
2852         * UNUSED   : sibling counter unused
2853         */
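            /*
             * For example: if the sibling thread holds an exclusive event
             * on counter 2 (xlo->state[2] == INTEL_EXCL_EXCLUSIVE), bit 2
             * is cleared from c->idxmsk below and this event can never be
             * scheduled on counter 2.
             */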
2854        for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
2855                /*
2856                 * exclusive event in sibling counter
2857                 * our corresponding counter cannot be used
2858                 * regardless of our event
2859                 */
2860                if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE)
2861                        __clear_bit(i, c->idxmsk);
2862                /*
2863                 * if we are measuring an exclusive event and the
2864                 * sibling is measuring a non-exclusive one, then
2865                 * the counter cannot be used
2866                 */
2867                if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED)
2868                        __clear_bit(i, c->idxmsk);
2869        }
2870
2871        /*
2872         * recompute actual bit weight for scheduling algorithm
2873         */
2874        c->weight = hweight64(c->idxmsk64);
2875
2876        /*
2877         * if we return an empty mask, then switch
2878         * back to static empty constraint to avoid
2879         * the cost of freeing later on
2880         */
2881        if (c->weight == 0)
2882                c = &emptyconstraint;
2883
2884        return c;
2885}
2886
2887static struct event_constraint *
2888intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2889                            struct perf_event *event)
2890{
2891        struct event_constraint *c1 = NULL;
2892        struct event_constraint *c2;
2893
2894        if (idx >= 0) /* fake does < 0 */
2895                c1 = cpuc->event_constraint[idx];
2896
2897        /*
2898         * first time only
2899         * - static constraint: no change across incremental scheduling calls
2900         * - dynamic constraint: handled by intel_get_excl_constraints()
2901         */
2902        c2 = __intel_get_event_constraints(cpuc, idx, event);
2903        if (c1 && (c1->flags & PERF_X86_EVENT_DYNAMIC)) {
2904                bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
2905                c1->weight = c2->weight;
2906                c2 = c1;
2907        }
2908
2909        if (cpuc->excl_cntrs)
2910                return intel_get_excl_constraints(cpuc, event, idx, c2);
2911
2912        return c2;
2913}
2914
2915static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
2916                struct perf_event *event)
2917{
2918        struct hw_perf_event *hwc = &event->hw;
2919        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2920        int tid = cpuc->excl_thread_id;
2921        struct intel_excl_states *xl;
2922
2923        /*
2924         * nothing needed if in group validation mode
2925         */
2926        if (cpuc->is_fake)
2927                return;
2928
2929        if (WARN_ON_ONCE(!excl_cntrs))
2930                return;
2931
2932        if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
2933                hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
2934                if (!--cpuc->n_excl)
2935                        WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
2936        }
2937
2938        /*
2939         * If the event was actually assigned, then mark the counter state as
2940         * unused now.
2941         */
2942        if (hwc->idx >= 0) {
2943                xl = &excl_cntrs->states[tid];
2944
2945                /*
2946                 * put_constraint may be called from x86_schedule_events(),
2947                 * which already holds the lock, so make locking
2948                 * conditional here.
2949                 */
2950                if (!xl->sched_started)
2951                        raw_spin_lock(&excl_cntrs->lock);
2952
2953                xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
2954
2955                if (!xl->sched_started)
2956                        raw_spin_unlock(&excl_cntrs->lock);
2957        }
2958}
2959
2960static void
2961intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
2962                                        struct perf_event *event)
2963{
2964        struct hw_perf_event_extra *reg;
2965
2966        reg = &event->hw.extra_reg;
2967        if (reg->idx != EXTRA_REG_NONE)
2968                __intel_shared_reg_put_constraints(cpuc, reg);
2969
2970        reg = &event->hw.branch_reg;
2971        if (reg->idx != EXTRA_REG_NONE)
2972                __intel_shared_reg_put_constraints(cpuc, reg);
2973}
2974
2975static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
2976                                        struct perf_event *event)
2977{
2978        intel_put_shared_regs_event_constraints(cpuc, event);
2979
2980        /*
2981         * If the PMU has exclusive counter restrictions, then
2982         * all events are subject to them and must call the
2983         * put_excl_constraints() routine.
2984         */
2985        if (cpuc->excl_cntrs)
2986                intel_put_excl_constraints(cpuc, event);
2987}
2988
2989static void intel_pebs_aliases_core2(struct perf_event *event)
2990{
2991        if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
2992                /*
2993                 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
2994                 * (0x003c) so that we can use it with PEBS.
2995                 *
2996                 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
2997                 * PEBS capable. However we can use INST_RETIRED.ANY_P
2998                 * (0x00c0), which is a PEBS capable event, to get the same
2999                 * count.
3000                 *
3001                 * INST_RETIRED.ANY_P counts the number of cycles that retire
3002                 * at least CNTMASK instructions. By setting CNTMASK to a value
3003                 * (16) larger than the maximum number of instructions that can
3004                 * be retired per cycle (4) and then inverting the condition, we
3005                 * count all cycles that retire fewer than 16 instructions, which
3006                 * is every cycle.
3007                 *
3008                 * Thereby we gain a PEBS capable cycle counter.
3009                 */
3010                u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
3011
3012                alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3013                event->hw.config = alt_config;
3014        }
3015}
3016
3017static void intel_pebs_aliases_snb(struct perf_event *event)
3018{
3019        if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3020                /*
3021                 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3022                 * (0x003c) so that we can use it with PEBS.
3023                 *
3024                 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3025                 * PEBS capable. However we can use UOPS_RETIRED.ALL
3026                 * (0x01c2), which is a PEBS capable event, to get the same
3027                 * count.
3028                 *
3029                 * UOPS_RETIRED.ALL counts the number of cycles that retire
3030                 * at least CNTMASK micro-ops. By setting CNTMASK to a value
3031                 * (16) larger than the maximum number of micro-ops that can
3032                 * be retired per cycle (4) and then inverting the condition, we
3033                 * count all cycles that retire fewer than 16 micro-ops, which
3034                 * is every cycle.
3035                 *
3036                 * Thereby we gain a PEBS capable cycle counter.
3037                 */
3038                u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
3039
3040                alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3041                event->hw.config = alt_config;
3042        }
3043}
3044
3045static void intel_pebs_aliases_precdist(struct perf_event *event)
3046{
3047        if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3048                /*
3049                 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3050                 * (0x003c) so that we can use it with PEBS.
3051                 *
3052                 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3053                 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
3054                 * (0x01c0), which is a PEBS capable event, to get the same
3055                 * count.
3056                 *
3057                 * The PREC_DIST event has special support to minimize sample
3058                 * shadowing effects. One drawback is that it can only be
3059                 * programmed on counter 1, but that seems like an
3060                 * acceptable trade-off.
3061                 */
3062                u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
3063
3064                alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3065                event->hw.config = alt_config;
3066        }
3067}
3068
3069static void intel_pebs_aliases_ivb(struct perf_event *event)
3070{
3071        if (event->attr.precise_ip < 3)
3072                return intel_pebs_aliases_snb(event);
3073        return intel_pebs_aliases_precdist(event);
3074}
3075
3076static void intel_pebs_aliases_skl(struct perf_event *event)
3077{
3078        if (event->attr.precise_ip < 3)
3079                return intel_pebs_aliases_core2(event);
3080        return intel_pebs_aliases_precdist(event);
3081}
3082
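    /*
     * Compute which sample_type bits remain compatible with multi-entry
     * ("large") PEBS for this event: intel_pmu_hw_config() sets
     * PERF_X86_EVENT_LARGE_PEBS only when the event's sample_type requests
     * nothing outside the mask returned here.
     */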
3083static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3084{
3085        unsigned long flags = x86_pmu.large_pebs_flags;
3086
3087        if (event->attr.use_clockid)
3088                flags &= ~PERF_SAMPLE_TIME;
3089        if (!event->attr.exclude_kernel)
3090                flags &= ~PERF_SAMPLE_REGS_USER;
3091        if (event->attr.sample_regs_user & ~PEBS_REGS)
3092                flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
3093        return flags;
3094}
3095
3096static int intel_pmu_bts_config(struct perf_event *event)
3097{
3098        struct perf_event_attr *attr = &event->attr;
3099
3100        if (unlikely(intel_pmu_has_bts(event))) {
3101                /* BTS is not supported by this architecture. */
3102                if (!x86_pmu.bts_active)
3103                        return -EOPNOTSUPP;
3104
3105                /* BTS is currently only allowed for user-mode. */
3106                if (!attr->exclude_kernel)
3107                        return -EOPNOTSUPP;
3108
3109                /* BTS is not allowed for precise events. */
3110                if (attr->precise_ip)
3111                        return -EOPNOTSUPP;
3112
3113                /* disallow bts if conflicting events are present */
3114                if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3115                        return -EBUSY;
3116
3117                event->destroy = hw_perf_lbr_event_destroy;
3118        }
3119
3120        return 0;
3121}
3122
3123static int core_pmu_hw_config(struct perf_event *event)
3124{
3125        int ret = x86_pmu_hw_config(event);
3126
3127        if (ret)
3128                return ret;
3129
3130        return intel_pmu_bts_config(event);
3131}
3132
3133static int intel_pmu_hw_config(struct perf_event *event)
3134{
3135        int ret = x86_pmu_hw_config(event);
3136
3137        if (ret)
3138                return ret;
3139
3140        ret = intel_pmu_bts_config(event);
3141        if (ret)
3142                return ret;
3143
3144        if (event->attr.precise_ip) {
3145                if (!event->attr.freq) {
3146                        event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
3147                        if (!(event->attr.sample_type &
3148                              ~intel_pmu_large_pebs_flags(event)))
3149                                event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
3150                }
3151                if (x86_pmu.pebs_aliases)
3152                        x86_pmu.pebs_aliases(event);
3153
3154                if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3155                        event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
3156        }
3157
3158        if (needs_branch_stack(event)) {
3159                ret = intel_pmu_setup_lbr_filter(event);
3160                if (ret)
3161                        return ret;
3162
3163                /*
3164                 * BTS is set up earlier in this path, so don't account twice
3165                 */
3166                if (!unlikely(intel_pmu_has_bts(event))) {
3167                        /* disallow lbr if conflicting events are present */
3168                        if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3169                                return -EBUSY;
3170
3171                        event->destroy = hw_perf_lbr_event_destroy;
3172                }
3173        }
3174
3175        if (event->attr.type != PERF_TYPE_RAW)
3176                return 0;
3177
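            /*
             * The remaining checks gate the AnyThread bit for raw events:
             * it needs architectural perfmon v3+ and, under perf_paranoid_cpu(),
             * CAP_SYS_ADMIN, before being propagated into the hardware config.
             */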
3178        if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
3179                return 0;
3180
3181        if (x86_pmu.version < 3)
3182                return -EINVAL;
3183
3184        if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
3185                return -EACCES;
3186
3187        event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
3188
3189        return 0;
3190}
3191
3192struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
3193{
3194        if (x86_pmu.guest_get_msrs)
3195                return x86_pmu.guest_get_msrs(nr);
3196        *nr = 0;
3197        return NULL;
3198}
3199EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
3200
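    /*
     * Build the MSR switch list a hypervisor (e.g. KVM) applies around
     * VM-entry/exit: GLOBAL_CTRL is split so that counters in
     * intel_ctrl_guest_mask are kept off while the host runs and counters
     * in intel_ctrl_host_mask are kept off while the guest runs; PEBS is
     * disabled for the guest entirely (see the comment below).
     */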
3201static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
3202{
3203        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3204        struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3205
3206        arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
3207        arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
3208        arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
3209        /*
3210         * If a PMU counter has PEBS enabled, it is not enough to disable the
3211         * counter on guest entry, since a PEBS memory write can overshoot the
3212         * guest entry and corrupt guest memory. Disabling PEBS solves the problem.
3213         */
3214        arr[1].msr = MSR_IA32_PEBS_ENABLE;
3215        arr[1].host = cpuc->pebs_enabled;
3216        arr[1].guest = 0;
3217
3218        *nr = 2;
3219        return arr;
3220}
3221
3222static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
3223{
3224        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3225        struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3226        int idx;
3227
3228        for (idx = 0; idx < x86_pmu.num_counters; idx++)  {
3229                struct perf_event *event = cpuc->events[idx];
3230
3231                arr[idx].msr = x86_pmu_config_addr(idx);
3232                arr[idx].host = arr[idx].guest = 0;
3233
3234                if (!test_bit(idx, cpuc->active_mask))
3235                        continue;
3236
3237                arr[idx].host = arr[idx].guest =
3238                        event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
3239
3240                if (event->attr.exclude_host)
3241                        arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3242                else if (event->attr.exclude_guest)
3243                        arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3244        }
3245
3246        *nr = x86_pmu.num_counters;
3247        return arr;
3248}
3249
3250static void core_pmu_enable_event(struct perf_event *event)
3251{
3252        if (!event->attr.exclude_host)
3253                x86_pmu_enable_event(event);
3254}
3255
3256static void core_pmu_enable_all(int added)
3257{
3258        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3259        int idx;
3260
3261        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3262                struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
3263
3264                if (!test_bit(idx, cpuc->active_mask) ||
3265                                cpuc->events[idx]->attr.exclude_host)
3266                        continue;
3267
3268                __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
3269        }
3270}
3271
3272static int hsw_hw_config(struct perf_event *event)
3273{
3274        int ret = intel_pmu_hw_config(event);
3275
3276        if (ret)
3277                return ret;
3278        if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
3279                return 0;
3280        event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
3281
3282        /*
3283         * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
3284         * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
3285         * this combination.
3286         */
3287        if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
3288             ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
3289              event->attr.precise_ip > 0))
3290                return -EOPNOTSUPP;
3291
3292        if (event_is_checkpointed(event)) {
3293                /*
3294                 * Sampling of checkpointed events can cause situations where
3295                 * the CPU constantly aborts because of an overflow, which is
3296                 * then checkpointed back and ignored. Forbid checkpointing
3297                 * for sampling.
3298                 *
3299                 * But still allow a long sampling period, so that perf stat
3300                 * from KVM works.
3301                 */
3302                if (event->attr.sample_period > 0 &&
3303                    event->attr.sample_period < 0x7fffffff)
3304                        return -EOPNOTSUPP;
3305        }
3306        return 0;
3307}
3308
3309static struct event_constraint counter0_constraint =
3310                        INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
3311
3312static struct event_constraint counter2_constraint =
3313                        EVENT_CONSTRAINT(0, 0x4, 0);
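    /*
     * counter0_constraint (mask 0x1) pins an event to PMC0 and is used by
     * glp_get_event_constraints() for reduced-skid PEBS; counter2_constraint
     * (mask 0x4) pins an event to PMC2 for the HSW_IN_TX_CHECKPOINTED quirk
     * in hsw_get_event_constraints().
     */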
3314
3315static struct event_constraint *
3316hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3317                          struct perf_event *event)
3318{
3319        struct event_constraint *c;
3320
3321        c = intel_get_event_constraints(cpuc, idx, event);
3322
3323        /* Handle special quirk on in_tx_checkpointed only in counter 2 */
3324        if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
3325                if (c->idxmsk64 & (1U << 2))
3326                        return &counter2_constraint;
3327                return &emptyconstraint;
3328        }
3329
3330        return c;
3331}
3332
3333static struct event_constraint *
3334glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3335                          struct perf_event *event)
3336{
3337        struct event_constraint *c;
3338
3339        /* :ppp means to do reduced-skid PEBS, which is PMC0 only. */
3340        if (event->attr.precise_ip == 3)
3341                return &counter0_constraint;
3342
3343        c = intel_get_event_constraints(cpuc, idx, event);
3344
3345        return c;
3346}
3347
3348/*
3349 * Broadwell:
3350 *
3351 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
3352 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
3353 * the two to enforce a minimum period of 128 (the smallest value that has bits
3354 * 0-5 cleared and >= 100).
3355 *
3356 * Because of how the code in x86_perf_event_set_period() works, the truncation
3357 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
3358 * to make up for the 'lost' events due to carrying the 'error' in period_left.
3359 *
3360 * Therefore the effective (average) period matches the requested period,
3361 * despite coarser hardware granularity.
3362 */
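    /*
     * For example, for an INST_RETIRED.ALL event: a requested period of 100
     * is first raised to 128, which the mask leaves unchanged; a requested
     * period of 200 is masked down to 200 & ~0x3f = 192, the truncated
     * counts being made up later via period_left as described above.
     */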
3363static u64 bdw_limit_period(struct perf_event *event, u64 left)
3364{
3365        if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
3366                        X86_CONFIG(.event=0xc0, .umask=0x01)) {
3367                if (left < 128)
3368                        left = 128;
3369                left &= ~0x3fULL;
3370        }
3371        return left;
3372}
3373
3374PMU_FORMAT_ATTR(event,  "config:0-7"    );
3375PMU_FORMAT_ATTR(umask,  "config:8-15"   );
3376PMU_FORMAT_ATTR(edge,   "config:18"     );
3377PMU_FORMAT_ATTR(pc,     "config:19"     );
3378PMU_FORMAT_ATTR(any,    "config:21"     ); /* v3 + */
3379PMU_FORMAT_ATTR(inv,    "config:23"     );
3380PMU_FORMAT_ATTR(cmask,  "config:24-31"  );
3381PMU_FORMAT_ATTR(in_tx,  "config:32");
3382PMU_FORMAT_ATTR(in_tx_cp, "config:33");
3383
3384static struct attribute *intel_arch_formats_attr[] = {
3385        &format_attr_event.attr,
3386        &format_attr_umask.attr,
3387        &format_attr_edge.attr,
3388        &format_attr_pc.attr,
3389        &format_attr_inv.attr,
3390        &format_attr_cmask.attr,
3391        NULL,
3392};
3393
3394ssize_t intel_event_sysfs_show(char *page, u64 config)
3395{
3396        u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
3397
3398        return x86_event_sysfs_show(page, config, event);
3399}
3400
3401struct intel_shared_regs *allocate_shared_regs(int cpu)
3402{
3403        struct intel_shared_regs *regs;
3404        int i;
3405
3406        regs = kzalloc_node(sizeof(struct intel_shared_regs),
3407                            GFP_KERNEL, cpu_to_node(cpu));
3408        if (regs) {
3409                /*
3410                 * initialize the locks to keep lockdep happy
3411                 */
3412                for (i = 0; i < EXTRA_REG_MAX; i++)
3413                        raw_spin_lock_init(&regs->regs[i].lock);
3414
3415                regs->core_id = -1;
3416        }
3417        return regs;
3418}
3419
3420static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
3421{
3422        struct intel_excl_cntrs *c;
3423
3424        c = kzalloc_node(sizeof(struct intel_excl_cntrs),
3425                         GFP_KERNEL, cpu_to_node(cpu));
3426        if (c) {
3427                raw_spin_lock_init(&c->lock);
3428                c->core_id = -1;
3429        }
3430        return c;
3431}
3432
3433static int intel_pmu_cpu_prepare(int cpu)
3434{
3435        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
3436
3437        if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
3438                cpuc->shared_regs = allocate_shared_regs(cpu);
3439                if (!cpuc->shared_regs)
3440                        goto err;
3441        }
3442
3443        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3444                size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
3445
3446                cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
3447                if (!cpuc->constraint_list)
3448                        goto err_shared_regs;
3449
3450                cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
3451                if (!cpuc->excl_cntrs)
3452                        goto err_constraint_list;
3453
3454                cpuc->excl_thread_id = 0;
3455        }
3456
3457        return 0;
3458
3459err_constraint_list:
3460        kfree(cpuc->constraint_list);
3461        cpuc->constraint_list = NULL;
3462
3463err_shared_regs:
3464        kfree(cpuc->shared_regs);
3465        cpuc->shared_regs = NULL;
3466
3467err:
3468        return -ENOMEM;
3469}
3470
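    /*
     * Set or clear FREEZE_IN_SMM in MSR_IA32_DEBUGCTLMSR on the local CPU.
     * Run from intel_pmu_cpu_starting() and, via on_each_cpu(), from the
     * freeze_on_smi sysfs store further below.
     */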
3471static void flip_smm_bit(void *data)
3472{
3473        unsigned long set = *(unsigned long *)data;
3474
3475        if (set > 0) {
3476                msr_set_bit(MSR_IA32_DEBUGCTLMSR,
3477                            DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3478        } else {
3479                msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
3480                              DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3481        }
3482}
3483
3484static void intel_pmu_cpu_starting(int cpu)
3485{
3486        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
3487        int core_id = topology_core_id(cpu);
3488        int i;
3489
3490        init_debug_store_on_cpu(cpu);
3491        /*
3492         * Deal with CPUs that don't clear their LBRs on power-up.
3493         */
3494        intel_pmu_lbr_reset();
3495
3496        cpuc->lbr_sel = NULL;
3497
3498        if (x86_pmu.version > 1)
3499                flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
3500
3501        if (x86_pmu.counter_freezing)
3502                enable_counter_freeze();
3503
3504        if (!cpuc->shared_regs)
3505                return;
3506
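            /*
             * Share one intel_shared_regs instance per physical core: if a
             * sibling CPU on the same core already allocated one, adopt it
             * and queue our own copy for freeing via kfree_on_online once
             * this CPU is fully online.
             */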
3507        if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
3508                for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3509                        struct intel_shared_regs *pc;
3510
3511                        pc = per_cpu(cpu_hw_events, i).shared_regs;
3512                        if (pc && pc->core_id == core_id) {
3513                                cpuc->kfree_on_online[0] = cpuc->shared_regs;
3514                                cpuc->shared_regs = pc;
3515                                break;
3516                        }
3517                }
3518                cpuc->shared_regs->core_id = core_id;
3519                cpuc->shared_regs->refcnt++;
3520        }
3521
3522        if (x86_pmu.lbr_sel_map)
3523                cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
3524
3525        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3526                for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3527                        struct cpu_hw_events *sibling;
3528                        struct intel_excl_cntrs *c;
3529
3530                        sibling = &per_cpu(cpu_hw_events, i);
3531                        c = sibling->excl_cntrs;
3532                        if (c && c->core_id == core_id) {
3533                                cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
3534                                cpuc->excl_cntrs = c;
3535                                if (!sibling->excl_thread_id)
3536                                        cpuc->excl_thread_id = 1;
3537                                break;
3538                        }
3539                }
3540                cpuc->excl_cntrs->core_id = core_id;
3541                cpuc->excl_cntrs->refcnt++;
3542        }
3543}
3544
3545static void free_excl_cntrs(int cpu)
3546{
3547        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
3548        struct intel_excl_cntrs *c;
3549
3550        c = cpuc->excl_cntrs;
3551        if (c) {
3552                if (c->core_id == -1 || --c->refcnt == 0)
3553                        kfree(c);
3554                cpuc->excl_cntrs = NULL;
3555                kfree(cpuc->constraint_list);
3556                cpuc->constraint_list = NULL;
3557        }
3558}
3559
3560static void intel_pmu_cpu_dying(int cpu)
3561{
3562        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
3563        struct intel_shared_regs *pc;
3564
3565        pc = cpuc->shared_regs;
3566        if (pc) {
3567                if (pc->core_id == -1 || --pc->refcnt == 0)
3568                        kfree(pc);
3569                cpuc->shared_regs = NULL;
3570        }
3571
3572        free_excl_cntrs(cpu);
3573
3574        fini_debug_store_on_cpu(cpu);
3575
3576        if (x86_pmu.counter_freezing)
3577                disable_counter_freeze();
3578}
3579
3580static void intel_pmu_sched_task(struct perf_event_context *ctx,
3581                                 bool sched_in)
3582{
3583        intel_pmu_pebs_sched_task(ctx, sched_in);
3584        intel_pmu_lbr_sched_task(ctx, sched_in);
3585}
3586
3587PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
3588
3589PMU_FORMAT_ATTR(ldlat, "config1:0-15");
3590
3591PMU_FORMAT_ATTR(frontend, "config1:0-23");
3592
3593static struct attribute *intel_arch3_formats_attr[] = {
3594        &format_attr_event.attr,
3595        &format_attr_umask.attr,
3596        &format_attr_edge.attr,
3597        &format_attr_pc.attr,
3598        &format_attr_any.attr,
3599        &format_attr_inv.attr,
3600        &format_attr_cmask.attr,
3601        NULL,
3602};
3603
3604static struct attribute *hsw_format_attr[] = {
3605        &format_attr_in_tx.attr,
3606        &format_attr_in_tx_cp.attr,
3607        &format_attr_offcore_rsp.attr,
3608        &format_attr_ldlat.attr,
3609        NULL
3610};
3611
3612static struct attribute *nhm_format_attr[] = {
3613        &format_attr_offcore_rsp.attr,
3614        &format_attr_ldlat.attr,
3615        NULL
3616};
3617
3618static struct attribute *slm_format_attr[] = {
3619        &format_attr_offcore_rsp.attr,
3620        NULL
3621};
3622
3623static struct attribute *skl_format_attr[] = {
3624        &format_attr_frontend.attr,
3625        NULL,
3626};
3627
3628static __initconst const struct x86_pmu core_pmu = {
3629        .name                   = "core",
3630        .handle_irq             = x86_pmu_handle_irq,
3631        .disable_all            = x86_pmu_disable_all,
3632        .enable_all             = core_pmu_enable_all,
3633        .enable                 = core_pmu_enable_event,
3634        .disable                = x86_pmu_disable_event,
3635        .hw_config              = core_pmu_hw_config,
3636        .schedule_events        = x86_schedule_events,
3637        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
3638        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
3639        .event_map              = intel_pmu_event_map,
3640        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
3641        .apic                   = 1,
3642        .large_pebs_flags       = LARGE_PEBS_FLAGS,
3643
3644        /*
3645         * Intel PMCs cannot be accessed sanely above 32-bit width,
3646         * so we install an artificial 1<<31 period regardless of
3647         * the generic event period:
3648         */
3649        .max_period             = (1ULL<<31) - 1,
3650        .get_event_constraints  = intel_get_event_constraints,
3651        .put_event_constraints  = intel_put_event_constraints,
3652        .event_constraints      = intel_core_event_constraints,
3653        .guest_get_msrs         = core_guest_get_msrs,
3654        .format_attrs           = intel_arch_formats_attr,
3655        .events_sysfs_show      = intel_event_sysfs_show,
3656
3657        /*
3658         * A virtual (or funny metal) CPU can define x86_pmu.extra_regs
3659         * together with PMU version 1 and thus be using core_pmu with
3660         * shared_regs. We need the following callbacks here to allocate
3661         * it properly.
3662         */
3663        .cpu_prepare            = intel_pmu_cpu_prepare,
3664        .cpu_starting           = intel_pmu_cpu_starting,
3665        .cpu_dying              = intel_pmu_cpu_dying,
3666};
3667
3668static struct attribute *intel_pmu_attrs[];
3669
3670static __initconst const struct x86_pmu intel_pmu = {
3671        .name                   = "Intel",
3672        .handle_irq             = intel_pmu_handle_irq,
3673        .disable_all            = intel_pmu_disable_all,
3674        .enable_all             = intel_pmu_enable_all,
3675        .enable                 = intel_pmu_enable_event,
3676        .disable                = intel_pmu_disable_event,
3677        .add                    = intel_pmu_add_event,
3678        .del                    = intel_pmu_del_event,
3679        .read                   = intel_pmu_read_event,
3680        .hw_config              = intel_pmu_hw_config,
3681        .schedule_events        = x86_schedule_events,
3682        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
3683        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
3684        .event_map              = intel_pmu_event_map,
3685        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
3686        .apic                   = 1,
3687        .large_pebs_flags       = LARGE_PEBS_FLAGS,
3688        /*
3689         * Intel PMCs cannot be accessed sanely above 32-bit width,
3690         * so we install an artificial 1<<31 period regardless of
3691         * the generic event period:
3692         */
3693        .max_period             = (1ULL << 31) - 1,
3694        .get_event_constraints  = intel_get_event_constraints,
3695        .put_event_constraints  = intel_put_event_constraints,
3696        .pebs_aliases           = intel_pebs_aliases_core2,
3697
3698        .format_attrs           = intel_arch3_formats_attr,
3699        .events_sysfs_show      = intel_event_sysfs_show,
3700
3701        .attrs                  = intel_pmu_attrs,
3702
3703        .cpu_prepare            = intel_pmu_cpu_prepare,
3704        .cpu_starting           = intel_pmu_cpu_starting,
3705        .cpu_dying              = intel_pmu_cpu_dying,
3706        .guest_get_msrs         = intel_guest_get_msrs,
3707        .sched_task             = intel_pmu_sched_task,
3708};
3709
3710static __init void intel_clovertown_quirk(void)
3711{
3712        /*
3713         * PEBS is unreliable due to:
3714         *
3715         *   AJ67  - PEBS may experience CPL leaks
3716         *   AJ68  - PEBS PMI may be delayed by one event
3717         *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
3718         *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
3719         *
3720         * AJ67 could be worked around by restricting the OS/USR flags.
3721         * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
3722         *
3723         * AJ106 could possibly be worked around by not allowing LBR
3724         *       usage from PEBS, including the fixup.
3725         * AJ68  could possibly be worked around by always programming
3726         *       a pebs_event_reset[0] value and coping with the lost events.
3727         *
3728         * But taken together it might just make sense to not enable PEBS on
3729         * these chips.
3730         */
3731        pr_warn("PEBS disabled due to CPU errata\n");
3732        x86_pmu.pebs = 0;
3733        x86_pmu.pebs_constraints = NULL;
3734}
3735
3736static int intel_snb_pebs_broken(int cpu)
3737{
3738        u32 rev = UINT_MAX; /* default to broken for unknown models */
3739
3740        switch (cpu_data(cpu).x86_model) {
3741        case INTEL_FAM6_SANDYBRIDGE:
3742                rev = 0x28;
3743                break;
3744
3745        case INTEL_FAM6_SANDYBRIDGE_X:
3746                switch (cpu_data(cpu).x86_stepping) {
3747                case 6: rev = 0x618; break;
3748                case 7: rev = 0x70c; break;
3749                }
3750        }
3751
3752        return (cpu_data(cpu).microcode < rev);
3753}
3754
3755static void intel_snb_check_microcode(void)
3756{
3757        int pebs_broken = 0;
3758        int cpu;
3759
3760        for_each_online_cpu(cpu) {
3761                if ((pebs_broken = intel_snb_pebs_broken(cpu)))
3762                        break;
3763        }
3764
3765        if (pebs_broken == x86_pmu.pebs_broken)
3766                return;
3767
3768        /*
3769         * Serialized by the microcode lock.
3770         */
3771        if (x86_pmu.pebs_broken) {
3772                pr_info("PEBS enabled due to microcode update\n");
3773                x86_pmu.pebs_broken = 0;
3774        } else {
3775                pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
3776                x86_pmu.pebs_broken = 1;
3777        }
3778}
3779
3780static bool is_lbr_from(unsigned long msr)
3781{
3782        unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
3783
3784        return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
3785}
3786
3787/*
3788 * Under certain circumstances, accessing certain MSRs may cause a #GP.
3789 * This function tests whether the input MSR can be safely accessed.
3790 */
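    /*
     * Sketch of the probe below: read the old value, write it back with
     * @mask flipped, re-read and compare, then restore the original value.
     * A mismatch means the MSR is faked or ignored (e.g. by an emulator)
     * and is reported as unsafe.
     */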
3791static bool check_msr(unsigned long msr, u64 mask)
3792{
3793        u64 val_old, val_new, val_tmp;
3794
3795        /*
3796         * Read the current value, change it and read it back to see if it
3797         * matches; this is needed to detect certain hardware emulators
3798         * (qemu/kvm) that don't trap on the MSR access and always return 0s.
3799         */
3800        if (rdmsrl_safe(msr, &val_old))
3801                return false;
3802
3803        /*
3804         * Only change the bits which can be updated by wrmsrl.
3805         */
3806        val_tmp = val_old ^ mask;
3807
3808        if (is_lbr_from(msr))
3809                val_tmp = lbr_from_signext_quirk_wr(val_tmp);
3810
3811        if (wrmsrl_safe(msr, val_tmp) ||
3812            rdmsrl_safe(msr, &val_new))
3813                return false;
3814
3815        /*
3816         * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
3817         * should equal rdmsrl()'s even with the quirk.
3818         */
3819        if (val_new != val_tmp)
3820                return false;
3821
3822        if (is_lbr_from(msr))
3823                val_old = lbr_from_signext_quirk_wr(val_old);
3824
3825        /* At this point the MSR is known to be safely accessible.
3826         * Restore the old value and return.
3827         */
3828        wrmsrl(msr, val_old);
3829
3830        return true;
3831}
3832
3833static __init void intel_sandybridge_quirk(void)
3834{
3835        x86_pmu.check_microcode = intel_snb_check_microcode;
3836        cpus_read_lock();
3837        intel_snb_check_microcode();
3838        cpus_read_unlock();
3839}
3840
3841static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
3842        { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
3843        { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
3844        { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
3845        { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
3846        { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
3847        { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
3848        { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
3849};
3850
3851static __init void intel_arch_events_quirk(void)
3852{
3853        int bit;
3854
3855        /* disable events that are reported as not present by CPUID */
3856        for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
3857                intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
3858                pr_warn("CPUID marked event: \'%s\' unavailable\n",
3859                        intel_arch_events_map[bit].name);
3860        }
3861}
3862
3863static __init void intel_nehalem_quirk(void)
3864{
3865        union cpuid10_ebx ebx;
3866
3867        ebx.full = x86_pmu.events_maskl;
3868        if (ebx.split.no_branch_misses_retired) {
3869                /*
3870                 * Erratum AAJ80 detected, we work it around by using
3871                 * the BR_MISP_EXEC.ANY event. This will over-count
3872                 * branch-misses, but it's still much better than the
3873                 * architectural event which is often completely bogus:
3874                 */
3875                intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
3876                ebx.split.no_branch_misses_retired = 0;
3877                x86_pmu.events_maskl = ebx.full;
3878                pr_info("CPU erratum AAJ80 worked around\n");
3879        }
3880}
3881
3882static bool intel_glp_counter_freezing_broken(int cpu)
3883{
3884        u32 rev = UINT_MAX; /* default to broken for unknown stepping */
3885
3886        switch (cpu_data(cpu).x86_stepping) {
3887        case 1:
3888                rev = 0x28;
3889                break;
3890        case 8:
3891                rev = 0x6;
3892                break;
3893        }
3894
3895        return (cpu_data(cpu).microcode < rev);
3896}
3897
3898static __init void intel_glp_counter_freezing_quirk(void)
3899{
3900        /* Check if it's already disabled */
3901        if (disable_counter_freezing)
3902                return;
3903
3904        /*
3905         * If the system starts with the wrong ucode, leave the
3906         * counter-freezing feature permanently disabled.
3907         */
3908        if (intel_glp_counter_freezing_broken(raw_smp_processor_id())) {
3909                pr_info("PMU counter freezing disabled due to CPU errata, "
3910                        "please upgrade microcode\n");
3911                x86_pmu.counter_freezing = false;
3912                x86_pmu.handle_irq = intel_pmu_handle_irq;
3913        }
3914}
3915
3916/*
3917 * enable software workaround for errata:
3918 * SNB: BJ122
3919 * IVB: BV98
3920 * HSW: HSD29
3921 *
3922 * Only needed when HT is enabled. However, detecting
3923 * whether HT is enabled is difficult (model specific). So instead,
3924 * we enable the workaround at early boot, and verify whether
3925 * it is needed in a later initcall phase, once we have valid
3926 * topology information to check if HT is actually enabled.
3927 */
3928static __init void intel_ht_bug(void)
3929{
3930        x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
3931
3932        x86_pmu.start_scheduling = intel_start_scheduling;
3933        x86_pmu.commit_scheduling = intel_commit_scheduling;
3934        x86_pmu.stop_scheduling = intel_stop_scheduling;
3935}
3936
3937EVENT_ATTR_STR(mem-loads,       mem_ld_hsw,     "event=0xcd,umask=0x1,ldlat=3");
3938EVENT_ATTR_STR(mem-stores,      mem_st_hsw,     "event=0xd0,umask=0x82")
3939
3940/* Haswell special events */
3941EVENT_ATTR_STR(tx-start,        tx_start,       "event=0xc9,umask=0x1");
3942EVENT_ATTR_STR(tx-commit,       tx_commit,      "event=0xc9,umask=0x2");
3943EVENT_ATTR_STR(tx-abort,        tx_abort,       "event=0xc9,umask=0x4");
3944EVENT_ATTR_STR(tx-capacity,     tx_capacity,    "event=0x54,umask=0x2");
3945EVENT_ATTR_STR(tx-conflict,     tx_conflict,    "event=0x54,umask=0x1");
3946EVENT_ATTR_STR(el-start,        el_start,       "event=0xc8,umask=0x1");
3947EVENT_ATTR_STR(el-commit,       el_commit,      "event=0xc8,umask=0x2");
3948EVENT_ATTR_STR(el-abort,        el_abort,       "event=0xc8,umask=0x4");
3949EVENT_ATTR_STR(el-capacity,     el_capacity,    "event=0x54,umask=0x2");
3950EVENT_ATTR_STR(el-conflict,     el_conflict,    "event=0x54,umask=0x1");
3951EVENT_ATTR_STR(cycles-t,        cycles_t,       "event=0x3c,in_tx=1");
3952EVENT_ATTR_STR(cycles-ct,       cycles_ct,      "event=0x3c,in_tx=1,in_tx_cp=1");
3953
3954static struct attribute *hsw_events_attrs[] = {
3955        EVENT_PTR(td_slots_issued),
3956        EVENT_PTR(td_slots_retired),
3957        EVENT_PTR(td_fetch_bubbles),
3958        EVENT_PTR(td_total_slots),
3959        EVENT_PTR(td_total_slots_scale),
3960        EVENT_PTR(td_recovery_bubbles),
3961        EVENT_PTR(td_recovery_bubbles_scale),
3962        NULL
3963};
3964
3965static struct attribute *hsw_mem_events_attrs[] = {
3966        EVENT_PTR(mem_ld_hsw),
3967        EVENT_PTR(mem_st_hsw),
3968        NULL,
3969};
3970
3971static struct attribute *hsw_tsx_events_attrs[] = {
3972        EVENT_PTR(tx_start),
3973        EVENT_PTR(tx_commit),
3974        EVENT_PTR(tx_abort),
3975        EVENT_PTR(tx_capacity),
3976        EVENT_PTR(tx_conflict),
3977        EVENT_PTR(el_start),
3978        EVENT_PTR(el_commit),
3979        EVENT_PTR(el_abort),
3980        EVENT_PTR(el_capacity),
3981        EVENT_PTR(el_conflict),
3982        EVENT_PTR(cycles_t),
3983        EVENT_PTR(cycles_ct),
3984        NULL
3985};
3986
3987static ssize_t freeze_on_smi_show(struct device *cdev,
3988                                  struct device_attribute *attr,
3989                                  char *buf)
3990{
3991        return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
3992}
3993
3994static DEFINE_MUTEX(freeze_on_smi_mutex);
3995
3996static ssize_t freeze_on_smi_store(struct device *cdev,
3997                                   struct device_attribute *attr,
3998                                   const char *buf, size_t count)
3999{
4000        unsigned long val;
4001        ssize_t ret;
4002
4003        ret = kstrtoul(buf, 0, &val);
4004        if (ret)
4005                return ret;
4006
4007        if (val > 1)
4008                return -EINVAL;
4009
4010        mutex_lock(&freeze_on_smi_mutex);
4011
4012        if (x86_pmu.attr_freeze_on_smi == val)
4013                goto done;
4014
4015        x86_pmu.attr_freeze_on_smi = val;
4016
4017        get_online_cpus();
4018        on_each_cpu(flip_smm_bit, &val, 1);
4019        put_online_cpus();
4020done:
4021        mutex_unlock(&freeze_on_smi_mutex);
4022
4023        return count;
4024}
4025
4026static DEVICE_ATTR_RW(freeze_on_smi);
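    /*
     * This attribute is included in intel_pmu_attrs below and is typically
     * exposed as /sys/bus/event_source/devices/cpu/freeze_on_smi; writing
     * 0 or 1 toggles FREEZE_IN_SMM in MSR_IA32_DEBUGCTLMSR on every online
     * CPU via flip_smm_bit().
     */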
4027
4028static ssize_t branches_show(struct device *cdev,
4029                             struct device_attribute *attr,
4030                             char *buf)
4031{
4032        return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
4033}
4034
4035static DEVICE_ATTR_RO(branches);
4036
4037static struct attribute *lbr_attrs[] = {
4038        &dev_attr_branches.attr,
4039        NULL
4040};
4041
4042static char pmu_name_str[30];
4043
4044static ssize_t pmu_name_show(struct device *cdev,
4045                             struct device_attribute *attr,
4046                             char *buf)
4047{
4048        return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str);
4049}
4050
4051static DEVICE_ATTR_RO(pmu_name);
4052
4053static struct attribute *intel_pmu_caps_attrs[] = {
4054       &dev_attr_pmu_name.attr,
4055       NULL
4056};
4057
4058static struct attribute *intel_pmu_attrs[] = {
4059        &dev_attr_freeze_on_smi.attr,
4060        NULL,
4061};
4062
4063static __init struct attribute **
4064get_events_attrs(struct attribute **base,
4065                 struct attribute **mem,
4066                 struct attribute **tsx)
4067{
4068        struct attribute **attrs = base;
4069        struct attribute **old;
4070
4071        if (mem && x86_pmu.pebs)
4072                attrs = merge_attr(attrs, mem);
4073
4074        if (tsx && boot_cpu_has(X86_FEATURE_RTM)) {
4075                old = attrs;
4076                attrs = merge_attr(attrs, tsx);
4077                if (old != base)
4078                        kfree(old);
4079        }
4080
4081        return attrs;
4082}
4083
4084__init int intel_pmu_init(void)
4085{
4086        struct attribute **extra_attr = NULL;
4087        struct attribute **mem_attr = NULL;
4088        struct attribute **tsx_attr = NULL;
4089        struct attribute **to_free = NULL;
4090        union cpuid10_edx edx;
4091        union cpuid10_eax eax;
4092        union cpuid10_ebx ebx;
4093        struct event_constraint *c;
4094        unsigned int unused;
4095        struct extra_reg *er;
4096        int version, i;
4097        char *name;
4098
4099        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
4100                switch (boot_cpu_data.x86) {
4101                case 0x6:
4102                        return p6_pmu_init();
4103                case 0xb:
4104                        return knc_pmu_init();
4105                case 0xf:
4106                        return p4_pmu_init();
4107                }
4108                return -ENODEV;
4109        }
4110
4111        /*
4112         * Check whether the Architectural PerfMon supports
4113         * Branch Misses Retired hw_event or not.
4114         */
4115        cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
4116        if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
4117                return -ENODEV;
4118
4119        version = eax.split.version_id;
4120        if (version < 2)
4121                x86_pmu = core_pmu;
4122        else
4123                x86_pmu = intel_pmu;
4124
4125        x86_pmu.version                 = version;
4126        x86_pmu.num_counters            = eax.split.num_counters;
4127        x86_pmu.cntval_bits             = eax.split.bit_width;
4128        x86_pmu.cntval_mask             = (1ULL << eax.split.bit_width) - 1;
4129
4130        x86_pmu.events_maskl            = ebx.full;
4131        x86_pmu.events_mask_len         = eax.split.mask_length;
4132
4133        x86_pmu.max_pebs_events         = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
4134
4135        /*
4136         * Quirk: v2 perfmon does not report fixed-purpose events, so
4137         * assume at least 3 events, when not running in a hypervisor:
4138         */
4139        if (version > 1) {
4140                int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
4141
4142                x86_pmu.num_counters_fixed =
4143                        max((int)edx.split.num_counters_fixed, assume);
4144        }
4145
4146        if (version >= 4)
4147                x86_pmu.counter_freezing = !disable_counter_freezing;
4148
4149        if (boot_cpu_has(X86_FEATURE_PDCM)) {
4150                u64 capabilities;
4151
4152                rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
4153                x86_pmu.intel_cap.capabilities = capabilities;
4154        }
4155
4156        intel_ds_init();
4157
4158        x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
4159
4160        /*
4161         * Install the hw-cache-events table:
4162         */
4163        switch (boot_cpu_data.x86_model) {
4164        case INTEL_FAM6_CORE_YONAH:
4165                pr_cont("Core events, ");
4166                name = "core";
4167                break;
4168
4169        case INTEL_FAM6_CORE2_MEROM:
4170                x86_add_quirk(intel_clovertown_quirk);
4171        case INTEL_FAM6_CORE2_MEROM_L:
4172        case INTEL_FAM6_CORE2_PENRYN:
4173        case INTEL_FAM6_CORE2_DUNNINGTON:
4174                memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
4175                       sizeof(hw_cache_event_ids));
4176
4177                intel_pmu_lbr_init_core();
4178
4179                x86_pmu.event_constraints = intel_core2_event_constraints;
4180                x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
4181                pr_cont("Core2 events, ");
4182                name = "core2";
4183                break;
4184
4185        case INTEL_FAM6_NEHALEM:
4186        case INTEL_FAM6_NEHALEM_EP:
4187        case INTEL_FAM6_NEHALEM_EX:
4188                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
4189                       sizeof(hw_cache_event_ids));
4190                memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
4191                       sizeof(hw_cache_extra_regs));
4192
4193                intel_pmu_lbr_init_nhm();
4194
4195                x86_pmu.event_constraints = intel_nehalem_event_constraints;
4196                x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
4197                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
4198                x86_pmu.extra_regs = intel_nehalem_extra_regs;
4199
4200                mem_attr = nhm_mem_events_attrs;
4201
4202                /* UOPS_ISSUED.STALLED_CYCLES */
4203                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4204                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4205                /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
4206                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4207                        X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
4208
4209                intel_pmu_pebs_data_source_nhm();
4210                x86_add_quirk(intel_nehalem_quirk);
4211                x86_pmu.pebs_no_tlb = 1;
4212                extra_attr = nhm_format_attr;
4213
4214                pr_cont("Nehalem events, ");
4215                name = "nehalem";
4216                break;
4217
4218        case INTEL_FAM6_ATOM_BONNELL:
4219        case INTEL_FAM6_ATOM_BONNELL_MID:
4220        case INTEL_FAM6_ATOM_SALTWELL:
4221        case INTEL_FAM6_ATOM_SALTWELL_MID:
4222        case INTEL_FAM6_ATOM_SALTWELL_TABLET:
4223                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
4224                       sizeof(hw_cache_event_ids));
4225
4226                intel_pmu_lbr_init_atom();
4227
4228                x86_pmu.event_constraints = intel_gen_event_constraints;
4229                x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
4230                x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
4231                pr_cont("Atom events, ");
4232                name = "bonnell";
4233                break;
4234
4235        case INTEL_FAM6_ATOM_SILVERMONT:
4236        case INTEL_FAM6_ATOM_SILVERMONT_X:
4237        case INTEL_FAM6_ATOM_SILVERMONT_MID:
4238        case INTEL_FAM6_ATOM_AIRMONT:
4239        case INTEL_FAM6_ATOM_AIRMONT_MID:
4240                memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
4241                        sizeof(hw_cache_event_ids));
4242                memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
4243                       sizeof(hw_cache_extra_regs));
4244
4245                intel_pmu_lbr_init_slm();
4246
4247                x86_pmu.event_constraints = intel_slm_event_constraints;
4248                x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
4249                x86_pmu.extra_regs = intel_slm_extra_regs;
4250                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4251                x86_pmu.cpu_events = slm_events_attrs;
4252                extra_attr = slm_format_attr;
4253                pr_cont("Silvermont events, ");
4254                name = "silvermont";
4255                break;
4256
4257        case INTEL_FAM6_ATOM_GOLDMONT:
4258        case INTEL_FAM6_ATOM_GOLDMONT_X:
4259                memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
4260                       sizeof(hw_cache_event_ids));
4261                memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
4262                       sizeof(hw_cache_extra_regs));
4263
4264                intel_pmu_lbr_init_skl();
4265
4266                x86_pmu.event_constraints = intel_slm_event_constraints;
4267                x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
4268                x86_pmu.extra_regs = intel_glm_extra_regs;
4269                /*
4270                 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
4271                 * for precise cycles.
4272                 * :pp is identical to :ppp
4273                 */
4274                x86_pmu.pebs_aliases = NULL;
4275                x86_pmu.pebs_prec_dist = true;
4276                x86_pmu.lbr_pt_coexist = true;
4277                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4278                x86_pmu.cpu_events = glm_events_attrs;
4279                extra_attr = slm_format_attr;
4280                pr_cont("Goldmont events, ");
4281                name = "goldmont";
4282                break;
4283
4284        case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
4285                x86_add_quirk(intel_glp_counter_freezing_quirk);
4286                memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
4287                       sizeof(hw_cache_event_ids));
4288                memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
4289                       sizeof(hw_cache_extra_regs));
4290
4291                intel_pmu_lbr_init_skl();
4292
4293                x86_pmu.event_constraints = intel_slm_event_constraints;
4294                x86_pmu.extra_regs = intel_glm_extra_regs;
4295                /*
4296                 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
4297                 * for precise cycles.
4298                 */
4299                x86_pmu.pebs_aliases = NULL;
4300                x86_pmu.pebs_prec_dist = true;
4301                x86_pmu.lbr_pt_coexist = true;
4302                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4303                x86_pmu.flags |= PMU_FL_PEBS_ALL;
4304                x86_pmu.get_event_constraints = glp_get_event_constraints;
4305                x86_pmu.cpu_events = glm_events_attrs;
4306                /* Goldmont Plus has 4-wide pipeline */
4307                event_attr_td_total_slots_scale_glm.event_str = "4";
4308                extra_attr = slm_format_attr;
4309                pr_cont("Goldmont plus events, ");
4310                name = "goldmont_plus";
4311                break;
4312
4313        case INTEL_FAM6_WESTMERE:
4314        case INTEL_FAM6_WESTMERE_EP:
4315        case INTEL_FAM6_WESTMERE_EX:
4316                memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
4317                       sizeof(hw_cache_event_ids));
4318                memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
4319                       sizeof(hw_cache_extra_regs));
4320
4321                intel_pmu_lbr_init_nhm();
4322
4323                x86_pmu.event_constraints = intel_westmere_event_constraints;
4324                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
4325                x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
4326                x86_pmu.extra_regs = intel_westmere_extra_regs;
4327                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4328
4329                mem_attr = nhm_mem_events_attrs;
4330
4331                /* UOPS_ISSUED.STALLED_CYCLES */
4332                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4333                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4334                /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
4335                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4336                        X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
4337
4338                intel_pmu_pebs_data_source_nhm();
4339                extra_attr = nhm_format_attr;
4340                pr_cont("Westmere events, ");
4341                name = "westmere";
4342                break;
4343
4344        case INTEL_FAM6_SANDYBRIDGE:
4345        case INTEL_FAM6_SANDYBRIDGE_X:
4346                x86_add_quirk(intel_sandybridge_quirk);
4347                x86_add_quirk(intel_ht_bug);
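                /*
                 * intel_ht_bug enables the exclusive-counter workaround for
                 * the HT erratum; fixup_ht_bug() below undoes it if SMT
                 * turns out to be disabled.
                 */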
4348                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
4349                       sizeof(hw_cache_event_ids));
4350                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
4351                       sizeof(hw_cache_extra_regs));
4352
4353                intel_pmu_lbr_init_snb();
4354
4355                x86_pmu.event_constraints = intel_snb_event_constraints;
4356                x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
4357                x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
4358                if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
4359                        x86_pmu.extra_regs = intel_snbep_extra_regs;
4360                else
4361                        x86_pmu.extra_regs = intel_snb_extra_regs;
4362
4364                /* all extra regs are per-cpu when HT is on */
4365                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4366                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4367
4368                x86_pmu.cpu_events = snb_events_attrs;
4369                mem_attr = snb_mem_events_attrs;
4370
4371                /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
4372                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4373                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4374                /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
4375                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4376                        X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
4377
4378                extra_attr = nhm_format_attr;
4379
4380                pr_cont("SandyBridge events, ");
4381                name = "sandybridge";
4382                break;
4383
4384        case INTEL_FAM6_IVYBRIDGE:
4385        case INTEL_FAM6_IVYBRIDGE_X:
4386                x86_add_quirk(intel_ht_bug);
4387                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
4388                       sizeof(hw_cache_event_ids));
4389                /* dTLB-load-misses on IVB differs from SNB */
4390                hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
4391
4392                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
4393                       sizeof(hw_cache_extra_regs));
4394
4395                intel_pmu_lbr_init_snb();
4396
4397                x86_pmu.event_constraints = intel_ivb_event_constraints;
4398                x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
4399                x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4400                x86_pmu.pebs_prec_dist = true;
4401                if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
4402                        x86_pmu.extra_regs = intel_snbep_extra_regs;
4403                else
4404                        x86_pmu.extra_regs = intel_snb_extra_regs;
4405                /* all extra regs are per-cpu when HT is on */
4406                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4407                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4408
4409                x86_pmu.cpu_events = snb_events_attrs;
4410                mem_attr = snb_mem_events_attrs;
4411
4412                /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
4413                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4414                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4415
4416                extra_attr = nhm_format_attr;
4417
4418                pr_cont("IvyBridge events, ");
4419                name = "ivybridge";
4420                break;
4421
4423        case INTEL_FAM6_HASWELL_CORE:
4424        case INTEL_FAM6_HASWELL_X:
4425        case INTEL_FAM6_HASWELL_ULT:
4426        case INTEL_FAM6_HASWELL_GT3E:
4427                x86_add_quirk(intel_ht_bug);
4428                x86_pmu.late_ack = true;
4429                memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4430                memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4431
4432                intel_pmu_lbr_init_hsw();
4433
4434                x86_pmu.event_constraints = intel_hsw_event_constraints;
4435                x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
4436                x86_pmu.extra_regs = intel_snbep_extra_regs;
4437                x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4438                x86_pmu.pebs_prec_dist = true;
4439                /* all extra regs are per-cpu when HT is on */
4440                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4441                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4442
4443                x86_pmu.hw_config = hsw_hw_config;
4444                x86_pmu.get_event_constraints = hsw_get_event_constraints;
4445                x86_pmu.cpu_events = hsw_events_attrs;
4446                x86_pmu.lbr_double_abort = true;
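                /*
                 * hsw_format_attr additionally exposes the in_tx/in_tx_cp
                 * config bits, which are only meaningful when TSX (RTM) is
                 * available.
                 */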
4447                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4448                        hsw_format_attr : nhm_format_attr;
4449                mem_attr = hsw_mem_events_attrs;
4450                tsx_attr = hsw_tsx_events_attrs;
4451                pr_cont("Haswell events, ");
4452                name = "haswell";
4453                break;
4454
4455        case INTEL_FAM6_BROADWELL_CORE:
4456        case INTEL_FAM6_BROADWELL_XEON_D:
4457        case INTEL_FAM6_BROADWELL_GT3E:
4458        case INTEL_FAM6_BROADWELL_X:
4459                x86_pmu.late_ack = true;
4460                memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4461                memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4462
4463                /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
4464                hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
4465                                                                         BDW_L3_MISS|HSW_SNOOP_DRAM;
4466                hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
4467                                                                          HSW_SNOOP_DRAM;
4468                hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
4469                                                                             BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
4470                hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
4471                                                                              BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
4472
4473                intel_pmu_lbr_init_hsw();
4474
4475                x86_pmu.event_constraints = intel_bdw_event_constraints;
4476                x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
4477                x86_pmu.extra_regs = intel_snbep_extra_regs;
4478                x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4479                x86_pmu.pebs_prec_dist = true;
4480                /* all extra regs are per-cpu when HT is on */
4481                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4482                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4483
4484                x86_pmu.hw_config = hsw_hw_config;
4485                x86_pmu.get_event_constraints = hsw_get_event_constraints;
4486                x86_pmu.cpu_events = hsw_events_attrs;
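                /*
                 * bdw_limit_period() enforces a minimum sample period for
                 * INST_RETIRED.ALL to cope with a Broadwell-specific
                 * restriction.
                 */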
4487                x86_pmu.limit_period = bdw_limit_period;
4488                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4489                        hsw_format_attr : nhm_format_attr;
4490                mem_attr = hsw_mem_events_attrs;
4491                tsx_attr = hsw_tsx_events_attrs;
4492                pr_cont("Broadwell events, ");
4493                name = "broadwell";
4494                break;
4495
4496        case INTEL_FAM6_XEON_PHI_KNL:
4497        case INTEL_FAM6_XEON_PHI_KNM:
4498                memcpy(hw_cache_event_ids,
4499                       slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4500                memcpy(hw_cache_extra_regs,
4501                       knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4502                intel_pmu_lbr_init_knl();
4503
4504                x86_pmu.event_constraints = intel_slm_event_constraints;
4505                x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
4506                x86_pmu.extra_regs = intel_knl_extra_regs;
4507
4508                /* all extra regs are per-cpu when HT is on */
4509                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4510                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4511                extra_attr = slm_format_attr;
4512                pr_cont("Knights Landing/Mill events, ");
4513                name = "knights-landing";
4514                break;
4515
4516        case INTEL_FAM6_SKYLAKE_MOBILE:
4517        case INTEL_FAM6_SKYLAKE_DESKTOP:
4518        case INTEL_FAM6_SKYLAKE_X:
4519        case INTEL_FAM6_KABYLAKE_MOBILE:
4520        case INTEL_FAM6_KABYLAKE_DESKTOP:
4521                x86_pmu.late_ack = true;
4522                memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4523                memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4524                intel_pmu_lbr_init_skl();
4525
4526                /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
4527                event_attr_td_recovery_bubbles.event_str_noht =
4528                        "event=0xd,umask=0x1,cmask=1";
4529                event_attr_td_recovery_bubbles.event_str_ht =
4530                        "event=0xd,umask=0x1,cmask=1,any=1";
4531
4532                x86_pmu.event_constraints = intel_skl_event_constraints;
4533                x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
4534                x86_pmu.extra_regs = intel_skl_extra_regs;
4535                x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
4536                x86_pmu.pebs_prec_dist = true;
4537                /* all extra regs are per-cpu when HT is on */
4538                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4539                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4540
4541                x86_pmu.hw_config = hsw_hw_config;
4542                x86_pmu.get_event_constraints = hsw_get_event_constraints;
4543                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4544                        hsw_format_attr : nhm_format_attr;
4545                extra_attr = merge_attr(extra_attr, skl_format_attr);
4546                to_free = extra_attr;
4547                x86_pmu.cpu_events = hsw_events_attrs;
4548                mem_attr = hsw_mem_events_attrs;
4549                tsx_attr = hsw_tsx_events_attrs;
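                /* the PEBS data-source decode table differs on Skylake-X (server) parts */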
4550                intel_pmu_pebs_data_source_skl(
4551                        boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
4552                pr_cont("Skylake events, ");
4553                name = "skylake";
4554                break;
4555
4556        default:
4557                switch (x86_pmu.version) {
4558                case 1:
4559                        x86_pmu.event_constraints = intel_v1_event_constraints;
4560                        pr_cont("generic architected perfmon v1, ");
4561                        name = "generic_arch_v1";
4562                        break;
4563                default:
4564                        /*
4565                         * default constraints for v2 and up
4566                         */
4567                        x86_pmu.event_constraints = intel_gen_event_constraints;
4568                        pr_cont("generic architected perfmon, ");
4569                        name = "generic_arch_v2+";
4570                        break;
4571                }
4572        }
4573
4574        snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
4575
4576        if (version >= 2 && extra_attr) {
4577                x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
4578                                                  extra_attr);
4579                WARN_ON(!x86_pmu.format_attrs);
4580        }
4581
4582        x86_pmu.cpu_events = get_events_attrs(x86_pmu.cpu_events,
4583                                              mem_attr, tsx_attr);
4584
4585        if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
4586                WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
4587                     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
4588                x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
4589        }
4590        x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
4591
4592        if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
4593                WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
4594                     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
4595                x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
4596        }
4597
4598        x86_pmu.intel_ctrl |=
4599                ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
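        /*
         * intel_ctrl now holds the enable bits for all usable counters:
         * generic counters in the low bits and fixed counters starting at
         * bit INTEL_PMC_IDX_FIXED, matching the GLOBAL_CTRL MSR layout.
         */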
4600
4601        if (x86_pmu.event_constraints) {
4602                /*
4603                 * The REF_CYCLES event only works on fixed counter 2, so
4604                 * do not extend its mask to the generic counters.
4605                 */
4606                for_each_event_constraint(c, x86_pmu.event_constraints) {
4607                        if (c->cmask == FIXED_EVENT_FLAGS
4608                            && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
4609                                c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
4610                        }
4611                        c->idxmsk64 &=
4612                                ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
4613                        c->weight = hweight64(c->idxmsk64);
4614                }
4615        }
4616
4617        /*
4618         * Accessing LBR MSRs may cause a #GP fault under certain
4619         * circumstances, e.g. a KVM guest that does not implement them.
4620         * Check all LBR MSRs here.
4621         * Disable LBR access if any LBR MSR cannot be accessed.
4622         */
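        /*
         * check_msr() probes an MSR by flipping the given mask bits,
         * verifying the value reads back, and restoring the original value;
         * failure means the MSR cannot be accessed safely.
         */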
4623        if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
4624                x86_pmu.lbr_nr = 0;
4625        for (i = 0; i < x86_pmu.lbr_nr; i++) {
4626                if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
4627                      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
4628                        x86_pmu.lbr_nr = 0;
4629        }
4630
4631        x86_pmu.caps_attrs = intel_pmu_caps_attrs;
4632
4633        if (x86_pmu.lbr_nr) {
4634                x86_pmu.caps_attrs = merge_attr(x86_pmu.caps_attrs, lbr_attrs);
4635                pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
4636        }
4637
4638        /*
4639         * Accessing an extra MSR may cause a #GP fault under certain
4640         * circumstances, e.g. a KVM guest that does not implement the
4641         * offcore response MSRs. Check all extra_regs here.
4642         */
4643        if (x86_pmu.extra_regs) {
4644                for (er = x86_pmu.extra_regs; er->msr; er++) {
4645                        er->extra_msr_access = check_msr(er->msr, 0x11UL);
4646                        /* Disable LBR select mapping */
4647                        if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
4648                                x86_pmu.lbr_sel_map = NULL;
4649                }
4650        }
4651
4652        /* Support full width counters using alternative MSR range */
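        /*
         * The legacy counter MSRs only accept 32-bit writes; the alternative
         * MSR_IA32_PMC0 range allows writing the full counter width, so the
         * maximum sampling period can be raised accordingly.
         */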
4653        if (x86_pmu.intel_cap.full_width_write) {
4654                x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
4655                x86_pmu.perfctr = MSR_IA32_PMC0;
4656                pr_cont("full-width counters, ");
4657        }
4658
4659        /*
4660         * For arch perfmon v4 use counter freezing to avoid
4661         * several MSR accesses in the PMI handler.
4662         */
4663        if (x86_pmu.counter_freezing)
4664                x86_pmu.handle_irq = intel_pmu_handle_irq_v4;
4665
4666        kfree(to_free);
4667        return 0;
4668}
4669
4670/*
4671 * HT bug: phase 2 init
4672 * Called once we have valid topology information to check
4673 * whether or not HT is enabled.
4674 * If HT is off, the workaround is disabled.
4675 */
4676static __init int fixup_ht_bug(void)
4677{
4678        int c;
4679        /*
4680         * problem not present on this CPU model, nothing to do
4681         */
4682        if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
4683                return 0;
4684
4685        if (topology_max_smt_threads() > 1) {
4686                pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
4687                return 0;
4688        }
4689
4690        cpus_read_lock();
4691
4692        hardlockup_detector_perf_stop();
4693
4694        x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
4695
4696        x86_pmu.start_scheduling = NULL;
4697        x86_pmu.commit_scheduling = NULL;
4698        x86_pmu.stop_scheduling = NULL;
4699
4700        hardlockup_detector_perf_restart();
4701
4702        for_each_online_cpu(c)
4703                free_excl_cntrs(c);
4704
4705        cpus_read_unlock();
4706        pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
4707        return 0;
4708}
4709subsys_initcall(fixup_ht_bug)
4710