linux/arch/x86/events/intel/core.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Per core/cpu state
   4 *
   5 * Used to coordinate shared registers between HT threads or
   6 * among events on a single PMU.
   7 */
   8
   9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  10
  11#include <linux/stddef.h>
  12#include <linux/types.h>
  13#include <linux/init.h>
  14#include <linux/slab.h>
  15#include <linux/export.h>
  16#include <linux/nmi.h>
  17
  18#include <asm/cpufeature.h>
  19#include <asm/hardirq.h>
  20#include <asm/intel-family.h>
  21#include <asm/apic.h>
  22#include <asm/cpu_device_id.h>
  23
  24#include "../perf_event.h"
  25
  26/*
  27 * Intel PerfMon, used on Core and later.
  28 */
  29static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
  30{
  31        [PERF_COUNT_HW_CPU_CYCLES]              = 0x003c,
  32        [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
  33        [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x4f2e,
  34        [PERF_COUNT_HW_CACHE_MISSES]            = 0x412e,
  35        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c4,
  36        [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c5,
  37        [PERF_COUNT_HW_BUS_CYCLES]              = 0x013c,
  38        [PERF_COUNT_HW_REF_CPU_CYCLES]          = 0x0300, /* pseudo-encoding */
  39};
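
/*
 * Illustrative note (added by the editor, not in the upstream file): each
 * entry above uses the PERFEVTSELx layout, i.e. bits 0-7 are the event
 * select and bits 8-15 the unit mask.  For example, PERF_COUNT_HW_CACHE_MISSES
 * maps to 0x412e: event 0x2e, umask 0x41 (LONGEST_LAT_CACHE.MISS), the same
 * encoding a user could request directly as a raw event:
 *
 *	perf stat -e r412e -- <workload>
 */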
  40
  41static struct event_constraint intel_core_event_constraints[] __read_mostly =
  42{
  43        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
  44        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
  45        INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
  46        INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
  47        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
  48        INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
  49        EVENT_CONSTRAINT_END
  50};
  51
  52static struct event_constraint intel_core2_event_constraints[] __read_mostly =
  53{
  54        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  55        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  56        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
  57        INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
  58        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
  59        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
  60        INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
  61        INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
  62        INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
  63        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
  64        INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
  65        INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
  66        INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
  67        EVENT_CONSTRAINT_END
  68};
  69
  70static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
  71{
  72        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  73        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  74        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
  75        INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
  76        INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
  77        INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
  78        INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
  79        INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
  80        INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
  81        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
  82        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
  83        EVENT_CONSTRAINT_END
  84};
  85
  86static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
  87{
  88        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
  89        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
  90        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
  91        EVENT_EXTRA_END
  92};
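
/*
 * Illustrative sketch (editor's addition): an extra_reg entry ties an event
 * encoding (0x01b7 = OFFCORE_RESPONSE_0 here) to the auxiliary MSR that holds
 * its request/response selection, plus a mask of the MSR bits user space may
 * set.  The MSR payload travels in attr.config1 and is exposed as the
 * "offcore_rsp" format attribute, so a raw request looks roughly like:
 *
 *	perf stat -e cpu/event=0xb7,umask=0x01,offcore_rsp=0x1/ -- <workload>
 *
 * The offcore_rsp value above is only a placeholder, not one taken from this
 * file.
 */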
  93
  94static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
  95{
  96        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  97        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  98        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
  99        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
 100        INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
 101        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
 102        INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
 103        EVENT_CONSTRAINT_END
 104};
 105
 106static struct event_constraint intel_snb_event_constraints[] __read_mostly =
 107{
 108        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 109        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 110        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 111        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
 112        INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
 113        INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 114        INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
 115        INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
 116        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 117        INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 118        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
 119        INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 120
 121        /*
 122         * When HT is off, these events can only run on the bottom 4 counters.
 123         * When HT is on, they are impacted by the HT bug and require EXCL access.
 124         */
 125        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
 126        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
 127        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
 128        INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
 129
 130        EVENT_CONSTRAINT_END
 131};
 132
 133static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
 134{
 135        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 136        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 137        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 138        INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
 139        INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
 140        INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
 141        INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
 142        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
 143        INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
 144        INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
 145        INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 146        INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
 147        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 148
 149        /*
 150         * When HT is off, these events can only run on the bottom 4 counters.
 151         * When HT is on, they are impacted by the HT bug and require EXCL access.
 152         */
 153        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
 154        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
 155        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
 156        INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
 157
 158        EVENT_CONSTRAINT_END
 159};
 160
 161static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
 162{
 163        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
 164        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
 165        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
 166        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
 167        EVENT_EXTRA_END
 168};
 169
 170static struct event_constraint intel_v1_event_constraints[] __read_mostly =
 171{
 172        EVENT_CONSTRAINT_END
 173};
 174
 175static struct event_constraint intel_gen_event_constraints[] __read_mostly =
 176{
 177        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 178        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 179        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 180        EVENT_CONSTRAINT_END
 181};
 182
 183static struct event_constraint intel_slm_event_constraints[] __read_mostly =
 184{
 185        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 186        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 187        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
 188        EVENT_CONSTRAINT_END
 189};
 190
 191static struct event_constraint intel_skl_event_constraints[] = {
 192        FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
 193        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
 194        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
 195        INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2),    /* INST_RETIRED.PREC_DIST */
 196
 197        /*
 198         * when HT is off, these can only run on the bottom 4 counters
 199         */
 200        INTEL_EVENT_CONSTRAINT(0xd0, 0xf),      /* MEM_INST_RETIRED.* */
 201        INTEL_EVENT_CONSTRAINT(0xd1, 0xf),      /* MEM_LOAD_RETIRED.* */
 202        INTEL_EVENT_CONSTRAINT(0xd2, 0xf),      /* MEM_LOAD_L3_HIT_RETIRED.* */
 203        INTEL_EVENT_CONSTRAINT(0xcd, 0xf),      /* MEM_TRANS_RETIRED.* */
 204        INTEL_EVENT_CONSTRAINT(0xc6, 0xf),      /* FRONTEND_RETIRED.* */
 205
 206        EVENT_CONSTRAINT_END
 207};
 208
 209static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
 210        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
 211        INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
 212        EVENT_EXTRA_END
 213};
 214
 215static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
 216        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
 217        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
 218        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
 219        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 220        EVENT_EXTRA_END
 221};
 222
 223static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
 224        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
 225        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
 226        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
 227        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 228        EVENT_EXTRA_END
 229};
 230
 231static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
 232        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
 233        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
 234        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 235        /*
 236         * Note: the low 8 bits of the eventsel code are not a contiguous field;
 237         * some of those bits #GP when set, so they are masked out here.
 238         */
 239        INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
 240        EVENT_EXTRA_END
 241};
 242
 243static struct event_constraint intel_icl_event_constraints[] = {
 244        FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
 245        INTEL_UEVENT_CONSTRAINT(0x1c0, 0),      /* INST_RETIRED.PREC_DIST */
 246        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
 247        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
 248        FIXED_EVENT_CONSTRAINT(0x0400, 3),      /* SLOTS */
 249        INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
 250        INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
 251        INTEL_EVENT_CONSTRAINT(0x32, 0xf),      /* SW_PREFETCH_ACCESS.* */
 252        INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf),
 253        INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
 254        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff),  /* CYCLE_ACTIVITY.STALLS_TOTAL */
 255        INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff),  /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
 256        INTEL_EVENT_CONSTRAINT(0xa3, 0xf),      /* CYCLE_ACTIVITY.* */
 257        INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
 258        INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
 259        INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
 260        INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
 261        EVENT_CONSTRAINT_END
 262};
 263
 264static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
 265        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff9fffull, RSP_0),
 266        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff9fffull, RSP_1),
 267        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 268        INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
 269        EVENT_EXTRA_END
 270};
 271
 272EVENT_ATTR_STR(mem-loads,       mem_ld_nhm,     "event=0x0b,umask=0x10,ldlat=3");
 273EVENT_ATTR_STR(mem-loads,       mem_ld_snb,     "event=0xcd,umask=0x1,ldlat=3");
 274EVENT_ATTR_STR(mem-stores,      mem_st_snb,     "event=0xcd,umask=0x2");
 275
 276static struct attribute *nhm_mem_events_attrs[] = {
 277        EVENT_PTR(mem_ld_nhm),
 278        NULL,
 279};
 280
 281/*
 282 * topdown events for Intel Core CPUs.
 283 *
 284 * The events are all counted in slots, where a slot is an issue
 285 * opportunity in a 4-wide pipeline. Some events are already reported
 286 * in slots; for cycle events we multiply by the pipeline width (4).
 287 *
 288 * With Hyper-Threading on, topdown metrics are either summed or averaged
 289 * between the threads of a core: (count_t0 + count_t1).
 290 *
 291 * For the averaged case the metric is still scaled to the pipeline width,
 292 * so we use factor 2: (count_t0 + count_t1) / 2 * 4.
 293 */
 294
 295EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
 296        "event=0x3c,umask=0x0",                 /* cpu_clk_unhalted.thread */
 297        "event=0x3c,umask=0x0,any=1");          /* cpu_clk_unhalted.thread_any */
 298EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
 299EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
 300        "event=0xe,umask=0x1");                 /* uops_issued.any */
 301EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
 302        "event=0xc2,umask=0x2");                /* uops_retired.retire_slots */
 303EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
 304        "event=0x9c,umask=0x1");                /* idq_uops_not_delivered_core */
 305EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
 306        "event=0xd,umask=0x3,cmask=1",          /* int_misc.recovery_cycles */
 307        "event=0xd,umask=0x3,cmask=1,any=1");   /* int_misc.recovery_cycles_any */
 308EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
 309        "4", "2");
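
/*
 * Illustrative sketch (editor's addition; the metric name follows the perf
 * topdown documentation, not this file): user space combines the attributes
 * above roughly as
 *
 *	frontend_bound = topdown-fetch-bubbles /
 *			 (topdown-total-slots * topdown-total-slots.scale)
 *
 * On a non-HT part the scale is 4 (slots per cycle).  With HT on, the
 * ",any=1" variant counts both siblings, so the scale of 2 reproduces the
 * (count_t0 + count_t1) / 2 * 4 total-slots estimate described above.
 */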
 310
 311static struct attribute *snb_events_attrs[] = {
 312        EVENT_PTR(td_slots_issued),
 313        EVENT_PTR(td_slots_retired),
 314        EVENT_PTR(td_fetch_bubbles),
 315        EVENT_PTR(td_total_slots),
 316        EVENT_PTR(td_total_slots_scale),
 317        EVENT_PTR(td_recovery_bubbles),
 318        EVENT_PTR(td_recovery_bubbles_scale),
 319        NULL,
 320};
 321
 322static struct attribute *snb_mem_events_attrs[] = {
 323        EVENT_PTR(mem_ld_snb),
 324        EVENT_PTR(mem_st_snb),
 325        NULL,
 326};
 327
 328static struct event_constraint intel_hsw_event_constraints[] = {
 329        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 330        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 331        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 332        INTEL_UEVENT_CONSTRAINT(0x148, 0x4),    /* L1D_PEND_MISS.PENDING */
 333        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 334        INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 335        /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 336        INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
 337        /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
 338        INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
 339        /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
 340        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
 341
 342        /*
 343         * When HT is off, these events can only run on the bottom 4 counters.
 344         * When HT is on, they are impacted by the HT bug and require EXCL access.
 345         */
 346        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
 347        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
 348        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
 349        INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
 350
 351        EVENT_CONSTRAINT_END
 352};
 353
 354static struct event_constraint intel_bdw_event_constraints[] = {
 355        FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
 356        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
 357        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
 358        INTEL_UEVENT_CONSTRAINT(0x148, 0x4),    /* L1D_PEND_MISS.PENDING */
 359        INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4),        /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
 360        /*
 361         * when HT is off, these can only run on the bottom 4 counters
 362         */
 363        INTEL_EVENT_CONSTRAINT(0xd0, 0xf),      /* MEM_INST_RETIRED.* */
 364        INTEL_EVENT_CONSTRAINT(0xd1, 0xf),      /* MEM_LOAD_RETIRED.* */
 365        INTEL_EVENT_CONSTRAINT(0xd2, 0xf),      /* MEM_LOAD_L3_HIT_RETIRED.* */
 366        INTEL_EVENT_CONSTRAINT(0xcd, 0xf),      /* MEM_TRANS_RETIRED.* */
 367        EVENT_CONSTRAINT_END
 368};
 369
 370static u64 intel_pmu_event_map(int hw_event)
 371{
 372        return intel_perfmon_event_map[hw_event];
 373}
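
/*
 * Illustrative note (editor's addition): this is installed as the
 * x86_pmu.event_map() callback, which the generic x86 code uses to turn the
 * symbolic PERF_COUNT_HW_* ids into the raw encodings above, roughly:
 *
 *	hwc->config |= x86_pmu.event_map(attr->config);
 *
 * so e.g. PERF_COUNT_HW_CACHE_MISSES resolves to 0x412e.
 */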
 374
 375/*
 376 * Notes on the events:
 377 * - data reads do not include code reads (comparable to earlier tables)
 378 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 379 * - remote node access includes remote memory, remote cache, remote mmio.
 380 * - prefetches are not included in the counts.
 381 * - icache miss does not include decoded icache
 382 */
 383
 384#define SKL_DEMAND_DATA_RD              BIT_ULL(0)
 385#define SKL_DEMAND_RFO                  BIT_ULL(1)
 386#define SKL_ANY_RESPONSE                BIT_ULL(16)
 387#define SKL_SUPPLIER_NONE               BIT_ULL(17)
 388#define SKL_L3_MISS_LOCAL_DRAM          BIT_ULL(26)
 389#define SKL_L3_MISS_REMOTE_HOP0_DRAM    BIT_ULL(27)
 390#define SKL_L3_MISS_REMOTE_HOP1_DRAM    BIT_ULL(28)
 391#define SKL_L3_MISS_REMOTE_HOP2P_DRAM   BIT_ULL(29)
 392#define SKL_L3_MISS                     (SKL_L3_MISS_LOCAL_DRAM| \
 393                                         SKL_L3_MISS_REMOTE_HOP0_DRAM| \
 394                                         SKL_L3_MISS_REMOTE_HOP1_DRAM| \
 395                                         SKL_L3_MISS_REMOTE_HOP2P_DRAM)
 396#define SKL_SPL_HIT                     BIT_ULL(30)
 397#define SKL_SNOOP_NONE                  BIT_ULL(31)
 398#define SKL_SNOOP_NOT_NEEDED            BIT_ULL(32)
 399#define SKL_SNOOP_MISS                  BIT_ULL(33)
 400#define SKL_SNOOP_HIT_NO_FWD            BIT_ULL(34)
 401#define SKL_SNOOP_HIT_WITH_FWD          BIT_ULL(35)
 402#define SKL_SNOOP_HITM                  BIT_ULL(36)
 403#define SKL_SNOOP_NON_DRAM              BIT_ULL(37)
 404#define SKL_ANY_SNOOP                   (SKL_SPL_HIT|SKL_SNOOP_NONE| \
 405                                         SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
 406                                         SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
 407                                         SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
 408#define SKL_DEMAND_READ                 SKL_DEMAND_DATA_RD
 409#define SKL_SNOOP_DRAM                  (SKL_SNOOP_NONE| \
 410                                         SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
 411                                         SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
 412                                         SKL_SNOOP_HITM|SKL_SPL_HIT)
 413#define SKL_DEMAND_WRITE                SKL_DEMAND_RFO
 414#define SKL_LLC_ACCESS                  SKL_ANY_RESPONSE
 415#define SKL_L3_MISS_REMOTE              (SKL_L3_MISS_REMOTE_HOP0_DRAM| \
 416                                         SKL_L3_MISS_REMOTE_HOP1_DRAM| \
 417                                         SKL_L3_MISS_REMOTE_HOP2P_DRAM)
 418
 419static __initconst const u64 skl_hw_cache_event_ids
 420                                [PERF_COUNT_HW_CACHE_MAX]
 421                                [PERF_COUNT_HW_CACHE_OP_MAX]
 422                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 423{
 424 [ C(L1D ) ] = {
 425        [ C(OP_READ) ] = {
 426                [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_INST_RETIRED.ALL_LOADS */
 427                [ C(RESULT_MISS)   ] = 0x151,   /* L1D.REPLACEMENT */
 428        },
 429        [ C(OP_WRITE) ] = {
 430                [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_INST_RETIRED.ALL_STORES */
 431                [ C(RESULT_MISS)   ] = 0x0,
 432        },
 433        [ C(OP_PREFETCH) ] = {
 434                [ C(RESULT_ACCESS) ] = 0x0,
 435                [ C(RESULT_MISS)   ] = 0x0,
 436        },
 437 },
 438 [ C(L1I ) ] = {
 439        [ C(OP_READ) ] = {
 440                [ C(RESULT_ACCESS) ] = 0x0,
 441                [ C(RESULT_MISS)   ] = 0x283,   /* ICACHE_64B.MISS */
 442        },
 443        [ C(OP_WRITE) ] = {
 444                [ C(RESULT_ACCESS) ] = -1,
 445                [ C(RESULT_MISS)   ] = -1,
 446        },
 447        [ C(OP_PREFETCH) ] = {
 448                [ C(RESULT_ACCESS) ] = 0x0,
 449                [ C(RESULT_MISS)   ] = 0x0,
 450        },
 451 },
 452 [ C(LL  ) ] = {
 453        [ C(OP_READ) ] = {
 454                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 455                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 456        },
 457        [ C(OP_WRITE) ] = {
 458                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 459                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 460        },
 461        [ C(OP_PREFETCH) ] = {
 462                [ C(RESULT_ACCESS) ] = 0x0,
 463                [ C(RESULT_MISS)   ] = 0x0,
 464        },
 465 },
 466 [ C(DTLB) ] = {
 467        [ C(OP_READ) ] = {
 468                [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_INST_RETIRED.ALL_LOADS */
 469                [ C(RESULT_MISS)   ] = 0xe08,   /* DTLB_LOAD_MISSES.WALK_COMPLETED */
 470        },
 471        [ C(OP_WRITE) ] = {
 472                [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_INST_RETIRED.ALL_STORES */
 473                [ C(RESULT_MISS)   ] = 0xe49,   /* DTLB_STORE_MISSES.WALK_COMPLETED */
 474        },
 475        [ C(OP_PREFETCH) ] = {
 476                [ C(RESULT_ACCESS) ] = 0x0,
 477                [ C(RESULT_MISS)   ] = 0x0,
 478        },
 479 },
 480 [ C(ITLB) ] = {
 481        [ C(OP_READ) ] = {
 482                [ C(RESULT_ACCESS) ] = 0x2085,  /* ITLB_MISSES.STLB_HIT */
 483                [ C(RESULT_MISS)   ] = 0xe85,   /* ITLB_MISSES.WALK_COMPLETED */
 484        },
 485        [ C(OP_WRITE) ] = {
 486                [ C(RESULT_ACCESS) ] = -1,
 487                [ C(RESULT_MISS)   ] = -1,
 488        },
 489        [ C(OP_PREFETCH) ] = {
 490                [ C(RESULT_ACCESS) ] = -1,
 491                [ C(RESULT_MISS)   ] = -1,
 492        },
 493 },
 494 [ C(BPU ) ] = {
 495        [ C(OP_READ) ] = {
 496                [ C(RESULT_ACCESS) ] = 0xc4,    /* BR_INST_RETIRED.ALL_BRANCHES */
 497                [ C(RESULT_MISS)   ] = 0xc5,    /* BR_MISP_RETIRED.ALL_BRANCHES */
 498        },
 499        [ C(OP_WRITE) ] = {
 500                [ C(RESULT_ACCESS) ] = -1,
 501                [ C(RESULT_MISS)   ] = -1,
 502        },
 503        [ C(OP_PREFETCH) ] = {
 504                [ C(RESULT_ACCESS) ] = -1,
 505                [ C(RESULT_MISS)   ] = -1,
 506        },
 507 },
 508 [ C(NODE) ] = {
 509        [ C(OP_READ) ] = {
 510                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 511                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 512        },
 513        [ C(OP_WRITE) ] = {
 514                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 515                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 516        },
 517        [ C(OP_PREFETCH) ] = {
 518                [ C(RESULT_ACCESS) ] = 0x0,
 519                [ C(RESULT_MISS)   ] = 0x0,
 520        },
 521 },
 522};
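
/*
 * Illustrative note (editor's addition): the generic cache events index this
 * table as [type][op][result].  "L1-dcache-load-misses", for instance,
 * resolves to [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = 0x151 (L1D.REPLACEMENT).
 * A value of 0 means no suitable event exists (event creation fails with
 * -ENOENT), while -1 marks a combination that makes no sense for this
 * hardware (-EINVAL).
 */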
 523
 524static __initconst const u64 skl_hw_cache_extra_regs
 525                                [PERF_COUNT_HW_CACHE_MAX]
 526                                [PERF_COUNT_HW_CACHE_OP_MAX]
 527                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 528{
 529 [ C(LL  ) ] = {
 530        [ C(OP_READ) ] = {
 531                [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
 532                                       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
 533                [ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
 534                                       SKL_L3_MISS|SKL_ANY_SNOOP|
 535                                       SKL_SUPPLIER_NONE,
 536        },
 537        [ C(OP_WRITE) ] = {
 538                [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
 539                                       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
 540                [ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
 541                                       SKL_L3_MISS|SKL_ANY_SNOOP|
 542                                       SKL_SUPPLIER_NONE,
 543        },
 544        [ C(OP_PREFETCH) ] = {
 545                [ C(RESULT_ACCESS) ] = 0x0,
 546                [ C(RESULT_MISS)   ] = 0x0,
 547        },
 548 },
 549 [ C(NODE) ] = {
 550        [ C(OP_READ) ] = {
 551                [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
 552                                       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
 553                [ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
 554                                       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
 555        },
 556        [ C(OP_WRITE) ] = {
 557                [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
 558                                       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
 559                [ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
 560                                       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
 561        },
 562        [ C(OP_PREFETCH) ] = {
 563                [ C(RESULT_ACCESS) ] = 0x0,
 564                [ C(RESULT_MISS)   ] = 0x0,
 565        },
 566 },
 567};
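
/*
 * Illustrative note (editor's addition): these are the OFFCORE_RSP MSR
 * payloads behind the 0x1b7 entries in the table above.  An LL read miss,
 * for example, selects demand data reads (SKL_DEMAND_READ) that missed L3
 * (SKL_L3_MISS) with any snoop outcome or no supplier, and the resulting
 * bitmask is written to MSR_OFFCORE_RSP_0/1 alongside the OFFCORE_RESPONSE
 * event itself.
 */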
 568
 569#define SNB_DMND_DATA_RD        (1ULL << 0)
 570#define SNB_DMND_RFO            (1ULL << 1)
 571#define SNB_DMND_IFETCH         (1ULL << 2)
 572#define SNB_DMND_WB             (1ULL << 3)
 573#define SNB_PF_DATA_RD          (1ULL << 4)
 574#define SNB_PF_RFO              (1ULL << 5)
 575#define SNB_PF_IFETCH           (1ULL << 6)
 576#define SNB_LLC_DATA_RD         (1ULL << 7)
 577#define SNB_LLC_RFO             (1ULL << 8)
 578#define SNB_LLC_IFETCH          (1ULL << 9)
 579#define SNB_BUS_LOCKS           (1ULL << 10)
 580#define SNB_STRM_ST             (1ULL << 11)
 581#define SNB_OTHER               (1ULL << 15)
 582#define SNB_RESP_ANY            (1ULL << 16)
 583#define SNB_NO_SUPP             (1ULL << 17)
 584#define SNB_LLC_HITM            (1ULL << 18)
 585#define SNB_LLC_HITE            (1ULL << 19)
 586#define SNB_LLC_HITS            (1ULL << 20)
 587#define SNB_LLC_HITF            (1ULL << 21)
 588#define SNB_LOCAL               (1ULL << 22)
 589#define SNB_REMOTE              (0xffULL << 23)
 590#define SNB_SNP_NONE            (1ULL << 31)
 591#define SNB_SNP_NOT_NEEDED      (1ULL << 32)
 592#define SNB_SNP_MISS            (1ULL << 33)
 593#define SNB_NO_FWD              (1ULL << 34)
 594#define SNB_SNP_FWD             (1ULL << 35)
 595#define SNB_HITM                (1ULL << 36)
 596#define SNB_NON_DRAM            (1ULL << 37)
 597
 598#define SNB_DMND_READ           (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
 599#define SNB_DMND_WRITE          (SNB_DMND_RFO|SNB_LLC_RFO)
 600#define SNB_DMND_PREFETCH       (SNB_PF_DATA_RD|SNB_PF_RFO)
 601
 602#define SNB_SNP_ANY             (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
 603                                 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
 604                                 SNB_HITM)
 605
 606#define SNB_DRAM_ANY            (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
 607#define SNB_DRAM_REMOTE         (SNB_REMOTE|SNB_SNP_ANY)
 608
 609#define SNB_L3_ACCESS           SNB_RESP_ANY
 610#define SNB_L3_MISS             (SNB_DRAM_ANY|SNB_NON_DRAM)
 611
 612static __initconst const u64 snb_hw_cache_extra_regs
 613                                [PERF_COUNT_HW_CACHE_MAX]
 614                                [PERF_COUNT_HW_CACHE_OP_MAX]
 615                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 616{
 617 [ C(LL  ) ] = {
 618        [ C(OP_READ) ] = {
 619                [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
 620                [ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
 621        },
 622        [ C(OP_WRITE) ] = {
 623                [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
 624                [ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
 625        },
 626        [ C(OP_PREFETCH) ] = {
 627                [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
 628                [ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
 629        },
 630 },
 631 [ C(NODE) ] = {
 632        [ C(OP_READ) ] = {
 633                [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
 634                [ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
 635        },
 636        [ C(OP_WRITE) ] = {
 637                [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
 638                [ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
 639        },
 640        [ C(OP_PREFETCH) ] = {
 641                [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
 642                [ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
 643        },
 644 },
 645};
 646
 647static __initconst const u64 snb_hw_cache_event_ids
 648                                [PERF_COUNT_HW_CACHE_MAX]
 649                                [PERF_COUNT_HW_CACHE_OP_MAX]
 650                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 651{
 652 [ C(L1D) ] = {
 653        [ C(OP_READ) ] = {
 654                [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS        */
 655                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT              */
 656        },
 657        [ C(OP_WRITE) ] = {
 658                [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES       */
 659                [ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT        */
 660        },
 661        [ C(OP_PREFETCH) ] = {
 662                [ C(RESULT_ACCESS) ] = 0x0,
 663                [ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS          */
 664        },
 665 },
 666 [ C(L1I ) ] = {
 667        [ C(OP_READ) ] = {
 668                [ C(RESULT_ACCESS) ] = 0x0,
 669                [ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
 670        },
 671        [ C(OP_WRITE) ] = {
 672                [ C(RESULT_ACCESS) ] = -1,
 673                [ C(RESULT_MISS)   ] = -1,
 674        },
 675        [ C(OP_PREFETCH) ] = {
 676                [ C(RESULT_ACCESS) ] = 0x0,
 677                [ C(RESULT_MISS)   ] = 0x0,
 678        },
 679 },
 680 [ C(LL  ) ] = {
 681        [ C(OP_READ) ] = {
 682                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
 683                [ C(RESULT_ACCESS) ] = 0x01b7,
 684                /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
 685                [ C(RESULT_MISS)   ] = 0x01b7,
 686        },
 687        [ C(OP_WRITE) ] = {
 688                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
 689                [ C(RESULT_ACCESS) ] = 0x01b7,
 690                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
 691                [ C(RESULT_MISS)   ] = 0x01b7,
 692        },
 693        [ C(OP_PREFETCH) ] = {
 694                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
 695                [ C(RESULT_ACCESS) ] = 0x01b7,
 696                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
 697                [ C(RESULT_MISS)   ] = 0x01b7,
 698        },
 699 },
 700 [ C(DTLB) ] = {
 701        [ C(OP_READ) ] = {
 702                [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
 703                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
 704        },
 705        [ C(OP_WRITE) ] = {
 706                [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
 707                [ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
 708        },
 709        [ C(OP_PREFETCH) ] = {
 710                [ C(RESULT_ACCESS) ] = 0x0,
 711                [ C(RESULT_MISS)   ] = 0x0,
 712        },
 713 },
 714 [ C(ITLB) ] = {
 715        [ C(OP_READ) ] = {
 716                [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT         */
 717                [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK    */
 718        },
 719        [ C(OP_WRITE) ] = {
 720                [ C(RESULT_ACCESS) ] = -1,
 721                [ C(RESULT_MISS)   ] = -1,
 722        },
 723        [ C(OP_PREFETCH) ] = {
 724                [ C(RESULT_ACCESS) ] = -1,
 725                [ C(RESULT_MISS)   ] = -1,
 726        },
 727 },
 728 [ C(BPU ) ] = {
 729        [ C(OP_READ) ] = {
 730                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
 731                [ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
 732        },
 733        [ C(OP_WRITE) ] = {
 734                [ C(RESULT_ACCESS) ] = -1,
 735                [ C(RESULT_MISS)   ] = -1,
 736        },
 737        [ C(OP_PREFETCH) ] = {
 738                [ C(RESULT_ACCESS) ] = -1,
 739                [ C(RESULT_MISS)   ] = -1,
 740        },
 741 },
 742 [ C(NODE) ] = {
 743        [ C(OP_READ) ] = {
 744                [ C(RESULT_ACCESS) ] = 0x01b7,
 745                [ C(RESULT_MISS)   ] = 0x01b7,
 746        },
 747        [ C(OP_WRITE) ] = {
 748                [ C(RESULT_ACCESS) ] = 0x01b7,
 749                [ C(RESULT_MISS)   ] = 0x01b7,
 750        },
 751        [ C(OP_PREFETCH) ] = {
 752                [ C(RESULT_ACCESS) ] = 0x01b7,
 753                [ C(RESULT_MISS)   ] = 0x01b7,
 754        },
 755 },
 756
 757};
 758
 759/*
 760 * Notes on the events:
 761 * - data reads do not include code reads (comparable to earlier tables)
 762 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 763 * - remote node access includes remote memory, remote cache, remote mmio.
 764 * - prefetches are not included in the counts because they are not
 765 *   reliably counted.
 766 */
 767
 768#define HSW_DEMAND_DATA_RD              BIT_ULL(0)
 769#define HSW_DEMAND_RFO                  BIT_ULL(1)
 770#define HSW_ANY_RESPONSE                BIT_ULL(16)
 771#define HSW_SUPPLIER_NONE               BIT_ULL(17)
 772#define HSW_L3_MISS_LOCAL_DRAM          BIT_ULL(22)
 773#define HSW_L3_MISS_REMOTE_HOP0         BIT_ULL(27)
 774#define HSW_L3_MISS_REMOTE_HOP1         BIT_ULL(28)
 775#define HSW_L3_MISS_REMOTE_HOP2P        BIT_ULL(29)
 776#define HSW_L3_MISS                     (HSW_L3_MISS_LOCAL_DRAM| \
 777                                         HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
 778                                         HSW_L3_MISS_REMOTE_HOP2P)
 779#define HSW_SNOOP_NONE                  BIT_ULL(31)
 780#define HSW_SNOOP_NOT_NEEDED            BIT_ULL(32)
 781#define HSW_SNOOP_MISS                  BIT_ULL(33)
 782#define HSW_SNOOP_HIT_NO_FWD            BIT_ULL(34)
 783#define HSW_SNOOP_HIT_WITH_FWD          BIT_ULL(35)
 784#define HSW_SNOOP_HITM                  BIT_ULL(36)
 785#define HSW_SNOOP_NON_DRAM              BIT_ULL(37)
 786#define HSW_ANY_SNOOP                   (HSW_SNOOP_NONE| \
 787                                         HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
 788                                         HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
 789                                         HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
 790#define HSW_SNOOP_DRAM                  (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
 791#define HSW_DEMAND_READ                 HSW_DEMAND_DATA_RD
 792#define HSW_DEMAND_WRITE                HSW_DEMAND_RFO
 793#define HSW_L3_MISS_REMOTE              (HSW_L3_MISS_REMOTE_HOP0|\
 794                                         HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
 795#define HSW_LLC_ACCESS                  HSW_ANY_RESPONSE
 796
 797#define BDW_L3_MISS_LOCAL               BIT(26)
 798#define BDW_L3_MISS                     (BDW_L3_MISS_LOCAL| \
 799                                         HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
 800                                         HSW_L3_MISS_REMOTE_HOP2P)
 801
 802
 803static __initconst const u64 hsw_hw_cache_event_ids
 804                                [PERF_COUNT_HW_CACHE_MAX]
 805                                [PERF_COUNT_HW_CACHE_OP_MAX]
 806                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 807{
 808 [ C(L1D ) ] = {
 809        [ C(OP_READ) ] = {
 810                [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_UOPS_RETIRED.ALL_LOADS */
 811                [ C(RESULT_MISS)   ] = 0x151,   /* L1D.REPLACEMENT */
 812        },
 813        [ C(OP_WRITE) ] = {
 814                [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_UOPS_RETIRED.ALL_STORES */
 815                [ C(RESULT_MISS)   ] = 0x0,
 816        },
 817        [ C(OP_PREFETCH) ] = {
 818                [ C(RESULT_ACCESS) ] = 0x0,
 819                [ C(RESULT_MISS)   ] = 0x0,
 820        },
 821 },
 822 [ C(L1I ) ] = {
 823        [ C(OP_READ) ] = {
 824                [ C(RESULT_ACCESS) ] = 0x0,
 825                [ C(RESULT_MISS)   ] = 0x280,   /* ICACHE.MISSES */
 826        },
 827        [ C(OP_WRITE) ] = {
 828                [ C(RESULT_ACCESS) ] = -1,
 829                [ C(RESULT_MISS)   ] = -1,
 830        },
 831        [ C(OP_PREFETCH) ] = {
 832                [ C(RESULT_ACCESS) ] = 0x0,
 833                [ C(RESULT_MISS)   ] = 0x0,
 834        },
 835 },
 836 [ C(LL  ) ] = {
 837        [ C(OP_READ) ] = {
 838                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 839                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 840        },
 841        [ C(OP_WRITE) ] = {
 842                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 843                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 844        },
 845        [ C(OP_PREFETCH) ] = {
 846                [ C(RESULT_ACCESS) ] = 0x0,
 847                [ C(RESULT_MISS)   ] = 0x0,
 848        },
 849 },
 850 [ C(DTLB) ] = {
 851        [ C(OP_READ) ] = {
 852                [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_UOPS_RETIRED.ALL_LOADS */
 853                [ C(RESULT_MISS)   ] = 0x108,   /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
 854        },
 855        [ C(OP_WRITE) ] = {
 856                [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_UOPS_RETIRED.ALL_STORES */
 857                [ C(RESULT_MISS)   ] = 0x149,   /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
 858        },
 859        [ C(OP_PREFETCH) ] = {
 860                [ C(RESULT_ACCESS) ] = 0x0,
 861                [ C(RESULT_MISS)   ] = 0x0,
 862        },
 863 },
 864 [ C(ITLB) ] = {
 865        [ C(OP_READ) ] = {
 866                [ C(RESULT_ACCESS) ] = 0x6085,  /* ITLB_MISSES.STLB_HIT */
 867                [ C(RESULT_MISS)   ] = 0x185,   /* ITLB_MISSES.MISS_CAUSES_A_WALK */
 868        },
 869        [ C(OP_WRITE) ] = {
 870                [ C(RESULT_ACCESS) ] = -1,
 871                [ C(RESULT_MISS)   ] = -1,
 872        },
 873        [ C(OP_PREFETCH) ] = {
 874                [ C(RESULT_ACCESS) ] = -1,
 875                [ C(RESULT_MISS)   ] = -1,
 876        },
 877 },
 878 [ C(BPU ) ] = {
 879        [ C(OP_READ) ] = {
 880                [ C(RESULT_ACCESS) ] = 0xc4,    /* BR_INST_RETIRED.ALL_BRANCHES */
 881                [ C(RESULT_MISS)   ] = 0xc5,    /* BR_MISP_RETIRED.ALL_BRANCHES */
 882        },
 883        [ C(OP_WRITE) ] = {
 884                [ C(RESULT_ACCESS) ] = -1,
 885                [ C(RESULT_MISS)   ] = -1,
 886        },
 887        [ C(OP_PREFETCH) ] = {
 888                [ C(RESULT_ACCESS) ] = -1,
 889                [ C(RESULT_MISS)   ] = -1,
 890        },
 891 },
 892 [ C(NODE) ] = {
 893        [ C(OP_READ) ] = {
 894                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 895                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 896        },
 897        [ C(OP_WRITE) ] = {
 898                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 899                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 900        },
 901        [ C(OP_PREFETCH) ] = {
 902                [ C(RESULT_ACCESS) ] = 0x0,
 903                [ C(RESULT_MISS)   ] = 0x0,
 904        },
 905 },
 906};
 907
 908static __initconst const u64 hsw_hw_cache_extra_regs
 909                                [PERF_COUNT_HW_CACHE_MAX]
 910                                [PERF_COUNT_HW_CACHE_OP_MAX]
 911                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 912{
 913 [ C(LL  ) ] = {
 914        [ C(OP_READ) ] = {
 915                [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
 916                                       HSW_LLC_ACCESS,
 917                [ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
 918                                       HSW_L3_MISS|HSW_ANY_SNOOP,
 919        },
 920        [ C(OP_WRITE) ] = {
 921                [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
 922                                       HSW_LLC_ACCESS,
 923                [ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
 924                                       HSW_L3_MISS|HSW_ANY_SNOOP,
 925        },
 926        [ C(OP_PREFETCH) ] = {
 927                [ C(RESULT_ACCESS) ] = 0x0,
 928                [ C(RESULT_MISS)   ] = 0x0,
 929        },
 930 },
 931 [ C(NODE) ] = {
 932        [ C(OP_READ) ] = {
 933                [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
 934                                       HSW_L3_MISS_LOCAL_DRAM|
 935                                       HSW_SNOOP_DRAM,
 936                [ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
 937                                       HSW_L3_MISS_REMOTE|
 938                                       HSW_SNOOP_DRAM,
 939        },
 940        [ C(OP_WRITE) ] = {
 941                [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
 942                                       HSW_L3_MISS_LOCAL_DRAM|
 943                                       HSW_SNOOP_DRAM,
 944                [ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
 945                                       HSW_L3_MISS_REMOTE|
 946                                       HSW_SNOOP_DRAM,
 947        },
 948        [ C(OP_PREFETCH) ] = {
 949                [ C(RESULT_ACCESS) ] = 0x0,
 950                [ C(RESULT_MISS)   ] = 0x0,
 951        },
 952 },
 953};
 954
 955static __initconst const u64 westmere_hw_cache_event_ids
 956                                [PERF_COUNT_HW_CACHE_MAX]
 957                                [PERF_COUNT_HW_CACHE_OP_MAX]
 958                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 959{
 960 [ C(L1D) ] = {
 961        [ C(OP_READ) ] = {
 962                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
 963                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
 964        },
 965        [ C(OP_WRITE) ] = {
 966                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
 967                [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
 968        },
 969        [ C(OP_PREFETCH) ] = {
 970                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
 971                [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
 972        },
 973 },
 974 [ C(L1I ) ] = {
 975        [ C(OP_READ) ] = {
 976                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
 977                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
 978        },
 979        [ C(OP_WRITE) ] = {
 980                [ C(RESULT_ACCESS) ] = -1,
 981                [ C(RESULT_MISS)   ] = -1,
 982        },
 983        [ C(OP_PREFETCH) ] = {
 984                [ C(RESULT_ACCESS) ] = 0x0,
 985                [ C(RESULT_MISS)   ] = 0x0,
 986        },
 987 },
 988 [ C(LL  ) ] = {
 989        [ C(OP_READ) ] = {
 990                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
 991                [ C(RESULT_ACCESS) ] = 0x01b7,
 992                /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
 993                [ C(RESULT_MISS)   ] = 0x01b7,
 994        },
 995        /*
 996         * Use RFO, not WRITEBACK, because a write miss would typically occur
 997         * on RFO.
 998         */
 999        [ C(OP_WRITE) ] = {
1000                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1001                [ C(RESULT_ACCESS) ] = 0x01b7,
1002                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1003                [ C(RESULT_MISS)   ] = 0x01b7,
1004        },
1005        [ C(OP_PREFETCH) ] = {
1006                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1007                [ C(RESULT_ACCESS) ] = 0x01b7,
1008                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1009                [ C(RESULT_MISS)   ] = 0x01b7,
1010        },
1011 },
1012 [ C(DTLB) ] = {
1013        [ C(OP_READ) ] = {
1014                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
1015                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
1016        },
1017        [ C(OP_WRITE) ] = {
1018                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
1019                [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
1020        },
1021        [ C(OP_PREFETCH) ] = {
1022                [ C(RESULT_ACCESS) ] = 0x0,
1023                [ C(RESULT_MISS)   ] = 0x0,
1024        },
1025 },
1026 [ C(ITLB) ] = {
1027        [ C(OP_READ) ] = {
1028                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
1029                [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
1030        },
1031        [ C(OP_WRITE) ] = {
1032                [ C(RESULT_ACCESS) ] = -1,
1033                [ C(RESULT_MISS)   ] = -1,
1034        },
1035        [ C(OP_PREFETCH) ] = {
1036                [ C(RESULT_ACCESS) ] = -1,
1037                [ C(RESULT_MISS)   ] = -1,
1038        },
1039 },
1040 [ C(BPU ) ] = {
1041        [ C(OP_READ) ] = {
1042                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1043                [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
1044        },
1045        [ C(OP_WRITE) ] = {
1046                [ C(RESULT_ACCESS) ] = -1,
1047                [ C(RESULT_MISS)   ] = -1,
1048        },
1049        [ C(OP_PREFETCH) ] = {
1050                [ C(RESULT_ACCESS) ] = -1,
1051                [ C(RESULT_MISS)   ] = -1,
1052        },
1053 },
1054 [ C(NODE) ] = {
1055        [ C(OP_READ) ] = {
1056                [ C(RESULT_ACCESS) ] = 0x01b7,
1057                [ C(RESULT_MISS)   ] = 0x01b7,
1058        },
1059        [ C(OP_WRITE) ] = {
1060                [ C(RESULT_ACCESS) ] = 0x01b7,
1061                [ C(RESULT_MISS)   ] = 0x01b7,
1062        },
1063        [ C(OP_PREFETCH) ] = {
1064                [ C(RESULT_ACCESS) ] = 0x01b7,
1065                [ C(RESULT_MISS)   ] = 0x01b7,
1066        },
1067 },
1068};
1069
1070/*
1071 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
1072 * See IA32 SDM Vol 3B 30.6.1.3
1073 */
1074
1075#define NHM_DMND_DATA_RD        (1 << 0)
1076#define NHM_DMND_RFO            (1 << 1)
1077#define NHM_DMND_IFETCH         (1 << 2)
1078#define NHM_DMND_WB             (1 << 3)
1079#define NHM_PF_DATA_RD          (1 << 4)
1080#define NHM_PF_DATA_RFO         (1 << 5)
1081#define NHM_PF_IFETCH           (1 << 6)
1082#define NHM_OFFCORE_OTHER       (1 << 7)
1083#define NHM_UNCORE_HIT          (1 << 8)
1084#define NHM_OTHER_CORE_HIT_SNP  (1 << 9)
1085#define NHM_OTHER_CORE_HITM     (1 << 10)
1086                                /* reserved */
1087#define NHM_REMOTE_CACHE_FWD    (1 << 12)
1088#define NHM_REMOTE_DRAM         (1 << 13)
1089#define NHM_LOCAL_DRAM          (1 << 14)
1090#define NHM_NON_DRAM            (1 << 15)
1091
1092#define NHM_LOCAL               (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
1093#define NHM_REMOTE              (NHM_REMOTE_DRAM)
1094
1095#define NHM_DMND_READ           (NHM_DMND_DATA_RD)
1096#define NHM_DMND_WRITE          (NHM_DMND_RFO|NHM_DMND_WB)
1097#define NHM_DMND_PREFETCH       (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
1098
1099#define NHM_L3_HIT      (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
1100#define NHM_L3_MISS     (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
1101#define NHM_L3_ACCESS   (NHM_L3_HIT|NHM_L3_MISS)
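
/*
 * Illustrative note (editor's addition): with these helpers, an LL demand
 * read "access" is programmed as NHM_DMND_READ|NHM_L3_ACCESS, i.e. demand
 * data reads with any response (uncore hit, cross-core snoop hit/HITM, or
 * any miss source), while the "miss" variant keeps only the sources outside
 * the local L3 (local/remote DRAM, remote cache forward, non-DRAM).
 */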
1102
1103static __initconst const u64 nehalem_hw_cache_extra_regs
1104                                [PERF_COUNT_HW_CACHE_MAX]
1105                                [PERF_COUNT_HW_CACHE_OP_MAX]
1106                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1107{
1108 [ C(LL  ) ] = {
1109        [ C(OP_READ) ] = {
1110                [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
1111                [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
1112        },
1113        [ C(OP_WRITE) ] = {
1114                [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
1115                [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
1116        },
1117        [ C(OP_PREFETCH) ] = {
1118                [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
1119                [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
1120        },
1121 },
1122 [ C(NODE) ] = {
1123        [ C(OP_READ) ] = {
1124                [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
1125                [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
1126        },
1127        [ C(OP_WRITE) ] = {
1128                [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
1129                [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
1130        },
1131        [ C(OP_PREFETCH) ] = {
1132                [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
1133                [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
1134        },
1135 },
1136};
1137
1138static __initconst const u64 nehalem_hw_cache_event_ids
1139                                [PERF_COUNT_HW_CACHE_MAX]
1140                                [PERF_COUNT_HW_CACHE_OP_MAX]
1141                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1142{
1143 [ C(L1D) ] = {
1144        [ C(OP_READ) ] = {
1145                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
1146                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
1147        },
1148        [ C(OP_WRITE) ] = {
1149                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
1150                [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
1151        },
1152        [ C(OP_PREFETCH) ] = {
1153                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
1154                [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
1155        },
1156 },
1157 [ C(L1I ) ] = {
1158        [ C(OP_READ) ] = {
1159                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
1160                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
1161        },
1162        [ C(OP_WRITE) ] = {
1163                [ C(RESULT_ACCESS) ] = -1,
1164                [ C(RESULT_MISS)   ] = -1,
1165        },
1166        [ C(OP_PREFETCH) ] = {
1167                [ C(RESULT_ACCESS) ] = 0x0,
1168                [ C(RESULT_MISS)   ] = 0x0,
1169        },
1170 },
1171 [ C(LL  ) ] = {
1172        [ C(OP_READ) ] = {
1173                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1174                [ C(RESULT_ACCESS) ] = 0x01b7,
1175                /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1176                [ C(RESULT_MISS)   ] = 0x01b7,
1177        },
1178        /*
1179         * Use RFO, not WRITEBACK, because a write miss would typically occur
1180         * on RFO.
1181         */
1182        [ C(OP_WRITE) ] = {
1183                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1184                [ C(RESULT_ACCESS) ] = 0x01b7,
1185                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1186                [ C(RESULT_MISS)   ] = 0x01b7,
1187        },
1188        [ C(OP_PREFETCH) ] = {
1189                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1190                [ C(RESULT_ACCESS) ] = 0x01b7,
1191                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1192                [ C(RESULT_MISS)   ] = 0x01b7,
1193        },
1194 },
1195 [ C(DTLB) ] = {
1196        [ C(OP_READ) ] = {
1197                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
1198                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
1199        },
1200        [ C(OP_WRITE) ] = {
1201                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
1202                [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
1203        },
1204        [ C(OP_PREFETCH) ] = {
1205                [ C(RESULT_ACCESS) ] = 0x0,
1206                [ C(RESULT_MISS)   ] = 0x0,
1207        },
1208 },
1209 [ C(ITLB) ] = {
1210        [ C(OP_READ) ] = {
1211                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
1212                [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
1213        },
1214        [ C(OP_WRITE) ] = {
1215                [ C(RESULT_ACCESS) ] = -1,
1216                [ C(RESULT_MISS)   ] = -1,
1217        },
1218        [ C(OP_PREFETCH) ] = {
1219                [ C(RESULT_ACCESS) ] = -1,
1220                [ C(RESULT_MISS)   ] = -1,
1221        },
1222 },
1223 [ C(BPU ) ] = {
1224        [ C(OP_READ) ] = {
1225                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1226                [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
1227        },
1228        [ C(OP_WRITE) ] = {
1229                [ C(RESULT_ACCESS) ] = -1,
1230                [ C(RESULT_MISS)   ] = -1,
1231        },
1232        [ C(OP_PREFETCH) ] = {
1233                [ C(RESULT_ACCESS) ] = -1,
1234                [ C(RESULT_MISS)   ] = -1,
1235        },
1236 },
1237 [ C(NODE) ] = {
1238        [ C(OP_READ) ] = {
1239                [ C(RESULT_ACCESS) ] = 0x01b7,
1240                [ C(RESULT_MISS)   ] = 0x01b7,
1241        },
1242        [ C(OP_WRITE) ] = {
1243                [ C(RESULT_ACCESS) ] = 0x01b7,
1244                [ C(RESULT_MISS)   ] = 0x01b7,
1245        },
1246        [ C(OP_PREFETCH) ] = {
1247                [ C(RESULT_ACCESS) ] = 0x01b7,
1248                [ C(RESULT_MISS)   ] = 0x01b7,
1249        },
1250 },
1251};
1252
1253static __initconst const u64 core2_hw_cache_event_ids
1254                                [PERF_COUNT_HW_CACHE_MAX]
1255                                [PERF_COUNT_HW_CACHE_OP_MAX]
1256                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1257{
1258 [ C(L1D) ] = {
1259        [ C(OP_READ) ] = {
1260                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
1261                [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
1262        },
1263        [ C(OP_WRITE) ] = {
1264                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
1265                [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
1266        },
1267        [ C(OP_PREFETCH) ] = {
1268                [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
1269                [ C(RESULT_MISS)   ] = 0,
1270        },
1271 },
1272 [ C(L1I ) ] = {
1273        [ C(OP_READ) ] = {
1274                [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
1275                [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
1276        },
1277        [ C(OP_WRITE) ] = {
1278                [ C(RESULT_ACCESS) ] = -1,
1279                [ C(RESULT_MISS)   ] = -1,
1280        },
1281        [ C(OP_PREFETCH) ] = {
1282                [ C(RESULT_ACCESS) ] = 0,
1283                [ C(RESULT_MISS)   ] = 0,
1284        },
1285 },
1286 [ C(LL  ) ] = {
1287        [ C(OP_READ) ] = {
1288                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
1289                [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
1290        },
1291        [ C(OP_WRITE) ] = {
1292                [ C(RESULT_ACCESS) ] = 0x4f2a, /* L2_ST.MESI                 */
1293                [ C(RESULT_MISS)   ] = 0x412a, /* L2_ST.ISTATE               */
1294        },
1295        [ C(OP_PREFETCH) ] = {
1296                [ C(RESULT_ACCESS) ] = 0,
1297                [ C(RESULT_MISS)   ] = 0,
1298        },
1299 },
1300 [ C(DTLB) ] = {
1301        [ C(OP_READ) ] = {
1302                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
1303                [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
1304        },
1305        [ C(OP_WRITE) ] = {
1306                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
1307                [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
1308        },
1309        [ C(OP_PREFETCH) ] = {
1310                [ C(RESULT_ACCESS) ] = 0,
1311                [ C(RESULT_MISS)   ] = 0,
1312        },
1313 },
1314 [ C(ITLB) ] = {
1315        [ C(OP_READ) ] = {
1316                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
1317                [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
1318        },
1319        [ C(OP_WRITE) ] = {
1320                [ C(RESULT_ACCESS) ] = -1,
1321                [ C(RESULT_MISS)   ] = -1,
1322        },
1323        [ C(OP_PREFETCH) ] = {
1324                [ C(RESULT_ACCESS) ] = -1,
1325                [ C(RESULT_MISS)   ] = -1,
1326        },
1327 },
1328 [ C(BPU ) ] = {
1329        [ C(OP_READ) ] = {
1330                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
1331                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
1332        },
1333        [ C(OP_WRITE) ] = {
1334                [ C(RESULT_ACCESS) ] = -1,
1335                [ C(RESULT_MISS)   ] = -1,
1336        },
1337        [ C(OP_PREFETCH) ] = {
1338                [ C(RESULT_ACCESS) ] = -1,
1339                [ C(RESULT_MISS)   ] = -1,
1340        },
1341 },
1342};
1343
1344static __initconst const u64 atom_hw_cache_event_ids
1345                                [PERF_COUNT_HW_CACHE_MAX]
1346                                [PERF_COUNT_HW_CACHE_OP_MAX]
1347                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1348{
1349 [ C(L1D) ] = {
1350        [ C(OP_READ) ] = {
1351                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
1352                [ C(RESULT_MISS)   ] = 0,
1353        },
1354        [ C(OP_WRITE) ] = {
1355                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
1356                [ C(RESULT_MISS)   ] = 0,
1357        },
1358        [ C(OP_PREFETCH) ] = {
1359                [ C(RESULT_ACCESS) ] = 0x0,
1360                [ C(RESULT_MISS)   ] = 0,
1361        },
1362 },
1363 [ C(L1I ) ] = {
1364        [ C(OP_READ) ] = {
1365                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
1366                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
1367        },
1368        [ C(OP_WRITE) ] = {
1369                [ C(RESULT_ACCESS) ] = -1,
1370                [ C(RESULT_MISS)   ] = -1,
1371        },
1372        [ C(OP_PREFETCH) ] = {
1373                [ C(RESULT_ACCESS) ] = 0,
1374                [ C(RESULT_MISS)   ] = 0,
1375        },
1376 },
1377 [ C(LL  ) ] = {
1378        [ C(OP_READ) ] = {
1379                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
1380                [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
1381        },
1382        [ C(OP_WRITE) ] = {
1383                [ C(RESULT_ACCESS) ] = 0x4f2a, /* L2_ST.MESI                 */
1384                [ C(RESULT_MISS)   ] = 0x412a, /* L2_ST.ISTATE               */
1385        },
1386        [ C(OP_PREFETCH) ] = {
1387                [ C(RESULT_ACCESS) ] = 0,
1388                [ C(RESULT_MISS)   ] = 0,
1389        },
1390 },
1391 [ C(DTLB) ] = {
1392        [ C(OP_READ) ] = {
1393                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
1394                [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
1395        },
1396        [ C(OP_WRITE) ] = {
1397                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
1398                [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
1399        },
1400        [ C(OP_PREFETCH) ] = {
1401                [ C(RESULT_ACCESS) ] = 0,
1402                [ C(RESULT_MISS)   ] = 0,
1403        },
1404 },
1405 [ C(ITLB) ] = {
1406        [ C(OP_READ) ] = {
1407                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
1408                [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
1409        },
1410        [ C(OP_WRITE) ] = {
1411                [ C(RESULT_ACCESS) ] = -1,
1412                [ C(RESULT_MISS)   ] = -1,
1413        },
1414        [ C(OP_PREFETCH) ] = {
1415                [ C(RESULT_ACCESS) ] = -1,
1416                [ C(RESULT_MISS)   ] = -1,
1417        },
1418 },
1419 [ C(BPU ) ] = {
1420        [ C(OP_READ) ] = {
1421                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
1422                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
1423        },
1424        [ C(OP_WRITE) ] = {
1425                [ C(RESULT_ACCESS) ] = -1,
1426                [ C(RESULT_MISS)   ] = -1,
1427        },
1428        [ C(OP_PREFETCH) ] = {
1429                [ C(RESULT_ACCESS) ] = -1,
1430                [ C(RESULT_MISS)   ] = -1,
1431        },
1432 },
1433};
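
/*
 * The values in these generalized cache-event tables are raw PERFEVTSEL
 * encodings: bits 0-7 carry the event select and bits 8-15 the unit mask.
 * For example, 0x0f40 is event 0x40 (L1D_CACHE_LD) with umask 0x0f (all
 * MESI states), and 0x0280 is event 0x80 with umask 0x02 (L1I.MISSES).
 * By the convention used elsewhere in the x86 perf code, 0 means no
 * suitable event is available for that combination and -1 means the
 * combination itself is meaningless (e.g. ITLB writes).
 */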
1434
1435EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1436EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
1437/* no_alloc_cycles.not_delivered */
1438EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1439               "event=0xca,umask=0x50");
1440EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1441/* uops_retired.all */
1442EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1443               "event=0xc2,umask=0x10");
1444/* uops_retired.all */
1445EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1446               "event=0xc2,umask=0x10");
1447
1448static struct attribute *slm_events_attrs[] = {
1449        EVENT_PTR(td_total_slots_slm),
1450        EVENT_PTR(td_total_slots_scale_slm),
1451        EVENT_PTR(td_fetch_bubbles_slm),
1452        EVENT_PTR(td_fetch_bubbles_scale_slm),
1453        EVENT_PTR(td_slots_issued_slm),
1454        EVENT_PTR(td_slots_retired_slm),
1455        NULL
1456};
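
/*
 * These EVENT_ATTR_STR() entries are exported through sysfs in the PMU's
 * events/ directory, where tools such as "perf stat --topdown" can pick up
 * the Silvermont top-down events.  The ".scale" of 2 presumably reflects
 * the two issue slots per cycle on this core, so topdown-total-slots is
 * derived as 2 * CPU_CLK_UNHALTED (event 0x3c).
 */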
1457
1458static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1459{
1460        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1461        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1462        INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1463        EVENT_EXTRA_END
1464};
1465
1466#define SLM_DMND_READ           SNB_DMND_DATA_RD
1467#define SLM_DMND_WRITE          SNB_DMND_RFO
1468#define SLM_DMND_PREFETCH       (SNB_PF_DATA_RD|SNB_PF_RFO)
1469
1470#define SLM_SNP_ANY             (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1471#define SLM_LLC_ACCESS          SNB_RESP_ANY
1472#define SLM_LLC_MISS            (SLM_SNP_ANY|SNB_NON_DRAM)
1473
1474static __initconst const u64 slm_hw_cache_extra_regs
1475                                [PERF_COUNT_HW_CACHE_MAX]
1476                                [PERF_COUNT_HW_CACHE_OP_MAX]
1477                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1478{
1479 [ C(LL  ) ] = {
1480        [ C(OP_READ) ] = {
1481                [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1482                [ C(RESULT_MISS)   ] = 0,
1483        },
1484        [ C(OP_WRITE) ] = {
1485                [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1486                [ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1487        },
1488        [ C(OP_PREFETCH) ] = {
1489                [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1490                [ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1491        },
1492 },
1493};
1494
1495static __initconst const u64 slm_hw_cache_event_ids
1496                                [PERF_COUNT_HW_CACHE_MAX]
1497                                [PERF_COUNT_HW_CACHE_OP_MAX]
1498                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1499{
1500 [ C(L1D) ] = {
1501        [ C(OP_READ) ] = {
1502                [ C(RESULT_ACCESS) ] = 0,
1503                [ C(RESULT_MISS)   ] = 0x0104, /* LD_DCU_MISS */
1504        },
1505        [ C(OP_WRITE) ] = {
1506                [ C(RESULT_ACCESS) ] = 0,
1507                [ C(RESULT_MISS)   ] = 0,
1508        },
1509        [ C(OP_PREFETCH) ] = {
1510                [ C(RESULT_ACCESS) ] = 0,
1511                [ C(RESULT_MISS)   ] = 0,
1512        },
1513 },
1514 [ C(L1I ) ] = {
1515        [ C(OP_READ) ] = {
1516                [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1517                [ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
1518        },
1519        [ C(OP_WRITE) ] = {
1520                [ C(RESULT_ACCESS) ] = -1,
1521                [ C(RESULT_MISS)   ] = -1,
1522        },
1523        [ C(OP_PREFETCH) ] = {
1524                [ C(RESULT_ACCESS) ] = 0,
1525                [ C(RESULT_MISS)   ] = 0,
1526        },
1527 },
1528 [ C(LL  ) ] = {
1529        [ C(OP_READ) ] = {
1530                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1531                [ C(RESULT_ACCESS) ] = 0x01b7,
1532                [ C(RESULT_MISS)   ] = 0,
1533        },
1534        [ C(OP_WRITE) ] = {
1535                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1536                [ C(RESULT_ACCESS) ] = 0x01b7,
1537                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1538                [ C(RESULT_MISS)   ] = 0x01b7,
1539        },
1540        [ C(OP_PREFETCH) ] = {
1541                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1542                [ C(RESULT_ACCESS) ] = 0x01b7,
1543                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1544                [ C(RESULT_MISS)   ] = 0x01b7,
1545        },
1546 },
1547 [ C(DTLB) ] = {
1548        [ C(OP_READ) ] = {
1549                [ C(RESULT_ACCESS) ] = 0,
1550                [ C(RESULT_MISS)   ] = 0x0804, /* LD_DTLB_MISS */
1551        },
1552        [ C(OP_WRITE) ] = {
1553                [ C(RESULT_ACCESS) ] = 0,
1554                [ C(RESULT_MISS)   ] = 0,
1555        },
1556        [ C(OP_PREFETCH) ] = {
1557                [ C(RESULT_ACCESS) ] = 0,
1558                [ C(RESULT_MISS)   ] = 0,
1559        },
1560 },
1561 [ C(ITLB) ] = {
1562        [ C(OP_READ) ] = {
1563                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1564                [ C(RESULT_MISS)   ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1565        },
1566        [ C(OP_WRITE) ] = {
1567                [ C(RESULT_ACCESS) ] = -1,
1568                [ C(RESULT_MISS)   ] = -1,
1569        },
1570        [ C(OP_PREFETCH) ] = {
1571                [ C(RESULT_ACCESS) ] = -1,
1572                [ C(RESULT_MISS)   ] = -1,
1573        },
1574 },
1575 [ C(BPU ) ] = {
1576        [ C(OP_READ) ] = {
1577                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1578                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1579        },
1580        [ C(OP_WRITE) ] = {
1581                [ C(RESULT_ACCESS) ] = -1,
1582                [ C(RESULT_MISS)   ] = -1,
1583        },
1584        [ C(OP_PREFETCH) ] = {
1585                [ C(RESULT_ACCESS) ] = -1,
1586                [ C(RESULT_MISS)   ] = -1,
1587        },
1588 },
1589};
1590
1591EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1592EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
1593/* UOPS_NOT_DELIVERED.ANY */
1594EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1595/* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1596EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1597/* UOPS_RETIRED.ANY */
1598EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1599/* UOPS_ISSUED.ANY */
1600EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1601
1602static struct attribute *glm_events_attrs[] = {
1603        EVENT_PTR(td_total_slots_glm),
1604        EVENT_PTR(td_total_slots_scale_glm),
1605        EVENT_PTR(td_fetch_bubbles_glm),
1606        EVENT_PTR(td_recovery_bubbles_glm),
1607        EVENT_PTR(td_slots_issued_glm),
1608        EVENT_PTR(td_slots_retired_glm),
1609        NULL
1610};
1611
1612static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1613        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1614        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1615        INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1616        EVENT_EXTRA_END
1617};
1618
1619#define GLM_DEMAND_DATA_RD              BIT_ULL(0)
1620#define GLM_DEMAND_RFO                  BIT_ULL(1)
1621#define GLM_ANY_RESPONSE                BIT_ULL(16)
1622#define GLM_SNP_NONE_OR_MISS            BIT_ULL(33)
1623#define GLM_DEMAND_READ                 GLM_DEMAND_DATA_RD
1624#define GLM_DEMAND_WRITE                GLM_DEMAND_RFO
1625#define GLM_DEMAND_PREFETCH             (SNB_PF_DATA_RD|SNB_PF_RFO)
1626#define GLM_LLC_ACCESS                  GLM_ANY_RESPONSE
1627#define GLM_SNP_ANY                     (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1628#define GLM_LLC_MISS                    (GLM_SNP_ANY|SNB_NON_DRAM)
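
/*
 * For illustration: with the definitions above, a demand-read LLC access
 * programs OFFCORE_RSP with
 *   GLM_DEMAND_READ | GLM_LLC_ACCESS == BIT_ULL(0) | BIT_ULL(16) == 0x10001,
 * while the miss encoding additionally folds in GLM_SNP_NONE_OR_MISS
 * (bit 33) and the SNB_* snoop/non-DRAM bits defined earlier in this file.
 */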
1629
1630static __initconst const u64 glm_hw_cache_event_ids
1631                                [PERF_COUNT_HW_CACHE_MAX]
1632                                [PERF_COUNT_HW_CACHE_OP_MAX]
1633                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1634        [C(L1D)] = {
1635                [C(OP_READ)] = {
1636                        [C(RESULT_ACCESS)]      = 0x81d0,       /* MEM_UOPS_RETIRED.ALL_LOADS */
1637                        [C(RESULT_MISS)]        = 0x0,
1638                },
1639                [C(OP_WRITE)] = {
1640                        [C(RESULT_ACCESS)]      = 0x82d0,       /* MEM_UOPS_RETIRED.ALL_STORES */
1641                        [C(RESULT_MISS)]        = 0x0,
1642                },
1643                [C(OP_PREFETCH)] = {
1644                        [C(RESULT_ACCESS)]      = 0x0,
1645                        [C(RESULT_MISS)]        = 0x0,
1646                },
1647        },
1648        [C(L1I)] = {
1649                [C(OP_READ)] = {
1650                        [C(RESULT_ACCESS)]      = 0x0380,       /* ICACHE.ACCESSES */
1651                        [C(RESULT_MISS)]        = 0x0280,       /* ICACHE.MISSES */
1652                },
1653                [C(OP_WRITE)] = {
1654                        [C(RESULT_ACCESS)]      = -1,
1655                        [C(RESULT_MISS)]        = -1,
1656                },
1657                [C(OP_PREFETCH)] = {
1658                        [C(RESULT_ACCESS)]      = 0x0,
1659                        [C(RESULT_MISS)]        = 0x0,
1660                },
1661        },
1662        [C(LL)] = {
1663                [C(OP_READ)] = {
1664                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1665                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1666                },
1667                [C(OP_WRITE)] = {
1668                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1669                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1670                },
1671                [C(OP_PREFETCH)] = {
1672                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1673                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1674                },
1675        },
1676        [C(DTLB)] = {
1677                [C(OP_READ)] = {
1678                        [C(RESULT_ACCESS)]      = 0x81d0,       /* MEM_UOPS_RETIRED.ALL_LOADS */
1679                        [C(RESULT_MISS)]        = 0x0,
1680                },
1681                [C(OP_WRITE)] = {
1682                        [C(RESULT_ACCESS)]      = 0x82d0,       /* MEM_UOPS_RETIRED.ALL_STORES */
1683                        [C(RESULT_MISS)]        = 0x0,
1684                },
1685                [C(OP_PREFETCH)] = {
1686                        [C(RESULT_ACCESS)]      = 0x0,
1687                        [C(RESULT_MISS)]        = 0x0,
1688                },
1689        },
1690        [C(ITLB)] = {
1691                [C(OP_READ)] = {
1692                        [C(RESULT_ACCESS)]      = 0x00c0,       /* INST_RETIRED.ANY_P */
1693                        [C(RESULT_MISS)]        = 0x0481,       /* ITLB.MISS */
1694                },
1695                [C(OP_WRITE)] = {
1696                        [C(RESULT_ACCESS)]      = -1,
1697                        [C(RESULT_MISS)]        = -1,
1698                },
1699                [C(OP_PREFETCH)] = {
1700                        [C(RESULT_ACCESS)]      = -1,
1701                        [C(RESULT_MISS)]        = -1,
1702                },
1703        },
1704        [C(BPU)] = {
1705                [C(OP_READ)] = {
1706                        [C(RESULT_ACCESS)]      = 0x00c4,       /* BR_INST_RETIRED.ALL_BRANCHES */
1707                        [C(RESULT_MISS)]        = 0x00c5,       /* BR_MISP_RETIRED.ALL_BRANCHES */
1708                },
1709                [C(OP_WRITE)] = {
1710                        [C(RESULT_ACCESS)]      = -1,
1711                        [C(RESULT_MISS)]        = -1,
1712                },
1713                [C(OP_PREFETCH)] = {
1714                        [C(RESULT_ACCESS)]      = -1,
1715                        [C(RESULT_MISS)]        = -1,
1716                },
1717        },
1718};
1719
1720static __initconst const u64 glm_hw_cache_extra_regs
1721                                [PERF_COUNT_HW_CACHE_MAX]
1722                                [PERF_COUNT_HW_CACHE_OP_MAX]
1723                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1724        [C(LL)] = {
1725                [C(OP_READ)] = {
1726                        [C(RESULT_ACCESS)]      = GLM_DEMAND_READ|
1727                                                  GLM_LLC_ACCESS,
1728                        [C(RESULT_MISS)]        = GLM_DEMAND_READ|
1729                                                  GLM_LLC_MISS,
1730                },
1731                [C(OP_WRITE)] = {
1732                        [C(RESULT_ACCESS)]      = GLM_DEMAND_WRITE|
1733                                                  GLM_LLC_ACCESS,
1734                        [C(RESULT_MISS)]        = GLM_DEMAND_WRITE|
1735                                                  GLM_LLC_MISS,
1736                },
1737                [C(OP_PREFETCH)] = {
1738                        [C(RESULT_ACCESS)]      = GLM_DEMAND_PREFETCH|
1739                                                  GLM_LLC_ACCESS,
1740                        [C(RESULT_MISS)]        = GLM_DEMAND_PREFETCH|
1741                                                  GLM_LLC_MISS,
1742                },
1743        },
1744};
1745
1746static __initconst const u64 glp_hw_cache_event_ids
1747                                [PERF_COUNT_HW_CACHE_MAX]
1748                                [PERF_COUNT_HW_CACHE_OP_MAX]
1749                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1750        [C(L1D)] = {
1751                [C(OP_READ)] = {
1752                        [C(RESULT_ACCESS)]      = 0x81d0,       /* MEM_UOPS_RETIRED.ALL_LOADS */
1753                        [C(RESULT_MISS)]        = 0x0,
1754                },
1755                [C(OP_WRITE)] = {
1756                        [C(RESULT_ACCESS)]      = 0x82d0,       /* MEM_UOPS_RETIRED.ALL_STORES */
1757                        [C(RESULT_MISS)]        = 0x0,
1758                },
1759                [C(OP_PREFETCH)] = {
1760                        [C(RESULT_ACCESS)]      = 0x0,
1761                        [C(RESULT_MISS)]        = 0x0,
1762                },
1763        },
1764        [C(L1I)] = {
1765                [C(OP_READ)] = {
1766                        [C(RESULT_ACCESS)]      = 0x0380,       /* ICACHE.ACCESSES */
1767                        [C(RESULT_MISS)]        = 0x0280,       /* ICACHE.MISSES */
1768                },
1769                [C(OP_WRITE)] = {
1770                        [C(RESULT_ACCESS)]      = -1,
1771                        [C(RESULT_MISS)]        = -1,
1772                },
1773                [C(OP_PREFETCH)] = {
1774                        [C(RESULT_ACCESS)]      = 0x0,
1775                        [C(RESULT_MISS)]        = 0x0,
1776                },
1777        },
1778        [C(LL)] = {
1779                [C(OP_READ)] = {
1780                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1781                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1782                },
1783                [C(OP_WRITE)] = {
1784                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1785                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1786                },
1787                [C(OP_PREFETCH)] = {
1788                        [C(RESULT_ACCESS)]      = 0x0,
1789                        [C(RESULT_MISS)]        = 0x0,
1790                },
1791        },
1792        [C(DTLB)] = {
1793                [C(OP_READ)] = {
1794                        [C(RESULT_ACCESS)]      = 0x81d0,       /* MEM_UOPS_RETIRED.ALL_LOADS */
1795                        [C(RESULT_MISS)]        = 0xe08,        /* DTLB_LOAD_MISSES.WALK_COMPLETED */
1796                },
1797                [C(OP_WRITE)] = {
1798                        [C(RESULT_ACCESS)]      = 0x82d0,       /* MEM_UOPS_RETIRED.ALL_STORES */
1799                        [C(RESULT_MISS)]        = 0xe49,        /* DTLB_STORE_MISSES.WALK_COMPLETED */
1800                },
1801                [C(OP_PREFETCH)] = {
1802                        [C(RESULT_ACCESS)]      = 0x0,
1803                        [C(RESULT_MISS)]        = 0x0,
1804                },
1805        },
1806        [C(ITLB)] = {
1807                [C(OP_READ)] = {
1808                        [C(RESULT_ACCESS)]      = 0x00c0,       /* INST_RETIRED.ANY_P */
1809                        [C(RESULT_MISS)]        = 0x0481,       /* ITLB.MISS */
1810                },
1811                [C(OP_WRITE)] = {
1812                        [C(RESULT_ACCESS)]      = -1,
1813                        [C(RESULT_MISS)]        = -1,
1814                },
1815                [C(OP_PREFETCH)] = {
1816                        [C(RESULT_ACCESS)]      = -1,
1817                        [C(RESULT_MISS)]        = -1,
1818                },
1819        },
1820        [C(BPU)] = {
1821                [C(OP_READ)] = {
1822                        [C(RESULT_ACCESS)]      = 0x00c4,       /* BR_INST_RETIRED.ALL_BRANCHES */
1823                        [C(RESULT_MISS)]        = 0x00c5,       /* BR_MISP_RETIRED.ALL_BRANCHES */
1824                },
1825                [C(OP_WRITE)] = {
1826                        [C(RESULT_ACCESS)]      = -1,
1827                        [C(RESULT_MISS)]        = -1,
1828                },
1829                [C(OP_PREFETCH)] = {
1830                        [C(RESULT_ACCESS)]      = -1,
1831                        [C(RESULT_MISS)]        = -1,
1832                },
1833        },
1834};
1835
1836static __initconst const u64 glp_hw_cache_extra_regs
1837                                [PERF_COUNT_HW_CACHE_MAX]
1838                                [PERF_COUNT_HW_CACHE_OP_MAX]
1839                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1840        [C(LL)] = {
1841                [C(OP_READ)] = {
1842                        [C(RESULT_ACCESS)]      = GLM_DEMAND_READ|
1843                                                  GLM_LLC_ACCESS,
1844                        [C(RESULT_MISS)]        = GLM_DEMAND_READ|
1845                                                  GLM_LLC_MISS,
1846                },
1847                [C(OP_WRITE)] = {
1848                        [C(RESULT_ACCESS)]      = GLM_DEMAND_WRITE|
1849                                                  GLM_LLC_ACCESS,
1850                        [C(RESULT_MISS)]        = GLM_DEMAND_WRITE|
1851                                                  GLM_LLC_MISS,
1852                },
1853                [C(OP_PREFETCH)] = {
1854                        [C(RESULT_ACCESS)]      = 0x0,
1855                        [C(RESULT_MISS)]        = 0x0,
1856                },
1857        },
1858};
1859
1860#define TNT_LOCAL_DRAM                  BIT_ULL(26)
1861#define TNT_DEMAND_READ                 GLM_DEMAND_DATA_RD
1862#define TNT_DEMAND_WRITE                GLM_DEMAND_RFO
1863#define TNT_LLC_ACCESS                  GLM_ANY_RESPONSE
1864#define TNT_SNP_ANY                     (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
1865                                         SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
1866#define TNT_LLC_MISS                    (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
1867
1868static __initconst const u64 tnt_hw_cache_extra_regs
1869                                [PERF_COUNT_HW_CACHE_MAX]
1870                                [PERF_COUNT_HW_CACHE_OP_MAX]
1871                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1872        [C(LL)] = {
1873                [C(OP_READ)] = {
1874                        [C(RESULT_ACCESS)]      = TNT_DEMAND_READ|
1875                                                  TNT_LLC_ACCESS,
1876                        [C(RESULT_MISS)]        = TNT_DEMAND_READ|
1877                                                  TNT_LLC_MISS,
1878                },
1879                [C(OP_WRITE)] = {
1880                        [C(RESULT_ACCESS)]      = TNT_DEMAND_WRITE|
1881                                                  TNT_LLC_ACCESS,
1882                        [C(RESULT_MISS)]        = TNT_DEMAND_WRITE|
1883                                                  TNT_LLC_MISS,
1884                },
1885                [C(OP_PREFETCH)] = {
1886                        [C(RESULT_ACCESS)]      = 0x0,
1887                        [C(RESULT_MISS)]        = 0x0,
1888                },
1889        },
1890};
1891
1892static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
1893        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1894        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffff9fffull, RSP_0),
1895        INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xffffff9fffull, RSP_1),
1896        EVENT_EXTRA_END
1897};
1898
1899#define KNL_OT_L2_HITE          BIT_ULL(19) /* Other Tile L2 Hit (E state) */
1900#define KNL_OT_L2_HITF          BIT_ULL(20) /* Other Tile L2 Hit (F state) */
1901#define KNL_MCDRAM_LOCAL        BIT_ULL(21)
1902#define KNL_MCDRAM_FAR          BIT_ULL(22)
1903#define KNL_DDR_LOCAL           BIT_ULL(23)
1904#define KNL_DDR_FAR             BIT_ULL(24)
1905#define KNL_DRAM_ANY            (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
1906                                    KNL_DDR_LOCAL | KNL_DDR_FAR)
1907#define KNL_L2_READ             SLM_DMND_READ
1908#define KNL_L2_WRITE            SLM_DMND_WRITE
1909#define KNL_L2_PREFETCH         SLM_DMND_PREFETCH
1910#define KNL_L2_ACCESS           SLM_LLC_ACCESS
1911#define KNL_L2_MISS             (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
1912                                   KNL_DRAM_ANY | SNB_SNP_ANY | \
1913                                                  SNB_NON_DRAM)
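
/*
 * For illustration: KNL_DRAM_ANY collapses the four memory-source bits
 * (MCDRAM local/far and DDR local/far, bits 21-24) into 0x1e00000, so
 * KNL_L2_MISS treats other-tile L2 hits, any DRAM source, any snoop
 * response and non-DRAM sources as misses of the local (tile) L2.
 */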
1914
1915static __initconst const u64 knl_hw_cache_extra_regs
1916                                [PERF_COUNT_HW_CACHE_MAX]
1917                                [PERF_COUNT_HW_CACHE_OP_MAX]
1918                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1919        [C(LL)] = {
1920                [C(OP_READ)] = {
1921                        [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
1922                        [C(RESULT_MISS)]   = 0,
1923                },
1924                [C(OP_WRITE)] = {
1925                        [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
1926                        [C(RESULT_MISS)]   = KNL_L2_WRITE | KNL_L2_MISS,
1927                },
1928                [C(OP_PREFETCH)] = {
1929                        [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
1930                        [C(RESULT_MISS)]   = KNL_L2_PREFETCH | KNL_L2_MISS,
1931                },
1932        },
1933};
1934
1935/*
1936 * Used from PMIs where the LBRs are already disabled.
1937 *
1938 * This function may be called consecutively; the PMU must remain in the
1939 * disabled state across such consecutive calls.
1940 *
1941 * During consecutive calls, the same disable value will be written to related
1942 * registers, so the PMU state remains unchanged.
1943 *
1944 * intel_bts events don't coexist with intel PMU's BTS events because of
1945 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
1946 * disabled around intel PMU's event batching etc, only inside the PMI handler.
1947 */
1948static void __intel_pmu_disable_all(void)
1949{
1950        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1951
1952        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1953
1954        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1955                intel_pmu_disable_bts();
1956
1957        intel_pmu_pebs_disable_all();
1958}
1959
1960static void intel_pmu_disable_all(void)
1961{
1962        __intel_pmu_disable_all();
1963        intel_pmu_lbr_disable_all();
1964}
1965
1966static void __intel_pmu_enable_all(int added, bool pmi)
1967{
1968        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1969
1970        intel_pmu_pebs_enable_all();
1971        intel_pmu_lbr_enable_all(pmi);
1972        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
1973                        x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
1974
1975        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1976                struct perf_event *event =
1977                        cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
1978
1979                if (WARN_ON_ONCE(!event))
1980                        return;
1981
1982                intel_pmu_enable_bts(event->hw.config);
1983        }
1984}
1985
1986static void intel_pmu_enable_all(int added)
1987{
1988        __intel_pmu_enable_all(added, false);
1989}
1990
1991/*
1992 * Workaround for:
1993 *   Intel Errata AAK100 (model 26)
1994 *   Intel Errata AAP53  (model 30)
1995 *   Intel Errata BD53   (model 44)
1996 *
1997 * The official story:
1998 *   These chips need to be 'reset' when adding counters by programming the
1999 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
2000 *   in sequence on the same PMC or on different PMCs.
2001 *
2002 * In practice it appears some of these events do in fact count, and
2003 * we need to program all 4 events.
2004 */
2005static void intel_pmu_nhm_workaround(void)
2006{
2007        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2008        static const unsigned long nhm_magic[4] = {
2009                0x4300B5,
2010                0x4300D2,
2011                0x4300B1,
2012                0x4300B1
2013        };
2014        struct perf_event *event;
2015        int i;
2016
2017        /*
2018         * The errata requires the following steps:
2019         * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
2020         * 2) Configure 4 PERFEVTSELx with the magic events and clear
2021         *    the corresponding PMCx;
2022         * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
2023         * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
2024         * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
2025         */
2026
2027        /*
2028         * The real steps we choose are a little different from above.
2029         * A) To reduce MSR operations, we don't run step 1) as they
2030         *    are already cleared before this function is called;
2031         * B) Call x86_perf_event_update to save PMCx before configuring
2032         *    PERFEVTSELx with magic number;
2033         * C) With step 5), we do clear only when the PERFEVTSELx is
2034         *    not used currently.
2035         * D) Call x86_perf_event_set_period to restore PMCx;
2036         */
2037
2038        /* We always operate on 4 pairs of PERF counters */
2039        for (i = 0; i < 4; i++) {
2040                event = cpuc->events[i];
2041                if (event)
2042                        x86_perf_event_update(event);
2043        }
2044
2045        for (i = 0; i < 4; i++) {
2046                wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
2047                wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
2048        }
2049
2050        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
2051        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
2052
2053        for (i = 0; i < 4; i++) {
2054                event = cpuc->events[i];
2055
2056                if (event) {
2057                        x86_perf_event_set_period(event);
2058                        __x86_pmu_enable_event(&event->hw,
2059                                        ARCH_PERFMON_EVENTSEL_ENABLE);
2060                } else
2061                        wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
2062        }
2063}
2064
2065static void intel_pmu_nhm_enable_all(int added)
2066{
2067        if (added)
2068                intel_pmu_nhm_workaround();
2069        intel_pmu_enable_all(added);
2070}
2071
2072static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
2073{
2074        u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
2075
2076        if (cpuc->tfa_shadow != val) {
2077                cpuc->tfa_shadow = val;
2078                wrmsrl(MSR_TSX_FORCE_ABORT, val);
2079        }
2080}
2081
2082static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2083{
2084        /*
2085         * We're going to use PMC3, make sure TFA is set before we touch it.
2086         */
2087        if (cntr == 3)
2088                intel_set_tfa(cpuc, true);
2089}
2090
2091static void intel_tfa_pmu_enable_all(int added)
2092{
2093        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2094
2095        /*
2096         * If we find PMC3 is no longer used when we enable the PMU, we can
2097         * clear TFA.
2098         */
2099        if (!test_bit(3, cpuc->active_mask))
2100                intel_set_tfa(cpuc, false);
2101
2102        intel_pmu_enable_all(added);
2103}
2104
2105static void enable_counter_freeze(void)
2106{
2107        update_debugctlmsr(get_debugctlmsr() |
2108                        DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
2109}
2110
2111static void disable_counter_freeze(void)
2112{
2113        update_debugctlmsr(get_debugctlmsr() &
2114                        ~DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI);
2115}
2116
2117static inline u64 intel_pmu_get_status(void)
2118{
2119        u64 status;
2120
2121        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
2122
2123        return status;
2124}
2125
2126static inline void intel_pmu_ack_status(u64 ack)
2127{
2128        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2129}
2130
2131static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
2132{
2133        int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
2134        u64 ctrl_val, mask;
2135
2136        mask = 0xfULL << (idx * 4);
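        /*
         * Example: for fixed counter 1 (idx == 1) this yields mask == 0xf0,
         * i.e. that counter's 4-bit control field inside
         * MSR_ARCH_PERFMON_FIXED_CTR_CTRL, which is cleared below.
         */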
2137
2138        rdmsrl(hwc->config_base, ctrl_val);
2139        ctrl_val &= ~mask;
2140        wrmsrl(hwc->config_base, ctrl_val);
2141}
2142
2143static inline bool event_is_checkpointed(struct perf_event *event)
2144{
2145        return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2146}
2147
2148static void intel_pmu_disable_event(struct perf_event *event)
2149{
2150        struct hw_perf_event *hwc = &event->hw;
2151        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2152
2153        if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
2154                intel_pmu_disable_bts();
2155                intel_pmu_drain_bts_buffer();
2156                return;
2157        }
2158
2159        cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
2160        cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
2161        cpuc->intel_cp_status &= ~(1ull << hwc->idx);
2162
2163        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
2164                intel_pmu_disable_fixed(hwc);
2165                return;
2166        }
2167
2168        x86_pmu_disable_event(event);
2169
2170        /*
2171         * Needs to be called after x86_pmu_disable_event,
2172         * so we don't trigger the event without PEBS bit set.
2173         */
2174        if (unlikely(event->attr.precise_ip))
2175                intel_pmu_pebs_disable(event);
2176}
2177
2178static void intel_pmu_del_event(struct perf_event *event)
2179{
2180        if (needs_branch_stack(event))
2181                intel_pmu_lbr_del(event);
2182        if (event->attr.precise_ip)
2183                intel_pmu_pebs_del(event);
2184}
2185
2186static void intel_pmu_read_event(struct perf_event *event)
2187{
2188        if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2189                intel_pmu_auto_reload_read(event);
2190        else
2191                x86_perf_event_update(event);
2192}
2193
2194static void intel_pmu_enable_fixed(struct perf_event *event)
2195{
2196        struct hw_perf_event *hwc = &event->hw;
2197        int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
2198        u64 ctrl_val, mask, bits = 0;
2199
2200        /*
2201         * Enable IRQ generation (0x8), if not PEBS,
2202         * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
2203         * if requested:
2204         */
2205        if (!event->attr.precise_ip)
2206                bits |= 0x8;
2207        if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2208                bits |= 0x2;
2209        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2210                bits |= 0x1;
2211
2212        /*
2213         * ANY bit is supported in v3 and up
2214         */
2215        if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2216                bits |= 0x4;
2217
2218        bits <<= (idx * 4);
2219        mask = 0xfULL << (idx * 4);
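        /*
         * Example: a non-PEBS fixed event counting in both ring 0 and ring 3
         * on fixed counter 1 ends up with bits == 0xb (0x8|0x2|0x1) shifted
         * to 0xb0 and mask == 0xf0, replacing only that counter's control
         * field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL.
         */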
2220
2221        if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
2222                bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
2223                mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
2224        }
2225
2226        rdmsrl(hwc->config_base, ctrl_val);
2227        ctrl_val &= ~mask;
2228        ctrl_val |= bits;
2229        wrmsrl(hwc->config_base, ctrl_val);
2230}
2231
2232static void intel_pmu_enable_event(struct perf_event *event)
2233{
2234        struct hw_perf_event *hwc = &event->hw;
2235        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2236
2237        if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
2238                if (!__this_cpu_read(cpu_hw_events.enabled))
2239                        return;
2240
2241                intel_pmu_enable_bts(hwc->config);
2242                return;
2243        }
2244
2245        if (event->attr.exclude_host)
2246                cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
2247        if (event->attr.exclude_guest)
2248                cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
2249
2250        if (unlikely(event_is_checkpointed(event)))
2251                cpuc->intel_cp_status |= (1ull << hwc->idx);
2252
2253        if (unlikely(event->attr.precise_ip))
2254                intel_pmu_pebs_enable(event);
2255
2256        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
2257                intel_pmu_enable_fixed(event);
2258                return;
2259        }
2260
2261        __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2262}
2263
2264static void intel_pmu_add_event(struct perf_event *event)
2265{
2266        if (event->attr.precise_ip)
2267                intel_pmu_pebs_add(event);
2268        if (needs_branch_stack(event))
2269                intel_pmu_lbr_add(event);
2270}
2271
2272/*
2273 * Save and restart an expired event. Called by NMI contexts,
2274 * so it has to be careful about preempting normal event ops:
2275 */
2276int intel_pmu_save_and_restart(struct perf_event *event)
2277{
2278        x86_perf_event_update(event);
2279        /*
2280         * For a checkpointed counter always reset back to 0.  This
2281         * avoids a situation where the counter overflows, aborts the
2282         * transaction and is then set back to shortly before the
2283         * overflow, and overflows and aborts again.
2284         */
2285        if (unlikely(event_is_checkpointed(event))) {
2286                /* No race with NMIs because the counter should not be armed */
2287                wrmsrl(event->hw.event_base, 0);
2288                local64_set(&event->hw.prev_count, 0);
2289        }
2290        return x86_perf_event_set_period(event);
2291}
2292
2293static void intel_pmu_reset(void)
2294{
2295        struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2296        unsigned long flags;
2297        int idx;
2298
2299        if (!x86_pmu.num_counters)
2300                return;
2301
2302        local_irq_save(flags);
2303
2304        pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
2305
2306        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
2307                wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
2308                wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
2309        }
2310        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
2311                wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
2312
2313        if (ds)
2314                ds->bts_index = ds->bts_buffer_base;
2315
2316        /* Ack all overflows and disable fixed counters */
2317        if (x86_pmu.version >= 2) {
2318                intel_pmu_ack_status(intel_pmu_get_status());
2319                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2320        }
2321
2322        /* Reset LBRs and LBR freezing */
2323        if (x86_pmu.lbr_nr) {
2324                update_debugctlmsr(get_debugctlmsr() &
2325                        ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
2326        }
2327
2328        local_irq_restore(flags);
2329}
2330
2331static int handle_pmi_common(struct pt_regs *regs, u64 status)
2332{
2333        struct perf_sample_data data;
2334        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2335        int bit;
2336        int handled = 0;
2337
2338        inc_irq_stat(apic_perf_irqs);
2339
2340        /*
2341         * Ignore a range of extra bits in status that do not indicate
2342         * overflow by themselves.
2343         */
2344        status &= ~(GLOBAL_STATUS_COND_CHG |
2345                    GLOBAL_STATUS_ASIF |
2346                    GLOBAL_STATUS_LBRS_FROZEN);
2347        if (!status)
2348                return 0;
2349        /*
2350         * In case multiple PEBS events are sampled at the same time,
2351         * it is possible to have GLOBAL_STATUS bit 62 set indicating
2352         * PEBS buffer overflow and also seeing at most 3 PEBS counters
2353         * having their bits set in the status register. This is a sign
2354         * that there was at least one PEBS record pending at the time
2355         * of the PMU interrupt. PEBS counters must only be processed
2356         * via the drain_pebs() calls and not via the regular sample
2357         * processing loop coming after that the function, otherwise
2358         * phony regular samples may be generated in the sampling buffer
2359         * not marked with the EXACT tag. Another possibility is to have
2360         * one PEBS event and at least one non-PEBS event whic hoverflows
2361         * while PEBS has armed. In this case, bit 62 of GLOBAL_STATUS will
2362         * not be set, yet the overflow status bit for the PEBS counter will
2363         * be on Skylake.
2364         *
2365         * To avoid this problem, we systematically ignore the PEBS-enabled
2366         * counters from the GLOBAL_STATUS mask and we always process PEBS
2367         * events via drain_pebs().
2368         */
2369        if (x86_pmu.flags & PMU_FL_PEBS_ALL)
2370                status &= ~cpuc->pebs_enabled;
2371        else
2372                status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
2373
2374        /*
2375         * PEBS overflow sets bit 62 in the global status register
2376         */
2377        if (__test_and_clear_bit(62, (unsigned long *)&status)) {
2378                handled++;
2379                x86_pmu.drain_pebs(regs);
2380                status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
2381        }
2382
2383        /*
2384         * Intel PT
2385         */
2386        if (__test_and_clear_bit(55, (unsigned long *)&status)) {
2387                handled++;
2388                if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
2389                        perf_guest_cbs->handle_intel_pt_intr))
2390                        perf_guest_cbs->handle_intel_pt_intr();
2391                else
2392                        intel_pt_interrupt();
2393        }
2394
2395        /*
2396         * Checkpointed counters can lead to 'spurious' PMIs because the
2397         * rollback caused by the PMI will have cleared the overflow status
2398         * bit. Therefore always force probe these counters.
2399         */
2400        status |= cpuc->intel_cp_status;
2401
2402        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2403                struct perf_event *event = cpuc->events[bit];
2404
2405                handled++;
2406
2407                if (!test_bit(bit, cpuc->active_mask))
2408                        continue;
2409
2410                if (!intel_pmu_save_and_restart(event))
2411                        continue;
2412
2413                perf_sample_data_init(&data, 0, event->hw.last_period);
2414
2415                if (has_branch_stack(event))
2416                        data.br_stack = &cpuc->lbr_stack;
2417
2418                if (perf_event_overflow(event, &data, regs))
2419                        x86_pmu_stop(event, 0);
2420        }
2421
2422        return handled;
2423}
2424
2425static bool disable_counter_freezing = true;
2426static int __init intel_perf_counter_freezing_setup(char *s)
2427{
2428        bool res;
2429
2430        if (kstrtobool(s, &res))
2431                return -EINVAL;
2432
2433        disable_counter_freezing = !res;
2434        return 1;
2435}
2436__setup("perf_v4_pmi=", intel_perf_counter_freezing_setup);
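
/*
 * Usage sketch: "perf_v4_pmi=" takes a boolean on the kernel command line.
 * Because the parsed value is inverted above, booting with perf_v4_pmi=1
 * clears disable_counter_freezing and allows the counter-freezing handler
 * below to be used, while perf_v4_pmi=0 keeps counter freezing disabled.
 */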
2437
2438/*
2439 * Simplified handler for Arch Perfmon v4:
2440 * - We rely on counter freezing/unfreezing to enable/disable the PMU.
2441 * This is done automatically on PMU ack.
2442 * - Ack the PMU only after the APIC.
2443 */
2444
2445static int intel_pmu_handle_irq_v4(struct pt_regs *regs)
2446{
2447        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2448        int handled = 0;
2449        bool bts = false;
2450        u64 status;
2451        int pmu_enabled = cpuc->enabled;
2452        int loops = 0;
2453
2454        /* PMU has been disabled because of counter freezing */
2455        cpuc->enabled = 0;
2456        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2457                bts = true;
2458                intel_bts_disable_local();
2459                handled = intel_pmu_drain_bts_buffer();
2460                handled += intel_bts_interrupt();
2461        }
2462        status = intel_pmu_get_status();
2463        if (!status)
2464                goto done;
2465again:
2466        intel_pmu_lbr_read();
2467        if (++loops > 100) {
2468                static bool warned;
2469
2470                if (!warned) {
2471                        WARN(1, "perfevents: irq loop stuck!\n");
2472                        perf_event_print_debug();
2473                        warned = true;
2474                }
2475                intel_pmu_reset();
2476                goto done;
2477        }
2478
2479
2480        handled += handle_pmi_common(regs, status);
2481done:
2482        /* Ack the PMI in the APIC */
2483        apic_write(APIC_LVTPC, APIC_DM_NMI);
2484
2485        /*
2486         * The counters start counting again as soon as the status is acked.
2487         * Make the ack as close as possible to the IRET. This avoids bogus
2488         * freezing on Skylake CPUs.
2489         */
2490        if (status) {
2491                intel_pmu_ack_status(status);
2492        } else {
2493                /*
2494                 * The CPU may issue two PMIs very close to each other.
2495                 * When the PMI handler services the first one, the
2496                 * GLOBAL_STATUS is already updated to reflect both.
2497                 * When it IRETs, the second PMI is immediately
2498                 * handled and it sees a clear status. In the meantime,
2499                 * there may be a third PMI, because the freezing bit
2500                 * isn't set after the ack in the first PMI handler.
2501                 * Double check if there is more work to be done.
2502                 */
2503                status = intel_pmu_get_status();
2504                if (status)
2505                        goto again;
2506        }
2507
2508        if (bts)
2509                intel_bts_enable_local();
2510        cpuc->enabled = pmu_enabled;
2511        return handled;
2512}
2513
2514/*
2515 * This handler is triggered by the local APIC, so the APIC IRQ handling
2516 * rules apply:
2517 */
2518static int intel_pmu_handle_irq(struct pt_regs *regs)
2519{
2520        struct cpu_hw_events *cpuc;
2521        int loops;
2522        u64 status;
2523        int handled;
2524        int pmu_enabled;
2525
2526        cpuc = this_cpu_ptr(&cpu_hw_events);
2527
2528        /*
2529         * Save the PMU state.
2530         * It needs to be restored when leaving the handler.
2531         */
2532        pmu_enabled = cpuc->enabled;
2533        /*
2534         * No known reason to not always do late ACK,
2535         * but just in case do it opt-in.
2536         */
2537        if (!x86_pmu.late_ack)
2538                apic_write(APIC_LVTPC, APIC_DM_NMI);
2539        intel_bts_disable_local();
2540        cpuc->enabled = 0;
2541        __intel_pmu_disable_all();
2542        handled = intel_pmu_drain_bts_buffer();
2543        handled += intel_bts_interrupt();
2544        status = intel_pmu_get_status();
2545        if (!status)
2546                goto done;
2547
2548        loops = 0;
2549again:
2550        intel_pmu_lbr_read();
2551        intel_pmu_ack_status(status);
2552        if (++loops > 100) {
2553                static bool warned;
2554
2555                if (!warned) {
2556                        WARN(1, "perfevents: irq loop stuck!\n");
2557                        perf_event_print_debug();
2558                        warned = true;
2559                }
2560                intel_pmu_reset();
2561                goto done;
2562        }
2563
2564        handled += handle_pmi_common(regs, status);
2565
2566        /*
2567         * Repeat if there is more work to be done:
2568         */
2569        status = intel_pmu_get_status();
2570        if (status)
2571                goto again;
2572
2573done:
2574        /* Only restore PMU state when it's active. See x86_pmu_disable(). */
2575        cpuc->enabled = pmu_enabled;
2576        if (pmu_enabled)
2577                __intel_pmu_enable_all(0, true);
2578        intel_bts_enable_local();
2579
2580        /*
2581         * Only unmask the NMI after the overflow counters
2582         * have been reset. This avoids spurious NMIs on
2583         * Haswell CPUs.
2584         */
2585        if (x86_pmu.late_ack)
2586                apic_write(APIC_LVTPC, APIC_DM_NMI);
2587        return handled;
2588}
2589
2590static struct event_constraint *
2591intel_bts_constraints(struct perf_event *event)
2592{
2593        if (unlikely(intel_pmu_has_bts(event)))
2594                return &bts_constraint;
2595
2596        return NULL;
2597}
2598
2599static int intel_alt_er(int idx, u64 config)
2600{
2601        int alt_idx = idx;
2602
2603        if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
2604                return idx;
2605
2606        if (idx == EXTRA_REG_RSP_0)
2607                alt_idx = EXTRA_REG_RSP_1;
2608
2609        if (idx == EXTRA_REG_RSP_1)
2610                alt_idx = EXTRA_REG_RSP_0;
2611
2612        if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask)
2613                return idx;
2614
2615        return alt_idx;
2616}
2617
2618static void intel_fixup_er(struct perf_event *event, int idx)
2619{
2620        event->hw.extra_reg.idx = idx;
2621
2622        if (idx == EXTRA_REG_RSP_0) {
2623                event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2624                event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
2625                event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
2626        } else if (idx == EXTRA_REG_RSP_1) {
2627                event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
2628                event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
2629                event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
2630        }
2631}
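
/*
 * Example of the alternate extra-reg flow: if two events both request
 * OFFCORE_RSP_0 (event code 0x01b7) with different filter values, the second
 * one cannot share the MSR.  __intel_shared_reg_get_constraints() then
 * retries with intel_alt_er(), and intel_fixup_er() rewrites the event to
 * the RSP_1 encoding (0x02b7 in the extra_regs tables above) and points it
 * at MSR_OFFCORE_RSP_1, assuming the config is valid for that register.
 */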
2632
2633/*
2634 * manage allocation of shared extra msr for certain events
2635 *
2636 * sharing can be:
2637 * per-cpu: to be shared between the various events on a single PMU
2638 * per-core: per-cpu + shared by HT threads
2639 */
2640static struct event_constraint *
2641__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
2642                                   struct perf_event *event,
2643                                   struct hw_perf_event_extra *reg)
2644{
2645        struct event_constraint *c = &emptyconstraint;
2646        struct er_account *era;
2647        unsigned long flags;
2648        int idx = reg->idx;
2649
2650        /*
2651         * reg->alloc can be set due to existing state, so for fake cpuc we
2652         * need to ignore this, otherwise we might fail to allocate proper fake
2653         * state for this extra reg constraint. Also see the comment below.
2654         */
2655        if (reg->alloc && !cpuc->is_fake)
2656                return NULL; /* call x86_get_event_constraint() */
2657
2658again:
2659        era = &cpuc->shared_regs->regs[idx];
2660        /*
2661         * we use spin_lock_irqsave() to avoid lockdep issues when
2662         * passing a fake cpuc
2663         */
2664        raw_spin_lock_irqsave(&era->lock, flags);
2665
2666        if (!atomic_read(&era->ref) || era->config == reg->config) {
2667
2668                /*
2669                 * If its a fake cpuc -- as per validate_{group,event}() we
2670                 * shouldn't touch event state and we can avoid doing so
2671                 * since both will only call get_event_constraints() once
2672                 * on each event, this avoids the need for reg->alloc.
2673                 *
2674                 * Not doing the ER fixup will only result in era->reg being
2675                 * wrong, but since we won't actually try and program hardware
2676                 * this isn't a problem either.
2677                 */
2678                if (!cpuc->is_fake) {
2679                        if (idx != reg->idx)
2680                                intel_fixup_er(event, idx);
2681
2682                        /*
2683                         * x86_schedule_events() can call get_event_constraints()
2684                         * multiple times on events in the case of incremental
2685                 * scheduling. reg->alloc ensures we only do the ER
2686                         * allocation once.
2687                         */
2688                        reg->alloc = 1;
2689                }
2690
2691                /* lock in msr value */
2692                era->config = reg->config;
2693                era->reg = reg->reg;
2694
2695                /* one more user */
2696                atomic_inc(&era->ref);
2697
2698                /*
2699                 * need to call x86_get_event_constraint()
2700                 * to check if the associated event has constraints
2701                 */
2702                c = NULL;
2703        } else {
2704                idx = intel_alt_er(idx, reg->config);
2705                if (idx != reg->idx) {
2706                        raw_spin_unlock_irqrestore(&era->lock, flags);
2707                        goto again;
2708                }
2709        }
2710        raw_spin_unlock_irqrestore(&era->lock, flags);
2711
2712        return c;
2713}
2714
2715static void
2716__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
2717                                   struct hw_perf_event_extra *reg)
2718{
2719        struct er_account *era;
2720
2721        /*
2722         * Only put constraint if extra reg was actually allocated. Also takes
2723         * care of events which do not use an extra shared reg.
2724         *
2725         * Also, if this is a fake cpuc we shouldn't touch any event state
2726         * (reg->alloc) and we don't care about leaving inconsistent cpuc state
2727         * either since it'll be thrown out.
2728         */
2729        if (!reg->alloc || cpuc->is_fake)
2730                return;
2731
2732        era = &cpuc->shared_regs->regs[reg->idx];
2733
2734        /* one fewer user */
2735        atomic_dec(&era->ref);
2736
2737        /* allocate again next time */
2738        reg->alloc = 0;
2739}
2740
2741static struct event_constraint *
2742intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
2743                              struct perf_event *event)
2744{
2745        struct event_constraint *c = NULL, *d;
2746        struct hw_perf_event_extra *xreg, *breg;
2747
2748        xreg = &event->hw.extra_reg;
2749        if (xreg->idx != EXTRA_REG_NONE) {
2750                c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
2751                if (c == &emptyconstraint)
2752                        return c;
2753        }
2754        breg = &event->hw.branch_reg;
2755        if (breg->idx != EXTRA_REG_NONE) {
2756                d = __intel_shared_reg_get_constraints(cpuc, event, breg);
2757                if (d == &emptyconstraint) {
2758                        __intel_shared_reg_put_constraints(cpuc, xreg);
2759                        c = d;
2760                }
2761        }
2762        return c;
2763}
2764
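    /*
     * Walk the model's static constraint table; the first constraint
     * matching the event's config wins and its flags are copied into the
     * event. Fall back to the catch-all "unconstrained" otherwise.
     */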
2765struct event_constraint *
2766x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2767                          struct perf_event *event)
2768{
2769        struct event_constraint *c;
2770
2771        if (x86_pmu.event_constraints) {
2772                for_each_event_constraint(c, x86_pmu.event_constraints) {
2773                        if (constraint_match(c, event->hw.config)) {
2774                                event->hw.flags |= c->flags;
2775                                return c;
2776                        }
2777                }
2778        }
2779
2780        return &unconstrained;
2781}
2782
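    /*
     * Constraint sources are consulted in priority order: BTS, shared
     * extra registers (offcore response / LBR select), PEBS, and finally
     * the static per-model table.
     */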
2783static struct event_constraint *
2784__intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2785                            struct perf_event *event)
2786{
2787        struct event_constraint *c;
2788
2789        c = intel_bts_constraints(event);
2790        if (c)
2791                return c;
2792
2793        c = intel_shared_regs_constraints(cpuc, event);
2794        if (c)
2795                return c;
2796
2797        c = intel_pebs_constraints(event);
2798        if (c)
2799                return c;
2800
2801        return x86_get_event_constraints(cpuc, idx, event);
2802}
2803
2804static void
2805intel_start_scheduling(struct cpu_hw_events *cpuc)
2806{
2807        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2808        struct intel_excl_states *xl;
2809        int tid = cpuc->excl_thread_id;
2810
2811        /*
2812         * nothing needed if in group validation mode
2813         */
2814        if (cpuc->is_fake || !is_ht_workaround_enabled())
2815                return;
2816
2817        /*
2818         * no exclusion needed
2819         */
2820        if (WARN_ON_ONCE(!excl_cntrs))
2821                return;
2822
2823        xl = &excl_cntrs->states[tid];
2824
2825        xl->sched_started = true;
2826        /*
2827         * lock shared state until we are done scheduling
2828         * in stop_event_scheduling()
2829         * makes scheduling appear as a transaction
2830         */
2831        raw_spin_lock(&excl_cntrs->lock);
2832}
2833
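    /*
     * Record in this thread's exclusive-counter state whether the counter
     * just assigned to the event holds an exclusive or a shared event, so
     * the HT sibling can take it into account. Only dynamic constraints
     * are tracked, and only while the HT bug workaround is active.
     */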
2834static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2835{
2836        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2837        struct event_constraint *c = cpuc->event_constraint[idx];
2838        struct intel_excl_states *xl;
2839        int tid = cpuc->excl_thread_id;
2840
2841        if (cpuc->is_fake || !is_ht_workaround_enabled())
2842                return;
2843
2844        if (WARN_ON_ONCE(!excl_cntrs))
2845                return;
2846
2847        if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
2848                return;
2849
2850        xl = &excl_cntrs->states[tid];
2851
2852        lockdep_assert_held(&excl_cntrs->lock);
2853
2854        if (c->flags & PERF_X86_EVENT_EXCL)
2855                xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
2856        else
2857                xl->state[cntr] = INTEL_EXCL_SHARED;
2858}
2859
2860static void
2861intel_stop_scheduling(struct cpu_hw_events *cpuc)
2862{
2863        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2864        struct intel_excl_states *xl;
2865        int tid = cpuc->excl_thread_id;
2866
2867        /*
2868         * nothing needed if in group validation mode
2869         */
2870        if (cpuc->is_fake || !is_ht_workaround_enabled())
2871                return;
2872        /*
2873         * no exclusion needed
2874         */
2875        if (WARN_ON_ONCE(!excl_cntrs))
2876                return;
2877
2878        xl = &excl_cntrs->states[tid];
2879
2880        xl->sched_started = false;
2881        /*
2882         * release shared state lock (acquired in intel_start_scheduling())
2883         */
2884        raw_spin_unlock(&excl_cntrs->lock);
2885}
2886
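    /*
     * Clone a static constraint into the pre-allocated per-CPU
     * constraint_list slot so it can be modified safely; constraints
     * already marked PERF_X86_EVENT_DYNAMIC are returned unchanged.
     */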
2887static struct event_constraint *
2888dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
2889{
2890        WARN_ON_ONCE(!cpuc->constraint_list);
2891
2892        if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
2893                struct event_constraint *cx;
2894
2895                /*
2896                 * grab pre-allocated constraint entry
2897                 */
2898                cx = &cpuc->constraint_list[idx];
2899
2900                /*
2901                 * initialize dynamic constraint
2902                 * with static constraint
2903                 */
2904                *cx = *c;
2905
2906                /*
2907                 * mark constraint as dynamic
2908                 */
2909                cx->flags |= PERF_X86_EVENT_DYNAMIC;
2910                c = cx;
2911        }
2912
2913        return c;
2914}
2915
2916static struct event_constraint *
2917intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
2918                           int idx, struct event_constraint *c)
2919{
2920        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
2921        struct intel_excl_states *xlo;
2922        int tid = cpuc->excl_thread_id;
2923        int is_excl, i, w;
2924
2925        /*
2926         * validating a group does not require
2927         * enforcing cross-thread  exclusion
2928         */
2929        if (cpuc->is_fake || !is_ht_workaround_enabled())
2930                return c;
2931
2932        /*
2933         * no exclusion needed
2934         */
2935        if (WARN_ON_ONCE(!excl_cntrs))
2936                return c;
2937
2938        /*
2939         * because we modify the constraint, we need
2940         * to make a copy. Static constraints come
2941         * from static const tables.
2942         *
2943         * only needed when constraint has not yet
2944         * been cloned (marked dynamic)
2945         */
2946        c = dyn_constraint(cpuc, c, idx);
2947
2948        /*
2949         * From here on, the constraint is dynamic.
2950         * Either it was just allocated above, or it
2951         * was allocated during an earlier invocation
2952         * of this function.
2953         */
2954
2955        /*
2956         * state of sibling HT
2957         */
2958        xlo = &excl_cntrs->states[tid ^ 1];
2959
2960        /*
2961         * event requires exclusive counter access
2962         * across HT threads
2963         */
2964        is_excl = c->flags & PERF_X86_EVENT_EXCL;
2965        if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
2966                event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
2967                if (!cpuc->n_excl++)
2968                        WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
2969        }
2970
2971        /*
2972         * Modify static constraint with current dynamic
2973         * state of thread
2974         *
2975         * EXCLUSIVE: sibling counter measuring exclusive event
2976         * SHARED   : sibling counter measuring non-exclusive event
2977         * UNUSED   : sibling counter unused
2978         */
2979        w = c->weight;
2980        for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
2981                /*
2982                 * exclusive event in sibling counter:
2983                 * our corresponding counter cannot be used
2984                 * regardless of our event
2985                 */
2986                if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
2987                        __clear_bit(i, c->idxmsk);
2988                        w--;
2989                        continue;
2990                }
2991                /*
2992                 * if we are measuring an exclusive event and the
2993                 * sibling is measuring a non-exclusive one, then the
2994                 * counter cannot be used
2995                 */
2996                if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
2997                        __clear_bit(i, c->idxmsk);
2998                        w--;
2999                        continue;
3000                }
3001        }
3002
3003        /*
3004         * if we return an empty mask, then switch
3005         * back to static empty constraint to avoid
3006         * the cost of freeing later on
3007         */
3008        if (!w)
3009                c = &emptyconstraint;
3010
3011        c->weight = w;
3012
3013        return c;
3014}
3015
3016static struct event_constraint *
3017intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3018                            struct perf_event *event)
3019{
3020        struct event_constraint *c1, *c2;
3021
3022        c1 = cpuc->event_constraint[idx];
3023
3024        /*
3025         * first time only
3026         * - static constraint: no change across incremental scheduling calls
3027         * - dynamic constraint: handled by intel_get_excl_constraints()
3028         */
3029        c2 = __intel_get_event_constraints(cpuc, idx, event);
3030        if (c1) {
3031                WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
3032                bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
3033                c1->weight = c2->weight;
3034                c2 = c1;
3035        }
3036
3037        if (cpuc->excl_cntrs)
3038                return intel_get_excl_constraints(cpuc, event, idx, c2);
3039
3040        return c2;
3041}
3042
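    /*
     * Undo the exclusive-event accounting done in
     * intel_get_excl_constraints() and, if the event had a counter
     * assigned, mark that counter as unused in this thread's state.
     */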
3043static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
3044                struct perf_event *event)
3045{
3046        struct hw_perf_event *hwc = &event->hw;
3047        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3048        int tid = cpuc->excl_thread_id;
3049        struct intel_excl_states *xl;
3050
3051        /*
3052         * nothing needed if in group validation mode
3053         */
3054        if (cpuc->is_fake)
3055                return;
3056
3057        if (WARN_ON_ONCE(!excl_cntrs))
3058                return;
3059
3060        if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
3061                hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
3062                if (!--cpuc->n_excl)
3063                        WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
3064        }
3065
3066        /*
3067         * If event was actually assigned, then mark the counter state as
3068         * unused now.
3069         */
3070        if (hwc->idx >= 0) {
3071                xl = &excl_cntrs->states[tid];
3072
3073                /*
3074                 * put_constraint may be called from x86_schedule_events()
3075                 * which already holds the lock, so make locking
3076                 * conditional here.
3077                 */
3078                if (!xl->sched_started)
3079                        raw_spin_lock(&excl_cntrs->lock);
3080
3081                xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
3082
3083                if (!xl->sched_started)
3084                        raw_spin_unlock(&excl_cntrs->lock);
3085        }
3086}
3087
3088static void
3089intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
3090                                        struct perf_event *event)
3091{
3092        struct hw_perf_event_extra *reg;
3093
3094        reg = &event->hw.extra_reg;
3095        if (reg->idx != EXTRA_REG_NONE)
3096                __intel_shared_reg_put_constraints(cpuc, reg);
3097
3098        reg = &event->hw.branch_reg;
3099        if (reg->idx != EXTRA_REG_NONE)
3100                __intel_shared_reg_put_constraints(cpuc, reg);
3101}
3102
3103static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
3104                                        struct perf_event *event)
3105{
3106        intel_put_shared_regs_event_constraints(cpuc, event);
3107
3108        /*
3109         * If the PMU has exclusive counter restrictions, then
3110         * all events are subject to them and must call the
3111         * put_excl_constraints() routine.
3112         */
3113        if (cpuc->excl_cntrs)
3114                intel_put_excl_constraints(cpuc, event);
3115}
3116
3117static void intel_pebs_aliases_core2(struct perf_event *event)
3118{
3119        if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3120                /*
3121                 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3122                 * (0x003c) so that we can use it with PEBS.
3123                 *
3124                 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3125                 * PEBS capable. However we can use INST_RETIRED.ANY_P
3126                 * (0x00c0), which is a PEBS capable event, to get the same
3127                 * count.
3128                 *
3129                 * INST_RETIRED.ANY_P counts the number of cycles that retire
3130                 * CNTMASK instructions. By setting CNTMASK to a value (16)
3131                 * larger than the maximum number of instructions that can be
3132                 * retired per cycle (4) and then inverting the condition, we
3133                 * count all cycles that retire 16 or fewer instructions, which
3134                 * is every cycle.
3135                 *
3136                 * Thereby we gain a PEBS capable cycle counter.
3137                 */
3138                u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
3139
3140                alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3141                event->hw.config = alt_config;
3142        }
3143}
3144
3145static void intel_pebs_aliases_snb(struct perf_event *event)
3146{
3147        if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3148                /*
3149                 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3150                 * (0x003c) so that we can use it with PEBS.
3151                 *
3152                 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3153                 * PEBS capable. However we can use UOPS_RETIRED.ALL
3154                 * (0x01c2), which is a PEBS capable event, to get the same
3155                 * count.
3156                 *
3157                 * UOPS_RETIRED.ALL counts the number of cycles that retire
3158                 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
3159                 * larger than the maximum number of micro-ops that can be
3160                 * retired per cycle (4) and then inverting the condition, we
3161                 * count all cycles that retire 16 or fewer micro-ops, which
3162                 * is every cycle.
3163                 *
3164                 * Thereby we gain a PEBS capable cycle counter.
3165                 */
3166                u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
3167
3168                alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3169                event->hw.config = alt_config;
3170        }
3171}
3172
3173static void intel_pebs_aliases_precdist(struct perf_event *event)
3174{
3175        if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3176                /*
3177                 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3178                 * (0x003c) so that we can use it with PEBS.
3179                 *
3180                 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3181                 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
3182                 * (0x01c0), which is a PEBS capable event, to get the same
3183                 * count.
3184                 *
3185                 * The PREC_DIST event has special support to minimize sample
3186                 * shadowing effects. One drawback is that it can be
3187                 * only programmed on counter 1, but that seems like an
3188                 * acceptable trade off.
3189                 */
3190                u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
3191
3192                alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3193                event->hw.config = alt_config;
3194        }
3195}
3196
3197static void intel_pebs_aliases_ivb(struct perf_event *event)
3198{
3199        if (event->attr.precise_ip < 3)
3200                return intel_pebs_aliases_snb(event);
3201        return intel_pebs_aliases_precdist(event);
3202}
3203
3204static void intel_pebs_aliases_skl(struct perf_event *event)
3205{
3206        if (event->attr.precise_ip < 3)
3207                return intel_pebs_aliases_core2(event);
3208        return intel_pebs_aliases_precdist(event);
3209}
3210
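    /*
     * Start from the PMU's large_pebs_flags and drop the sample flags the
     * multi-record (large) PEBS path cannot supply for this event:
     * timestamps with a custom clockid, user registers when kernel
     * samples are included, and register sets beyond the GP registers.
     */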
3211static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3212{
3213        unsigned long flags = x86_pmu.large_pebs_flags;
3214
3215        if (event->attr.use_clockid)
3216                flags &= ~PERF_SAMPLE_TIME;
3217        if (!event->attr.exclude_kernel)
3218                flags &= ~PERF_SAMPLE_REGS_USER;
3219        if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
3220                flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
3221        return flags;
3222}
3223
3224static int intel_pmu_bts_config(struct perf_event *event)
3225{
3226        struct perf_event_attr *attr = &event->attr;
3227
3228        if (unlikely(intel_pmu_has_bts(event))) {
3229                /* BTS is not supported by this architecture. */
3230                if (!x86_pmu.bts_active)
3231                        return -EOPNOTSUPP;
3232
3233                /* BTS is currently only allowed for user-mode. */
3234                if (!attr->exclude_kernel)
3235                        return -EOPNOTSUPP;
3236
3237                /* BTS is not allowed for precise events. */
3238                if (attr->precise_ip)
3239                        return -EOPNOTSUPP;
3240
3241                /* disallow bts if conflicting events are present */
3242                if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3243                        return -EBUSY;
3244
3245                event->destroy = hw_perf_lbr_event_destroy;
3246        }
3247
3248        return 0;
3249}
3250
3251static int core_pmu_hw_config(struct perf_event *event)
3252{
3253        int ret = x86_pmu_hw_config(event);
3254
3255        if (ret)
3256                return ret;
3257
3258        return intel_pmu_bts_config(event);
3259}
3260
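    /*
     * Model-specific setup on top of the common x86_pmu_hw_config():
     * BTS validation, PEBS auto-reload / large-PEBS flags and aliases,
     * LBR filter configuration (mutually exclusive with BTS), and the
     * privilege check for the ANY-thread bit on raw events.
     */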
3261static int intel_pmu_hw_config(struct perf_event *event)
3262{
3263        int ret = x86_pmu_hw_config(event);
3264
3265        if (ret)
3266                return ret;
3267
3268        ret = intel_pmu_bts_config(event);
3269        if (ret)
3270                return ret;
3271
3272        if (event->attr.precise_ip) {
3273                if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
3274                        event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
3275                        if (!(event->attr.sample_type &
3276                              ~intel_pmu_large_pebs_flags(event)))
3277                                event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
3278                }
3279                if (x86_pmu.pebs_aliases)
3280                        x86_pmu.pebs_aliases(event);
3281
3282                if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3283                        event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
3284        }
3285
3286        if (needs_branch_stack(event)) {
3287                ret = intel_pmu_setup_lbr_filter(event);
3288                if (ret)
3289                        return ret;
3290
3291                /*
3292                 * BTS is set up earlier in this path, so don't account twice
3293                 */
3294                if (!unlikely(intel_pmu_has_bts(event))) {
3295                        /* disallow lbr if conflicting events are present */
3296                        if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3297                                return -EBUSY;
3298
3299                        event->destroy = hw_perf_lbr_event_destroy;
3300                }
3301        }
3302
3303        if (event->attr.type != PERF_TYPE_RAW)
3304                return 0;
3305
3306        if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
3307                return 0;
3308
3309        if (x86_pmu.version < 3)
3310                return -EINVAL;
3311
3312        if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
3313                return -EACCES;
3314
3315        event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
3316
3317        return 0;
3318}
3319
3320struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
3321{
3322        if (x86_pmu.guest_get_msrs)
3323                return x86_pmu.guest_get_msrs(nr);
3324        *nr = 0;
3325        return NULL;
3326}
3327EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
3328
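    /*
     * Build the MSR switch list applied around VM entry: GLOBAL_CTRL gets
     * separate host and guest views based on intel_ctrl_{guest,host}_mask,
     * PEBS-enabled counters are kept out of the guest view, and on parts
     * without PEBS isolation PEBS is disabled entirely across the guest.
     */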
3329static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
3330{
3331        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3332        struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3333
3334        arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
3335        arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
3336        arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
3337        if (x86_pmu.flags & PMU_FL_PEBS_ALL)
3338                arr[0].guest &= ~cpuc->pebs_enabled;
3339        else
3340                arr[0].guest &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
3341        *nr = 1;
3342
3343        if (x86_pmu.pebs && x86_pmu.pebs_no_isolation) {
3344                /*
3345                 * If a PMU counter has PEBS enabled, it is not enough to
3346                 * disable the counter on guest entry, since a PEBS memory
3347                 * write can overshoot the guest entry and corrupt guest
3348                 * memory. Disabling PEBS solves the problem.
3349                 *
3350                 * Don't do this if the CPU already enforces it.
3351                 */
3352                arr[1].msr = MSR_IA32_PEBS_ENABLE;
3353                arr[1].host = cpuc->pebs_enabled;
3354                arr[1].guest = 0;
3355                *nr = 2;
3356        }
3357
3358        return arr;
3359}
3360
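    /*
     * Per-counter variant used by the legacy "core" PMU: hand KVM one
     * eventsel MSR per counter, with the enable bit cleared on whichever
     * side (host or guest) the event excludes.
     */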
3361static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
3362{
3363        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3364        struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3365        int idx;
3366
3367        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3368                struct perf_event *event = cpuc->events[idx];
3369
3370                arr[idx].msr = x86_pmu_config_addr(idx);
3371                arr[idx].host = arr[idx].guest = 0;
3372
3373                if (!test_bit(idx, cpuc->active_mask))
3374                        continue;
3375
3376                arr[idx].host = arr[idx].guest =
3377                        event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
3378
3379                if (event->attr.exclude_host)
3380                        arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3381                else if (event->attr.exclude_guest)
3382                        arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3383        }
3384
3385        *nr = x86_pmu.num_counters;
3386        return arr;
3387}
3388
3389static void core_pmu_enable_event(struct perf_event *event)
3390{
3391        if (!event->attr.exclude_host)
3392                x86_pmu_enable_event(event);
3393}
3394
3395static void core_pmu_enable_all(int added)
3396{
3397        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3398        int idx;
3399
3400        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3401                struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
3402
3403                if (!test_bit(idx, cpuc->active_mask) ||
3404                                cpuc->events[idx]->attr.exclude_host)
3405                        continue;
3406
3407                __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
3408        }
3409}
3410
3411static int hsw_hw_config(struct perf_event *event)
3412{
3413        int ret = intel_pmu_hw_config(event);
3414
3415        if (ret)
3416                return ret;
3417        if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
3418                return 0;
3419        event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
3420
3421        /*
3422         * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
3423         * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
3424         * this combination.
3425         */
3426        if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
3427             ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
3428              event->attr.precise_ip > 0))
3429                return -EOPNOTSUPP;
3430
3431        if (event_is_checkpointed(event)) {
3432                /*
3433                 * Sampling of checkpointed events can cause situations where
3434                 * the CPU constantly aborts because of an overflow, which is
3435                 * then checkpointed back and ignored. Forbid checkpointing
3436                 * for sampling.
3437                 *
3438                 * But still allow a long sampling period, so that perf stat
3439                 * from KVM works.
3440                 */
3441                if (event->attr.sample_period > 0 &&
3442                    event->attr.sample_period < 0x7fffffff)
3443                        return -EOPNOTSUPP;
3444        }
3445        return 0;
3446}
3447
3448static struct event_constraint counter0_constraint =
3449                        INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
3450
3451static struct event_constraint counter2_constraint =
3452                        EVENT_CONSTRAINT(0, 0x4, 0);
3453
3454static struct event_constraint fixed0_constraint =
3455                        FIXED_EVENT_CONSTRAINT(0x00c0, 0);
3456
3457static struct event_constraint fixed0_counter0_constraint =
3458                        INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
3459
3460static struct event_constraint *
3461hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3462                          struct perf_event *event)
3463{
3464        struct event_constraint *c;
3465
3466        c = intel_get_event_constraints(cpuc, idx, event);
3467
3468        /* Handle special quirk on in_tx_checkpointed only in counter 2 */
3469        if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
3470                if (c->idxmsk64 & (1U << 2))
3471                        return &counter2_constraint;
3472                return &emptyconstraint;
3473        }
3474
3475        return c;
3476}
3477
3478static struct event_constraint *
3479icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3480                          struct perf_event *event)
3481{
3482        /*
3483         * Fixed counter 0 has less skid.
3484         * Force instruction:ppp in Fixed counter 0
3485         */
3486        if ((event->attr.precise_ip == 3) &&
3487            constraint_match(&fixed0_constraint, event->hw.config))
3488                return &fixed0_constraint;
3489
3490        return hsw_get_event_constraints(cpuc, idx, event);
3491}
3492
3493static struct event_constraint *
3494glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3495                          struct perf_event *event)
3496{
3497        struct event_constraint *c;
3498
3499        /* :ppp means to do reduced skid PEBS which is PMC0 only. */
3500        if (event->attr.precise_ip == 3)
3501                return &counter0_constraint;
3502
3503        c = intel_get_event_constraints(cpuc, idx, event);
3504
3505        return c;
3506}
3507
3508static struct event_constraint *
3509tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3510                          struct perf_event *event)
3511{
3512        struct event_constraint *c;
3513
3514        /*
3515         * :ppp means to do reduced skid PEBS,
3516         * which is available on PMC0 and fixed counter 0.
3517         */
3518        if (event->attr.precise_ip == 3) {
3519                /* Force instruction:ppp on PMC0 and Fixed counter 0 */
3520                if (constraint_match(&fixed0_constraint, event->hw.config))
3521                        return &fixed0_counter0_constraint;
3522
3523                return &counter0_constraint;
3524        }
3525
3526        c = intel_get_event_constraints(cpuc, idx, event);
3527
3528        return c;
3529}
3530
3531static bool allow_tsx_force_abort = true;
3532
3533static struct event_constraint *
3534tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3535                          struct perf_event *event)
3536{
3537        struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
3538
3539        /*
3540         * Without TFA we must not use PMC3.
3541         */
3542        if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
3543                c = dyn_constraint(cpuc, c, idx);
3544                c->idxmsk64 &= ~(1ULL << 3);
3545                c->weight--;
3546        }
3547
3548        return c;
3549}
3550
3551/*
3552 * Broadwell:
3553 *
3554 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
3555 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
3556 * the two to enforce a minimum period of 128 (the smallest value that has bits
3557 * 0-5 cleared and >= 100).
3558 *
3559 * Because of how the code in x86_perf_event_set_period() works, the truncation
3560 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
3561 * to make up for the 'lost' events due to carrying the 'error' in period_left.
3562 *
3563 * Therefore the effective (average) period matches the requested period,
3564 * despite coarser hardware granularity.
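     *
     * For example, a remaining period of 100 is raised to 128, while 200
     * is truncated to 192 (200 & ~0x3fULL).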
3565 */
3566static u64 bdw_limit_period(struct perf_event *event, u64 left)
3567{
3568        if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
3569                        X86_CONFIG(.event=0xc0, .umask=0x01)) {
3570                if (left < 128)
3571                        left = 128;
3572                left &= ~0x3fULL;
3573        }
3574        return left;
3575}
3576
3577PMU_FORMAT_ATTR(event,  "config:0-7"    );
3578PMU_FORMAT_ATTR(umask,  "config:8-15"   );
3579PMU_FORMAT_ATTR(edge,   "config:18"     );
3580PMU_FORMAT_ATTR(pc,     "config:19"     );
3581PMU_FORMAT_ATTR(any,    "config:21"     ); /* v3 + */
3582PMU_FORMAT_ATTR(inv,    "config:23"     );
3583PMU_FORMAT_ATTR(cmask,  "config:24-31"  );
3584PMU_FORMAT_ATTR(in_tx,  "config:32");
3585PMU_FORMAT_ATTR(in_tx_cp, "config:33");
3586
3587static struct attribute *intel_arch_formats_attr[] = {
3588        &format_attr_event.attr,
3589        &format_attr_umask.attr,
3590        &format_attr_edge.attr,
3591        &format_attr_pc.attr,
3592        &format_attr_inv.attr,
3593        &format_attr_cmask.attr,
3594        NULL,
3595};
3596
3597ssize_t intel_event_sysfs_show(char *page, u64 config)
3598{
3599        u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
3600
3601        return x86_event_sysfs_show(page, config, event);
3602}
3603
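    /*
     * Allocate the intel_shared_regs structure on the CPU's local node;
     * core_id stays -1 until intel_pmu_cpu_starting() attaches it to a
     * core.
     */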
3604static struct intel_shared_regs *allocate_shared_regs(int cpu)
3605{
3606        struct intel_shared_regs *regs;
3607        int i;
3608
3609        regs = kzalloc_node(sizeof(struct intel_shared_regs),
3610                            GFP_KERNEL, cpu_to_node(cpu));
3611        if (regs) {
3612                /*
3613                 * initialize the locks to keep lockdep happy
3614                 */
3615                for (i = 0; i < EXTRA_REG_MAX; i++)
3616                        raw_spin_lock_init(&regs->regs[i].lock);
3617
3618                regs->core_id = -1;
3619        }
3620        return regs;
3621}
3622
3623static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
3624{
3625        struct intel_excl_cntrs *c;
3626
3627        c = kzalloc_node(sizeof(struct intel_excl_cntrs),
3628                         GFP_KERNEL, cpu_to_node(cpu));
3629        if (c) {
3630                raw_spin_lock_init(&c->lock);
3631                c->core_id = -1;
3632        }
3633        return c;
3634}
3635
3636
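    /*
     * Allocate the per-CPU state this model needs: shared extra-register
     * bookkeeping (offcore response / LBR select), a constraint list for
     * dynamic constraints (exclusive counters or TFA), and the
     * cross-thread exclusion state used by the HT erratum workaround.
     */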
3637int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
3638{
3639        cpuc->pebs_record_size = x86_pmu.pebs_record_size;
3640
3641        if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
3642                cpuc->shared_regs = allocate_shared_regs(cpu);
3643                if (!cpuc->shared_regs)
3644                        goto err;
3645        }
3646
3647        if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
3648                size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
3649
3650                cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
3651                if (!cpuc->constraint_list)
3652                        goto err_shared_regs;
3653        }
3654
3655        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3656                cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
3657                if (!cpuc->excl_cntrs)
3658                        goto err_constraint_list;
3659
3660                cpuc->excl_thread_id = 0;
3661        }
3662
3663        return 0;
3664
3665err_constraint_list:
3666        kfree(cpuc->constraint_list);
3667        cpuc->constraint_list = NULL;
3668
3669err_shared_regs:
3670        kfree(cpuc->shared_regs);
3671        cpuc->shared_regs = NULL;
3672
3673err:
3674        return -ENOMEM;
3675}
3676
3677static int intel_pmu_cpu_prepare(int cpu)
3678{
3679        return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
3680}
3681
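    /*
     * Set or clear the FREEZE_IN_SMM bit in IA32_DEBUGCTL on the current
     * CPU, mirroring the freeze_on_smi attribute value passed via @data.
     */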
3682static void flip_smm_bit(void *data)
3683{
3684        unsigned long set = *(unsigned long *)data;
3685
3686        if (set > 0) {
3687                msr_set_bit(MSR_IA32_DEBUGCTLMSR,
3688                            DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3689        } else {
3690                msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
3691                              DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
3692        }
3693}
3694
3695static void intel_pmu_cpu_starting(int cpu)
3696{
3697        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
3698        int core_id = topology_core_id(cpu);
3699        int i;
3700
3701        init_debug_store_on_cpu(cpu);
3702        /*
3703         * Deal with CPUs that don't clear their LBRs on power-up.
3704         */
3705        intel_pmu_lbr_reset();
3706
3707        cpuc->lbr_sel = NULL;
3708
3709        if (x86_pmu.flags & PMU_FL_TFA) {
3710                WARN_ON_ONCE(cpuc->tfa_shadow);
3711                cpuc->tfa_shadow = ~0ULL;
3712                intel_set_tfa(cpuc, false);
3713        }
3714
3715        if (x86_pmu.version > 1)
3716                flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
3717
3718        if (x86_pmu.counter_freezing)
3719                enable_counter_freeze();
3720
3721        if (!cpuc->shared_regs)
3722                return;
3723
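            /*
             * Share the extra-register state with an HT sibling that has
             * already registered it for this core; our own allocation is
             * parked on kfree_on_online[] and freed once onlining completes.
             */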
3724        if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
3725                for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3726                        struct intel_shared_regs *pc;
3727
3728                        pc = per_cpu(cpu_hw_events, i).shared_regs;
3729                        if (pc && pc->core_id == core_id) {
3730                                cpuc->kfree_on_online[0] = cpuc->shared_regs;
3731                                cpuc->shared_regs = pc;
3732                                break;
3733                        }
3734                }
3735                cpuc->shared_regs->core_id = core_id;
3736                cpuc->shared_regs->refcnt++;
3737        }
3738
3739        if (x86_pmu.lbr_sel_map)
3740                cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
3741
3742        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3743                for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3744                        struct cpu_hw_events *sibling;
3745                        struct intel_excl_cntrs *c;
3746
3747                        sibling = &per_cpu(cpu_hw_events, i);
3748                        c = sibling->excl_cntrs;
3749                        if (c && c->core_id == core_id) {
3750                                cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
3751                                cpuc->excl_cntrs = c;
3752                                if (!sibling->excl_thread_id)
3753                                        cpuc->excl_thread_id = 1;
3754                                break;
3755                        }
3756                }
3757                cpuc->excl_cntrs->core_id = core_id;
3758                cpuc->excl_cntrs->refcnt++;
3759        }
3760}
3761
3762static void free_excl_cntrs(struct cpu_hw_events *cpuc)
3763{
3764        struct intel_excl_cntrs *c;
3765
3766        c = cpuc->excl_cntrs;
3767        if (c) {
3768                if (c->core_id == -1 || --c->refcnt == 0)
3769                        kfree(c);
3770                cpuc->excl_cntrs = NULL;
3771        }
3772
3773        kfree(cpuc->constraint_list);
3774        cpuc->constraint_list = NULL;
3775}
3776
3777static void intel_pmu_cpu_dying(int cpu)
3778{
3779        fini_debug_store_on_cpu(cpu);
3780
3781        if (x86_pmu.counter_freezing)
3782                disable_counter_freeze();
3783}
3784
3785void intel_cpuc_finish(struct cpu_hw_events *cpuc)
3786{
3787        struct intel_shared_regs *pc;
3788
3789        pc = cpuc->shared_regs;
3790        if (pc) {
3791                if (pc->core_id == -1 || --pc->refcnt == 0)
3792                        kfree(pc);
3793                cpuc->shared_regs = NULL;
3794        }
3795
3796        free_excl_cntrs(cpuc);
3797}
3798
3799static void intel_pmu_cpu_dead(int cpu)
3800{
3801        intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
3802}
3803
3804static void intel_pmu_sched_task(struct perf_event_context *ctx,
3805                                 bool sched_in)
3806{
3807        intel_pmu_pebs_sched_task(ctx, sched_in);
3808        intel_pmu_lbr_sched_task(ctx, sched_in);
3809}
3810
3811static int intel_pmu_check_period(struct perf_event *event, u64 value)
3812{
3813        return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
3814}
3815
3816PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
3817
3818PMU_FORMAT_ATTR(ldlat, "config1:0-15");
3819
3820PMU_FORMAT_ATTR(frontend, "config1:0-23");
3821
3822static struct attribute *intel_arch3_formats_attr[] = {
3823        &format_attr_event.attr,
3824        &format_attr_umask.attr,
3825        &format_attr_edge.attr,
3826        &format_attr_pc.attr,
3827        &format_attr_any.attr,
3828        &format_attr_inv.attr,
3829        &format_attr_cmask.attr,
3830        NULL,
3831};
3832
3833static struct attribute *hsw_format_attr[] = {
3834        &format_attr_in_tx.attr,
3835        &format_attr_in_tx_cp.attr,
3836        &format_attr_offcore_rsp.attr,
3837        &format_attr_ldlat.attr,
3838        NULL
3839};
3840
3841static struct attribute *nhm_format_attr[] = {
3842        &format_attr_offcore_rsp.attr,
3843        &format_attr_ldlat.attr,
3844        NULL
3845};
3846
3847static struct attribute *slm_format_attr[] = {
3848        &format_attr_offcore_rsp.attr,
3849        NULL
3850};
3851
3852static struct attribute *skl_format_attr[] = {
3853        &format_attr_frontend.attr,
3854        NULL,
3855};
3856
3857static __initconst const struct x86_pmu core_pmu = {
3858        .name                   = "core",
3859        .handle_irq             = x86_pmu_handle_irq,
3860        .disable_all            = x86_pmu_disable_all,
3861        .enable_all             = core_pmu_enable_all,
3862        .enable                 = core_pmu_enable_event,
3863        .disable                = x86_pmu_disable_event,
3864        .hw_config              = core_pmu_hw_config,
3865        .schedule_events        = x86_schedule_events,
3866        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
3867        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
3868        .event_map              = intel_pmu_event_map,
3869        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
3870        .apic                   = 1,
3871        .large_pebs_flags       = LARGE_PEBS_FLAGS,
3872
3873        /*
3874         * Intel PMCs cannot be accessed sanely above 32-bit width,
3875         * so we install an artificial 1<<31 period regardless of
3876         * the generic event period:
3877         */
3878        .max_period             = (1ULL<<31) - 1,
3879        .get_event_constraints  = intel_get_event_constraints,
3880        .put_event_constraints  = intel_put_event_constraints,
3881        .event_constraints      = intel_core_event_constraints,
3882        .guest_get_msrs         = core_guest_get_msrs,
3883        .format_attrs           = intel_arch_formats_attr,
3884        .events_sysfs_show      = intel_event_sysfs_show,
3885
3886        /*
3887         * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
3888         * together with PMU version 1 and thus be using core_pmu with
3889         * shared_regs. We need following callbacks here to allocate
3890         * it properly.
3891         */
3892        .cpu_prepare            = intel_pmu_cpu_prepare,
3893        .cpu_starting           = intel_pmu_cpu_starting,
3894        .cpu_dying              = intel_pmu_cpu_dying,
3895        .cpu_dead               = intel_pmu_cpu_dead,
3896
3897        .check_period           = intel_pmu_check_period,
3898};
3899
3900static struct attribute *intel_pmu_attrs[];
3901
3902static __initconst const struct x86_pmu intel_pmu = {
3903        .name                   = "Intel",
3904        .handle_irq             = intel_pmu_handle_irq,
3905        .disable_all            = intel_pmu_disable_all,
3906        .enable_all             = intel_pmu_enable_all,
3907        .enable                 = intel_pmu_enable_event,
3908        .disable                = intel_pmu_disable_event,
3909        .add                    = intel_pmu_add_event,
3910        .del                    = intel_pmu_del_event,
3911        .read                   = intel_pmu_read_event,
3912        .hw_config              = intel_pmu_hw_config,
3913        .schedule_events        = x86_schedule_events,
3914        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
3915        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
3916        .event_map              = intel_pmu_event_map,
3917        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
3918        .apic                   = 1,
3919        .large_pebs_flags       = LARGE_PEBS_FLAGS,
3920        /*
3921         * Intel PMCs cannot be accessed sanely above 32 bit width,
3922         * so we install an artificial 1<<31 period regardless of
3923         * the generic event period:
3924         */
3925        .max_period             = (1ULL << 31) - 1,
3926        .get_event_constraints  = intel_get_event_constraints,
3927        .put_event_constraints  = intel_put_event_constraints,
3928        .pebs_aliases           = intel_pebs_aliases_core2,
3929
3930        .format_attrs           = intel_arch3_formats_attr,
3931        .events_sysfs_show      = intel_event_sysfs_show,
3932
3933        .attrs                  = intel_pmu_attrs,
3934
3935        .cpu_prepare            = intel_pmu_cpu_prepare,
3936        .cpu_starting           = intel_pmu_cpu_starting,
3937        .cpu_dying              = intel_pmu_cpu_dying,
3938        .cpu_dead               = intel_pmu_cpu_dead,
3939
3940        .guest_get_msrs         = intel_guest_get_msrs,
3941        .sched_task             = intel_pmu_sched_task,
3942
3943        .check_period           = intel_pmu_check_period,
3944};
3945
3946static __init void intel_clovertown_quirk(void)
3947{
3948        /*
3949         * PEBS is unreliable due to:
3950         *
3951         *   AJ67  - PEBS may experience CPL leaks
3952         *   AJ68  - PEBS PMI may be delayed by one event
3953         *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
3954         *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
3955         *
3956         * AJ67 could be worked around by restricting the OS/USR flags.
3957         * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
3958         *
3959         * AJ106 could possibly be worked around by not allowing LBR
3960         *       usage from PEBS, including the fixup.
3961         * AJ68  could possibly be worked around by always programming
3962         *       a pebs_event_reset[0] value and coping with the lost events.
3963         *
3964         * But taken together it might just make sense to not enable PEBS on
3965         * these chips.
3966         */
3967        pr_warn("PEBS disabled due to CPU errata\n");
3968        x86_pmu.pebs = 0;
3969        x86_pmu.pebs_constraints = NULL;
3970}
3971
3972static const struct x86_cpu_desc isolation_ucodes[] = {
3973        INTEL_CPU_DESC(INTEL_FAM6_HASWELL_CORE,          3, 0x0000001f),
3974        INTEL_CPU_DESC(INTEL_FAM6_HASWELL_ULT,           1, 0x0000001e),
3975        INTEL_CPU_DESC(INTEL_FAM6_HASWELL_GT3E,          1, 0x00000015),
3976        INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X,             2, 0x00000037),
3977        INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X,             4, 0x0000000a),
3978        INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_CORE,        4, 0x00000023),
3979        INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_GT3E,        1, 0x00000014),
3980        INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D,      2, 0x00000010),
3981        INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D,      3, 0x07000009),
3982        INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D,      4, 0x0f000009),
3983        INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D,      5, 0x0e000002),
3984        INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X,           2, 0x0b000014),
3985        INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,             3, 0x00000021),
3986        INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,             4, 0x00000000),
3987        INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_MOBILE,        3, 0x0000007c),
3988        INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_DESKTOP,       3, 0x0000007c),
3989        INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP,      9, 0x0000004e),
3990        INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE,       9, 0x0000004e),
3991        INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE,      10, 0x0000004e),
3992        INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE,      11, 0x0000004e),
3993        INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE,      12, 0x0000004e),
3994        INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP,     10, 0x0000004e),
3995        INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP,     11, 0x0000004e),
3996        INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP,     12, 0x0000004e),
3997        INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP,     13, 0x0000004e),
3998        {}
3999};
4000
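    /*
     * Treat PEBS as lacking isolation unless the running microcode is at
     * least the revision listed in isolation_ucodes[]; re-evaluated on
     * microcode updates via the check_microcode hook installed below.
     */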
4001static void intel_check_pebs_isolation(void)
4002{
4003        x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes);
4004}
4005
4006static __init void intel_pebs_isolation_quirk(void)
4007{
4008        WARN_ON_ONCE(x86_pmu.check_microcode);
4009        x86_pmu.check_microcode = intel_check_pebs_isolation;
4010        intel_check_pebs_isolation();
4011}
4012
4013static const struct x86_cpu_desc pebs_ucodes[] = {
4014        INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE,          7, 0x00000028),
4015        INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X,        6, 0x00000618),
4016        INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X,        7, 0x0000070c),
4017        {}
4018};
4019
4020static bool intel_snb_pebs_broken(void)
4021{
4022        return !x86_cpu_has_min_microcode_rev(pebs_ucodes);
4023}
4024
4025static void intel_snb_check_microcode(void)
4026{
4027        if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
4028                return;
4029
4030        /*
4031         * Serialized by the microcode lock..
4032         */
4033        if (x86_pmu.pebs_broken) {
4034                pr_info("PEBS enabled due to microcode update\n");
4035                x86_pmu.pebs_broken = 0;
4036        } else {
4037                pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
4038                x86_pmu.pebs_broken = 1;
4039        }
4040}
4041
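    /*
     * True when @msr lies in the LBR_FROM range
     * [lbr_from, lbr_from + lbr_nr); check_msr() applies the LBR
     * sign-extension write quirk to such MSRs.
     */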
4042static bool is_lbr_from(unsigned long msr)
4043{
4044        unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
4045
4046        return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
4047}
4048
4049/*
4050 * Under certain circumstances, accessing certain MSRs may cause #GP.
4051 * This function tests whether the input MSR can be safely accessed.
4052 */
4053static bool check_msr(unsigned long msr, u64 mask)
4054{
4055        u64 val_old, val_new, val_tmp;
4056
4057        /*
4058         * Read the current value, change it and read it back to see if it
4059         * matches; this is needed to detect certain hardware emulators
4060         * (qemu/kvm) that don't trap on the MSR access and always return 0s.
4061         */
4062        if (rdmsrl_safe(msr, &val_old))
4063                return false;
4064
4065        /*
4066         * Only change the bits which can be updated by wrmsrl.
4067         */
4068        val_tmp = val_old ^ mask;
4069
4070        if (is_lbr_from(msr))
4071                val_tmp = lbr_from_signext_quirk_wr(val_tmp);
4072
4073        if (wrmsrl_safe(msr, val_tmp) ||
4074            rdmsrl_safe(msr, &val_new))
4075                return false;
4076
4077        /*
4078         * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
4079         * should equal rdmsrl()'s even with the quirk.
4080         */
4081        if (val_new != val_tmp)
4082                return false;
4083
4084        if (is_lbr_from(msr))
4085                val_old = lbr_from_signext_quirk_wr(val_old);
4086
4087        /* The MSR is now known to be safe to access.
4088         * Restore the old value and return.
4089         */
4090        wrmsrl(msr, val_old);
4091
4092        return true;
4093}
4094
4095static __init void intel_sandybridge_quirk(void)
4096{
4097        x86_pmu.check_microcode = intel_snb_check_microcode;
4098        cpus_read_lock();
4099        intel_snb_check_microcode();
4100        cpus_read_unlock();
4101}
4102
4103static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
4104        { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
4105        { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
4106        { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
4107        { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
4108        { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
4109        { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
4110        { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
4111};
4112
4113static __init void intel_arch_events_quirk(void)
4114{
4115        int bit;
4116
4117        /* disable events that are reported as not present by cpuid */
4118        for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
4119                intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
4120                pr_warn("CPUID marked event: \'%s\' unavailable\n",
4121                        intel_arch_events_map[bit].name);
4122        }
4123}
4124
4125static __init void intel_nehalem_quirk(void)
4126{
4127        union cpuid10_ebx ebx;
4128
4129        ebx.full = x86_pmu.events_maskl;
4130        if (ebx.split.no_branch_misses_retired) {
4131                /*
4132                 * Erratum AAJ80 detected, we work it around by using
4133                 * the BR_MISP_EXEC.ANY event. This will over-count
4134                 * branch-misses, but it's still much better than the
4135                 * architectural event which is often completely bogus:
4136                 */
4137                intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
4138                ebx.split.no_branch_misses_retired = 0;
4139                x86_pmu.events_maskl = ebx.full;
4140                pr_info("CPU erratum AAJ80 worked around\n");
4141        }
4142}
4143
4144static const struct x86_cpu_desc counter_freezing_ucodes[] = {
4145        INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT,         2, 0x0000000e),
4146        INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT,         9, 0x0000002e),
4147        INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT,        10, 0x00000008),
4148        INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_X,       1, 0x00000028),
4149        INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS,    1, 0x00000028),
4150        INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS,    8, 0x00000006),
4151        {}
4152};
4153
4154static bool intel_counter_freezing_broken(void)
4155{
4156        return !x86_cpu_has_min_microcode_rev(counter_freezing_ucodes);
4157}
4158
4159static __init void intel_counter_freezing_quirk(void)
4160{
4161        /* Check if it's already disabled */
4162        if (disable_counter_freezing)
4163                return;
4164
4165        /*
4166         * If the system starts with the wrong ucode, leave the
4167         * counter-freezing feature permanently disabled.
4168         */
4169        if (intel_counter_freezing_broken()) {
4170                pr_info("PMU counter freezing disabled due to CPU errata, "
4171                        "please upgrade microcode\n");
4172                x86_pmu.counter_freezing = false;
4173                x86_pmu.handle_irq = intel_pmu_handle_irq;
4174        }
4175}
4176
4177/*
4178 * enable software workaround for errata:
4179 * SNB: BJ122
4180 * IVB: BV98
4181 * HSW: HSD29
4182 *
4183 * Only needed when HT is enabled. However, detecting
4184 * whether HT is enabled is difficult (model specific). So instead,
4185 * we enable the workaround early in boot, and verify whether it
4186 * is needed in a later initcall phase once we have valid
4187 * topology information to check if HT is actually enabled.
4188 */
4189static __init void intel_ht_bug(void)
4190{
4191        x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
4192
4193        x86_pmu.start_scheduling = intel_start_scheduling;
4194        x86_pmu.commit_scheduling = intel_commit_scheduling;
4195        x86_pmu.stop_scheduling = intel_stop_scheduling;
4196}
4197
4198EVENT_ATTR_STR(mem-loads,       mem_ld_hsw,     "event=0xcd,umask=0x1,ldlat=3");
4199EVENT_ATTR_STR(mem-stores,      mem_st_hsw,     "event=0xd0,umask=0x82");
4200
4201/* Haswell special events */
4202EVENT_ATTR_STR(tx-start,        tx_start,       "event=0xc9,umask=0x1");
4203EVENT_ATTR_STR(tx-commit,       tx_commit,      "event=0xc9,umask=0x2");
4204EVENT_ATTR_STR(tx-abort,        tx_abort,       "event=0xc9,umask=0x4");
4205EVENT_ATTR_STR(tx-capacity,     tx_capacity,    "event=0x54,umask=0x2");
4206EVENT_ATTR_STR(tx-conflict,     tx_conflict,    "event=0x54,umask=0x1");
4207EVENT_ATTR_STR(el-start,        el_start,       "event=0xc8,umask=0x1");
4208EVENT_ATTR_STR(el-commit,       el_commit,      "event=0xc8,umask=0x2");
4209EVENT_ATTR_STR(el-abort,        el_abort,       "event=0xc8,umask=0x4");
4210EVENT_ATTR_STR(el-capacity,     el_capacity,    "event=0x54,umask=0x2");
4211EVENT_ATTR_STR(el-conflict,     el_conflict,    "event=0x54,umask=0x1");
4212EVENT_ATTR_STR(cycles-t,        cycles_t,       "event=0x3c,in_tx=1");
4213EVENT_ATTR_STR(cycles-ct,       cycles_ct,      "event=0x3c,in_tx=1,in_tx_cp=1");
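
/*
 * These aliases are exported through the PMU's sysfs "events" directory
 * (typically /sys/bus/event_source/devices/cpu/events/), so the raw
 * encodings above can be used symbolically, roughly as in:
 *
 *	perf stat -e cpu/tx-start/,cpu/tx-abort/ -a sleep 1
 */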
4214
4215static struct attribute *hsw_events_attrs[] = {
4216        EVENT_PTR(td_slots_issued),
4217        EVENT_PTR(td_slots_retired),
4218        EVENT_PTR(td_fetch_bubbles),
4219        EVENT_PTR(td_total_slots),
4220        EVENT_PTR(td_total_slots_scale),
4221        EVENT_PTR(td_recovery_bubbles),
4222        EVENT_PTR(td_recovery_bubbles_scale),
4223        NULL
4224};
4225
4226static struct attribute *hsw_mem_events_attrs[] = {
4227        EVENT_PTR(mem_ld_hsw),
4228        EVENT_PTR(mem_st_hsw),
4229        NULL,
4230};
4231
4232static struct attribute *hsw_tsx_events_attrs[] = {
4233        EVENT_PTR(tx_start),
4234        EVENT_PTR(tx_commit),
4235        EVENT_PTR(tx_abort),
4236        EVENT_PTR(tx_capacity),
4237        EVENT_PTR(tx_conflict),
4238        EVENT_PTR(el_start),
4239        EVENT_PTR(el_commit),
4240        EVENT_PTR(el_abort),
4241        EVENT_PTR(el_capacity),
4242        EVENT_PTR(el_conflict),
4243        EVENT_PTR(cycles_t),
4244        EVENT_PTR(cycles_ct),
4245        NULL
4246};
4247
4248EVENT_ATTR_STR(tx-capacity-read,  tx_capacity_read,  "event=0x54,umask=0x80");
4249EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
4250EVENT_ATTR_STR(el-capacity-read,  el_capacity_read,  "event=0x54,umask=0x80");
4251EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
4252
4253static struct attribute *icl_events_attrs[] = {
4254        EVENT_PTR(mem_ld_hsw),
4255        EVENT_PTR(mem_st_hsw),
4256        NULL,
4257};
4258
4259static struct attribute *icl_tsx_events_attrs[] = {
4260        EVENT_PTR(tx_start),
4261        EVENT_PTR(tx_abort),
4262        EVENT_PTR(tx_commit),
4263        EVENT_PTR(tx_capacity_read),
4264        EVENT_PTR(tx_capacity_write),
4265        EVENT_PTR(tx_conflict),
4266        EVENT_PTR(el_start),
4267        EVENT_PTR(el_abort),
4268        EVENT_PTR(el_commit),
4269        EVENT_PTR(el_capacity_read),
4270        EVENT_PTR(el_capacity_write),
4271        EVENT_PTR(el_conflict),
4272        EVENT_PTR(cycles_t),
4273        EVENT_PTR(cycles_ct),
4274        NULL,
4275};
4276
4277static __init struct attribute **get_icl_events_attrs(void)
4278{
4279        return boot_cpu_has(X86_FEATURE_RTM) ?
4280                merge_attr(icl_events_attrs, icl_tsx_events_attrs) :
4281                icl_events_attrs;
4282}
4283
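/*
 * freeze_on_smi is a writable sysfs attribute (typically
 * /sys/devices/cpu/freeze_on_smi).  Writing 1 sets the SMM-freeze bit in
 * IA32_DEBUGCTL on every online CPU so the counters stop counting while
 * the CPU is in SMM; writing 0 clears it again.
 */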
4284static ssize_t freeze_on_smi_show(struct device *cdev,
4285                                  struct device_attribute *attr,
4286                                  char *buf)
4287{
4288        return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
4289}
4290
4291static DEFINE_MUTEX(freeze_on_smi_mutex);
4292
4293static ssize_t freeze_on_smi_store(struct device *cdev,
4294                                   struct device_attribute *attr,
4295                                   const char *buf, size_t count)
4296{
4297        unsigned long val;
4298        ssize_t ret;
4299
4300        ret = kstrtoul(buf, 0, &val);
4301        if (ret)
4302                return ret;
4303
4304        if (val > 1)
4305                return -EINVAL;
4306
4307        mutex_lock(&freeze_on_smi_mutex);
4308
4309        if (x86_pmu.attr_freeze_on_smi == val)
4310                goto done;
4311
4312        x86_pmu.attr_freeze_on_smi = val;
4313
4314        get_online_cpus();
4315        on_each_cpu(flip_smm_bit, &val, 1);
4316        put_online_cpus();
4317done:
4318        mutex_unlock(&freeze_on_smi_mutex);
4319
4320        return count;
4321}
4322
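/*
 * On CPUs with the TSX_FORCE_ABORT workaround, PMC3 is normally kept
 * reserved for the microcode.  When the allow_tsx_force_abort knob is
 * flipped, any context that currently has an event on counter 3 must be
 * rescheduled, which update_tfa_sched() forces on each CPU.
 */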
4323static void update_tfa_sched(void *ignored)
4324{
4325        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4326
4327        /*
4328         * Check if PMC3 is used and, if so, force a reschedule of
4329         * all event types in all contexts.
4330         */
4331        if (test_bit(3, cpuc->active_mask))
4332                perf_pmu_resched(x86_get_pmu());
4333}
4334
4335static ssize_t show_sysctl_tfa(struct device *cdev,
4336                              struct device_attribute *attr,
4337                              char *buf)
4338{
4339        return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
4340}
4341
4342static ssize_t set_sysctl_tfa(struct device *cdev,
4343                              struct device_attribute *attr,
4344                              const char *buf, size_t count)
4345{
4346        bool val;
4347        ssize_t ret;
4348
4349        ret = kstrtobool(buf, &val);
4350        if (ret)
4351                return ret;
4352
4353        /* no change */
4354        if (val == allow_tsx_force_abort)
4355                return count;
4356
4357        allow_tsx_force_abort = val;
4358
4359        get_online_cpus();
4360        on_each_cpu(update_tfa_sched, NULL, 1);
4361        put_online_cpus();
4362
4363        return count;
4364}
4365
4366
4367static DEVICE_ATTR_RW(freeze_on_smi);
4368
4369static ssize_t branches_show(struct device *cdev,
4370                             struct device_attribute *attr,
4371                             char *buf)
4372{
4373        return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
4374}
4375
4376static DEVICE_ATTR_RO(branches);
4377
4378static struct attribute *lbr_attrs[] = {
4379        &dev_attr_branches.attr,
4380        NULL
4381};
4382
4383static char pmu_name_str[30];
4384
4385static ssize_t pmu_name_show(struct device *cdev,
4386                             struct device_attribute *attr,
4387                             char *buf)
4388{
4389        return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str);
4390}
4391
4392static DEVICE_ATTR_RO(pmu_name);
4393
4394static struct attribute *intel_pmu_caps_attrs[] = {
4395        &dev_attr_pmu_name.attr,
4396        NULL
4397};
4398
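/*
 * allow_tsx_force_abort (typically /sys/devices/cpu/allow_tsx_force_abort):
 * when set, perf is allowed to use PMC3 and TSX transactions are
 * force-aborted instead; when clear, PMC3 stays reserved for the TFA
 * workaround.
 */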
4399static DEVICE_ATTR(allow_tsx_force_abort, 0644,
4400                   show_sysctl_tfa,
4401                   set_sysctl_tfa);
4402
4403static struct attribute *intel_pmu_attrs[] = {
4404        &dev_attr_freeze_on_smi.attr,
4405        NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
4406        NULL,
4407};
4408
4409static __init struct attribute **
4410get_events_attrs(struct attribute **base,
4411                 struct attribute **mem,
4412                 struct attribute **tsx)
4413{
4414        struct attribute **attrs = base;
4415        struct attribute **old;
4416
4417        if (mem && x86_pmu.pebs)
4418                attrs = merge_attr(attrs, mem);
4419
4420        if (tsx && boot_cpu_has(X86_FEATURE_RTM)) {
4421                old = attrs;
4422                attrs = merge_attr(attrs, tsx);
4423                if (old != base)
4424                        kfree(old);
4425        }
4426
4427        return attrs;
4428}
4429
4430__init int intel_pmu_init(void)
4431{
4432        struct attribute **extra_attr = NULL;
4433        struct attribute **mem_attr = NULL;
4434        struct attribute **tsx_attr = NULL;
4435        struct attribute **to_free = NULL;
4436        union cpuid10_edx edx;
4437        union cpuid10_eax eax;
4438        union cpuid10_ebx ebx;
4439        struct event_constraint *c;
4440        unsigned int unused;
4441        struct extra_reg *er;
4442        int version, i;
4443        char *name;
4444
4445        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
4446                switch (boot_cpu_data.x86) {
4447                case 0x6:
4448                        return p6_pmu_init();
4449                case 0xb:
4450                        return knc_pmu_init();
4451                case 0xf:
4452                        return p4_pmu_init();
4453                }
4454                return -ENODEV;
4455        }
4456
4457        /*
4458         * Check whether the Architectural PerfMon supports
4459         * Branch Misses Retired hw_event or not.
4460         */
4461        cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
4462        if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
4463                return -ENODEV;
4464
4465        version = eax.split.version_id;
4466        if (version < 2)
4467                x86_pmu = core_pmu;
4468        else
4469                x86_pmu = intel_pmu;
4470
4471        x86_pmu.version                 = version;
4472        x86_pmu.num_counters            = eax.split.num_counters;
4473        x86_pmu.cntval_bits             = eax.split.bit_width;
4474        x86_pmu.cntval_mask             = (1ULL << eax.split.bit_width) - 1;
4475
4476        x86_pmu.events_maskl            = ebx.full;
4477        x86_pmu.events_mask_len         = eax.split.mask_length;
4478
4479        x86_pmu.max_pebs_events         = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
4480
4481        /*
4482         * Quirk: v2 perfmon does not report fixed-purpose events, so
4483         * assume at least 3 events, when not running in a hypervisor:
4484         */
4485        if (version > 1) {
4486                int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
4487
4488                x86_pmu.num_counters_fixed =
4489                        max((int)edx.split.num_counters_fixed, assume);
4490        }
4491
4492        if (version >= 4)
4493                x86_pmu.counter_freezing = !disable_counter_freezing;
4494
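        /*
         * If the CPU advertises PDCM, IA32_PERF_CAPABILITIES describes the
         * LBR record format, the PEBS format/features and whether
         * full-width counter writes are supported; intel_cap gives named
         * access to those fields.
         */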
4495        if (boot_cpu_has(X86_FEATURE_PDCM)) {
4496                u64 capabilities;
4497
4498                rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
4499                x86_pmu.intel_cap.capabilities = capabilities;
4500        }
4501
4502        intel_ds_init();
4503
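        /*
         * x86_add_quirk() prepends to the quirk list and quirks run from
         * the head, so the model-specific quirks registered in the switch
         * below all run before this generic one.
         */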
4504        x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
4505
4506        /*
4507         * Install the hw-cache-events table:
4508         */
4509        switch (boot_cpu_data.x86_model) {
4510        case INTEL_FAM6_CORE_YONAH:
4511                pr_cont("Core events, ");
4512                name = "core";
4513                break;
4514
4515        case INTEL_FAM6_CORE2_MEROM:
4516                x86_add_quirk(intel_clovertown_quirk);
4517                /* fall through */
4518
4519        case INTEL_FAM6_CORE2_MEROM_L:
4520        case INTEL_FAM6_CORE2_PENRYN:
4521        case INTEL_FAM6_CORE2_DUNNINGTON:
4522                memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
4523                       sizeof(hw_cache_event_ids));
4524
4525                intel_pmu_lbr_init_core();
4526
4527                x86_pmu.event_constraints = intel_core2_event_constraints;
4528                x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
4529                pr_cont("Core2 events, ");
4530                name = "core2";
4531                break;
4532
4533        case INTEL_FAM6_NEHALEM:
4534        case INTEL_FAM6_NEHALEM_EP:
4535        case INTEL_FAM6_NEHALEM_EX:
4536                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
4537                       sizeof(hw_cache_event_ids));
4538                memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
4539                       sizeof(hw_cache_extra_regs));
4540
4541                intel_pmu_lbr_init_nhm();
4542
4543                x86_pmu.event_constraints = intel_nehalem_event_constraints;
4544                x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
4545                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
4546                x86_pmu.extra_regs = intel_nehalem_extra_regs;
4547
4548                mem_attr = nhm_mem_events_attrs;
4549
4550                /* UOPS_ISSUED.STALLED_CYCLES */
4551                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4552                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4553                /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
4554                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4555                        X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
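                /*
                 * In these encodings .cmask=1 with .inv=1 makes the counter
                 * count cycles in which the underlying event occurred fewer
                 * than once, i.e. cycles with no uops issued/executed --
                 * which is how the stalled-cycles pseudo events are derived
                 * from ordinary uop-counting events.
                 */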
4556
4557                intel_pmu_pebs_data_source_nhm();
4558                x86_add_quirk(intel_nehalem_quirk);
4559                x86_pmu.pebs_no_tlb = 1;
4560                extra_attr = nhm_format_attr;
4561
4562                pr_cont("Nehalem events, ");
4563                name = "nehalem";
4564                break;
4565
4566        case INTEL_FAM6_ATOM_BONNELL:
4567        case INTEL_FAM6_ATOM_BONNELL_MID:
4568        case INTEL_FAM6_ATOM_SALTWELL:
4569        case INTEL_FAM6_ATOM_SALTWELL_MID:
4570        case INTEL_FAM6_ATOM_SALTWELL_TABLET:
4571                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
4572                       sizeof(hw_cache_event_ids));
4573
4574                intel_pmu_lbr_init_atom();
4575
4576                x86_pmu.event_constraints = intel_gen_event_constraints;
4577                x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
4578                x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
4579                pr_cont("Atom events, ");
4580                name = "bonnell";
4581                break;
4582
4583        case INTEL_FAM6_ATOM_SILVERMONT:
4584        case INTEL_FAM6_ATOM_SILVERMONT_X:
4585        case INTEL_FAM6_ATOM_SILVERMONT_MID:
4586        case INTEL_FAM6_ATOM_AIRMONT:
4587        case INTEL_FAM6_ATOM_AIRMONT_MID:
4588                memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
4589                        sizeof(hw_cache_event_ids));
4590                memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
4591                       sizeof(hw_cache_extra_regs));
4592
4593                intel_pmu_lbr_init_slm();
4594
4595                x86_pmu.event_constraints = intel_slm_event_constraints;
4596                x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
4597                x86_pmu.extra_regs = intel_slm_extra_regs;
4598                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4599                x86_pmu.cpu_events = slm_events_attrs;
4600                extra_attr = slm_format_attr;
4601                pr_cont("Silvermont events, ");
4602                name = "silvermont";
4603                break;
4604
4605        case INTEL_FAM6_ATOM_GOLDMONT:
4606        case INTEL_FAM6_ATOM_GOLDMONT_X:
4607                x86_add_quirk(intel_counter_freezing_quirk);
4608                memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
4609                       sizeof(hw_cache_event_ids));
4610                memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
4611                       sizeof(hw_cache_extra_regs));
4612
4613                intel_pmu_lbr_init_skl();
4614
4615                x86_pmu.event_constraints = intel_slm_event_constraints;
4616                x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
4617                x86_pmu.extra_regs = intel_glm_extra_regs;
4618                /*
4619                 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
4620                 * for precise cycles.
4621                 * :pp is identical to :ppp
4622                 */
4623                x86_pmu.pebs_aliases = NULL;
4624                x86_pmu.pebs_prec_dist = true;
4625                x86_pmu.lbr_pt_coexist = true;
4626                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4627                x86_pmu.cpu_events = glm_events_attrs;
4628                extra_attr = slm_format_attr;
4629                pr_cont("Goldmont events, ");
4630                name = "goldmont";
4631                break;
4632
4633        case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
4634                x86_add_quirk(intel_counter_freezing_quirk);
4635                memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
4636                       sizeof(hw_cache_event_ids));
4637                memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
4638                       sizeof(hw_cache_extra_regs));
4639
4640                intel_pmu_lbr_init_skl();
4641
4642                x86_pmu.event_constraints = intel_slm_event_constraints;
4643                x86_pmu.extra_regs = intel_glm_extra_regs;
4644                /*
4645                 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
4646                 * for precise cycles.
4647                 */
4648                x86_pmu.pebs_aliases = NULL;
4649                x86_pmu.pebs_prec_dist = true;
4650                x86_pmu.lbr_pt_coexist = true;
4651                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4652                x86_pmu.flags |= PMU_FL_PEBS_ALL;
4653                x86_pmu.get_event_constraints = glp_get_event_constraints;
4654                x86_pmu.cpu_events = glm_events_attrs;
4655                /* Goldmont Plus has 4-wide pipeline */
4656                event_attr_td_total_slots_scale_glm.event_str = "4";
4657                extra_attr = slm_format_attr;
4658                pr_cont("Goldmont plus events, ");
4659                name = "goldmont_plus";
4660                break;
4661
4662        case INTEL_FAM6_ATOM_TREMONT_X:
4663                x86_pmu.late_ack = true;
4664                memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
4665                       sizeof(hw_cache_event_ids));
4666                memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
4667                       sizeof(hw_cache_extra_regs));
4668                hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
4669
4670                intel_pmu_lbr_init_skl();
4671
4672                x86_pmu.event_constraints = intel_slm_event_constraints;
4673                x86_pmu.extra_regs = intel_tnt_extra_regs;
4674                /*
4675                 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
4676                 * for precise cycles.
4677                 */
4678                x86_pmu.pebs_aliases = NULL;
4679                x86_pmu.pebs_prec_dist = true;
4680                x86_pmu.lbr_pt_coexist = true;
4681                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4682                x86_pmu.get_event_constraints = tnt_get_event_constraints;
4683                extra_attr = slm_format_attr;
4684                pr_cont("Tremont events, ");
4685                name = "Tremont";
4686                break;
4687
4688        case INTEL_FAM6_WESTMERE:
4689        case INTEL_FAM6_WESTMERE_EP:
4690        case INTEL_FAM6_WESTMERE_EX:
4691                memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
4692                       sizeof(hw_cache_event_ids));
4693                memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
4694                       sizeof(hw_cache_extra_regs));
4695
4696                intel_pmu_lbr_init_nhm();
4697
4698                x86_pmu.event_constraints = intel_westmere_event_constraints;
4699                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
4700                x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
4701                x86_pmu.extra_regs = intel_westmere_extra_regs;
4702                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4703
4704                mem_attr = nhm_mem_events_attrs;
4705
4706                /* UOPS_ISSUED.STALLED_CYCLES */
4707                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4708                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4709                /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
4710                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4711                        X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
4712
4713                intel_pmu_pebs_data_source_nhm();
4714                extra_attr = nhm_format_attr;
4715                pr_cont("Westmere events, ");
4716                name = "westmere";
4717                break;
4718
4719        case INTEL_FAM6_SANDYBRIDGE:
4720        case INTEL_FAM6_SANDYBRIDGE_X:
4721                x86_add_quirk(intel_sandybridge_quirk);
4722                x86_add_quirk(intel_ht_bug);
4723                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
4724                       sizeof(hw_cache_event_ids));
4725                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
4726                       sizeof(hw_cache_extra_regs));
4727
4728                intel_pmu_lbr_init_snb();
4729
4730                x86_pmu.event_constraints = intel_snb_event_constraints;
4731                x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
4732                x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
4733                if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
4734                        x86_pmu.extra_regs = intel_snbep_extra_regs;
4735                else
4736                        x86_pmu.extra_regs = intel_snb_extra_regs;
4737
4738
4739                /* all extra regs are per-cpu when HT is on */
4740                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4741                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4742
4743                x86_pmu.cpu_events = snb_events_attrs;
4744                mem_attr = snb_mem_events_attrs;
4745
4746                /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
4747                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4748                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4749                /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
4750                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
4751                        X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
4752
4753                extra_attr = nhm_format_attr;
4754
4755                pr_cont("SandyBridge events, ");
4756                name = "sandybridge";
4757                break;
4758
4759        case INTEL_FAM6_IVYBRIDGE:
4760        case INTEL_FAM6_IVYBRIDGE_X:
4761                x86_add_quirk(intel_ht_bug);
4762                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
4763                       sizeof(hw_cache_event_ids));
4764                /* dTLB-load-misses on IVB is different than SNB */
4765                hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
4766
4767                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
4768                       sizeof(hw_cache_extra_regs));
4769
4770                intel_pmu_lbr_init_snb();
4771
4772                x86_pmu.event_constraints = intel_ivb_event_constraints;
4773                x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
4774                x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4775                x86_pmu.pebs_prec_dist = true;
4776                if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
4777                        x86_pmu.extra_regs = intel_snbep_extra_regs;
4778                else
4779                        x86_pmu.extra_regs = intel_snb_extra_regs;
4780                /* all extra regs are per-cpu when HT is on */
4781                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4782                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4783
4784                x86_pmu.cpu_events = snb_events_attrs;
4785                mem_attr = snb_mem_events_attrs;
4786
4787                /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
4788                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
4789                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
4790
4791                extra_attr = nhm_format_attr;
4792
4793                pr_cont("IvyBridge events, ");
4794                name = "ivybridge";
4795                break;
4796
4797
4798        case INTEL_FAM6_HASWELL_CORE:
4799        case INTEL_FAM6_HASWELL_X:
4800        case INTEL_FAM6_HASWELL_ULT:
4801        case INTEL_FAM6_HASWELL_GT3E:
4802                x86_add_quirk(intel_ht_bug);
4803                x86_add_quirk(intel_pebs_isolation_quirk);
4804                x86_pmu.late_ack = true;
4805                memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4806                memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4807
4808                intel_pmu_lbr_init_hsw();
4809
4810                x86_pmu.event_constraints = intel_hsw_event_constraints;
4811                x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
4812                x86_pmu.extra_regs = intel_snbep_extra_regs;
4813                x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4814                x86_pmu.pebs_prec_dist = true;
4815                /* all extra regs are per-cpu when HT is on */
4816                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4817                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4818
4819                x86_pmu.hw_config = hsw_hw_config;
4820                x86_pmu.get_event_constraints = hsw_get_event_constraints;
4821                x86_pmu.cpu_events = hsw_events_attrs;
4822                x86_pmu.lbr_double_abort = true;
4823                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4824                        hsw_format_attr : nhm_format_attr;
4825                mem_attr = hsw_mem_events_attrs;
4826                tsx_attr = hsw_tsx_events_attrs;
4827                pr_cont("Haswell events, ");
4828                name = "haswell";
4829                break;
4830
4831        case INTEL_FAM6_BROADWELL_CORE:
4832        case INTEL_FAM6_BROADWELL_XEON_D:
4833        case INTEL_FAM6_BROADWELL_GT3E:
4834        case INTEL_FAM6_BROADWELL_X:
4835                x86_add_quirk(intel_pebs_isolation_quirk);
4836                x86_pmu.late_ack = true;
4837                memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4838                memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4839
4840                /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
4841                hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
4842                                                                         BDW_L3_MISS|HSW_SNOOP_DRAM;
4843                hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
4844                                                                          HSW_SNOOP_DRAM;
4845                hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
4846                                                                             BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
4847                hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
4848                                                                              BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
4849
4850                intel_pmu_lbr_init_hsw();
4851
4852                x86_pmu.event_constraints = intel_bdw_event_constraints;
4853                x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
4854                x86_pmu.extra_regs = intel_snbep_extra_regs;
4855                x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
4856                x86_pmu.pebs_prec_dist = true;
4857                /* all extra regs are per-cpu when HT is on */
4858                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4859                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4860
4861                x86_pmu.hw_config = hsw_hw_config;
4862                x86_pmu.get_event_constraints = hsw_get_event_constraints;
4863                x86_pmu.cpu_events = hsw_events_attrs;
4864                x86_pmu.limit_period = bdw_limit_period;
4865                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4866                        hsw_format_attr : nhm_format_attr;
4867                mem_attr = hsw_mem_events_attrs;
4868                tsx_attr = hsw_tsx_events_attrs;
4869                pr_cont("Broadwell events, ");
4870                name = "broadwell";
4871                break;
4872
4873        case INTEL_FAM6_XEON_PHI_KNL:
4874        case INTEL_FAM6_XEON_PHI_KNM:
4875                memcpy(hw_cache_event_ids,
4876                       slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4877                memcpy(hw_cache_extra_regs,
4878                       knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4879                intel_pmu_lbr_init_knl();
4880
4881                x86_pmu.event_constraints = intel_slm_event_constraints;
4882                x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
4883                x86_pmu.extra_regs = intel_knl_extra_regs;
4884
4885                /* all extra regs are per-cpu when HT is on */
4886                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4887                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4888                extra_attr = slm_format_attr;
4889                pr_cont("Knights Landing/Mill events, ");
4890                name = "knights-landing";
4891                break;
4892
4893        case INTEL_FAM6_SKYLAKE_MOBILE:
4894        case INTEL_FAM6_SKYLAKE_DESKTOP:
4895        case INTEL_FAM6_SKYLAKE_X:
4896        case INTEL_FAM6_KABYLAKE_MOBILE:
4897        case INTEL_FAM6_KABYLAKE_DESKTOP:
4898                x86_add_quirk(intel_pebs_isolation_quirk);
4899                x86_pmu.late_ack = true;
4900                memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4901                memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4902                intel_pmu_lbr_init_skl();
4903
4904                /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
4905                event_attr_td_recovery_bubbles.event_str_noht =
4906                        "event=0xd,umask=0x1,cmask=1";
4907                event_attr_td_recovery_bubbles.event_str_ht =
4908                        "event=0xd,umask=0x1,cmask=1,any=1";
4909
4910                x86_pmu.event_constraints = intel_skl_event_constraints;
4911                x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
4912                x86_pmu.extra_regs = intel_skl_extra_regs;
4913                x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
4914                x86_pmu.pebs_prec_dist = true;
4915                /* all extra regs are per-cpu when HT is on */
4916                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4917                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4918
4919                x86_pmu.hw_config = hsw_hw_config;
4920                x86_pmu.get_event_constraints = hsw_get_event_constraints;
4921                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4922                        hsw_format_attr : nhm_format_attr;
4923                extra_attr = merge_attr(extra_attr, skl_format_attr);
4924                to_free = extra_attr;
4925                x86_pmu.cpu_events = hsw_events_attrs;
4926                mem_attr = hsw_mem_events_attrs;
4927                tsx_attr = hsw_tsx_events_attrs;
4928                intel_pmu_pebs_data_source_skl(
4929                        boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
4930
4931                if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
4932                        x86_pmu.flags |= PMU_FL_TFA;
4933                        x86_pmu.get_event_constraints = tfa_get_event_constraints;
4934                        x86_pmu.enable_all = intel_tfa_pmu_enable_all;
4935                        x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
4936                        intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr;
4937                }
4938
4939                pr_cont("Skylake events, ");
4940                name = "skylake";
4941                break;
4942
4943        case INTEL_FAM6_ICELAKE_MOBILE:
4944                x86_pmu.late_ack = true;
4945                memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
4946                memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
4947                hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
4948                intel_pmu_lbr_init_skl();
4949
4950                x86_pmu.event_constraints = intel_icl_event_constraints;
4951                x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
4952                x86_pmu.extra_regs = intel_icl_extra_regs;
4953                x86_pmu.pebs_aliases = NULL;
4954                x86_pmu.pebs_prec_dist = true;
4955                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
4956                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
4957
4958                x86_pmu.hw_config = hsw_hw_config;
4959                x86_pmu.get_event_constraints = icl_get_event_constraints;
4960                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
4961                        hsw_format_attr : nhm_format_attr;
4962                extra_attr = merge_attr(extra_attr, skl_format_attr);
4963                x86_pmu.cpu_events = get_icl_events_attrs();
4964                x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02);
4965                x86_pmu.lbr_pt_coexist = true;
4966                intel_pmu_pebs_data_source_skl(false);
4967                pr_cont("Icelake events, ");
4968                name = "icelake";
4969                break;
4970
4971        default:
4972                switch (x86_pmu.version) {
4973                case 1:
4974                        x86_pmu.event_constraints = intel_v1_event_constraints;
4975                        pr_cont("generic architected perfmon v1, ");
4976                        name = "generic_arch_v1";
4977                        break;
4978                default:
4979                        /*
4980                         * default constraints for v2 and up
4981                         */
4982                        x86_pmu.event_constraints = intel_gen_event_constraints;
4983                        pr_cont("generic architected perfmon, ");
4984                        name = "generic_arch_v2+";
4985                        break;
4986                }
4987        }
4988
4989        snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
4990
4991        if (version >= 2 && extra_attr) {
4992                x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
4993                                                  extra_attr);
4994                WARN_ON(!x86_pmu.format_attrs);
4995        }
4996
4997        x86_pmu.cpu_events = get_events_attrs(x86_pmu.cpu_events,
4998                                              mem_attr, tsx_attr);
4999
5000        if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
5001                WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
5002                     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
5003                x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
5004        }
5005        x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
5006
5007        if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
5008                WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
5009                     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
5010                x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
5011        }
5012
5013        x86_pmu.intel_ctrl |=
5014                ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
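        /*
         * Worked example (INTEL_PMC_IDX_FIXED is 32): with 4 generic and
         * 3 fixed counters this gives
         *	intel_ctrl = 0xf | (0x7ULL << 32) = 0x70000000f,
         * i.e. the enable bits for GP counters 0-3 and fixed counters 0-2
         * as laid out in MSR_CORE_PERF_GLOBAL_CTRL.
         */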
5015
5016        if (x86_pmu.event_constraints) {
5017                /*
5018                 * event on fixed counter2 (REF_CYCLES) only works on this
5019                 * counter, so do not extend mask to generic counters
5020                 */
5021                for_each_event_constraint(c, x86_pmu.event_constraints) {
5022                        if (c->cmask == FIXED_EVENT_FLAGS
5023                            && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
5024                                c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
5025                        }
5026                        c->idxmsk64 &=
5027                                ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
5028                        c->weight = hweight64(c->idxmsk64);
5029                }
5030        }
5031
5032        /*
5033         * Accessing LBR MSRs may cause a #GP under certain circumstances,
5034         * e.g. KVM doesn't support the LBR MSRs.
5035         * Check all LBR MSRs here.
5036         * Disable LBR access if any LBR MSR cannot be accessed.
5037         */
5038        if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
5039                x86_pmu.lbr_nr = 0;
5040        for (i = 0; i < x86_pmu.lbr_nr; i++) {
5041                if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
5042                      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
5043                        x86_pmu.lbr_nr = 0;
5044        }
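        /*
         * check_msr() probes an MSR by toggling the given mask bits,
         * verifying the value reads back and then restoring the original
         * contents; a failed probe here typically means a hypervisor that
         * does not implement the MSR.
         */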
5045
5046        x86_pmu.caps_attrs = intel_pmu_caps_attrs;
5047
5048        if (x86_pmu.lbr_nr) {
5049                x86_pmu.caps_attrs = merge_attr(x86_pmu.caps_attrs, lbr_attrs);
5050                pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
5051        }
5052
5053        /*
5054         * Accessing extra MSRs may cause a #GP under certain circumstances,
5055         * e.g. KVM doesn't support the offcore event MSRs.
5056         * Check all extra_regs here.
5057         */
5058        if (x86_pmu.extra_regs) {
5059                for (er = x86_pmu.extra_regs; er->msr; er++) {
5060                        er->extra_msr_access = check_msr(er->msr, 0x11UL);
5061                        /* Disable LBR select mapping */
5062                        if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
5063                                x86_pmu.lbr_sel_map = NULL;
5064                }
5065        }
5066
5067        /* Support full width counters using alternative MSR range */
5068        if (x86_pmu.intel_cap.full_width_write) {
5069                x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
5070                x86_pmu.perfctr = MSR_IA32_PMC0;
5071                pr_cont("full-width counters, ");
5072        }
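        /*
         * With full-width writes the counters are reprogrammed through the
         * MSR_IA32_PMC0 alias range rather than the legacy perfctr MSRs,
         * whose writes are limited to 32 bits, so the sampling period can
         * span (almost) the full counter width.
         */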
5073
5074        /*
5075         * For arch perfmon v4, use counter freezing to avoid
5076         * several MSR accesses in the PMI.
5077         */
5078        if (x86_pmu.counter_freezing)
5079                x86_pmu.handle_irq = intel_pmu_handle_irq_v4;
5080
5081        kfree(to_free);
5082        return 0;
5083}
5084
5085/*
5086 * HT bug: phase 2 init
5087 * Called once we have valid topology information to check
5088 * whether or not HT is enabled.
5089 * If HT is off, then we disable the workaround.
5090 */
5091static __init int fixup_ht_bug(void)
5092{
5093        int c;
5094        /*
5095         * problem not present on this CPU model, nothing to do
5096         */
5097        if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
5098                return 0;
5099
5100        if (topology_max_smt_threads() > 1) {
5101                pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
5102                return 0;
5103        }
5104
5105        cpus_read_lock();
5106
5107        hardlockup_detector_perf_stop();
5108
5109        x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
5110
5111        x86_pmu.start_scheduling = NULL;
5112        x86_pmu.commit_scheduling = NULL;
5113        x86_pmu.stop_scheduling = NULL;
5114
5115        hardlockup_detector_perf_restart();
5116
5117        for_each_online_cpu(c)
5118                free_excl_cntrs(&per_cpu(cpu_hw_events, c));
5119
5120        cpus_read_unlock();
5121        pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
5122        return 0;
5123}
5124subsys_initcall(fixup_ht_bug)
5125