linux/arch/x86/events/intel/core.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Per core/cpu state
   4 *
   5 * Used to coordinate shared registers between HT threads or
   6 * among events on a single PMU.
   7 */
   8
   9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  10
  11#include <linux/stddef.h>
  12#include <linux/types.h>
  13#include <linux/init.h>
  14#include <linux/slab.h>
  15#include <linux/export.h>
  16#include <linux/nmi.h>
  17
  18#include <asm/cpufeature.h>
  19#include <asm/hardirq.h>
  20#include <asm/intel-family.h>
  21#include <asm/intel_pt.h>
  22#include <asm/apic.h>
  23#include <asm/cpu_device_id.h>
  24
  25#include "../perf_event.h"
  26
  27/*
  28 * Intel PerfMon, used on Core and later.
  29 */
  30static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
  31{
  32        [PERF_COUNT_HW_CPU_CYCLES]              = 0x003c,
  33        [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
  34        [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x4f2e,
  35        [PERF_COUNT_HW_CACHE_MISSES]            = 0x412e,
  36        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c4,
  37        [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c5,
  38        [PERF_COUNT_HW_BUS_CYCLES]              = 0x013c,
  39        [PERF_COUNT_HW_REF_CPU_CYCLES]          = 0x0300, /* pseudo-encoding */
  40};
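/*
 * For illustration: each entry above is a raw PERFEVTSEL encoding, with the
 * event select in bits 0-7 and the unit mask in bits 8-15.  0x412e is event
 * 0x2e, umask 0x41 (LONGEST_LAT_CACHE.MISS, i.e. LLC misses); 0x00c4 is event
 * 0xc4, umask 0x00 (BR_INST_RETIRED.ALL_BRANCHES).  0x0300 is the
 * pseudo-encoding reserved for the fixed reference-cycles counter.
 */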
  41
  42static struct event_constraint intel_core_event_constraints[] __read_mostly =
  43{
  44        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
  45        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
  46        INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
  47        INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
  48        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
  49        INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
  50        EVENT_CONSTRAINT_END
  51};
  52
  53static struct event_constraint intel_core2_event_constraints[] __read_mostly =
  54{
  55        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  56        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  57        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
  58        INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
  59        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
  60        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
  61        INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
  62        INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
  63        INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
  64        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
  65        INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
  66        INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
  67        INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
  68        EVENT_CONSTRAINT_END
  69};
  70
  71static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
  72{
  73        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  74        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  75        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
  76        INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
  77        INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
  78        INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
  79        INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
  80        INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
  81        INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
  82        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
  83        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
  84        EVENT_CONSTRAINT_END
  85};
  86
  87static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
  88{
  89        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
  90        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
  91        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
  92        EVENT_EXTRA_END
  93};
  94
  95static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
  96{
  97        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  98        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  99        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 100        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
 101        INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
 102        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
 103        INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
 104        EVENT_CONSTRAINT_END
 105};
 106
 107static struct event_constraint intel_snb_event_constraints[] __read_mostly =
 108{
 109        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 110        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 111        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 112        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
 113        INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
 114        INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 115        INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
 116        INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
 117        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 118        INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 119        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
 120        INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 121
 122        /*
  123         * When HT is off, these events can only run on the bottom 4 counters.
  124         * When HT is on, they are impacted by the HT bug and require EXCL access.
 125         */
 126        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
 127        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
 128        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
 129        INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
 130
 131        EVENT_CONSTRAINT_END
 132};
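/*
 * For illustration: the second argument of these constraint macros is a
 * counter bitmask, so 0xf == 0b1111 limits an event to general-purpose
 * counters 0-3.  That is how the comment above is encoded: with HT off the
 * 0xd0-0xd3 events may only be scheduled on the bottom four counters, and the
 * INTEL_EXCLEVT_* variants additionally mark them as needing exclusive (EXCL)
 * access when HT is on, to work around the HT erratum.
 */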
 133
 134static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
 135{
 136        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 137        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 138        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 139        INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
 140        INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
 141        INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
 142        INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
 143        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
 144        INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
 145        INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
 146        INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 147        INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
 148        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 149
 150        /*
  151         * When HT is off, these events can only run on the bottom 4 counters.
  152         * When HT is on, they are impacted by the HT bug and require EXCL access.
 153         */
 154        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
 155        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
 156        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
 157        INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
 158
 159        EVENT_CONSTRAINT_END
 160};
 161
 162static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
 163{
 164        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
 165        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
 166        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
 167        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
 168        EVENT_EXTRA_END
 169};
 170
 171static struct event_constraint intel_v1_event_constraints[] __read_mostly =
 172{
 173        EVENT_CONSTRAINT_END
 174};
 175
 176static struct event_constraint intel_gen_event_constraints[] __read_mostly =
 177{
 178        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 179        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 180        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 181        EVENT_CONSTRAINT_END
 182};
 183
 184static struct event_constraint intel_slm_event_constraints[] __read_mostly =
 185{
 186        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 187        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 188        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
 189        EVENT_CONSTRAINT_END
 190};
 191
 192static struct event_constraint intel_skl_event_constraints[] = {
 193        FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
 194        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
 195        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
 196        INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2),    /* INST_RETIRED.PREC_DIST */
 197
 198        /*
 199         * when HT is off, these can only run on the bottom 4 counters
 200         */
 201        INTEL_EVENT_CONSTRAINT(0xd0, 0xf),      /* MEM_INST_RETIRED.* */
 202        INTEL_EVENT_CONSTRAINT(0xd1, 0xf),      /* MEM_LOAD_RETIRED.* */
 203        INTEL_EVENT_CONSTRAINT(0xd2, 0xf),      /* MEM_LOAD_L3_HIT_RETIRED.* */
 204        INTEL_EVENT_CONSTRAINT(0xcd, 0xf),      /* MEM_TRANS_RETIRED.* */
 205        INTEL_EVENT_CONSTRAINT(0xc6, 0xf),      /* FRONTEND_RETIRED.* */
 206
 207        EVENT_CONSTRAINT_END
 208};
 209
 210static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
 211        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
 212        INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
 213        EVENT_EXTRA_END
 214};
 215
 216static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
 217        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
 218        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
 219        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
 220        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 221        EVENT_EXTRA_END
 222};
 223
 224static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
 225        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
 226        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
 227        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
 228        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 229        EVENT_EXTRA_END
 230};
 231
 232static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
 233        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
 234        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
 235        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 236        /*
  237         * Note: the low 8 bits of the eventsel code are not a contiguous field;
  238         * some bits #GP when set and are masked out (see the note after this table).
 239         */
 240        INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
 241        EVENT_EXTRA_END
 242};
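/*
 * For illustration (interpreting the mask above): 0x7fff17 is the set of bits
 * accepted for MSR_PEBS_FRONTEND.  Its low byte, 0x17 == 0b0001_0111, keeps
 * only bits 0-2 and 4 of the event code field -- the non-contiguous, #GP-prone
 * field the comment refers to -- while 0x7fff << 8 additionally permits
 * bits 8-22.
 */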
 243
 244static struct event_constraint intel_icl_event_constraints[] = {
 245        FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
 246        FIXED_EVENT_CONSTRAINT(0x01c0, 0),      /* INST_RETIRED.PREC_DIST */
 247        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
 248        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
 249        FIXED_EVENT_CONSTRAINT(0x0400, 3),      /* SLOTS */
 250        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
 251        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
 252        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
 253        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
 254        INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
 255        INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
 256        INTEL_EVENT_CONSTRAINT(0x32, 0xf),      /* SW_PREFETCH_ACCESS.* */
 257        INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf),
 258        INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
 259        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff),  /* CYCLE_ACTIVITY.STALLS_TOTAL */
 260        INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff),  /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
 261        INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff),  /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
 262        INTEL_EVENT_CONSTRAINT(0xa3, 0xf),      /* CYCLE_ACTIVITY.* */
 263        INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
 264        INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
 265        INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
 266        INTEL_EVENT_CONSTRAINT(0xef, 0xf),
 267        INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
 268        EVENT_CONSTRAINT_END
 269};
 270
 271static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
 272        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
 273        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
 274        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 275        INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
 276        EVENT_EXTRA_END
 277};
 278
 279static struct extra_reg intel_spr_extra_regs[] __read_mostly = {
 280        INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
 281        INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
 282        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 283        INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
 284        INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
 285        INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
 286        EVENT_EXTRA_END
 287};
 288
 289static struct event_constraint intel_spr_event_constraints[] = {
 290        FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
 291        FIXED_EVENT_CONSTRAINT(0x01c0, 0),      /* INST_RETIRED.PREC_DIST */
 292        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
 293        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
 294        FIXED_EVENT_CONSTRAINT(0x0400, 3),      /* SLOTS */
 295        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
 296        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
 297        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
 298        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
 299        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
 300        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
 301        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
 302        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
 303
 304        INTEL_EVENT_CONSTRAINT(0x2e, 0xff),
 305        INTEL_EVENT_CONSTRAINT(0x3c, 0xff),
 306        /*
  307         * Generally, event codes < 0x90 are restricted to counters 0-3.
  308         * 0x2E and 0x3C are exceptions and have no restriction.
 309         */
 310        INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),
 311
 312        INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
 313        INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
 314        INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf),
 315        INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
 316        INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
 317        INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1),
 318        INTEL_EVENT_CONSTRAINT(0xce, 0x1),
 319        INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
 320        /*
  321         * Generally, event codes >= 0x90 are likely to have no restrictions.
  322         * The exceptions are defined above.
 323         */
 324        INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff),
 325
 326        EVENT_CONSTRAINT_END
 327};
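/*
 * For illustration: the counter masks above encode the two comments directly.
 * 0xf == 0b1111 allows only general-purpose counters 0-3, matching the
 * restricted < 0x90 range, while 0xff allows any of the eight general-purpose
 * counters, matching the unrestricted >= 0x90 range and the 0x2e/0x3c
 * exceptions.
 */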
 328
 329
 330EVENT_ATTR_STR(mem-loads,       mem_ld_nhm,     "event=0x0b,umask=0x10,ldlat=3");
 331EVENT_ATTR_STR(mem-loads,       mem_ld_snb,     "event=0xcd,umask=0x1,ldlat=3");
 332EVENT_ATTR_STR(mem-stores,      mem_st_snb,     "event=0xcd,umask=0x2");
 333
 334static struct attribute *nhm_mem_events_attrs[] = {
 335        EVENT_PTR(mem_ld_nhm),
 336        NULL,
 337};
 338
 339/*
 340 * topdown events for Intel Core CPUs.
 341 *
  342 * The events are all expressed in slots, i.e. issue slots of a 4-wide
  343 * pipeline. Some events are already reported in slots; for cycle
  344 * events we multiply by the pipeline width (4).
 345 *
 346 * With Hyper Threading on, topdown metrics are either summed or averaged
 347 * between the threads of a core: (count_t0 + count_t1).
 348 *
 349 * For the average case the metric is always scaled to pipeline width,
 350 * so we use factor 2 ((count_t0 + count_t1) / 2 * 4)
 351 */
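/*
 * Worked example of the scaling above: with HT on, suppose thread 0 counts
 * 1000 unhalted cycles and thread 1 counts 600.  The per-core average is
 * (1000 + 600) / 2 = 800 cycles, i.e. 800 * 4 = 3200 issue slots.  Summing the
 * two counts and applying a .scale of 2 gives the same result,
 * (1000 + 600) * 2 = 3200, which is why td_total_slots_scale below is "4"
 * without HT and "2" with HT.
 */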
 352
 353EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
 354        "event=0x3c,umask=0x0",                 /* cpu_clk_unhalted.thread */
 355        "event=0x3c,umask=0x0,any=1");          /* cpu_clk_unhalted.thread_any */
 356EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
 357EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
 358        "event=0xe,umask=0x1");                 /* uops_issued.any */
 359EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
 360        "event=0xc2,umask=0x2");                /* uops_retired.retire_slots */
 361EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
 362        "event=0x9c,umask=0x1");                /* idq_uops_not_delivered_core */
 363EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
 364        "event=0xd,umask=0x3,cmask=1",          /* int_misc.recovery_cycles */
 365        "event=0xd,umask=0x3,cmask=1,any=1");   /* int_misc.recovery_cycles_any */
 366EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
 367        "4", "2");
 368
 369EVENT_ATTR_STR(slots,                   slots,                  "event=0x00,umask=0x4");
 370EVENT_ATTR_STR(topdown-retiring,        td_retiring,            "event=0x00,umask=0x80");
 371EVENT_ATTR_STR(topdown-bad-spec,        td_bad_spec,            "event=0x00,umask=0x81");
 372EVENT_ATTR_STR(topdown-fe-bound,        td_fe_bound,            "event=0x00,umask=0x82");
 373EVENT_ATTR_STR(topdown-be-bound,        td_be_bound,            "event=0x00,umask=0x83");
 374EVENT_ATTR_STR(topdown-heavy-ops,       td_heavy_ops,           "event=0x00,umask=0x84");
 375EVENT_ATTR_STR(topdown-br-mispredict,   td_br_mispredict,       "event=0x00,umask=0x85");
 376EVENT_ATTR_STR(topdown-fetch-lat,       td_fetch_lat,           "event=0x00,umask=0x86");
 377EVENT_ATTR_STR(topdown-mem-bound,       td_mem_bound,           "event=0x00,umask=0x87");
 378
 379static struct attribute *snb_events_attrs[] = {
 380        EVENT_PTR(td_slots_issued),
 381        EVENT_PTR(td_slots_retired),
 382        EVENT_PTR(td_fetch_bubbles),
 383        EVENT_PTR(td_total_slots),
 384        EVENT_PTR(td_total_slots_scale),
 385        EVENT_PTR(td_recovery_bubbles),
 386        EVENT_PTR(td_recovery_bubbles_scale),
 387        NULL,
 388};
 389
 390static struct attribute *snb_mem_events_attrs[] = {
 391        EVENT_PTR(mem_ld_snb),
 392        EVENT_PTR(mem_st_snb),
 393        NULL,
 394};
 395
 396static struct event_constraint intel_hsw_event_constraints[] = {
 397        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 398        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 399        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 400        INTEL_UEVENT_CONSTRAINT(0x148, 0x4),    /* L1D_PEND_MISS.PENDING */
 401        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 402        INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 403        /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 404        INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
 405        /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
 406        INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
 407        /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
 408        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
 409
 410        /*
  411         * When HT is off, these events can only run on the bottom 4 counters.
  412         * When HT is on, they are impacted by the HT bug and require EXCL access.
 413         */
 414        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
 415        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
 416        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
 417        INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
 418
 419        EVENT_CONSTRAINT_END
 420};
 421
 422static struct event_constraint intel_bdw_event_constraints[] = {
 423        FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
 424        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
 425        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
 426        INTEL_UEVENT_CONSTRAINT(0x148, 0x4),    /* L1D_PEND_MISS.PENDING */
 427        INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4),        /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
 428        /*
 429         * when HT is off, these can only run on the bottom 4 counters
 430         */
 431        INTEL_EVENT_CONSTRAINT(0xd0, 0xf),      /* MEM_INST_RETIRED.* */
 432        INTEL_EVENT_CONSTRAINT(0xd1, 0xf),      /* MEM_LOAD_RETIRED.* */
 433        INTEL_EVENT_CONSTRAINT(0xd2, 0xf),      /* MEM_LOAD_L3_HIT_RETIRED.* */
 434        INTEL_EVENT_CONSTRAINT(0xcd, 0xf),      /* MEM_TRANS_RETIRED.* */
 435        EVENT_CONSTRAINT_END
 436};
 437
 438static u64 intel_pmu_event_map(int hw_event)
 439{
 440        return intel_perfmon_event_map[hw_event];
 441}
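/*
 * For illustration: this is how a generic PERF_COUNT_HW_* index resolves to
 * one of the raw encodings in intel_perfmon_event_map above, e.g.:
 *
 *	u64 config = intel_pmu_event_map(PERF_COUNT_HW_CACHE_MISSES);
 *	// config == 0x412e: event 0x2e, umask 0x41 (LLC misses)
 */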
 442
 443static __initconst const u64 spr_hw_cache_event_ids
 444                                [PERF_COUNT_HW_CACHE_MAX]
 445                                [PERF_COUNT_HW_CACHE_OP_MAX]
 446                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 447{
 448 [ C(L1D ) ] = {
 449        [ C(OP_READ) ] = {
 450                [ C(RESULT_ACCESS) ] = 0x81d0,
 451                [ C(RESULT_MISS)   ] = 0xe124,
 452        },
 453        [ C(OP_WRITE) ] = {
 454                [ C(RESULT_ACCESS) ] = 0x82d0,
 455        },
 456 },
 457 [ C(L1I ) ] = {
 458        [ C(OP_READ) ] = {
 459                [ C(RESULT_MISS)   ] = 0xe424,
 460        },
 461        [ C(OP_WRITE) ] = {
 462                [ C(RESULT_ACCESS) ] = -1,
 463                [ C(RESULT_MISS)   ] = -1,
 464        },
 465 },
 466 [ C(LL  ) ] = {
 467        [ C(OP_READ) ] = {
 468                [ C(RESULT_ACCESS) ] = 0x12a,
 469                [ C(RESULT_MISS)   ] = 0x12a,
 470        },
 471        [ C(OP_WRITE) ] = {
 472                [ C(RESULT_ACCESS) ] = 0x12a,
 473                [ C(RESULT_MISS)   ] = 0x12a,
 474        },
 475 },
 476 [ C(DTLB) ] = {
 477        [ C(OP_READ) ] = {
 478                [ C(RESULT_ACCESS) ] = 0x81d0,
 479                [ C(RESULT_MISS)   ] = 0xe12,
 480        },
 481        [ C(OP_WRITE) ] = {
 482                [ C(RESULT_ACCESS) ] = 0x82d0,
 483                [ C(RESULT_MISS)   ] = 0xe13,
 484        },
 485 },
 486 [ C(ITLB) ] = {
 487        [ C(OP_READ) ] = {
 488                [ C(RESULT_ACCESS) ] = -1,
 489                [ C(RESULT_MISS)   ] = 0xe11,
 490        },
 491        [ C(OP_WRITE) ] = {
 492                [ C(RESULT_ACCESS) ] = -1,
 493                [ C(RESULT_MISS)   ] = -1,
 494        },
 495        [ C(OP_PREFETCH) ] = {
 496                [ C(RESULT_ACCESS) ] = -1,
 497                [ C(RESULT_MISS)   ] = -1,
 498        },
 499 },
 500 [ C(BPU ) ] = {
 501        [ C(OP_READ) ] = {
 502                [ C(RESULT_ACCESS) ] = 0x4c4,
 503                [ C(RESULT_MISS)   ] = 0x4c5,
 504        },
 505        [ C(OP_WRITE) ] = {
 506                [ C(RESULT_ACCESS) ] = -1,
 507                [ C(RESULT_MISS)   ] = -1,
 508        },
 509        [ C(OP_PREFETCH) ] = {
 510                [ C(RESULT_ACCESS) ] = -1,
 511                [ C(RESULT_MISS)   ] = -1,
 512        },
 513 },
 514 [ C(NODE) ] = {
 515        [ C(OP_READ) ] = {
 516                [ C(RESULT_ACCESS) ] = 0x12a,
 517                [ C(RESULT_MISS)   ] = 0x12a,
 518        },
 519 },
 520};
 521
 522static __initconst const u64 spr_hw_cache_extra_regs
 523                                [PERF_COUNT_HW_CACHE_MAX]
 524                                [PERF_COUNT_HW_CACHE_OP_MAX]
 525                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 526{
 527 [ C(LL  ) ] = {
 528        [ C(OP_READ) ] = {
 529                [ C(RESULT_ACCESS) ] = 0x10001,
 530                [ C(RESULT_MISS)   ] = 0x3fbfc00001,
 531        },
 532        [ C(OP_WRITE) ] = {
 533                [ C(RESULT_ACCESS) ] = 0x3f3ffc0002,
 534                [ C(RESULT_MISS)   ] = 0x3f3fc00002,
 535        },
 536 },
 537 [ C(NODE) ] = {
 538        [ C(OP_READ) ] = {
 539                [ C(RESULT_ACCESS) ] = 0x10c000001,
 540                [ C(RESULT_MISS)   ] = 0x3fb3000001,
 541        },
 542 },
 543};
 544
 545/*
 546 * Notes on the events:
 547 * - data reads do not include code reads (comparable to earlier tables)
 548 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 549 * - remote node access includes remote memory, remote cache, remote mmio.
 550 * - prefetches are not included in the counts.
 551 * - icache miss does not include decoded icache
 552 */
 553
 554#define SKL_DEMAND_DATA_RD              BIT_ULL(0)
 555#define SKL_DEMAND_RFO                  BIT_ULL(1)
 556#define SKL_ANY_RESPONSE                BIT_ULL(16)
 557#define SKL_SUPPLIER_NONE               BIT_ULL(17)
 558#define SKL_L3_MISS_LOCAL_DRAM          BIT_ULL(26)
 559#define SKL_L3_MISS_REMOTE_HOP0_DRAM    BIT_ULL(27)
 560#define SKL_L3_MISS_REMOTE_HOP1_DRAM    BIT_ULL(28)
 561#define SKL_L3_MISS_REMOTE_HOP2P_DRAM   BIT_ULL(29)
 562#define SKL_L3_MISS                     (SKL_L3_MISS_LOCAL_DRAM| \
 563                                         SKL_L3_MISS_REMOTE_HOP0_DRAM| \
 564                                         SKL_L3_MISS_REMOTE_HOP1_DRAM| \
 565                                         SKL_L3_MISS_REMOTE_HOP2P_DRAM)
 566#define SKL_SPL_HIT                     BIT_ULL(30)
 567#define SKL_SNOOP_NONE                  BIT_ULL(31)
 568#define SKL_SNOOP_NOT_NEEDED            BIT_ULL(32)
 569#define SKL_SNOOP_MISS                  BIT_ULL(33)
 570#define SKL_SNOOP_HIT_NO_FWD            BIT_ULL(34)
 571#define SKL_SNOOP_HIT_WITH_FWD          BIT_ULL(35)
 572#define SKL_SNOOP_HITM                  BIT_ULL(36)
 573#define SKL_SNOOP_NON_DRAM              BIT_ULL(37)
 574#define SKL_ANY_SNOOP                   (SKL_SPL_HIT|SKL_SNOOP_NONE| \
 575                                         SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
 576                                         SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
 577                                         SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
 578#define SKL_DEMAND_READ                 SKL_DEMAND_DATA_RD
 579#define SKL_SNOOP_DRAM                  (SKL_SNOOP_NONE| \
 580                                         SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
 581                                         SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
 582                                         SKL_SNOOP_HITM|SKL_SPL_HIT)
 583#define SKL_DEMAND_WRITE                SKL_DEMAND_RFO
 584#define SKL_LLC_ACCESS                  SKL_ANY_RESPONSE
 585#define SKL_L3_MISS_REMOTE              (SKL_L3_MISS_REMOTE_HOP0_DRAM| \
 586                                         SKL_L3_MISS_REMOTE_HOP1_DRAM| \
 587                                         SKL_L3_MISS_REMOTE_HOP2P_DRAM)
 588
 589static __initconst const u64 skl_hw_cache_event_ids
 590                                [PERF_COUNT_HW_CACHE_MAX]
 591                                [PERF_COUNT_HW_CACHE_OP_MAX]
 592                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 593{
 594 [ C(L1D ) ] = {
 595        [ C(OP_READ) ] = {
 596                [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_INST_RETIRED.ALL_LOADS */
 597                [ C(RESULT_MISS)   ] = 0x151,   /* L1D.REPLACEMENT */
 598        },
 599        [ C(OP_WRITE) ] = {
 600                [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_INST_RETIRED.ALL_STORES */
 601                [ C(RESULT_MISS)   ] = 0x0,
 602        },
 603        [ C(OP_PREFETCH) ] = {
 604                [ C(RESULT_ACCESS) ] = 0x0,
 605                [ C(RESULT_MISS)   ] = 0x0,
 606        },
 607 },
 608 [ C(L1I ) ] = {
 609        [ C(OP_READ) ] = {
 610                [ C(RESULT_ACCESS) ] = 0x0,
 611                [ C(RESULT_MISS)   ] = 0x283,   /* ICACHE_64B.MISS */
 612        },
 613        [ C(OP_WRITE) ] = {
 614                [ C(RESULT_ACCESS) ] = -1,
 615                [ C(RESULT_MISS)   ] = -1,
 616        },
 617        [ C(OP_PREFETCH) ] = {
 618                [ C(RESULT_ACCESS) ] = 0x0,
 619                [ C(RESULT_MISS)   ] = 0x0,
 620        },
 621 },
 622 [ C(LL  ) ] = {
 623        [ C(OP_READ) ] = {
 624                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 625                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 626        },
 627        [ C(OP_WRITE) ] = {
 628                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 629                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 630        },
 631        [ C(OP_PREFETCH) ] = {
 632                [ C(RESULT_ACCESS) ] = 0x0,
 633                [ C(RESULT_MISS)   ] = 0x0,
 634        },
 635 },
 636 [ C(DTLB) ] = {
 637        [ C(OP_READ) ] = {
 638                [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_INST_RETIRED.ALL_LOADS */
 639                [ C(RESULT_MISS)   ] = 0xe08,   /* DTLB_LOAD_MISSES.WALK_COMPLETED */
 640        },
 641        [ C(OP_WRITE) ] = {
 642                [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_INST_RETIRED.ALL_STORES */
 643                [ C(RESULT_MISS)   ] = 0xe49,   /* DTLB_STORE_MISSES.WALK_COMPLETED */
 644        },
 645        [ C(OP_PREFETCH) ] = {
 646                [ C(RESULT_ACCESS) ] = 0x0,
 647                [ C(RESULT_MISS)   ] = 0x0,
 648        },
 649 },
 650 [ C(ITLB) ] = {
 651        [ C(OP_READ) ] = {
 652                [ C(RESULT_ACCESS) ] = 0x2085,  /* ITLB_MISSES.STLB_HIT */
 653                [ C(RESULT_MISS)   ] = 0xe85,   /* ITLB_MISSES.WALK_COMPLETED */
 654        },
 655        [ C(OP_WRITE) ] = {
 656                [ C(RESULT_ACCESS) ] = -1,
 657                [ C(RESULT_MISS)   ] = -1,
 658        },
 659        [ C(OP_PREFETCH) ] = {
 660                [ C(RESULT_ACCESS) ] = -1,
 661                [ C(RESULT_MISS)   ] = -1,
 662        },
 663 },
 664 [ C(BPU ) ] = {
 665        [ C(OP_READ) ] = {
 666                [ C(RESULT_ACCESS) ] = 0xc4,    /* BR_INST_RETIRED.ALL_BRANCHES */
 667                [ C(RESULT_MISS)   ] = 0xc5,    /* BR_MISP_RETIRED.ALL_BRANCHES */
 668        },
 669        [ C(OP_WRITE) ] = {
 670                [ C(RESULT_ACCESS) ] = -1,
 671                [ C(RESULT_MISS)   ] = -1,
 672        },
 673        [ C(OP_PREFETCH) ] = {
 674                [ C(RESULT_ACCESS) ] = -1,
 675                [ C(RESULT_MISS)   ] = -1,
 676        },
 677 },
 678 [ C(NODE) ] = {
 679        [ C(OP_READ) ] = {
 680                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 681                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 682        },
 683        [ C(OP_WRITE) ] = {
 684                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 685                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 686        },
 687        [ C(OP_PREFETCH) ] = {
 688                [ C(RESULT_ACCESS) ] = 0x0,
 689                [ C(RESULT_MISS)   ] = 0x0,
 690        },
 691 },
 692};
 693
 694static __initconst const u64 skl_hw_cache_extra_regs
 695                                [PERF_COUNT_HW_CACHE_MAX]
 696                                [PERF_COUNT_HW_CACHE_OP_MAX]
 697                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 698{
 699 [ C(LL  ) ] = {
 700        [ C(OP_READ) ] = {
 701                [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
 702                                       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
 703                [ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
 704                                       SKL_L3_MISS|SKL_ANY_SNOOP|
 705                                       SKL_SUPPLIER_NONE,
 706        },
 707        [ C(OP_WRITE) ] = {
 708                [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
 709                                       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
 710                [ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
 711                                       SKL_L3_MISS|SKL_ANY_SNOOP|
 712                                       SKL_SUPPLIER_NONE,
 713        },
 714        [ C(OP_PREFETCH) ] = {
 715                [ C(RESULT_ACCESS) ] = 0x0,
 716                [ C(RESULT_MISS)   ] = 0x0,
 717        },
 718 },
 719 [ C(NODE) ] = {
 720        [ C(OP_READ) ] = {
 721                [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
 722                                       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
 723                [ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
 724                                       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
 725        },
 726        [ C(OP_WRITE) ] = {
 727                [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
 728                                       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
 729                [ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
 730                                       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
 731        },
 732        [ C(OP_PREFETCH) ] = {
 733                [ C(RESULT_ACCESS) ] = 0x0,
 734                [ C(RESULT_MISS)   ] = 0x0,
 735        },
 736 },
 737};
 738
 739#define SNB_DMND_DATA_RD        (1ULL << 0)
 740#define SNB_DMND_RFO            (1ULL << 1)
 741#define SNB_DMND_IFETCH         (1ULL << 2)
 742#define SNB_DMND_WB             (1ULL << 3)
 743#define SNB_PF_DATA_RD          (1ULL << 4)
 744#define SNB_PF_RFO              (1ULL << 5)
 745#define SNB_PF_IFETCH           (1ULL << 6)
 746#define SNB_LLC_DATA_RD         (1ULL << 7)
 747#define SNB_LLC_RFO             (1ULL << 8)
 748#define SNB_LLC_IFETCH          (1ULL << 9)
 749#define SNB_BUS_LOCKS           (1ULL << 10)
 750#define SNB_STRM_ST             (1ULL << 11)
 751#define SNB_OTHER               (1ULL << 15)
 752#define SNB_RESP_ANY            (1ULL << 16)
 753#define SNB_NO_SUPP             (1ULL << 17)
 754#define SNB_LLC_HITM            (1ULL << 18)
 755#define SNB_LLC_HITE            (1ULL << 19)
 756#define SNB_LLC_HITS            (1ULL << 20)
 757#define SNB_LLC_HITF            (1ULL << 21)
 758#define SNB_LOCAL               (1ULL << 22)
 759#define SNB_REMOTE              (0xffULL << 23)
 760#define SNB_SNP_NONE            (1ULL << 31)
 761#define SNB_SNP_NOT_NEEDED      (1ULL << 32)
 762#define SNB_SNP_MISS            (1ULL << 33)
 763#define SNB_NO_FWD              (1ULL << 34)
 764#define SNB_SNP_FWD             (1ULL << 35)
 765#define SNB_HITM                (1ULL << 36)
 766#define SNB_NON_DRAM            (1ULL << 37)
 767
 768#define SNB_DMND_READ           (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
 769#define SNB_DMND_WRITE          (SNB_DMND_RFO|SNB_LLC_RFO)
 770#define SNB_DMND_PREFETCH       (SNB_PF_DATA_RD|SNB_PF_RFO)
 771
 772#define SNB_SNP_ANY             (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
 773                                 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
 774                                 SNB_HITM)
 775
 776#define SNB_DRAM_ANY            (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
 777#define SNB_DRAM_REMOTE         (SNB_REMOTE|SNB_SNP_ANY)
 778
 779#define SNB_L3_ACCESS           SNB_RESP_ANY
 780#define SNB_L3_MISS             (SNB_DRAM_ANY|SNB_NON_DRAM)
 781
 782static __initconst const u64 snb_hw_cache_extra_regs
 783                                [PERF_COUNT_HW_CACHE_MAX]
 784                                [PERF_COUNT_HW_CACHE_OP_MAX]
 785                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 786{
 787 [ C(LL  ) ] = {
 788        [ C(OP_READ) ] = {
 789                [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
 790                [ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
 791        },
 792        [ C(OP_WRITE) ] = {
 793                [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
 794                [ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
 795        },
 796        [ C(OP_PREFETCH) ] = {
 797                [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
 798                [ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
 799        },
 800 },
 801 [ C(NODE) ] = {
 802        [ C(OP_READ) ] = {
 803                [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
 804                [ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
 805        },
 806        [ C(OP_WRITE) ] = {
 807                [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
 808                [ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
 809        },
 810        [ C(OP_PREFETCH) ] = {
 811                [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
 812                [ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
 813        },
 814 },
 815};
 816
 817static __initconst const u64 snb_hw_cache_event_ids
 818                                [PERF_COUNT_HW_CACHE_MAX]
 819                                [PERF_COUNT_HW_CACHE_OP_MAX]
 820                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 821{
 822 [ C(L1D) ] = {
 823        [ C(OP_READ) ] = {
 824                [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS        */
 825                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT              */
 826        },
 827        [ C(OP_WRITE) ] = {
 828                [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES       */
 829                [ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT        */
 830        },
 831        [ C(OP_PREFETCH) ] = {
 832                [ C(RESULT_ACCESS) ] = 0x0,
 833                [ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS          */
 834        },
 835 },
 836 [ C(L1I ) ] = {
 837        [ C(OP_READ) ] = {
 838                [ C(RESULT_ACCESS) ] = 0x0,
 839                [ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
 840        },
 841        [ C(OP_WRITE) ] = {
 842                [ C(RESULT_ACCESS) ] = -1,
 843                [ C(RESULT_MISS)   ] = -1,
 844        },
 845        [ C(OP_PREFETCH) ] = {
 846                [ C(RESULT_ACCESS) ] = 0x0,
 847                [ C(RESULT_MISS)   ] = 0x0,
 848        },
 849 },
 850 [ C(LL  ) ] = {
 851        [ C(OP_READ) ] = {
 852                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
 853                [ C(RESULT_ACCESS) ] = 0x01b7,
 854                /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
 855                [ C(RESULT_MISS)   ] = 0x01b7,
 856        },
 857        [ C(OP_WRITE) ] = {
 858                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
 859                [ C(RESULT_ACCESS) ] = 0x01b7,
 860                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
 861                [ C(RESULT_MISS)   ] = 0x01b7,
 862        },
 863        [ C(OP_PREFETCH) ] = {
 864                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
 865                [ C(RESULT_ACCESS) ] = 0x01b7,
 866                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
 867                [ C(RESULT_MISS)   ] = 0x01b7,
 868        },
 869 },
 870 [ C(DTLB) ] = {
 871        [ C(OP_READ) ] = {
 872                [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
 873                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
 874        },
 875        [ C(OP_WRITE) ] = {
 876                [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
 877                [ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
 878        },
 879        [ C(OP_PREFETCH) ] = {
 880                [ C(RESULT_ACCESS) ] = 0x0,
 881                [ C(RESULT_MISS)   ] = 0x0,
 882        },
 883 },
 884 [ C(ITLB) ] = {
 885        [ C(OP_READ) ] = {
 886                [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT         */
 887                [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK    */
 888        },
 889        [ C(OP_WRITE) ] = {
 890                [ C(RESULT_ACCESS) ] = -1,
 891                [ C(RESULT_MISS)   ] = -1,
 892        },
 893        [ C(OP_PREFETCH) ] = {
 894                [ C(RESULT_ACCESS) ] = -1,
 895                [ C(RESULT_MISS)   ] = -1,
 896        },
 897 },
 898 [ C(BPU ) ] = {
 899        [ C(OP_READ) ] = {
 900                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
 901                [ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
 902        },
 903        [ C(OP_WRITE) ] = {
 904                [ C(RESULT_ACCESS) ] = -1,
 905                [ C(RESULT_MISS)   ] = -1,
 906        },
 907        [ C(OP_PREFETCH) ] = {
 908                [ C(RESULT_ACCESS) ] = -1,
 909                [ C(RESULT_MISS)   ] = -1,
 910        },
 911 },
 912 [ C(NODE) ] = {
 913        [ C(OP_READ) ] = {
 914                [ C(RESULT_ACCESS) ] = 0x01b7,
 915                [ C(RESULT_MISS)   ] = 0x01b7,
 916        },
 917        [ C(OP_WRITE) ] = {
 918                [ C(RESULT_ACCESS) ] = 0x01b7,
 919                [ C(RESULT_MISS)   ] = 0x01b7,
 920        },
 921        [ C(OP_PREFETCH) ] = {
 922                [ C(RESULT_ACCESS) ] = 0x01b7,
 923                [ C(RESULT_MISS)   ] = 0x01b7,
 924        },
 925 },
 926
 927};
 928
 929/*
 930 * Notes on the events:
 931 * - data reads do not include code reads (comparable to earlier tables)
 932 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 933 * - remote node access includes remote memory, remote cache, remote mmio.
 934 * - prefetches are not included in the counts because they are not
 935 *   reliably counted.
 936 */
 937
 938#define HSW_DEMAND_DATA_RD              BIT_ULL(0)
 939#define HSW_DEMAND_RFO                  BIT_ULL(1)
 940#define HSW_ANY_RESPONSE                BIT_ULL(16)
 941#define HSW_SUPPLIER_NONE               BIT_ULL(17)
 942#define HSW_L3_MISS_LOCAL_DRAM          BIT_ULL(22)
 943#define HSW_L3_MISS_REMOTE_HOP0         BIT_ULL(27)
 944#define HSW_L3_MISS_REMOTE_HOP1         BIT_ULL(28)
 945#define HSW_L3_MISS_REMOTE_HOP2P        BIT_ULL(29)
 946#define HSW_L3_MISS                     (HSW_L3_MISS_LOCAL_DRAM| \
 947                                         HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
 948                                         HSW_L3_MISS_REMOTE_HOP2P)
 949#define HSW_SNOOP_NONE                  BIT_ULL(31)
 950#define HSW_SNOOP_NOT_NEEDED            BIT_ULL(32)
 951#define HSW_SNOOP_MISS                  BIT_ULL(33)
 952#define HSW_SNOOP_HIT_NO_FWD            BIT_ULL(34)
 953#define HSW_SNOOP_HIT_WITH_FWD          BIT_ULL(35)
 954#define HSW_SNOOP_HITM                  BIT_ULL(36)
 955#define HSW_SNOOP_NON_DRAM              BIT_ULL(37)
 956#define HSW_ANY_SNOOP                   (HSW_SNOOP_NONE| \
 957                                         HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
 958                                         HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
 959                                         HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
 960#define HSW_SNOOP_DRAM                  (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
 961#define HSW_DEMAND_READ                 HSW_DEMAND_DATA_RD
 962#define HSW_DEMAND_WRITE                HSW_DEMAND_RFO
 963#define HSW_L3_MISS_REMOTE              (HSW_L3_MISS_REMOTE_HOP0|\
 964                                         HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
 965#define HSW_LLC_ACCESS                  HSW_ANY_RESPONSE
 966
 967#define BDW_L3_MISS_LOCAL               BIT(26)
 968#define BDW_L3_MISS                     (BDW_L3_MISS_LOCAL| \
 969                                         HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
 970                                         HSW_L3_MISS_REMOTE_HOP2P)
 971
 972
 973static __initconst const u64 hsw_hw_cache_event_ids
 974                                [PERF_COUNT_HW_CACHE_MAX]
 975                                [PERF_COUNT_HW_CACHE_OP_MAX]
 976                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 977{
 978 [ C(L1D ) ] = {
 979        [ C(OP_READ) ] = {
 980                [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_UOPS_RETIRED.ALL_LOADS */
 981                [ C(RESULT_MISS)   ] = 0x151,   /* L1D.REPLACEMENT */
 982        },
 983        [ C(OP_WRITE) ] = {
 984                [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_UOPS_RETIRED.ALL_STORES */
 985                [ C(RESULT_MISS)   ] = 0x0,
 986        },
 987        [ C(OP_PREFETCH) ] = {
 988                [ C(RESULT_ACCESS) ] = 0x0,
 989                [ C(RESULT_MISS)   ] = 0x0,
 990        },
 991 },
 992 [ C(L1I ) ] = {
 993        [ C(OP_READ) ] = {
 994                [ C(RESULT_ACCESS) ] = 0x0,
 995                [ C(RESULT_MISS)   ] = 0x280,   /* ICACHE.MISSES */
 996        },
 997        [ C(OP_WRITE) ] = {
 998                [ C(RESULT_ACCESS) ] = -1,
 999                [ C(RESULT_MISS)   ] = -1,
1000        },
1001        [ C(OP_PREFETCH) ] = {
1002                [ C(RESULT_ACCESS) ] = 0x0,
1003                [ C(RESULT_MISS)   ] = 0x0,
1004        },
1005 },
1006 [ C(LL  ) ] = {
1007        [ C(OP_READ) ] = {
1008                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
1009                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
1010        },
1011        [ C(OP_WRITE) ] = {
1012                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
1013                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
1014        },
1015        [ C(OP_PREFETCH) ] = {
1016                [ C(RESULT_ACCESS) ] = 0x0,
1017                [ C(RESULT_MISS)   ] = 0x0,
1018        },
1019 },
1020 [ C(DTLB) ] = {
1021        [ C(OP_READ) ] = {
1022                [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_UOPS_RETIRED.ALL_LOADS */
1023                [ C(RESULT_MISS)   ] = 0x108,   /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
1024        },
1025        [ C(OP_WRITE) ] = {
1026                [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_UOPS_RETIRED.ALL_STORES */
1027                [ C(RESULT_MISS)   ] = 0x149,   /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
1028        },
1029        [ C(OP_PREFETCH) ] = {
1030                [ C(RESULT_ACCESS) ] = 0x0,
1031                [ C(RESULT_MISS)   ] = 0x0,
1032        },
1033 },
1034 [ C(ITLB) ] = {
1035        [ C(OP_READ) ] = {
1036                [ C(RESULT_ACCESS) ] = 0x6085,  /* ITLB_MISSES.STLB_HIT */
1037                [ C(RESULT_MISS)   ] = 0x185,   /* ITLB_MISSES.MISS_CAUSES_A_WALK */
1038        },
1039        [ C(OP_WRITE) ] = {
1040                [ C(RESULT_ACCESS) ] = -1,
1041                [ C(RESULT_MISS)   ] = -1,
1042        },
1043        [ C(OP_PREFETCH) ] = {
1044                [ C(RESULT_ACCESS) ] = -1,
1045                [ C(RESULT_MISS)   ] = -1,
1046        },
1047 },
1048 [ C(BPU ) ] = {
1049        [ C(OP_READ) ] = {
1050                [ C(RESULT_ACCESS) ] = 0xc4,    /* BR_INST_RETIRED.ALL_BRANCHES */
1051                [ C(RESULT_MISS)   ] = 0xc5,    /* BR_MISP_RETIRED.ALL_BRANCHES */
1052        },
1053        [ C(OP_WRITE) ] = {
1054                [ C(RESULT_ACCESS) ] = -1,
1055                [ C(RESULT_MISS)   ] = -1,
1056        },
1057        [ C(OP_PREFETCH) ] = {
1058                [ C(RESULT_ACCESS) ] = -1,
1059                [ C(RESULT_MISS)   ] = -1,
1060        },
1061 },
1062 [ C(NODE) ] = {
1063        [ C(OP_READ) ] = {
1064                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
1065                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
1066        },
1067        [ C(OP_WRITE) ] = {
1068                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
1069                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
1070        },
1071        [ C(OP_PREFETCH) ] = {
1072                [ C(RESULT_ACCESS) ] = 0x0,
1073                [ C(RESULT_MISS)   ] = 0x0,
1074        },
1075 },
1076};
1077
1078static __initconst const u64 hsw_hw_cache_extra_regs
1079                                [PERF_COUNT_HW_CACHE_MAX]
1080                                [PERF_COUNT_HW_CACHE_OP_MAX]
1081                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1082{
1083 [ C(LL  ) ] = {
1084        [ C(OP_READ) ] = {
1085                [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
1086                                       HSW_LLC_ACCESS,
1087                [ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
1088                                       HSW_L3_MISS|HSW_ANY_SNOOP,
1089        },
1090        [ C(OP_WRITE) ] = {
1091                [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
1092                                       HSW_LLC_ACCESS,
1093                [ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
1094                                       HSW_L3_MISS|HSW_ANY_SNOOP,
1095        },
1096        [ C(OP_PREFETCH) ] = {
1097                [ C(RESULT_ACCESS) ] = 0x0,
1098                [ C(RESULT_MISS)   ] = 0x0,
1099        },
1100 },
1101 [ C(NODE) ] = {
1102        [ C(OP_READ) ] = {
1103                [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
1104                                       HSW_L3_MISS_LOCAL_DRAM|
1105                                       HSW_SNOOP_DRAM,
1106                [ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
1107                                       HSW_L3_MISS_REMOTE|
1108                                       HSW_SNOOP_DRAM,
1109        },
1110        [ C(OP_WRITE) ] = {
1111                [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
1112                                       HSW_L3_MISS_LOCAL_DRAM|
1113                                       HSW_SNOOP_DRAM,
1114                [ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
1115                                       HSW_L3_MISS_REMOTE|
1116                                       HSW_SNOOP_DRAM,
1117        },
1118        [ C(OP_PREFETCH) ] = {
1119                [ C(RESULT_ACCESS) ] = 0x0,
1120                [ C(RESULT_MISS)   ] = 0x0,
1121        },
1122 },
1123};
1124
1125static __initconst const u64 westmere_hw_cache_event_ids
1126                                [PERF_COUNT_HW_CACHE_MAX]
1127                                [PERF_COUNT_HW_CACHE_OP_MAX]
1128                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1129{
1130 [ C(L1D) ] = {
1131        [ C(OP_READ) ] = {
1132                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
1133                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
1134        },
1135        [ C(OP_WRITE) ] = {
 1136                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
1137                [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
1138        },
1139        [ C(OP_PREFETCH) ] = {
1140                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
1141                [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
1142        },
1143 },
1144 [ C(L1I ) ] = {
1145        [ C(OP_READ) ] = {
1146                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
1147                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
1148        },
1149        [ C(OP_WRITE) ] = {
1150                [ C(RESULT_ACCESS) ] = -1,
1151                [ C(RESULT_MISS)   ] = -1,
1152        },
1153        [ C(OP_PREFETCH) ] = {
1154                [ C(RESULT_ACCESS) ] = 0x0,
1155                [ C(RESULT_MISS)   ] = 0x0,
1156        },
1157 },
1158 [ C(LL  ) ] = {
1159        [ C(OP_READ) ] = {
1160                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1161                [ C(RESULT_ACCESS) ] = 0x01b7,
1162                /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1163                [ C(RESULT_MISS)   ] = 0x01b7,
1164        },
1165        /*
1166         * Use RFO, not WRITEBACK, because a write miss would typically occur
1167         * on RFO.
1168         */
1169        [ C(OP_WRITE) ] = {
1170                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1171                [ C(RESULT_ACCESS) ] = 0x01b7,
1172                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1173                [ C(RESULT_MISS)   ] = 0x01b7,
1174        },
1175        [ C(OP_PREFETCH) ] = {
1176                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1177                [ C(RESULT_ACCESS) ] = 0x01b7,
1178                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1179                [ C(RESULT_MISS)   ] = 0x01b7,
1180        },
1181 },
1182 [ C(DTLB) ] = {
1183        [ C(OP_READ) ] = {
1184                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
1185                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
1186        },
1187        [ C(OP_WRITE) ] = {
 1188                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
1189                [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
1190        },
1191        [ C(OP_PREFETCH) ] = {
1192                [ C(RESULT_ACCESS) ] = 0x0,
1193                [ C(RESULT_MISS)   ] = 0x0,
1194        },
1195 },
1196 [ C(ITLB) ] = {
1197        [ C(OP_READ) ] = {
1198                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
1199                [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
1200        },
1201        [ C(OP_WRITE) ] = {
1202                [ C(RESULT_ACCESS) ] = -1,
1203                [ C(RESULT_MISS)   ] = -1,
1204        },
1205        [ C(OP_PREFETCH) ] = {
1206                [ C(RESULT_ACCESS) ] = -1,
1207                [ C(RESULT_MISS)   ] = -1,
1208        },
1209 },
1210 [ C(BPU ) ] = {
1211        [ C(OP_READ) ] = {
1212                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1213                [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
1214        },
1215        [ C(OP_WRITE) ] = {
1216                [ C(RESULT_ACCESS) ] = -1,
1217                [ C(RESULT_MISS)   ] = -1,
1218        },
1219        [ C(OP_PREFETCH) ] = {
1220                [ C(RESULT_ACCESS) ] = -1,
1221                [ C(RESULT_MISS)   ] = -1,
1222        },
1223 },
1224 [ C(NODE) ] = {
1225        [ C(OP_READ) ] = {
1226                [ C(RESULT_ACCESS) ] = 0x01b7,
1227                [ C(RESULT_MISS)   ] = 0x01b7,
1228        },
1229        [ C(OP_WRITE) ] = {
1230                [ C(RESULT_ACCESS) ] = 0x01b7,
1231                [ C(RESULT_MISS)   ] = 0x01b7,
1232        },
1233        [ C(OP_PREFETCH) ] = {
1234                [ C(RESULT_ACCESS) ] = 0x01b7,
1235                [ C(RESULT_MISS)   ] = 0x01b7,
1236        },
1237 },
1238};
1239
1240/*
1241 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
1242 * See IA32 SDM Vol 3B 30.6.1.3
1243 */
1244
1245#define NHM_DMND_DATA_RD        (1 << 0)
1246#define NHM_DMND_RFO            (1 << 1)
1247#define NHM_DMND_IFETCH         (1 << 2)
1248#define NHM_DMND_WB             (1 << 3)
1249#define NHM_PF_DATA_RD          (1 << 4)
1250#define NHM_PF_DATA_RFO         (1 << 5)
1251#define NHM_PF_IFETCH           (1 << 6)
1252#define NHM_OFFCORE_OTHER       (1 << 7)
1253#define NHM_UNCORE_HIT          (1 << 8)
1254#define NHM_OTHER_CORE_HIT_SNP  (1 << 9)
1255#define NHM_OTHER_CORE_HITM     (1 << 10)
1256                                /* reserved */
1257#define NHM_REMOTE_CACHE_FWD    (1 << 12)
1258#define NHM_REMOTE_DRAM         (1 << 13)
1259#define NHM_LOCAL_DRAM          (1 << 14)
1260#define NHM_NON_DRAM            (1 << 15)
1261
1262#define NHM_LOCAL               (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
1263#define NHM_REMOTE              (NHM_REMOTE_DRAM)
1264
1265#define NHM_DMND_READ           (NHM_DMND_DATA_RD)
1266#define NHM_DMND_WRITE          (NHM_DMND_RFO|NHM_DMND_WB)
1267#define NHM_DMND_PREFETCH       (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
1268
1269#define NHM_L3_HIT      (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
1270#define NHM_L3_MISS     (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
1271#define NHM_L3_ACCESS   (NHM_L3_HIT|NHM_L3_MISS)
1272
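/*
 * Editor's sketch (illustrative, not part of this file): the NHM_* bits
 * above are the values that end up in MSR_OFFCORE_RSP_* through the
 * extra-reg machinery.  From user space, an OFFCORE_RESPONSE event selects
 * event 0xb7/umask 0x01 via attr.config and passes the request/response
 * bits via attr.config1, so a demand-read LLC-miss filter would look
 * roughly like:
 *
 *	struct perf_event_attr attr = {
 *		.type    = PERF_TYPE_RAW,
 *		.size    = sizeof(attr),
 *		.config  = 0x01b7,			// OFFCORE_RESPONSE_0
 *		.config1 = NHM_DMND_READ | NHM_L3_MISS,	// request | response bits
 *	};
 *
 * The table below holds exactly such config1 masks for the generic cache
 * events.
 */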
1273static __initconst const u64 nehalem_hw_cache_extra_regs
1274                                [PERF_COUNT_HW_CACHE_MAX]
1275                                [PERF_COUNT_HW_CACHE_OP_MAX]
1276                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1277{
1278 [ C(LL  ) ] = {
1279        [ C(OP_READ) ] = {
1280                [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
1281                [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
1282        },
1283        [ C(OP_WRITE) ] = {
1284                [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
1285                [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
1286        },
1287        [ C(OP_PREFETCH) ] = {
1288                [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
1289                [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
1290        },
1291 },
1292 [ C(NODE) ] = {
1293        [ C(OP_READ) ] = {
1294                [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
1295                [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
1296        },
1297        [ C(OP_WRITE) ] = {
1298                [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
1299                [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
1300        },
1301        [ C(OP_PREFETCH) ] = {
1302                [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
1303                [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
1304        },
1305 },
1306};
1307
1308static __initconst const u64 nehalem_hw_cache_event_ids
1309                                [PERF_COUNT_HW_CACHE_MAX]
1310                                [PERF_COUNT_HW_CACHE_OP_MAX]
1311                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1312{
1313 [ C(L1D) ] = {
1314        [ C(OP_READ) ] = {
1315                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
1316                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
1317        },
1318        [ C(OP_WRITE) ] = {
1319                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
1320                [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
1321        },
1322        [ C(OP_PREFETCH) ] = {
1323                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
1324                [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
1325        },
1326 },
1327 [ C(L1I ) ] = {
1328        [ C(OP_READ) ] = {
1329                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
1330                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
1331        },
1332        [ C(OP_WRITE) ] = {
1333                [ C(RESULT_ACCESS) ] = -1,
1334                [ C(RESULT_MISS)   ] = -1,
1335        },
1336        [ C(OP_PREFETCH) ] = {
1337                [ C(RESULT_ACCESS) ] = 0x0,
1338                [ C(RESULT_MISS)   ] = 0x0,
1339        },
1340 },
1341 [ C(LL  ) ] = {
1342        [ C(OP_READ) ] = {
1343                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1344                [ C(RESULT_ACCESS) ] = 0x01b7,
1345                /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1346                [ C(RESULT_MISS)   ] = 0x01b7,
1347        },
1348        /*
1349         * Use RFO, not WRITEBACK, because a write miss would typically occur
1350         * on RFO.
1351         */
1352        [ C(OP_WRITE) ] = {
1353                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1354                [ C(RESULT_ACCESS) ] = 0x01b7,
1355                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1356                [ C(RESULT_MISS)   ] = 0x01b7,
1357        },
1358        [ C(OP_PREFETCH) ] = {
1359                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1360                [ C(RESULT_ACCESS) ] = 0x01b7,
1361                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1362                [ C(RESULT_MISS)   ] = 0x01b7,
1363        },
1364 },
1365 [ C(DTLB) ] = {
1366        [ C(OP_READ) ] = {
1367                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
1368                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
1369        },
1370        [ C(OP_WRITE) ] = {
1371                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
1372                [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
1373        },
1374        [ C(OP_PREFETCH) ] = {
1375                [ C(RESULT_ACCESS) ] = 0x0,
1376                [ C(RESULT_MISS)   ] = 0x0,
1377        },
1378 },
1379 [ C(ITLB) ] = {
1380        [ C(OP_READ) ] = {
1381                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
1382                [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
1383        },
1384        [ C(OP_WRITE) ] = {
1385                [ C(RESULT_ACCESS) ] = -1,
1386                [ C(RESULT_MISS)   ] = -1,
1387        },
1388        [ C(OP_PREFETCH) ] = {
1389                [ C(RESULT_ACCESS) ] = -1,
1390                [ C(RESULT_MISS)   ] = -1,
1391        },
1392 },
1393 [ C(BPU ) ] = {
1394        [ C(OP_READ) ] = {
1395                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1396                [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
1397        },
1398        [ C(OP_WRITE) ] = {
1399                [ C(RESULT_ACCESS) ] = -1,
1400                [ C(RESULT_MISS)   ] = -1,
1401        },
1402        [ C(OP_PREFETCH) ] = {
1403                [ C(RESULT_ACCESS) ] = -1,
1404                [ C(RESULT_MISS)   ] = -1,
1405        },
1406 },
1407 [ C(NODE) ] = {
1408        [ C(OP_READ) ] = {
1409                [ C(RESULT_ACCESS) ] = 0x01b7,
1410                [ C(RESULT_MISS)   ] = 0x01b7,
1411        },
1412        [ C(OP_WRITE) ] = {
1413                [ C(RESULT_ACCESS) ] = 0x01b7,
1414                [ C(RESULT_MISS)   ] = 0x01b7,
1415        },
1416        [ C(OP_PREFETCH) ] = {
1417                [ C(RESULT_ACCESS) ] = 0x01b7,
1418                [ C(RESULT_MISS)   ] = 0x01b7,
1419        },
1420 },
1421};
1422
1423static __initconst const u64 core2_hw_cache_event_ids
1424                                [PERF_COUNT_HW_CACHE_MAX]
1425                                [PERF_COUNT_HW_CACHE_OP_MAX]
1426                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1427{
1428 [ C(L1D) ] = {
1429        [ C(OP_READ) ] = {
1430                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
1431                [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
1432        },
1433        [ C(OP_WRITE) ] = {
1434                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
1435                [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
1436        },
1437        [ C(OP_PREFETCH) ] = {
1438                [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
1439                [ C(RESULT_MISS)   ] = 0,
1440        },
1441 },
1442 [ C(L1I ) ] = {
1443        [ C(OP_READ) ] = {
1444                [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
1445                [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
1446        },
1447        [ C(OP_WRITE) ] = {
1448                [ C(RESULT_ACCESS) ] = -1,
1449                [ C(RESULT_MISS)   ] = -1,
1450        },
1451        [ C(OP_PREFETCH) ] = {
1452                [ C(RESULT_ACCESS) ] = 0,
1453                [ C(RESULT_MISS)   ] = 0,
1454        },
1455 },
1456 [ C(LL  ) ] = {
1457        [ C(OP_READ) ] = {
1458                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
1459                [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
1460        },
1461        [ C(OP_WRITE) ] = {
1462                [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
1463                [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
1464        },
1465        [ C(OP_PREFETCH) ] = {
1466                [ C(RESULT_ACCESS) ] = 0,
1467                [ C(RESULT_MISS)   ] = 0,
1468        },
1469 },
1470 [ C(DTLB) ] = {
1471        [ C(OP_READ) ] = {
1472                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
1473                [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
1474        },
1475        [ C(OP_WRITE) ] = {
1476                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
1477                [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
1478        },
1479        [ C(OP_PREFETCH) ] = {
1480                [ C(RESULT_ACCESS) ] = 0,
1481                [ C(RESULT_MISS)   ] = 0,
1482        },
1483 },
1484 [ C(ITLB) ] = {
1485        [ C(OP_READ) ] = {
1486                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
1487                [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
1488        },
1489        [ C(OP_WRITE) ] = {
1490                [ C(RESULT_ACCESS) ] = -1,
1491                [ C(RESULT_MISS)   ] = -1,
1492        },
1493        [ C(OP_PREFETCH) ] = {
1494                [ C(RESULT_ACCESS) ] = -1,
1495                [ C(RESULT_MISS)   ] = -1,
1496        },
1497 },
1498 [ C(BPU ) ] = {
1499        [ C(OP_READ) ] = {
1500                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
1501                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
1502        },
1503        [ C(OP_WRITE) ] = {
1504                [ C(RESULT_ACCESS) ] = -1,
1505                [ C(RESULT_MISS)   ] = -1,
1506        },
1507        [ C(OP_PREFETCH) ] = {
1508                [ C(RESULT_ACCESS) ] = -1,
1509                [ C(RESULT_MISS)   ] = -1,
1510        },
1511 },
1512};
1513
1514static __initconst const u64 atom_hw_cache_event_ids
1515                                [PERF_COUNT_HW_CACHE_MAX]
1516                                [PERF_COUNT_HW_CACHE_OP_MAX]
1517                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1518{
1519 [ C(L1D) ] = {
1520        [ C(OP_READ) ] = {
1521                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
1522                [ C(RESULT_MISS)   ] = 0,
1523        },
1524        [ C(OP_WRITE) ] = {
1525                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
1526                [ C(RESULT_MISS)   ] = 0,
1527        },
1528        [ C(OP_PREFETCH) ] = {
1529                [ C(RESULT_ACCESS) ] = 0x0,
1530                [ C(RESULT_MISS)   ] = 0,
1531        },
1532 },
1533 [ C(L1I ) ] = {
1534        [ C(OP_READ) ] = {
1535                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
1536                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
1537        },
1538        [ C(OP_WRITE) ] = {
1539                [ C(RESULT_ACCESS) ] = -1,
1540                [ C(RESULT_MISS)   ] = -1,
1541        },
1542        [ C(OP_PREFETCH) ] = {
1543                [ C(RESULT_ACCESS) ] = 0,
1544                [ C(RESULT_MISS)   ] = 0,
1545        },
1546 },
1547 [ C(LL  ) ] = {
1548        [ C(OP_READ) ] = {
1549                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
1550                [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
1551        },
1552        [ C(OP_WRITE) ] = {
1553                [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
1554                [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
1555        },
1556        [ C(OP_PREFETCH) ] = {
1557                [ C(RESULT_ACCESS) ] = 0,
1558                [ C(RESULT_MISS)   ] = 0,
1559        },
1560 },
1561 [ C(DTLB) ] = {
1562        [ C(OP_READ) ] = {
1563                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
1564                [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
1565        },
1566        [ C(OP_WRITE) ] = {
1567                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
1568                [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
1569        },
1570        [ C(OP_PREFETCH) ] = {
1571                [ C(RESULT_ACCESS) ] = 0,
1572                [ C(RESULT_MISS)   ] = 0,
1573        },
1574 },
1575 [ C(ITLB) ] = {
1576        [ C(OP_READ) ] = {
1577                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
1578                [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
1579        },
1580        [ C(OP_WRITE) ] = {
1581                [ C(RESULT_ACCESS) ] = -1,
1582                [ C(RESULT_MISS)   ] = -1,
1583        },
1584        [ C(OP_PREFETCH) ] = {
1585                [ C(RESULT_ACCESS) ] = -1,
1586                [ C(RESULT_MISS)   ] = -1,
1587        },
1588 },
1589 [ C(BPU ) ] = {
1590        [ C(OP_READ) ] = {
1591                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
1592                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
1593        },
1594        [ C(OP_WRITE) ] = {
1595                [ C(RESULT_ACCESS) ] = -1,
1596                [ C(RESULT_MISS)   ] = -1,
1597        },
1598        [ C(OP_PREFETCH) ] = {
1599                [ C(RESULT_ACCESS) ] = -1,
1600                [ C(RESULT_MISS)   ] = -1,
1601        },
1602 },
1603};
1604
1605EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1606EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
1607/* no_alloc_cycles.not_delivered */
1608EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1609               "event=0xca,umask=0x50");
1610EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1611/* uops_retired.all */
1612EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1613               "event=0xc2,umask=0x10");
1614/* uops_retired.all */
1615EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1616               "event=0xc2,umask=0x10");
1617
1618static struct attribute *slm_events_attrs[] = {
1619        EVENT_PTR(td_total_slots_slm),
1620        EVENT_PTR(td_total_slots_scale_slm),
1621        EVENT_PTR(td_fetch_bubbles_slm),
1622        EVENT_PTR(td_fetch_bubbles_scale_slm),
1623        EVENT_PTR(td_slots_issued_slm),
1624        EVENT_PTR(td_slots_retired_slm),
1625        NULL
1626};
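
/*
 * Editor's note (illustrative, not part of this file): attribute arrays
 * like slm_events_attrs above are exported through the PMU's "events"
 * sysfs group, so the aliases show up as files such as
 * /sys/bus/event_source/devices/cpu/events/topdown-total-slots (with the
 * matching ".scale" file) and can be grouped from user space, roughly:
 *
 *	perf stat -e '{cpu/topdown-total-slots/,cpu/topdown-fetch-bubbles/,
 *		       cpu/topdown-slots-issued/,cpu/topdown-slots-retired/}' -a sleep 1
 */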
1627
1628static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1629{
1630        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1631        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1632        INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1633        EVENT_EXTRA_END
1634};
1635
1636#define SLM_DMND_READ           SNB_DMND_DATA_RD
1637#define SLM_DMND_WRITE          SNB_DMND_RFO
1638#define SLM_DMND_PREFETCH       (SNB_PF_DATA_RD|SNB_PF_RFO)
1639
1640#define SLM_SNP_ANY             (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1641#define SLM_LLC_ACCESS          SNB_RESP_ANY
1642#define SLM_LLC_MISS            (SLM_SNP_ANY|SNB_NON_DRAM)
1643
1644static __initconst const u64 slm_hw_cache_extra_regs
1645                                [PERF_COUNT_HW_CACHE_MAX]
1646                                [PERF_COUNT_HW_CACHE_OP_MAX]
1647                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1648{
1649 [ C(LL  ) ] = {
1650        [ C(OP_READ) ] = {
1651                [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1652                [ C(RESULT_MISS)   ] = 0,
1653        },
1654        [ C(OP_WRITE) ] = {
1655                [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1656                [ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1657        },
1658        [ C(OP_PREFETCH) ] = {
1659                [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1660                [ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1661        },
1662 },
1663};
1664
1665static __initconst const u64 slm_hw_cache_event_ids
1666                                [PERF_COUNT_HW_CACHE_MAX]
1667                                [PERF_COUNT_HW_CACHE_OP_MAX]
1668                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1669{
1670 [ C(L1D) ] = {
1671        [ C(OP_READ) ] = {
1672                [ C(RESULT_ACCESS) ] = 0,
1673                [ C(RESULT_MISS)   ] = 0x0104, /* LD_DCU_MISS */
1674        },
1675        [ C(OP_WRITE) ] = {
1676                [ C(RESULT_ACCESS) ] = 0,
1677                [ C(RESULT_MISS)   ] = 0,
1678        },
1679        [ C(OP_PREFETCH) ] = {
1680                [ C(RESULT_ACCESS) ] = 0,
1681                [ C(RESULT_MISS)   ] = 0,
1682        },
1683 },
1684 [ C(L1I ) ] = {
1685        [ C(OP_READ) ] = {
1686                [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1687                [ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
1688        },
1689        [ C(OP_WRITE) ] = {
1690                [ C(RESULT_ACCESS) ] = -1,
1691                [ C(RESULT_MISS)   ] = -1,
1692        },
1693        [ C(OP_PREFETCH) ] = {
1694                [ C(RESULT_ACCESS) ] = 0,
1695                [ C(RESULT_MISS)   ] = 0,
1696        },
1697 },
1698 [ C(LL  ) ] = {
1699        [ C(OP_READ) ] = {
1700                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1701                [ C(RESULT_ACCESS) ] = 0x01b7,
1702                [ C(RESULT_MISS)   ] = 0,
1703        },
1704        [ C(OP_WRITE) ] = {
1705                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1706                [ C(RESULT_ACCESS) ] = 0x01b7,
1707                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1708                [ C(RESULT_MISS)   ] = 0x01b7,
1709        },
1710        [ C(OP_PREFETCH) ] = {
1711                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1712                [ C(RESULT_ACCESS) ] = 0x01b7,
1713                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1714                [ C(RESULT_MISS)   ] = 0x01b7,
1715        },
1716 },
1717 [ C(DTLB) ] = {
1718        [ C(OP_READ) ] = {
1719                [ C(RESULT_ACCESS) ] = 0,
1720                [ C(RESULT_MISS)   ] = 0x0804, /* LD_DTLB_MISS */
1721        },
1722        [ C(OP_WRITE) ] = {
1723                [ C(RESULT_ACCESS) ] = 0,
1724                [ C(RESULT_MISS)   ] = 0,
1725        },
1726        [ C(OP_PREFETCH) ] = {
1727                [ C(RESULT_ACCESS) ] = 0,
1728                [ C(RESULT_MISS)   ] = 0,
1729        },
1730 },
1731 [ C(ITLB) ] = {
1732        [ C(OP_READ) ] = {
1733                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1734                [ C(RESULT_MISS)   ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1735        },
1736        [ C(OP_WRITE) ] = {
1737                [ C(RESULT_ACCESS) ] = -1,
1738                [ C(RESULT_MISS)   ] = -1,
1739        },
1740        [ C(OP_PREFETCH) ] = {
1741                [ C(RESULT_ACCESS) ] = -1,
1742                [ C(RESULT_MISS)   ] = -1,
1743        },
1744 },
1745 [ C(BPU ) ] = {
1746        [ C(OP_READ) ] = {
1747                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1748                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1749        },
1750        [ C(OP_WRITE) ] = {
1751                [ C(RESULT_ACCESS) ] = -1,
1752                [ C(RESULT_MISS)   ] = -1,
1753        },
1754        [ C(OP_PREFETCH) ] = {
1755                [ C(RESULT_ACCESS) ] = -1,
1756                [ C(RESULT_MISS)   ] = -1,
1757        },
1758 },
1759};
1760
1761EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1762EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
1763/* UOPS_NOT_DELIVERED.ANY */
1764EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1765/* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1766EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1767/* UOPS_RETIRED.ANY */
1768EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1769/* UOPS_ISSUED.ANY */
1770EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1771
1772static struct attribute *glm_events_attrs[] = {
1773        EVENT_PTR(td_total_slots_glm),
1774        EVENT_PTR(td_total_slots_scale_glm),
1775        EVENT_PTR(td_fetch_bubbles_glm),
1776        EVENT_PTR(td_recovery_bubbles_glm),
1777        EVENT_PTR(td_slots_issued_glm),
1778        EVENT_PTR(td_slots_retired_glm),
1779        NULL
1780};
1781
1782static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1783        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1784        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1785        INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1786        EVENT_EXTRA_END
1787};
1788
1789#define GLM_DEMAND_DATA_RD              BIT_ULL(0)
1790#define GLM_DEMAND_RFO                  BIT_ULL(1)
1791#define GLM_ANY_RESPONSE                BIT_ULL(16)
1792#define GLM_SNP_NONE_OR_MISS            BIT_ULL(33)
1793#define GLM_DEMAND_READ                 GLM_DEMAND_DATA_RD
1794#define GLM_DEMAND_WRITE                GLM_DEMAND_RFO
1795#define GLM_DEMAND_PREFETCH             (SNB_PF_DATA_RD|SNB_PF_RFO)
1796#define GLM_LLC_ACCESS                  GLM_ANY_RESPONSE
1797#define GLM_SNP_ANY                     (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1798#define GLM_LLC_MISS                    (GLM_SNP_ANY|SNB_NON_DRAM)
1799
1800static __initconst const u64 glm_hw_cache_event_ids
1801                                [PERF_COUNT_HW_CACHE_MAX]
1802                                [PERF_COUNT_HW_CACHE_OP_MAX]
1803                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1804        [C(L1D)] = {
1805                [C(OP_READ)] = {
1806                        [C(RESULT_ACCESS)]      = 0x81d0,       /* MEM_UOPS_RETIRED.ALL_LOADS */
1807                        [C(RESULT_MISS)]        = 0x0,
1808                },
1809                [C(OP_WRITE)] = {
1810                        [C(RESULT_ACCESS)]      = 0x82d0,       /* MEM_UOPS_RETIRED.ALL_STORES */
1811                        [C(RESULT_MISS)]        = 0x0,
1812                },
1813                [C(OP_PREFETCH)] = {
1814                        [C(RESULT_ACCESS)]      = 0x0,
1815                        [C(RESULT_MISS)]        = 0x0,
1816                },
1817        },
1818        [C(L1I)] = {
1819                [C(OP_READ)] = {
1820                        [C(RESULT_ACCESS)]      = 0x0380,       /* ICACHE.ACCESSES */
1821                        [C(RESULT_MISS)]        = 0x0280,       /* ICACHE.MISSES */
1822                },
1823                [C(OP_WRITE)] = {
1824                        [C(RESULT_ACCESS)]      = -1,
1825                        [C(RESULT_MISS)]        = -1,
1826                },
1827                [C(OP_PREFETCH)] = {
1828                        [C(RESULT_ACCESS)]      = 0x0,
1829                        [C(RESULT_MISS)]        = 0x0,
1830                },
1831        },
1832        [C(LL)] = {
1833                [C(OP_READ)] = {
1834                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1835                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1836                },
1837                [C(OP_WRITE)] = {
1838                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1839                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1840                },
1841                [C(OP_PREFETCH)] = {
1842                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1843                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1844                },
1845        },
1846        [C(DTLB)] = {
1847                [C(OP_READ)] = {
1848                        [C(RESULT_ACCESS)]      = 0x81d0,       /* MEM_UOPS_RETIRED.ALL_LOADS */
1849                        [C(RESULT_MISS)]        = 0x0,
1850                },
1851                [C(OP_WRITE)] = {
1852                        [C(RESULT_ACCESS)]      = 0x82d0,       /* MEM_UOPS_RETIRED.ALL_STORES */
1853                        [C(RESULT_MISS)]        = 0x0,
1854                },
1855                [C(OP_PREFETCH)] = {
1856                        [C(RESULT_ACCESS)]      = 0x0,
1857                        [C(RESULT_MISS)]        = 0x0,
1858                },
1859        },
1860        [C(ITLB)] = {
1861                [C(OP_READ)] = {
1862                        [C(RESULT_ACCESS)]      = 0x00c0,       /* INST_RETIRED.ANY_P */
1863                        [C(RESULT_MISS)]        = 0x0481,       /* ITLB.MISS */
1864                },
1865                [C(OP_WRITE)] = {
1866                        [C(RESULT_ACCESS)]      = -1,
1867                        [C(RESULT_MISS)]        = -1,
1868                },
1869                [C(OP_PREFETCH)] = {
1870                        [C(RESULT_ACCESS)]      = -1,
1871                        [C(RESULT_MISS)]        = -1,
1872                },
1873        },
1874        [C(BPU)] = {
1875                [C(OP_READ)] = {
1876                        [C(RESULT_ACCESS)]      = 0x00c4,       /* BR_INST_RETIRED.ALL_BRANCHES */
1877                        [C(RESULT_MISS)]        = 0x00c5,       /* BR_MISP_RETIRED.ALL_BRANCHES */
1878                },
1879                [C(OP_WRITE)] = {
1880                        [C(RESULT_ACCESS)]      = -1,
1881                        [C(RESULT_MISS)]        = -1,
1882                },
1883                [C(OP_PREFETCH)] = {
1884                        [C(RESULT_ACCESS)]      = -1,
1885                        [C(RESULT_MISS)]        = -1,
1886                },
1887        },
1888};
1889
1890static __initconst const u64 glm_hw_cache_extra_regs
1891                                [PERF_COUNT_HW_CACHE_MAX]
1892                                [PERF_COUNT_HW_CACHE_OP_MAX]
1893                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1894        [C(LL)] = {
1895                [C(OP_READ)] = {
1896                        [C(RESULT_ACCESS)]      = GLM_DEMAND_READ|
1897                                                  GLM_LLC_ACCESS,
1898                        [C(RESULT_MISS)]        = GLM_DEMAND_READ|
1899                                                  GLM_LLC_MISS,
1900                },
1901                [C(OP_WRITE)] = {
1902                        [C(RESULT_ACCESS)]      = GLM_DEMAND_WRITE|
1903                                                  GLM_LLC_ACCESS,
1904                        [C(RESULT_MISS)]        = GLM_DEMAND_WRITE|
1905                                                  GLM_LLC_MISS,
1906                },
1907                [C(OP_PREFETCH)] = {
1908                        [C(RESULT_ACCESS)]      = GLM_DEMAND_PREFETCH|
1909                                                  GLM_LLC_ACCESS,
1910                        [C(RESULT_MISS)]        = GLM_DEMAND_PREFETCH|
1911                                                  GLM_LLC_MISS,
1912                },
1913        },
1914};
1915
1916static __initconst const u64 glp_hw_cache_event_ids
1917                                [PERF_COUNT_HW_CACHE_MAX]
1918                                [PERF_COUNT_HW_CACHE_OP_MAX]
1919                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1920        [C(L1D)] = {
1921                [C(OP_READ)] = {
1922                        [C(RESULT_ACCESS)]      = 0x81d0,       /* MEM_UOPS_RETIRED.ALL_LOADS */
1923                        [C(RESULT_MISS)]        = 0x0,
1924                },
1925                [C(OP_WRITE)] = {
1926                        [C(RESULT_ACCESS)]      = 0x82d0,       /* MEM_UOPS_RETIRED.ALL_STORES */
1927                        [C(RESULT_MISS)]        = 0x0,
1928                },
1929                [C(OP_PREFETCH)] = {
1930                        [C(RESULT_ACCESS)]      = 0x0,
1931                        [C(RESULT_MISS)]        = 0x0,
1932                },
1933        },
1934        [C(L1I)] = {
1935                [C(OP_READ)] = {
1936                        [C(RESULT_ACCESS)]      = 0x0380,       /* ICACHE.ACCESSES */
1937                        [C(RESULT_MISS)]        = 0x0280,       /* ICACHE.MISSES */
1938                },
1939                [C(OP_WRITE)] = {
1940                        [C(RESULT_ACCESS)]      = -1,
1941                        [C(RESULT_MISS)]        = -1,
1942                },
1943                [C(OP_PREFETCH)] = {
1944                        [C(RESULT_ACCESS)]      = 0x0,
1945                        [C(RESULT_MISS)]        = 0x0,
1946                },
1947        },
1948        [C(LL)] = {
1949                [C(OP_READ)] = {
1950                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1951                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1952                },
1953                [C(OP_WRITE)] = {
1954                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1955                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1956                },
1957                [C(OP_PREFETCH)] = {
1958                        [C(RESULT_ACCESS)]      = 0x0,
1959                        [C(RESULT_MISS)]        = 0x0,
1960                },
1961        },
1962        [C(DTLB)] = {
1963                [C(OP_READ)] = {
1964                        [C(RESULT_ACCESS)]      = 0x81d0,       /* MEM_UOPS_RETIRED.ALL_LOADS */
1965                        [C(RESULT_MISS)]        = 0xe08,        /* DTLB_LOAD_MISSES.WALK_COMPLETED */
1966                },
1967                [C(OP_WRITE)] = {
1968                        [C(RESULT_ACCESS)]      = 0x82d0,       /* MEM_UOPS_RETIRED.ALL_STORES */
1969                        [C(RESULT_MISS)]        = 0xe49,        /* DTLB_STORE_MISSES.WALK_COMPLETED */
1970                },
1971                [C(OP_PREFETCH)] = {
1972                        [C(RESULT_ACCESS)]      = 0x0,
1973                        [C(RESULT_MISS)]        = 0x0,
1974                },
1975        },
1976        [C(ITLB)] = {
1977                [C(OP_READ)] = {
1978                        [C(RESULT_ACCESS)]      = 0x00c0,       /* INST_RETIRED.ANY_P */
1979                        [C(RESULT_MISS)]        = 0x0481,       /* ITLB.MISS */
1980                },
1981                [C(OP_WRITE)] = {
1982                        [C(RESULT_ACCESS)]      = -1,
1983                        [C(RESULT_MISS)]        = -1,
1984                },
1985                [C(OP_PREFETCH)] = {
1986                        [C(RESULT_ACCESS)]      = -1,
1987                        [C(RESULT_MISS)]        = -1,
1988                },
1989        },
1990        [C(BPU)] = {
1991                [C(OP_READ)] = {
1992                        [C(RESULT_ACCESS)]      = 0x00c4,       /* BR_INST_RETIRED.ALL_BRANCHES */
1993                        [C(RESULT_MISS)]        = 0x00c5,       /* BR_MISP_RETIRED.ALL_BRANCHES */
1994                },
1995                [C(OP_WRITE)] = {
1996                        [C(RESULT_ACCESS)]      = -1,
1997                        [C(RESULT_MISS)]        = -1,
1998                },
1999                [C(OP_PREFETCH)] = {
2000                        [C(RESULT_ACCESS)]      = -1,
2001                        [C(RESULT_MISS)]        = -1,
2002                },
2003        },
2004};
2005
2006static __initconst const u64 glp_hw_cache_extra_regs
2007                                [PERF_COUNT_HW_CACHE_MAX]
2008                                [PERF_COUNT_HW_CACHE_OP_MAX]
2009                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2010        [C(LL)] = {
2011                [C(OP_READ)] = {
2012                        [C(RESULT_ACCESS)]      = GLM_DEMAND_READ|
2013                                                  GLM_LLC_ACCESS,
2014                        [C(RESULT_MISS)]        = GLM_DEMAND_READ|
2015                                                  GLM_LLC_MISS,
2016                },
2017                [C(OP_WRITE)] = {
2018                        [C(RESULT_ACCESS)]      = GLM_DEMAND_WRITE|
2019                                                  GLM_LLC_ACCESS,
2020                        [C(RESULT_MISS)]        = GLM_DEMAND_WRITE|
2021                                                  GLM_LLC_MISS,
2022                },
2023                [C(OP_PREFETCH)] = {
2024                        [C(RESULT_ACCESS)]      = 0x0,
2025                        [C(RESULT_MISS)]        = 0x0,
2026                },
2027        },
2028};
2029
2030#define TNT_LOCAL_DRAM                  BIT_ULL(26)
2031#define TNT_DEMAND_READ                 GLM_DEMAND_DATA_RD
2032#define TNT_DEMAND_WRITE                GLM_DEMAND_RFO
2033#define TNT_LLC_ACCESS                  GLM_ANY_RESPONSE
2034#define TNT_SNP_ANY                     (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
2035                                         SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
2036#define TNT_LLC_MISS                    (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
2037
2038static __initconst const u64 tnt_hw_cache_extra_regs
2039                                [PERF_COUNT_HW_CACHE_MAX]
2040                                [PERF_COUNT_HW_CACHE_OP_MAX]
2041                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2042        [C(LL)] = {
2043                [C(OP_READ)] = {
2044                        [C(RESULT_ACCESS)]      = TNT_DEMAND_READ|
2045                                                  TNT_LLC_ACCESS,
2046                        [C(RESULT_MISS)]        = TNT_DEMAND_READ|
2047                                                  TNT_LLC_MISS,
2048                },
2049                [C(OP_WRITE)] = {
2050                        [C(RESULT_ACCESS)]      = TNT_DEMAND_WRITE|
2051                                                  TNT_LLC_ACCESS,
2052                        [C(RESULT_MISS)]        = TNT_DEMAND_WRITE|
2053                                                  TNT_LLC_MISS,
2054                },
2055                [C(OP_PREFETCH)] = {
2056                        [C(RESULT_ACCESS)]      = 0x0,
2057                        [C(RESULT_MISS)]        = 0x0,
2058                },
2059        },
2060};
2061
2062EVENT_ATTR_STR(topdown-fe-bound,       td_fe_bound_tnt,        "event=0x71,umask=0x0");
2063EVENT_ATTR_STR(topdown-retiring,       td_retiring_tnt,        "event=0xc2,umask=0x0");
2064EVENT_ATTR_STR(topdown-bad-spec,       td_bad_spec_tnt,        "event=0x73,umask=0x6");
2065EVENT_ATTR_STR(topdown-be-bound,       td_be_bound_tnt,        "event=0x74,umask=0x0");
2066
2067static struct attribute *tnt_events_attrs[] = {
2068        EVENT_PTR(td_fe_bound_tnt),
2069        EVENT_PTR(td_retiring_tnt),
2070        EVENT_PTR(td_bad_spec_tnt),
2071        EVENT_PTR(td_be_bound_tnt),
2072        NULL,
2073};
2074
2075static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
2076        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2077        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
2078        INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
2079        EVENT_EXTRA_END
2080};
2081
2082static struct extra_reg intel_grt_extra_regs[] __read_mostly = {
2083        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2084        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
2085        INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
2086        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2087        EVENT_EXTRA_END
2088};
2089
2090#define KNL_OT_L2_HITE          BIT_ULL(19) /* Other Tile L2 Hit */
2091#define KNL_OT_L2_HITF          BIT_ULL(20) /* Other Tile L2 Hit */
2092#define KNL_MCDRAM_LOCAL        BIT_ULL(21)
2093#define KNL_MCDRAM_FAR          BIT_ULL(22)
2094#define KNL_DDR_LOCAL           BIT_ULL(23)
2095#define KNL_DDR_FAR             BIT_ULL(24)
2096#define KNL_DRAM_ANY            (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
2097                                    KNL_DDR_LOCAL | KNL_DDR_FAR)
2098#define KNL_L2_READ             SLM_DMND_READ
2099#define KNL_L2_WRITE            SLM_DMND_WRITE
2100#define KNL_L2_PREFETCH         SLM_DMND_PREFETCH
2101#define KNL_L2_ACCESS           SLM_LLC_ACCESS
2102#define KNL_L2_MISS             (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
2103                                   KNL_DRAM_ANY | SNB_SNP_ANY | \
2104                                                  SNB_NON_DRAM)
2105
2106static __initconst const u64 knl_hw_cache_extra_regs
2107                                [PERF_COUNT_HW_CACHE_MAX]
2108                                [PERF_COUNT_HW_CACHE_OP_MAX]
2109                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2110        [C(LL)] = {
2111                [C(OP_READ)] = {
2112                        [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
2113                        [C(RESULT_MISS)]   = 0,
2114                },
2115                [C(OP_WRITE)] = {
2116                        [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
2117                        [C(RESULT_MISS)]   = KNL_L2_WRITE | KNL_L2_MISS,
2118                },
2119                [C(OP_PREFETCH)] = {
2120                        [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
2121                        [C(RESULT_MISS)]   = KNL_L2_PREFETCH | KNL_L2_MISS,
2122                },
2123        },
2124};
2125
2126/*
2127 * Used from PMIs where the LBRs are already disabled.
2128 *
2129 * This function may be called consecutively; the PMU is required to
2130 * remain in the disabled state across such consecutive calls.
2131 *
2132 * During consecutive calls, the same disable value will be written to related
2133 * registers, so the PMU state remains unchanged.
2134 *
2135 * intel_bts events don't coexist with intel PMU's BTS events because of
2136 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
2137 * disabled around intel PMU's event batching etc, only inside the PMI handler.
2138 *
2139 * Avoid PEBS_ENABLE MSR access in PMIs.
2140 * GLOBAL_CTRL has already been disabled, so none of the counters count
2141 * anymore; whether PEBS is enabled or not makes no difference.
2142 * The PEBS status usually does not change in PMIs, so it is unnecessary
2143 * to access the PEBS_ENABLE MSR in disable_all()/enable_all().
2144 * However, some cases may change the PEBS status, e.g. PMI throttling;
2145 * PEBS_ENABLE should be updated wherever the status changes.
2146 */
2147static void __intel_pmu_disable_all(void)
2148{
2149        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2150
2151        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2152
2153        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
2154                intel_pmu_disable_bts();
2155}
2156
2157static void intel_pmu_disable_all(void)
2158{
2159        __intel_pmu_disable_all();
2160        intel_pmu_pebs_disable_all();
2161        intel_pmu_lbr_disable_all();
2162}
2163
2164static void __intel_pmu_enable_all(int added, bool pmi)
2165{
2166        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2167        u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
2168
2169        intel_pmu_lbr_enable_all(pmi);
2170        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
2171               intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
2172
2173        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2174                struct perf_event *event =
2175                        cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
2176
2177                if (WARN_ON_ONCE(!event))
2178                        return;
2179
2180                intel_pmu_enable_bts(event->hw.config);
2181        }
2182}
2183
2184static void intel_pmu_enable_all(int added)
2185{
2186        intel_pmu_pebs_enable_all();
2187        __intel_pmu_enable_all(added, false);
2188}
2189
2190/*
2191 * Workaround for:
2192 *   Intel Errata AAK100 (model 26)
2193 *   Intel Errata AAP53  (model 30)
2194 *   Intel Errata BD53   (model 44)
2195 *
2196 * The official story:
2197 *   These chips need to be 'reset' when adding counters by programming the
2198 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
2199 *   in sequence on the same PMC or on different PMCs.
2200 *
2201 * In practice it appears some of these events do in fact count, and
2202 * we need to program all 4 events.
2203 */
2204static void intel_pmu_nhm_workaround(void)
2205{
2206        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2207        static const unsigned long nhm_magic[4] = {
2208                0x4300B5,
2209                0x4300D2,
2210                0x4300B1,
2211                0x4300B1
2212        };
2213        struct perf_event *event;
2214        int i;
2215
2216        /*
2217         * The errata requires the steps below:
2218         * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
2219         * 2) Configure 4 PERFEVTSELx with the magic events and clear
2220         *    the corresponding PMCx;
2221         * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
2222         * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
2223         * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
2224         */
2225
2226        /*
2227         * The steps we actually take differ slightly from the above:
2228         * A) To reduce MSR operations, we skip step 1) since the MSRs
2229         *    are already cleared before this function is called;
2230         * B) Call x86_perf_event_update to save PMCx before configuring
2231         *    PERFEVTSELx with the magic number;
2232         * C) For step 5), we only clear a PERFEVTSELx when it is not
2233         *    currently in use;
2234         * D) Call x86_perf_event_set_period to restore PMCx;
2235         */
2236
2237        /* We always operate on 4 pairs of event-select/counter registers */
2238        for (i = 0; i < 4; i++) {
2239                event = cpuc->events[i];
2240                if (event)
2241                        x86_perf_event_update(event);
2242        }
2243
2244        for (i = 0; i < 4; i++) {
2245                wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
2246                wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
2247        }
2248
2249        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
2250        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
2251
2252        for (i = 0; i < 4; i++) {
2253                event = cpuc->events[i];
2254
2255                if (event) {
2256                        x86_perf_event_set_period(event);
2257                        __x86_pmu_enable_event(&event->hw,
2258                                        ARCH_PERFMON_EVENTSEL_ENABLE);
2259                } else
2260                        wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
2261        }
2262}
2263
2264static void intel_pmu_nhm_enable_all(int added)
2265{
2266        if (added)
2267                intel_pmu_nhm_workaround();
2268        intel_pmu_enable_all(added);
2269}
2270
2271static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
2272{
2273        u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
2274
2275        if (cpuc->tfa_shadow != val) {
2276                cpuc->tfa_shadow = val;
2277                wrmsrl(MSR_TSX_FORCE_ABORT, val);
2278        }
2279}
2280
2281static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2282{
2283        /*
2284         * We're going to use PMC3, make sure TFA is set before we touch it.
2285         */
2286        if (cntr == 3)
2287                intel_set_tfa(cpuc, true);
2288}
2289
2290static void intel_tfa_pmu_enable_all(int added)
2291{
2292        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2293
2294        /*
2295         * If we find PMC3 is no longer used when we enable the PMU, we can
2296         * clear TFA.
2297         */
2298        if (!test_bit(3, cpuc->active_mask))
2299                intel_set_tfa(cpuc, false);
2300
2301        intel_pmu_enable_all(added);
2302}
2303
2304static inline u64 intel_pmu_get_status(void)
2305{
2306        u64 status;
2307
2308        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
2309
2310        return status;
2311}
2312
2313static inline void intel_pmu_ack_status(u64 ack)
2314{
2315        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2316}
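
/*
 * Editor's sketch (illustrative only, not the exact handler code): the PMI
 * handler pairs intel_pmu_get_status() and intel_pmu_ack_status() by
 * latching the overflow status, acknowledging it, handling the overflowed
 * counters, and re-reading until no overflow bits remain, roughly:
 *
 *	u64 status = intel_pmu_get_status();
 *
 *	while (status) {
 *		intel_pmu_ack_status(status);
 *		// ... process counters whose bits are set in status ...
 *		status = intel_pmu_get_status();
 *	}
 */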
2317
2318static inline bool event_is_checkpointed(struct perf_event *event)
2319{
2320        return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2321}
2322
2323static inline void intel_set_masks(struct perf_event *event, int idx)
2324{
2325        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2326
2327        if (event->attr.exclude_host)
2328                __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2329        if (event->attr.exclude_guest)
2330                __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2331        if (event_is_checkpointed(event))
2332                __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2333}
2334
2335static inline void intel_clear_masks(struct perf_event *event, int idx)
2336{
2337        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2338
2339        __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2340        __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2341        __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2342}
2343
2344static void intel_pmu_disable_fixed(struct perf_event *event)
2345{
2346        struct hw_perf_event *hwc = &event->hw;
2347        u64 ctrl_val, mask;
2348        int idx = hwc->idx;
2349
2350        if (is_topdown_idx(idx)) {
2351                struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2352
2353                /*
2354                 * When there are other active TopDown events,
2355                 * don't disable the fixed counter 3.
2356                 */
2357                if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2358                        return;
2359                idx = INTEL_PMC_IDX_FIXED_SLOTS;
2360        }
2361
2362        intel_clear_masks(event, idx);
2363
2364        mask = 0xfULL << ((idx - INTEL_PMC_IDX_FIXED) * 4);
2365        rdmsrl(hwc->config_base, ctrl_val);
2366        ctrl_val &= ~mask;
2367        wrmsrl(hwc->config_base, ctrl_val);
2368}
2369
2370static void intel_pmu_disable_event(struct perf_event *event)
2371{
2372        struct hw_perf_event *hwc = &event->hw;
2373        int idx = hwc->idx;
2374
2375        switch (idx) {
2376        case 0 ... INTEL_PMC_IDX_FIXED - 1:
2377                intel_clear_masks(event, idx);
2378                x86_pmu_disable_event(event);
2379                break;
2380        case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2381        case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2382                intel_pmu_disable_fixed(event);
2383                break;
2384        case INTEL_PMC_IDX_FIXED_BTS:
2385                intel_pmu_disable_bts();
2386                intel_pmu_drain_bts_buffer();
2387                return;
2388        case INTEL_PMC_IDX_FIXED_VLBR:
2389                intel_clear_masks(event, idx);
2390                break;
2391        default:
2392                intel_clear_masks(event, idx);
2393                pr_warn("Failed to disable the event with invalid index %d\n",
2394                        idx);
2395                return;
2396        }
2397
2398        /*
2399         * Needs to be called after x86_pmu_disable_event,
2400         * so we don't trigger the event without PEBS bit set.
2401         */
2402        if (unlikely(event->attr.precise_ip))
2403                intel_pmu_pebs_disable(event);
2404}
2405
2406static void intel_pmu_del_event(struct perf_event *event)
2407{
2408        if (needs_branch_stack(event))
2409                intel_pmu_lbr_del(event);
2410        if (event->attr.precise_ip)
2411                intel_pmu_pebs_del(event);
2412}
2413
2414static int icl_set_topdown_event_period(struct perf_event *event)
2415{
2416        struct hw_perf_event *hwc = &event->hw;
2417        s64 left = local64_read(&hwc->period_left);
2418
2419        /*
2420         * The values in PERF_METRICS MSR are derived from fixed counter 3.
2421         * Software should start both registers, PERF_METRICS and fixed
2422         * counter 3, from zero.
2423         * Clear PERF_METRICS and Fixed counter 3 in initialization.
2424         * After that, both MSRs will be cleared on each read, so there
2425         * is no need to clear them again.
2426         */
2427        if (left == x86_pmu.max_period) {
2428                wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2429                wrmsrl(MSR_PERF_METRICS, 0);
2430                hwc->saved_slots = 0;
2431                hwc->saved_metric = 0;
2432        }
2433
2434        if ((hwc->saved_slots) && is_slots_event(event)) {
2435                wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots);
2436                wrmsrl(MSR_PERF_METRICS, hwc->saved_metric);
2437        }
2438
2439        perf_event_update_userpage(event);
2440
2441        return 0;
2442}
2443
2444static int adl_set_topdown_event_period(struct perf_event *event)
2445{
2446        struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
2447
2448        if (pmu->cpu_type != hybrid_big)
2449                return 0;
2450
2451        return icl_set_topdown_event_period(event);
2452}
2453
2454static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
2455{
2456        u32 val;
2457
2458        /*
2459         * The metric is reported as an 8bit integer fraction
2460         * summing up to 0xff.
2461         * slots-in-metric = (Metric / 0xff) * slots
2462         */
2463        val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
2464        return  mul_u64_u32_div(slots, val, 0xff);
2465}
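
/*
 * Editor's worked example (illustrative only): with slots = 1000 and a
 * metric byte of 0x40, icl_get_metrics_event_value() above returns
 * mul_u64_u32_div(1000, 0x40, 0xff) = 64000 / 255 = 250, i.e. roughly a
 * quarter of the measured slots are attributed to that topdown metric.
 */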
2466
2467static u64 icl_get_topdown_value(struct perf_event *event,
2468                                       u64 slots, u64 metrics)
2469{
2470        int idx = event->hw.idx;
2471        u64 delta;
2472
2473        if (is_metric_idx(idx))
2474                delta = icl_get_metrics_event_value(metrics, slots, idx);
2475        else
2476                delta = slots;
2477
2478        return delta;
2479}
2480
2481static void __icl_update_topdown_event(struct perf_event *event,
2482                                       u64 slots, u64 metrics,
2483                                       u64 last_slots, u64 last_metrics)
2484{
2485        u64 delta, last = 0;
2486
2487        delta = icl_get_topdown_value(event, slots, metrics);
2488        if (last_slots)
2489                last = icl_get_topdown_value(event, last_slots, last_metrics);
2490
2491        /*
2492         * The 8bit integer fraction of a metric may not be accurate,
2493         * especially when the change is very small.
2494         * For example, if only a few bad_spec events occur, the fraction
2495         * may drop from 1 to 0, making the bad_spec event value 0, which
2496         * is definitely less than the last value.
2497         * Avoid updating event->count in this case.
2498         */
2499        if (delta > last) {
2500                delta -= last;
2501                local64_add(delta, &event->count);
2502        }
2503}
2504
2505static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
2506                                      u64 metrics, int metric_end)
2507{
2508        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2509        struct perf_event *other;
2510        int idx;
2511
2512        event->hw.saved_slots = slots;
2513        event->hw.saved_metric = metrics;
2514
2515        for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2516                if (!is_topdown_idx(idx))
2517                        continue;
2518                other = cpuc->events[idx];
2519                other->hw.saved_slots = slots;
2520                other->hw.saved_metric = metrics;
2521        }
2522}
2523
2524/*
2525 * Update all active Topdown events.
2526 *
2527 * PERF_METRICS and Fixed counter 3 are read separately, so the values may be
2528 * modified by an NMI. The PMU has to be disabled before calling this function.
2529 */
2530
2531static u64 intel_update_topdown_event(struct perf_event *event, int metric_end)
2532{
2533        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2534        struct perf_event *other;
2535        u64 slots, metrics;
2536        bool reset = true;
2537        int idx;
2538
2539        /* read Fixed counter 3 */
2540        rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
2541        if (!slots)
2542                return 0;
2543
2544        /* read PERF_METRICS */
2545        rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
2546
2547        for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2548                if (!is_topdown_idx(idx))
2549                        continue;
2550                other = cpuc->events[idx];
2551                __icl_update_topdown_event(other, slots, metrics,
2552                                           event ? event->hw.saved_slots : 0,
2553                                           event ? event->hw.saved_metric : 0);
2554        }
2555
2556        /*
2557         * Check and update this event, which may have been cleared
2558         * in active_mask, e.g. by x86_pmu_stop().
2559         */
2560        if (event && !test_bit(event->hw.idx, cpuc->active_mask)) {
2561                __icl_update_topdown_event(event, slots, metrics,
2562                                           event->hw.saved_slots,
2563                                           event->hw.saved_metric);
2564
2565                /*
2566                 * In x86_pmu_stop(), the event is cleared in active_mask first,
2567                 * then the delta is drained, which indicates a context switch
2568                 * for counting.
2569                 * Save the metric and slots values for the context switch.
2570                 * There is no need to reset PERF_METRICS and Fixed counter 3,
2571                 * because the values will be restored on the next schedule in.
2572                 */
2573                update_saved_topdown_regs(event, slots, metrics, metric_end);
2574                reset = false;
2575        }
2576
2577        if (reset) {
2578                /* The fixed counter 3 has to be written before the PERF_METRICS. */
2579                wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2580                wrmsrl(MSR_PERF_METRICS, 0);
2581                if (event)
2582                        update_saved_topdown_regs(event, 0, 0, metric_end);
2583        }
2584
2585        return slots;
2586}
2587
2588static u64 icl_update_topdown_event(struct perf_event *event)
2589{
2590        return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE +
2591                                                 x86_pmu.num_topdown_events - 1);
2592}
2593
2594static u64 adl_update_topdown_event(struct perf_event *event)
2595{
2596        struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
2597
2598        if (pmu->cpu_type != hybrid_big)
2599                return 0;
2600
2601        return icl_update_topdown_event(event);
2602}
2603
2604
2605static void intel_pmu_read_topdown_event(struct perf_event *event)
2606{
2607        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2608
2609        /* Only need to call update_topdown_event() once for group read. */
2610        if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
2611            !is_slots_event(event))
2612                return;
2613
2614        perf_pmu_disable(event->pmu);
2615        x86_pmu.update_topdown_event(event);
2616        perf_pmu_enable(event->pmu);
2617}
2618
2619static void intel_pmu_read_event(struct perf_event *event)
2620{
2621        if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2622                intel_pmu_auto_reload_read(event);
2623        else if (is_topdown_count(event) && x86_pmu.update_topdown_event)
2624                intel_pmu_read_topdown_event(event);
2625        else
2626                x86_perf_event_update(event);
2627}
2628
2629static void intel_pmu_enable_fixed(struct perf_event *event)
2630{
2631        struct hw_perf_event *hwc = &event->hw;
2632        u64 ctrl_val, mask, bits = 0;
2633        int idx = hwc->idx;
2634
2635        if (is_topdown_idx(idx)) {
2636                struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2637                /*
2638                 * When there are other active TopDown events,
2639                 * don't enable the fixed counter 3 again.
2640                 */
2641                if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2642                        return;
2643
2644                idx = INTEL_PMC_IDX_FIXED_SLOTS;
2645        }
2646
2647        intel_set_masks(event, idx);
2648
2649        /*
2650         * Enable IRQ generation (0x8), if not PEBS,
2651         * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
2652         * if requested:
2653         */
2654        if (!event->attr.precise_ip)
2655                bits |= 0x8;
2656        if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2657                bits |= 0x2;
2658        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2659                bits |= 0x1;
2660
2661        /*
2662         * ANY bit is supported in v3 and up
2663         */
2664        if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2665                bits |= 0x4;
2666
2667        idx -= INTEL_PMC_IDX_FIXED;
2668        bits <<= (idx * 4);
2669        mask = 0xfULL << (idx * 4);
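            /*
             * For illustration: each fixed counter owns a 4-bit field in
             * the fixed-counter control MSR (hwc->config_base), so for
             * e.g. fixed counter 1 the bits above land in ctrl_val[7:4]
             * and the 0xf mask selects exactly that field for the
             * read-modify-write below.
             */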
2670
2671        if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
2672                bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
2673                mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
2674        }
2675
2676        rdmsrl(hwc->config_base, ctrl_val);
2677        ctrl_val &= ~mask;
2678        ctrl_val |= bits;
2679        wrmsrl(hwc->config_base, ctrl_val);
2680}
2681
2682static void intel_pmu_enable_event(struct perf_event *event)
2683{
2684        struct hw_perf_event *hwc = &event->hw;
2685        int idx = hwc->idx;
2686
2687        if (unlikely(event->attr.precise_ip))
2688                intel_pmu_pebs_enable(event);
2689
2690        switch (idx) {
2691        case 0 ... INTEL_PMC_IDX_FIXED - 1:
2692                intel_set_masks(event, idx);
2693                __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2694                break;
2695        case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2696        case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2697                intel_pmu_enable_fixed(event);
2698                break;
2699        case INTEL_PMC_IDX_FIXED_BTS:
2700                if (!__this_cpu_read(cpu_hw_events.enabled))
2701                        return;
2702                intel_pmu_enable_bts(hwc->config);
2703                break;
2704        case INTEL_PMC_IDX_FIXED_VLBR:
2705                intel_set_masks(event, idx);
2706                break;
2707        default:
2708                pr_warn("Failed to enable the event with invalid index %d\n",
2709                        idx);
2710        }
2711}
2712
2713static void intel_pmu_add_event(struct perf_event *event)
2714{
2715        if (event->attr.precise_ip)
2716                intel_pmu_pebs_add(event);
2717        if (needs_branch_stack(event))
2718                intel_pmu_lbr_add(event);
2719}
2720
2721/*
2722 * Save and restart an expired event. Called from NMI context,
2723 * so it has to be careful about preempting normal event ops:
2724 */
2725int intel_pmu_save_and_restart(struct perf_event *event)
2726{
2727        x86_perf_event_update(event);
2728        /*
2729         * For a checkpointed counter always reset back to 0.  This
2730         * avoids a situation where the counter overflows, aborts the
2731         * transaction and is then set back to shortly before the
2732         * overflow, and overflows and aborts again.
2733         */
2734        if (unlikely(event_is_checkpointed(event))) {
2735                /* No race with NMIs because the counter should not be armed */
2736                wrmsrl(event->hw.event_base, 0);
2737                local64_set(&event->hw.prev_count, 0);
2738        }
2739        return x86_perf_event_set_period(event);
2740}
2741
2742static void intel_pmu_reset(void)
2743{
2744        struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2745        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2746        int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
2747        int num_counters = hybrid(cpuc->pmu, num_counters);
2748        unsigned long flags;
2749        int idx;
2750
2751        if (!num_counters)
2752                return;
2753
2754        local_irq_save(flags);
2755
2756        pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
2757
2758        for (idx = 0; idx < num_counters; idx++) {
2759                wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
2760                wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
2761        }
2762        for (idx = 0; idx < num_counters_fixed; idx++) {
2763                if (fixed_counter_disabled(idx, cpuc->pmu))
2764                        continue;
2765                wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
2766        }
2767
2768        if (ds)
2769                ds->bts_index = ds->bts_buffer_base;
2770
2771        /* Ack all overflows and disable fixed counters */
2772        if (x86_pmu.version >= 2) {
2773                intel_pmu_ack_status(intel_pmu_get_status());
2774                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2775        }
2776
2777        /* Reset LBRs and LBR freezing */
2778        if (x86_pmu.lbr_nr) {
2779                update_debugctlmsr(get_debugctlmsr() &
2780                        ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
2781        }
2782
2783        local_irq_restore(flags);
2784}
2785
2786static int handle_pmi_common(struct pt_regs *regs, u64 status)
2787{
2788        struct perf_sample_data data;
2789        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2790        int bit;
2791        int handled = 0;
2792        u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
2793
2794        inc_irq_stat(apic_perf_irqs);
2795
2796        /*
2797         * Ignore a range of extra bits in status that do not indicate
2798         * overflow by themselves.
2799         */
2800        status &= ~(GLOBAL_STATUS_COND_CHG |
2801                    GLOBAL_STATUS_ASIF |
2802                    GLOBAL_STATUS_LBRS_FROZEN);
2803        if (!status)
2804                return 0;
2805        /*
2806         * In case multiple PEBS events are sampled at the same time,
2807         * it is possible to have GLOBAL_STATUS bit 62 set indicating
2808         * PEBS buffer overflow and also seeing at most 3 PEBS counters
2809         * having their bits set in the status register. This is a sign
2810         * that there was at least one PEBS record pending at the time
2811         * of the PMU interrupt. PEBS counters must only be processed
2812         * via the drain_pebs() calls and not via the regular sample
2813         * processing loop that comes later in this function, otherwise
2814         * phony regular samples may be generated in the sampling buffer
2815         * not marked with the EXACT tag. Another possibility is to have
2816         * one PEBS event and at least one non-PEBS event which overflows
2817         * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
2818         * not be set, yet the overflow status bit for the PEBS counter
2819         * will be set on Skylake.
2820         *
2821         * To avoid this problem, we systematically ignore the PEBS-enabled
2822         * counters from the GLOBAL_STATUS mask and we always process PEBS
2823         * events via drain_pebs().
2824         */
2825        if (x86_pmu.flags & PMU_FL_PEBS_ALL)
2826                status &= ~cpuc->pebs_enabled;
2827        else
2828                status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
2829
2830        /*
2831         * PEBS overflow sets bit 62 in the global status register
2832         */
2833        if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) {
2834                u64 pebs_enabled = cpuc->pebs_enabled;
2835
2836                handled++;
2837                x86_pmu.drain_pebs(regs, &data);
2838                status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
2839
2840                /*
2841                 * PMI throttling may be triggered, which stops the PEBS event.
2842                 * Although cpuc->pebs_enabled is updated accordingly,
2843                 * MSR_IA32_PEBS_ENABLE is not, because cpuc->enabled has been
2844                 * forced to 0 in the PMI handler.
2845                 * Update the MSR if pebs_enabled has changed.
2846                 */
2847                if (pebs_enabled != cpuc->pebs_enabled)
2848                        wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
2849        }
2850
2851        /*
2852         * Intel PT
2853         */
2854        if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
2855                handled++;
2856                if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
2857                        perf_guest_cbs->handle_intel_pt_intr))
2858                        perf_guest_cbs->handle_intel_pt_intr();
2859                else
2860                        intel_pt_interrupt();
2861        }
2862
2863        /*
2864         * Intel Perf metrics
2865         */
2866        if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
2867                handled++;
2868                if (x86_pmu.update_topdown_event)
2869                        x86_pmu.update_topdown_event(NULL);
2870        }
2871
2872        /*
2873         * Checkpointed counters can lead to 'spurious' PMIs because the
2874         * rollback caused by the PMI will have cleared the overflow status
2875         * bit. Therefore always force probe these counters.
2876         */
2877        status |= cpuc->intel_cp_status;
2878
2879        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2880                struct perf_event *event = cpuc->events[bit];
2881
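                /*
                 * Count the overflow as handled even if the event is no
                 * longer in active_mask (e.g. it was just stopped), so
                 * that the NMI is still accounted for and not reported as
                 * unknown.
                 */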
2882                handled++;
2883
2884                if (!test_bit(bit, cpuc->active_mask))
2885                        continue;
2886
2887                if (!intel_pmu_save_and_restart(event))
2888                        continue;
2889
2890                perf_sample_data_init(&data, 0, event->hw.last_period);
2891
2892                if (has_branch_stack(event))
2893                        data.br_stack = &cpuc->lbr_stack;
2894
2895                if (perf_event_overflow(event, &data, regs))
2896                        x86_pmu_stop(event, 0);
2897        }
2898
2899        return handled;
2900}
2901
2902/*
2903 * This handler is triggered by the local APIC, so the APIC IRQ handling
2904 * rules apply:
2905 */
2906static int intel_pmu_handle_irq(struct pt_regs *regs)
2907{
2908        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2909        bool late_ack = hybrid_bit(cpuc->pmu, late_ack);
2910        bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack);
2911        int loops;
2912        u64 status;
2913        int handled;
2914        int pmu_enabled;
2915
2916        /*
2917         * Save the PMU state.
2918         * It needs to be restored when leaving the handler.
2919         */
2920        pmu_enabled = cpuc->enabled;
2921        /*
2922         * In general, the early ACK is only applied for old platforms.
2923         * For big cores starting with Haswell, the late ACK should be
2924         * applied.
2925         * For small cores after Tremont, the ACK must be done right
2926         * before re-enabling counters, i.e. in the middle of the
2927         * NMI handler.
2928         */
2929        if (!late_ack && !mid_ack)
2930                apic_write(APIC_LVTPC, APIC_DM_NMI);
2931        intel_bts_disable_local();
2932        cpuc->enabled = 0;
2933        __intel_pmu_disable_all();
2934        handled = intel_pmu_drain_bts_buffer();
2935        handled += intel_bts_interrupt();
2936        status = intel_pmu_get_status();
2937        if (!status)
2938                goto done;
2939
2940        loops = 0;
2941again:
2942        intel_pmu_lbr_read();
2943        intel_pmu_ack_status(status);
2944        if (++loops > 100) {
2945                static bool warned;
2946
2947                if (!warned) {
2948                        WARN(1, "perfevents: irq loop stuck!\n");
2949                        perf_event_print_debug();
2950                        warned = true;
2951                }
2952                intel_pmu_reset();
2953                goto done;
2954        }
2955
2956        handled += handle_pmi_common(regs, status);
2957
2958        /*
2959         * Repeat if there is more work to be done:
2960         */
2961        status = intel_pmu_get_status();
2962        if (status)
2963                goto again;
2964
2965done:
2966        if (mid_ack)
2967                apic_write(APIC_LVTPC, APIC_DM_NMI);
2968        /* Only restore PMU state when it's active. See x86_pmu_disable(). */
2969        cpuc->enabled = pmu_enabled;
2970        if (pmu_enabled)
2971                __intel_pmu_enable_all(0, true);
2972        intel_bts_enable_local();
2973
2974        /*
2975         * Only unmask the NMI after the overflow counters
2976         * have been reset. This avoids spurious NMIs on
2977         * Haswell CPUs.
2978         */
2979        if (late_ack)
2980                apic_write(APIC_LVTPC, APIC_DM_NMI);
2981        return handled;
2982}
2983
2984static struct event_constraint *
2985intel_bts_constraints(struct perf_event *event)
2986{
2987        if (unlikely(intel_pmu_has_bts(event)))
2988                return &bts_constraint;
2989
2990        return NULL;
2991}
2992
2993/*
2994 * Note: matches a fake event, like Fixed2.
2995 */
2996static struct event_constraint *
2997intel_vlbr_constraints(struct perf_event *event)
2998{
2999        struct event_constraint *c = &vlbr_constraint;
3000
3001        if (unlikely(constraint_match(c, event->hw.config)))
3002                return c;
3003
3004        return NULL;
3005}
3006
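    /*
     * Return the index of an alternate extra register that can hold the
     * same config when the preferred one is already taken with a different
     * value: EXTRA_REG_RSP_0 and EXTRA_REG_RSP_1 can substitute for each
     * other, provided the config fits the alternate register's valid_mask.
     */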
3007static int intel_alt_er(struct cpu_hw_events *cpuc,
3008                        int idx, u64 config)
3009{
3010        struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs);
3011        int alt_idx = idx;
3012
3013        if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
3014                return idx;
3015
3016        if (idx == EXTRA_REG_RSP_0)
3017                alt_idx = EXTRA_REG_RSP_1;
3018
3019        if (idx == EXTRA_REG_RSP_1)
3020                alt_idx = EXTRA_REG_RSP_0;
3021
3022        if (config & ~extra_regs[alt_idx].valid_mask)
3023                return idx;
3024
3025        return alt_idx;
3026}
3027
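    /*
     * When an event is moved to the alternate offcore response MSR, its
     * event select must be rewritten to match the new register (typically
     * 0x01b7 for MSR_OFFCORE_RSP_0 vs. 0x01bb for MSR_OFFCORE_RSP_1 on
     * parts that have both).
     */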
3028static void intel_fixup_er(struct perf_event *event, int idx)
3029{
3030        struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
3031        event->hw.extra_reg.idx = idx;
3032
3033        if (idx == EXTRA_REG_RSP_0) {
3034                event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3035                event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event;
3036                event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
3037        } else if (idx == EXTRA_REG_RSP_1) {
3038                event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3039                event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event;
3040                event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
3041        }
3042}
3043
3044/*
3045 * manage allocation of shared extra msr for certain events
3046 *
3047 * sharing can be:
3048 * per-cpu: to be shared between the various events on a single PMU
3049 * per-core: per-cpu + shared by HT threads
3050 */
3051static struct event_constraint *
3052__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
3053                                   struct perf_event *event,
3054                                   struct hw_perf_event_extra *reg)
3055{
3056        struct event_constraint *c = &emptyconstraint;
3057        struct er_account *era;
3058        unsigned long flags;
3059        int idx = reg->idx;
3060
3061        /*
3062         * reg->alloc can be set due to existing state, so for fake cpuc we
3063         * need to ignore this, otherwise we might fail to allocate proper fake
3064         * state for this extra reg constraint. Also see the comment below.
3065         */
3066        if (reg->alloc && !cpuc->is_fake)
3067                return NULL; /* call x86_get_event_constraint() */
3068
3069again:
3070        era = &cpuc->shared_regs->regs[idx];
3071        /*
3072         * we use raw_spin_lock_irqsave() to avoid lockdep issues when
3073         * passing a fake cpuc
3074         */
3075        raw_spin_lock_irqsave(&era->lock, flags);
3076
3077        if (!atomic_read(&era->ref) || era->config == reg->config) {
3078
3079                /*
3080                 * If it's a fake cpuc -- as per validate_{group,event}() we
3081                 * shouldn't touch event state and we can avoid doing so
3082                 * since both will only call get_event_constraints() once
3083                 * on each event, this avoids the need for reg->alloc.
3084                 *
3085                 * Not doing the ER fixup will only result in era->reg being
3086                 * wrong, but since we won't actually try and program hardware
3087                 * this isn't a problem either.
3088                 */
3089                if (!cpuc->is_fake) {
3090                        if (idx != reg->idx)
3091                                intel_fixup_er(event, idx);
3092
3093                        /*
3094                         * x86_schedule_events() can call get_event_constraints()
3095                         * multiple times on events in the case of incremental
3096                         * scheduling. reg->alloc ensures we only do the ER
3097                         * allocation once.
3098                         */
3099                        reg->alloc = 1;
3100                }
3101
3102                /* lock in msr value */
3103                era->config = reg->config;
3104                era->reg = reg->reg;
3105
3106                /* one more user */
3107                atomic_inc(&era->ref);
3108
3109                /*
3110                 * need to call x86_get_event_constraint()
3111                 * to check if associated event has constraints
3112                 */
3113                c = NULL;
3114        } else {
3115                idx = intel_alt_er(cpuc, idx, reg->config);
3116                if (idx != reg->idx) {
3117                        raw_spin_unlock_irqrestore(&era->lock, flags);
3118                        goto again;
3119                }
3120        }
3121        raw_spin_unlock_irqrestore(&era->lock, flags);
3122
3123        return c;
3124}
3125
3126static void
3127__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
3128                                   struct hw_perf_event_extra *reg)
3129{
3130        struct er_account *era;
3131
3132        /*
3133         * Only put constraint if extra reg was actually allocated. Also takes
3134         * care of events which do not use an extra shared reg.
3135         *
3136         * Also, if this is a fake cpuc we shouldn't touch any event state
3137         * (reg->alloc) and we don't care about leaving inconsistent cpuc state
3138         * either since it'll be thrown out.
3139         */
3140        if (!reg->alloc || cpuc->is_fake)
3141                return;
3142
3143        era = &cpuc->shared_regs->regs[reg->idx];
3144
3145        /* one fewer user */
3146        atomic_dec(&era->ref);
3147
3148        /* allocate again next time */
3149        reg->alloc = 0;
3150}
3151
3152static struct event_constraint *
3153intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
3154                              struct perf_event *event)
3155{
3156        struct event_constraint *c = NULL, *d;
3157        struct hw_perf_event_extra *xreg, *breg;
3158
3159        xreg = &event->hw.extra_reg;
3160        if (xreg->idx != EXTRA_REG_NONE) {
3161                c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
3162                if (c == &emptyconstraint)
3163                        return c;
3164        }
3165        breg = &event->hw.branch_reg;
3166        if (breg->idx != EXTRA_REG_NONE) {
3167                d = __intel_shared_reg_get_constraints(cpuc, event, breg);
3168                if (d == &emptyconstraint) {
3169                        __intel_shared_reg_put_constraints(cpuc, xreg);
3170                        c = d;
3171                }
3172        }
3173        return c;
3174}
3175
3176struct event_constraint *
3177x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3178                          struct perf_event *event)
3179{
3180        struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints);
3181        struct event_constraint *c;
3182
3183        if (event_constraints) {
3184                for_each_event_constraint(c, event_constraints) {
3185                        if (constraint_match(c, event->hw.config)) {
3186                                event->hw.flags |= c->flags;
3187                                return c;
3188                        }
3189                }
3190        }
3191
3192        return &hybrid_var(cpuc->pmu, unconstrained);
3193}
3194
3195static struct event_constraint *
3196__intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3197                            struct perf_event *event)
3198{
3199        struct event_constraint *c;
3200
3201        c = intel_vlbr_constraints(event);
3202        if (c)
3203                return c;
3204
3205        c = intel_bts_constraints(event);
3206        if (c)
3207                return c;
3208
3209        c = intel_shared_regs_constraints(cpuc, event);
3210        if (c)
3211                return c;
3212
3213        c = intel_pebs_constraints(event);
3214        if (c)
3215                return c;
3216
3217        return x86_get_event_constraints(cpuc, idx, event);
3218}
3219
3220static void
3221intel_start_scheduling(struct cpu_hw_events *cpuc)
3222{
3223        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3224        struct intel_excl_states *xl;
3225        int tid = cpuc->excl_thread_id;
3226
3227        /*
3228         * nothing needed if in group validation mode
3229         */
3230        if (cpuc->is_fake || !is_ht_workaround_enabled())
3231                return;
3232
3233        /*
3234         * no exclusion needed
3235         */
3236        if (WARN_ON_ONCE(!excl_cntrs))
3237                return;
3238
3239        xl = &excl_cntrs->states[tid];
3240
3241        xl->sched_started = true;
3242        /*
3243         * Lock shared state until we are done scheduling,
3244         * in intel_stop_scheduling(); this makes scheduling
3245         * appear as a transaction.
3246         */
3247        raw_spin_lock(&excl_cntrs->lock);
3248}
3249
3250static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
3251{
3252        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3253        struct event_constraint *c = cpuc->event_constraint[idx];
3254        struct intel_excl_states *xl;
3255        int tid = cpuc->excl_thread_id;
3256
3257        if (cpuc->is_fake || !is_ht_workaround_enabled())
3258                return;
3259
3260        if (WARN_ON_ONCE(!excl_cntrs))
3261                return;
3262
3263        if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
3264                return;
3265
3266        xl = &excl_cntrs->states[tid];
3267
3268        lockdep_assert_held(&excl_cntrs->lock);
3269
3270        if (c->flags & PERF_X86_EVENT_EXCL)
3271                xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
3272        else
3273                xl->state[cntr] = INTEL_EXCL_SHARED;
3274}
3275
3276static void
3277intel_stop_scheduling(struct cpu_hw_events *cpuc)
3278{
3279        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3280        struct intel_excl_states *xl;
3281        int tid = cpuc->excl_thread_id;
3282
3283        /*
3284         * nothing needed if in group validation mode
3285         */
3286        if (cpuc->is_fake || !is_ht_workaround_enabled())
3287                return;
3288        /*
3289         * no exclusion needed
3290         */
3291        if (WARN_ON_ONCE(!excl_cntrs))
3292                return;
3293
3294        xl = &excl_cntrs->states[tid];
3295
3296        xl->sched_started = false;
3297        /*
3298         * release shared state lock (acquired in intel_start_scheduling())
3299         */
3300        raw_spin_unlock(&excl_cntrs->lock);
3301}
3302
3303static struct event_constraint *
3304dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
3305{
3306        WARN_ON_ONCE(!cpuc->constraint_list);
3307
3308        if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
3309                struct event_constraint *cx;
3310
3311                /*
3312                 * grab pre-allocated constraint entry
3313                 */
3314                cx = &cpuc->constraint_list[idx];
3315
3316                /*
3317                 * initialize dynamic constraint
3318                 * with static constraint
3319                 */
3320                *cx = *c;
3321
3322                /*
3323                 * mark constraint as dynamic
3324                 */
3325                cx->flags |= PERF_X86_EVENT_DYNAMIC;
3326                c = cx;
3327        }
3328
3329        return c;
3330}
3331
3332static struct event_constraint *
3333intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
3334                           int idx, struct event_constraint *c)
3335{
3336        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3337        struct intel_excl_states *xlo;
3338        int tid = cpuc->excl_thread_id;
3339        int is_excl, i, w;
3340
3341        /*
3342         * validating a group does not require
3343         * enforcing cross-thread exclusion
3344         */
3345        if (cpuc->is_fake || !is_ht_workaround_enabled())
3346                return c;
3347
3348        /*
3349         * no exclusion needed
3350         */
3351        if (WARN_ON_ONCE(!excl_cntrs))
3352                return c;
3353
3354        /*
3355         * because we modify the constraint, we need
3356         * to make a copy. Static constraints come
3357         * from static const tables.
3358         *
3359         * only needed when constraint has not yet
3360         * been cloned (marked dynamic)
3361         */
3362        c = dyn_constraint(cpuc, c, idx);
3363
3364        /*
3365         * From here on, the constraint is dynamic.
3366         * Either it was just allocated above, or it
3367         * was allocated during an earlier invocation
3368         * of this function
3369         */
3370
3371        /*
3372         * state of sibling HT
3373         */
3374        xlo = &excl_cntrs->states[tid ^ 1];
3375
3376        /*
3377         * event requires exclusive counter access
3378         * across HT threads
3379         */
3380        is_excl = c->flags & PERF_X86_EVENT_EXCL;
3381        if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
3382                event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
3383                if (!cpuc->n_excl++)
3384                        WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
3385        }
3386
3387        /*
3388         * Modify static constraint with current dynamic
3389         * state of thread
3390         *
3391         * EXCLUSIVE: sibling counter measuring exclusive event
3392         * SHARED   : sibling counter measuring non-exclusive event
3393         * UNUSED   : sibling counter unused
3394         */
3395        w = c->weight;
3396        for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
3397                /*
3398                 * exclusive event in sibling counter
3399                 * our corresponding counter cannot be used
3400                 * regardless of our event
3401                 */
3402                if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
3403                        __clear_bit(i, c->idxmsk);
3404                        w--;
3405                        continue;
3406                }
3407                /*
3408                 * if measuring an exclusive event while the sibling
3409                 * is measuring a non-exclusive one, the counter
3410                 * cannot be used
3411                 */
3412                if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
3413                        __clear_bit(i, c->idxmsk);
3414                        w--;
3415                        continue;
3416                }
3417        }
3418
3419        /*
3420         * if we return an empty mask, then switch
3421         * back to static empty constraint to avoid
3422         * the cost of freeing later on
3423         */
3424        if (!w)
3425                c = &emptyconstraint;
3426
3427        c->weight = w;
3428
3429        return c;
3430}
3431
3432static struct event_constraint *
3433intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3434                            struct perf_event *event)
3435{
3436        struct event_constraint *c1, *c2;
3437
3438        c1 = cpuc->event_constraint[idx];
3439
3440        /*
3441         * first time only
3442         * - static constraint: no change across incremental scheduling calls
3443         * - dynamic constraint: handled by intel_get_excl_constraints()
3444         */
3445        c2 = __intel_get_event_constraints(cpuc, idx, event);
3446        if (c1) {
3447                WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
3448                bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
3449                c1->weight = c2->weight;
3450                c2 = c1;
3451        }
3452
3453        if (cpuc->excl_cntrs)
3454                return intel_get_excl_constraints(cpuc, event, idx, c2);
3455
3456        return c2;
3457}
3458
3459static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
3460                struct perf_event *event)
3461{
3462        struct hw_perf_event *hwc = &event->hw;
3463        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3464        int tid = cpuc->excl_thread_id;
3465        struct intel_excl_states *xl;
3466
3467        /*
3468         * nothing needed if in group validation mode
3469         */
3470        if (cpuc->is_fake)
3471                return;
3472
3473        if (WARN_ON_ONCE(!excl_cntrs))
3474                return;
3475
3476        if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
3477                hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
3478                if (!--cpuc->n_excl)
3479                        WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
3480        }
3481
3482        /*
3483         * If event was actually assigned, then mark the counter state as
3484         * unused now.
3485         */
3486        if (hwc->idx >= 0) {
3487                xl = &excl_cntrs->states[tid];
3488
3489                /*
3490                 * put_constraint may be called from x86_schedule_events()
3491                 * which already has the lock held, so make locking
3492                 * conditional here.
3493                 */
3494                if (!xl->sched_started)
3495                        raw_spin_lock(&excl_cntrs->lock);
3496
3497                xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
3498
3499                if (!xl->sched_started)
3500                        raw_spin_unlock(&excl_cntrs->lock);
3501        }
3502}
3503
3504static void
3505intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
3506                                        struct perf_event *event)
3507{
3508        struct hw_perf_event_extra *reg;
3509
3510        reg = &event->hw.extra_reg;
3511        if (reg->idx != EXTRA_REG_NONE)
3512                __intel_shared_reg_put_constraints(cpuc, reg);
3513
3514        reg = &event->hw.branch_reg;
3515        if (reg->idx != EXTRA_REG_NONE)
3516                __intel_shared_reg_put_constraints(cpuc, reg);
3517}
3518
3519static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
3520                                        struct perf_event *event)
3521{
3522        intel_put_shared_regs_event_constraints(cpuc, event);
3523
3524        /*
3525         * If the PMU has exclusive counter restrictions, then
3526         * all events are subject to them and must call the
3527         * put_excl_constraints() routine
3528         */
3529        if (cpuc->excl_cntrs)
3530                intel_put_excl_constraints(cpuc, event);
3531}
3532
3533static void intel_pebs_aliases_core2(struct perf_event *event)
3534{
3535        if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3536                /*
3537                 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3538                 * (0x003c) so that we can use it with PEBS.
3539                 *
3540                 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3541                 * PEBS capable. However we can use INST_RETIRED.ANY_P
3542                 * (0x00c0), which is a PEBS capable event, to get the same
3543                 * count.
3544                 *
3545                 * INST_RETIRED.ANY_P counts the number of cycles that retire
3546                 * CNTMASK instructions. By setting CNTMASK to a value (16)
3547                 * larger than the maximum number of instructions that can be
3548                 * retired per cycle (4) and then inverting the condition, we
3549                 * count all cycles that retire 16 or fewer instructions, which
3550                 * is every cycle.
3551                 *
3552                 * Thereby we gain a PEBS capable cycle counter.
3553                 */
3554                u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
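                /*
                 * For reference, the raw-event portion of this encoding is
                 * 0x108000c0 (cmask in bits 31:24, inv in bit 23, event
                 * select in bits 7:0), i.e. what the perf tool accepts as
                 * cpu/event=0xc0,inv,cmask=16/; the non-raw bits of the
                 * original config are merged back in below.
                 */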
3555
3556                alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3557                event->hw.config = alt_config;
3558        }
3559}
3560
3561static void intel_pebs_aliases_snb(struct perf_event *event)
3562{
3563        if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3564                /*
3565                 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3566                 * (0x003c) so that we can use it with PEBS.
3567                 *
3568                 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3569                 * PEBS capable. However we can use UOPS_RETIRED.ALL
3570                 * (0x01c2), which is a PEBS capable event, to get the same
3571                 * count.
3572                 *
3573                 * UOPS_RETIRED.ALL counts the number of cycles that retire
3574                 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
3575                 * larger than the maximum number of micro-ops that can be
3576                 * retired per cycle (4) and then inverting the condition, we
3577                 * count all cycles that retire 16 or fewer micro-ops, which
3578                 * is every cycle.
3579                 *
3580                 * Thereby we gain a PEBS capable cycle counter.
3581                 */
3582                u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
3583
3584                alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3585                event->hw.config = alt_config;
3586        }
3587}
3588
3589static void intel_pebs_aliases_precdist(struct perf_event *event)
3590{
3591        if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3592                /*
3593                 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3594                 * (0x003c) so that we can use it with PEBS.
3595                 *
3596                 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3597                 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
3598                 * (0x01c0), which is a PEBS capable event, to get the same
3599                 * count.
3600                 *
3601                 * The PREC_DIST event has special support to minimize sample
3602                 * shadowing effects. One drawback is that it can only
3603                 * be programmed on counter 1, but that seems like an
3604                 * acceptable trade-off.
3605                 */
3606                u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
3607
3608                alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3609                event->hw.config = alt_config;
3610        }
3611}
3612
3613static void intel_pebs_aliases_ivb(struct perf_event *event)
3614{
3615        if (event->attr.precise_ip < 3)
3616                return intel_pebs_aliases_snb(event);
3617        return intel_pebs_aliases_precdist(event);
3618}
3619
3620static void intel_pebs_aliases_skl(struct perf_event *event)
3621{
3622        if (event->attr.precise_ip < 3)
3623                return intel_pebs_aliases_core2(event);
3624        return intel_pebs_aliases_precdist(event);
3625}
3626
3627static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3628{
3629        unsigned long flags = x86_pmu.large_pebs_flags;
3630
3631        if (event->attr.use_clockid)
3632                flags &= ~PERF_SAMPLE_TIME;
3633        if (!event->attr.exclude_kernel)
3634                flags &= ~PERF_SAMPLE_REGS_USER;
3635        if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
3636                flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
3637        return flags;
3638}
3639
3640static int intel_pmu_bts_config(struct perf_event *event)
3641{
3642        struct perf_event_attr *attr = &event->attr;
3643
3644        if (unlikely(intel_pmu_has_bts(event))) {
3645                /* BTS is not supported by this architecture. */
3646                if (!x86_pmu.bts_active)
3647                        return -EOPNOTSUPP;
3648
3649                /* BTS is currently only allowed for user-mode. */
3650                if (!attr->exclude_kernel)
3651                        return -EOPNOTSUPP;
3652
3653                /* BTS is not allowed for precise events. */
3654                if (attr->precise_ip)
3655                        return -EOPNOTSUPP;
3656
3657                /* disallow bts if conflicting events are present */
3658                if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3659                        return -EBUSY;
3660
3661                event->destroy = hw_perf_lbr_event_destroy;
3662        }
3663
3664        return 0;
3665}
3666
3667static int core_pmu_hw_config(struct perf_event *event)
3668{
3669        int ret = x86_pmu_hw_config(event);
3670
3671        if (ret)
3672                return ret;
3673
3674        return intel_pmu_bts_config(event);
3675}
3676
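    /*
     * Highest topdown metric pseudo-encoding this PMU accepts: the metric
     * events are spaced 0x100 apart above INTEL_TD_METRIC_RETIRING, so
     * with four topdown events only the level-1 metrics fit, while eight
     * events also cover the level-2 metrics.
     */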
3677#define INTEL_TD_METRIC_AVAILABLE_MAX   (INTEL_TD_METRIC_RETIRING + \
3678                                         ((x86_pmu.num_topdown_events - 1) << 8))
3679
3680static bool is_available_metric_event(struct perf_event *event)
3681{
3682        return is_metric_event(event) &&
3683                event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX;
3684}
3685
3686static inline bool is_mem_loads_event(struct perf_event *event)
3687{
3688        return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01);
3689}
3690
3691static inline bool is_mem_loads_aux_event(struct perf_event *event)
3692{
3693        return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82);
3694}
3695
3696static inline bool require_mem_loads_aux_event(struct perf_event *event)
3697{
3698        if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX))
3699                return false;
3700
3701        if (is_hybrid())
3702                return hybrid_pmu(event->pmu)->cpu_type == hybrid_big;
3703
3704        return true;
3705}
3706
3707static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
3708{
3709        union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap);
3710
3711        return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
3712}
3713
3714static int intel_pmu_hw_config(struct perf_event *event)
3715{
3716        int ret = x86_pmu_hw_config(event);
3717
3718        if (ret)
3719                return ret;
3720
3721        ret = intel_pmu_bts_config(event);
3722        if (ret)
3723                return ret;
3724
3725        if (event->attr.precise_ip) {
3726                if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
3727                        return -EINVAL;
3728
3729                if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
3730                        event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
3731                        if (!(event->attr.sample_type &
3732                              ~intel_pmu_large_pebs_flags(event))) {
3733                                event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
3734                                event->attach_state |= PERF_ATTACH_SCHED_CB;
3735                        }
3736                }
3737                if (x86_pmu.pebs_aliases)
3738                        x86_pmu.pebs_aliases(event);
3739
3740                if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3741                        event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
3742        }
3743
3744        if (needs_branch_stack(event)) {
3745                ret = intel_pmu_setup_lbr_filter(event);
3746                if (ret)
3747                        return ret;
3748                event->attach_state |= PERF_ATTACH_SCHED_CB;
3749
3750                /*
3751                 * BTS is set up earlier in this path, so don't account twice
3752                 */
3753                if (!unlikely(intel_pmu_has_bts(event))) {
3754                        /* disallow lbr if conflicting events are present */
3755                        if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3756                                return -EBUSY;
3757
3758                        event->destroy = hw_perf_lbr_event_destroy;
3759                }
3760        }
3761
3762        if (event->attr.aux_output) {
3763                if (!event->attr.precise_ip)
3764                        return -EINVAL;
3765
3766                event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
3767        }
3768
3769        if ((event->attr.type == PERF_TYPE_HARDWARE) ||
3770            (event->attr.type == PERF_TYPE_HW_CACHE))
3771                return 0;
3772
3773        /*
3774         * Config Topdown slots and metric events
3775         *
3776         * The slots event on Fixed Counter 3 can support sampling,
3777         * which will be handled normally in x86_perf_event_update().
3778         *
3779         * Metric events don't support sampling and require being paired
3780         * with a slots event as group leader. When the slots event
3781         * is used in a metrics group, it too cannot support sampling.
3782         */
3783        if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) {
3784                if (event->attr.config1 || event->attr.config2)
3785                        return -EINVAL;
3786
3787                /*
3788                 * The TopDown metrics events and slots event don't
3789                 * support any filters.
3790                 */
3791                if (event->attr.config & X86_ALL_EVENT_FLAGS)
3792                        return -EINVAL;
3793
3794                if (is_available_metric_event(event)) {
3795                        struct perf_event *leader = event->group_leader;
3796
3797                        /* The metric events don't support sampling. */
3798                        if (is_sampling_event(event))
3799                                return -EINVAL;
3800
3801                        /* The metric events require a slots group leader. */
3802                        if (!is_slots_event(leader))
3803                                return -EINVAL;
3804
3805                        /*
3806                         * The leader/SLOTS must not be a sampling event for
3807                         * metric use; hardware requires that it start at 0 when
3808                         * used in conjunction with MSR_PERF_METRICS.
3809                         */
3810                        if (is_sampling_event(leader))
3811                                return -EINVAL;
3812
3813                        event->event_caps |= PERF_EV_CAP_SIBLING;
3814                        /*
3815                         * Only once we have a METRICs sibling do we
3816                         * need TopDown magic.
3817                         */
3818                        leader->hw.flags |= PERF_X86_EVENT_TOPDOWN;
3819                        event->hw.flags  |= PERF_X86_EVENT_TOPDOWN;
3820                }
3821        }
3822
3823        /*
3824         * The load latency event X86_CONFIG(.event=0xcd, .umask=0x01) on SPR
3825         * doesn't function quite right. As a work-around it needs to always be
3826         * co-scheduled with an auxiliary event X86_CONFIG(.event=0x03, .umask=0x82).
3827         * The actual count of this second event is irrelevant; it just needs
3828         * to be active to make the first event function correctly.
3829         *
3830         * In a group, the auxiliary event must be in front of the load latency
3831         * event. This rule simplifies the implementation of the check,
3832         * because perf may not yet see the complete group at this point.
3833         */
3834        if (require_mem_loads_aux_event(event) &&
3835            (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) &&
3836            is_mem_loads_event(event)) {
3837                struct perf_event *leader = event->group_leader;
3838                struct perf_event *sibling = NULL;
3839
3840                if (!is_mem_loads_aux_event(leader)) {
3841                        for_each_sibling_event(sibling, leader) {
3842                                if (is_mem_loads_aux_event(sibling))
3843                                        break;
3844                        }
3845                        if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list))
3846                                return -ENODATA;
3847                }
3848        }
3849
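        /*
         * Everything below only applies to events that request the
         * ANY-thread bit: it requires architectural perfmon v3 or later
         * and must pass the perf_allow_cpu() permission check.
         */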
3850        if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
3851                return 0;
3852
3853        if (x86_pmu.version < 3)
3854                return -EINVAL;
3855
3856        ret = perf_allow_cpu(&event->attr);
3857        if (ret)
3858                return ret;
3859
3860        event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
3861
3862        return 0;
3863}
3864
3865static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
3866{
3867        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3868        struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3869        u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
3870
3871        arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
3872        arr[0].host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
3873        arr[0].guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask;
3874        if (x86_pmu.flags & PMU_FL_PEBS_ALL)
3875                arr[0].guest &= ~cpuc->pebs_enabled;
3876        else
3877                arr[0].guest &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
3878        *nr = 1;
3879
3880        if (x86_pmu.pebs && x86_pmu.pebs_no_isolation) {
3881                /*
3882                 * If a PMU counter has PEBS enabled, it is not enough to
3883                 * disable the counter on guest entry, since a PEBS memory
3884                 * write can overshoot guest entry and corrupt guest
3885                 * memory. Disabling PEBS solves the problem.
3886                 *
3887                 * Don't do this if the CPU already enforces it.
3888                 */
3889                arr[1].msr = MSR_IA32_PEBS_ENABLE;
3890                arr[1].host = cpuc->pebs_enabled;
3891                arr[1].guest = 0;
3892                *nr = 2;
3893        }
3894
3895        return arr;
3896}
3897
3898static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
3899{
3900        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3901        struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3902        int idx;
3903
3904        for (idx = 0; idx < x86_pmu.num_counters; idx++)  {
3905                struct perf_event *event = cpuc->events[idx];
3906
3907                arr[idx].msr = x86_pmu_config_addr(idx);
3908                arr[idx].host = arr[idx].guest = 0;
3909
3910                if (!test_bit(idx, cpuc->active_mask))
3911                        continue;
3912
3913                arr[idx].host = arr[idx].guest =
3914                        event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
3915
3916                if (event->attr.exclude_host)
3917                        arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3918                else if (event->attr.exclude_guest)
3919                        arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3920        }
3921
3922        *nr = x86_pmu.num_counters;
3923        return arr;
3924}
3925
3926static void core_pmu_enable_event(struct perf_event *event)
3927{
3928        if (!event->attr.exclude_host)
3929                x86_pmu_enable_event(event);
3930}
3931
3932static void core_pmu_enable_all(int added)
3933{
3934        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3935        int idx;
3936
3937        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3938                struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
3939
3940                if (!test_bit(idx, cpuc->active_mask) ||
3941                                cpuc->events[idx]->attr.exclude_host)
3942                        continue;
3943
3944                __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
3945        }
3946}
3947
3948static int hsw_hw_config(struct perf_event *event)
3949{
3950        int ret = intel_pmu_hw_config(event);
3951
3952        if (ret)
3953                return ret;
3954        if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
3955                return 0;
3956        event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
3957
3958        /*
3959         * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
3960         * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
3961         * this combination.
3962         */
3963        if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
3964             ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
3965              event->attr.precise_ip > 0))
3966                return -EOPNOTSUPP;
3967
3968        if (event_is_checkpointed(event)) {
3969                /*
3970                 * Sampling of checkpointed events can cause situations where
3971                 * the CPU constantly aborts because of an overflow, which is
3972                 * then checkpointed back and ignored. Forbid checkpointing
3973                 * for sampling.
3974                 *
3975                 * But still allow a long sampling period, so that perf stat
3976                 * from KVM works.
3977                 */
3978                if (event->attr.sample_period > 0 &&
3979                    event->attr.sample_period < 0x7fffffff)
3980                        return -EOPNOTSUPP;
3981        }
3982        return 0;
3983}
3984
3985static struct event_constraint counter0_constraint =
3986                        INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
3987
3988static struct event_constraint counter2_constraint =
3989                        EVENT_CONSTRAINT(0, 0x4, 0);
3990
3991static struct event_constraint fixed0_constraint =
3992                        FIXED_EVENT_CONSTRAINT(0x00c0, 0);
3993
3994static struct event_constraint fixed0_counter0_constraint =
3995                        INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
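    /*
     * The counter masks above are, respectively: GP counter 0 only (0x1),
     * GP counter 2 only (0x4), fixed counter 0 (INST_RETIRED.ANY), and
     * either GP counter 0 or fixed counter 0 (bit 32, i.e.
     * INTEL_PMC_IDX_FIXED + 0) for 0x100000001ULL.
     */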
3996
3997static struct event_constraint *
3998hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3999                          struct perf_event *event)
4000{
4001        struct event_constraint *c;
4002
4003        c = intel_get_event_constraints(cpuc, idx, event);
4004
4005        /* Handle special quirk on in_tx_checkpointed only in counter 2 */
4006        if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
4007                if (c->idxmsk64 & (1U << 2))
4008                        return &counter2_constraint;
4009                return &emptyconstraint;
4010        }
4011
4012        return c;
4013}
4014
4015static struct event_constraint *
4016icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4017                          struct perf_event *event)
4018{
4019        /*
4020         * Fixed counter 0 has less skid.
4021         * Force instruction:ppp in Fixed counter 0
4022         */
4023        if ((event->attr.precise_ip == 3) &&
4024            constraint_match(&fixed0_constraint, event->hw.config))
4025                return &fixed0_constraint;
4026
4027        return hsw_get_event_constraints(cpuc, idx, event);
4028}
4029
4030static struct event_constraint *
4031spr_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4032                          struct perf_event *event)
4033{
4034        struct event_constraint *c;
4035
4036        c = icl_get_event_constraints(cpuc, idx, event);
4037
4038        /*
4039         * The :ppp indicates the Precise Distribution (PDist) facility, which
4040         * is only supported on GP counter 0. If a :ppp event cannot be
4041         * scheduled on GP counter 0, error out.
4042         * Exception: Instruction PDIR is only available on the fixed counter 0.
4043         */
4044        if ((event->attr.precise_ip == 3) &&
4045            !constraint_match(&fixed0_constraint, event->hw.config)) {
4046                if (c->idxmsk64 & BIT_ULL(0))
4047                        return &counter0_constraint;
4048
4049                return &emptyconstraint;
4050        }
4051
4052        return c;
4053}
4054
4055static struct event_constraint *
4056glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4057                          struct perf_event *event)
4058{
4059        struct event_constraint *c;
4060
4061        /* :ppp means to do reduced skid PEBS which is PMC0 only. */
4062        if (event->attr.precise_ip == 3)
4063                return &counter0_constraint;
4064
4065        c = intel_get_event_constraints(cpuc, idx, event);
4066
4067        return c;
4068}
4069
4070static struct event_constraint *
4071tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4072                          struct perf_event *event)
4073{
4074        struct event_constraint *c;
4075
4076        /*
4077         * :ppp means to do reduced skid PEBS,
4078         * which is available on PMC0 and fixed counter 0.
4079         */
4080        if (event->attr.precise_ip == 3) {
4081                /* Force instruction:ppp on PMC0 and Fixed counter 0 */
4082                if (constraint_match(&fixed0_constraint, event->hw.config))
4083                        return &fixed0_counter0_constraint;
4084
4085                return &counter0_constraint;
4086        }
4087
4088        c = intel_get_event_constraints(cpuc, idx, event);
4089
4090        return c;
4091}
4092
4093static bool allow_tsx_force_abort = true;
4094
4095static struct event_constraint *
4096tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4097                          struct perf_event *event)
4098{
4099        struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
4100
4101        /*
4102         * Without TFA we must not use PMC3.
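         * Using PMC3 on the affected parts requires forcing all TSX
         * transactions to abort; when that trade-off is not allowed via
         * allow_tsx_force_abort, PMC3 is dropped from the constraint mask
         * below instead.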
4103         */
4104        if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
4105                c = dyn_constraint(cpuc, c, idx);
4106                c->idxmsk64 &= ~(1ULL << 3);
4107                c->weight--;
4108        }
4109
4110        return c;
4111}
4112
4113static struct event_constraint *
4114adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4115                          struct perf_event *event)
4116{
4117        struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4118
4119        if (pmu->cpu_type == hybrid_big)
4120                return spr_get_event_constraints(cpuc, idx, event);
4121        else if (pmu->cpu_type == hybrid_small)
4122                return tnt_get_event_constraints(cpuc, idx, event);
4123
4124        WARN_ON(1);
4125        return &emptyconstraint;
4126}
4127
4128static int adl_hw_config(struct perf_event *event)
4129{
4130        struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4131
4132        if (pmu->cpu_type == hybrid_big)
4133                return hsw_hw_config(event);
4134        else if (pmu->cpu_type == hybrid_small)
4135                return intel_pmu_hw_config(event);
4136
4137        WARN_ON(1);
4138        return -EOPNOTSUPP;
4139}
4140
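    /*
     * Fallback used via x86_pmu.get_hybrid_cpu_type when CPUID does not
     * report a core type for this CPU (see init_hybrid_pmu()); such cores
     * are treated as big cores on Alder Lake.
     */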
4141static u8 adl_get_hybrid_cpu_type(void)
4142{
4143        return hybrid_big;
4144}
4145
4146/*
4147 * Broadwell:
4148 *
4149 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
4150 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
4151 * the two to enforce a minimum period of 128 (the smallest value that has bits
4152 * 0-5 cleared and >= 100).
4153 *
4154 * Because of how the code in x86_perf_event_set_period() works, the truncation
4155 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
4156 * to make up for the 'lost' events due to carrying the 'error' in period_left.
4157 *
4158 * Therefore the effective (average) period matches the requested period,
4159 * despite coarser hardware granularity.
4160 */
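    /*
     * For example: a requested period of 100 is first raised to the minimum
     * of 128 and is unchanged by clearing the low six bits, while a requested
     * period of 200 already meets the minimum and becomes 200 & ~0x3f = 192.
     */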
4161static u64 bdw_limit_period(struct perf_event *event, u64 left)
4162{
4163        if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
4164                        X86_CONFIG(.event=0xc0, .umask=0x01)) {
4165                if (left < 128)
4166                        left = 128;
4167                left &= ~0x3fULL;
4168        }
4169        return left;
4170}
4171
4172static u64 nhm_limit_period(struct perf_event *event, u64 left)
4173{
4174        return max(left, 32ULL);
4175}
4176
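    /*
     * Sapphire Rapids: enforce a minimum sample period of 128 for :ppp
     * (PDist) events; see spr_get_event_constraints() above.
     */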
4177static u64 spr_limit_period(struct perf_event *event, u64 left)
4178{
4179        if (event->attr.precise_ip == 3)
4180                return max(left, 128ULL);
4181
4182        return left;
4183}
4184
4185PMU_FORMAT_ATTR(event,  "config:0-7"    );
4186PMU_FORMAT_ATTR(umask,  "config:8-15"   );
4187PMU_FORMAT_ATTR(edge,   "config:18"     );
4188PMU_FORMAT_ATTR(pc,     "config:19"     );
4189PMU_FORMAT_ATTR(any,    "config:21"     ); /* v3 + */
4190PMU_FORMAT_ATTR(inv,    "config:23"     );
4191PMU_FORMAT_ATTR(cmask,  "config:24-31"  );
4192PMU_FORMAT_ATTR(in_tx,  "config:32");
4193PMU_FORMAT_ATTR(in_tx_cp, "config:33");
4194
4195static struct attribute *intel_arch_formats_attr[] = {
4196        &format_attr_event.attr,
4197        &format_attr_umask.attr,
4198        &format_attr_edge.attr,
4199        &format_attr_pc.attr,
4200        &format_attr_inv.attr,
4201        &format_attr_cmask.attr,
4202        NULL,
4203};
4204
4205ssize_t intel_event_sysfs_show(char *page, u64 config)
4206{
4207        u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
4208
4209        return x86_event_sysfs_show(page, config, event);
4210}
4211
4212static struct intel_shared_regs *allocate_shared_regs(int cpu)
4213{
4214        struct intel_shared_regs *regs;
4215        int i;
4216
4217        regs = kzalloc_node(sizeof(struct intel_shared_regs),
4218                            GFP_KERNEL, cpu_to_node(cpu));
4219        if (regs) {
4220                /*
4221                 * initialize the locks to keep lockdep happy
4222                 */
4223                for (i = 0; i < EXTRA_REG_MAX; i++)
4224                        raw_spin_lock_init(&regs->regs[i].lock);
4225
4226                regs->core_id = -1;
4227        }
4228        return regs;
4229}
4230
4231static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
4232{
4233        struct intel_excl_cntrs *c;
4234
4235        c = kzalloc_node(sizeof(struct intel_excl_cntrs),
4236                         GFP_KERNEL, cpu_to_node(cpu));
4237        if (c) {
4238                raw_spin_lock_init(&c->lock);
4239                c->core_id = -1;
4240        }
4241        return c;
4242}
4243
4244
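    /*
     * Allocate the per-CPU state the Intel PMU may need: shared extra
     * registers (also used for LBR select), the dynamic constraint list for
     * the exclusive-counter and TFA paths, and the cross-thread exclusive
     * counter bookkeeping.
     */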
4245int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
4246{
4247        cpuc->pebs_record_size = x86_pmu.pebs_record_size;
4248
4249        if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
4250                cpuc->shared_regs = allocate_shared_regs(cpu);
4251                if (!cpuc->shared_regs)
4252                        goto err;
4253        }
4254
4255        if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
4256                size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
4257
4258                cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
4259                if (!cpuc->constraint_list)
4260                        goto err_shared_regs;
4261        }
4262
4263        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4264                cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
4265                if (!cpuc->excl_cntrs)
4266                        goto err_constraint_list;
4267
4268                cpuc->excl_thread_id = 0;
4269        }
4270
4271        return 0;
4272
4273err_constraint_list:
4274        kfree(cpuc->constraint_list);
4275        cpuc->constraint_list = NULL;
4276
4277err_shared_regs:
4278        kfree(cpuc->shared_regs);
4279        cpuc->shared_regs = NULL;
4280
4281err:
4282        return -ENOMEM;
4283}
4284
4285static int intel_pmu_cpu_prepare(int cpu)
4286{
4287        return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
4288}
4289
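    /*
     * Set or clear the FREEZE_IN_SMM bit in IA32_DEBUGCTL so the performance
     * counters freeze while the CPU is in SMM. Run on each CPU when the
     * freeze_on_smi attribute changes and when a CPU comes online.
     */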
4290static void flip_smm_bit(void *data)
4291{
4292        unsigned long set = *(unsigned long *)data;
4293
4294        if (set > 0) {
4295                msr_set_bit(MSR_IA32_DEBUGCTLMSR,
4296                            DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
4297        } else {
4298                msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
4299                              DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
4300        }
4301}
4302
4303static bool init_hybrid_pmu(int cpu)
4304{
4305        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4306        u8 cpu_type = get_this_hybrid_cpu_type();
4307        struct x86_hybrid_pmu *pmu = NULL;
4308        int i;
4309
4310        if (!cpu_type && x86_pmu.get_hybrid_cpu_type)
4311                cpu_type = x86_pmu.get_hybrid_cpu_type();
4312
4313        for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
4314                if (x86_pmu.hybrid_pmu[i].cpu_type == cpu_type) {
4315                        pmu = &x86_pmu.hybrid_pmu[i];
4316                        break;
4317                }
4318        }
4319        if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) {
4320                cpuc->pmu = NULL;
4321                return false;
4322        }
4323
4324        /* Only check and dump the PMU information for the first CPU */
4325        if (!cpumask_empty(&pmu->supported_cpus))
4326                goto end;
4327
4328        if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed))
4329                return false;
4330
4331        pr_info("%s PMU driver: ", pmu->name);
4332
4333        if (pmu->intel_cap.pebs_output_pt_available)
4334                pr_cont("PEBS-via-PT ");
4335
4336        pr_cont("\n");
4337
4338        x86_pmu_show_pmu_cap(pmu->num_counters, pmu->num_counters_fixed,
4339                             pmu->intel_ctrl);
4340
4341end:
4342        cpumask_set_cpu(cpu, &pmu->supported_cpus);
4343        cpuc->pmu = &pmu->pmu;
4344
4345        x86_pmu_update_cpu_context(&pmu->pmu, cpu);
4346
4347        return true;
4348}
4349
4350static void intel_pmu_cpu_starting(int cpu)
4351{
4352        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4353        int core_id = topology_core_id(cpu);
4354        int i;
4355
4356        if (is_hybrid() && !init_hybrid_pmu(cpu))
4357                return;
4358
4359        init_debug_store_on_cpu(cpu);
4360        /*
4361         * Deal with CPUs that don't clear their LBRs on power-up.
4362         */
4363        intel_pmu_lbr_reset();
4364
4365        cpuc->lbr_sel = NULL;
4366
4367        if (x86_pmu.flags & PMU_FL_TFA) {
4368                WARN_ON_ONCE(cpuc->tfa_shadow);
4369                cpuc->tfa_shadow = ~0ULL;
4370                intel_set_tfa(cpuc, false);
4371        }
4372
4373        if (x86_pmu.version > 1)
4374                flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
4375
4376        /*
4377         * Disable perf metrics if any added CPU doesn't support it.
4378         *
4379         * Skip the check on a hybrid architecture, because the architectural
4380         * MSR, MSR_IA32_PERF_CAPABILITIES, only indicates architectural
4381         * features. Perf metrics is a model-specific feature for now, so the
4382         * corresponding bit should always be 0 on a hybrid platform, e.g.
4383         * Alder Lake.
4384         */
4385        if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) {
4386                union perf_capabilities perf_cap;
4387
4388                rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
4389                if (!perf_cap.perf_metrics) {
4390                        x86_pmu.intel_cap.perf_metrics = 0;
4391                        x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
4392                }
4393        }
4394
4395        if (!cpuc->shared_regs)
4396                return;
4397
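            /*
             * Unless HT sharing is disabled, reuse the intel_shared_regs
             * already set up by a sibling thread on the same core; the
             * redundant local allocation is parked in kfree_on_online[] so it
             * can be freed once this CPU has finished coming online.
             */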
4398        if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
4399                for_each_cpu(i, topology_sibling_cpumask(cpu)) {
4400                        struct intel_shared_regs *pc;
4401
4402                        pc = per_cpu(cpu_hw_events, i).shared_regs;
4403                        if (pc && pc->core_id == core_id) {
4404                                cpuc->kfree_on_online[0] = cpuc->shared_regs;
4405                                cpuc->shared_regs = pc;
4406                                break;
4407                        }
4408                }
4409                cpuc->shared_regs->core_id = core_id;
4410                cpuc->shared_regs->refcnt++;
4411        }
4412
4413        if (x86_pmu.lbr_sel_map)
4414                cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
4415
4416        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4417                for_each_cpu(i, topology_sibling_cpumask(cpu)) {
4418                        struct cpu_hw_events *sibling;
4419                        struct intel_excl_cntrs *c;
4420
4421                        sibling = &per_cpu(cpu_hw_events, i);
4422                        c = sibling->excl_cntrs;
4423                        if (c && c->core_id == core_id) {
4424                                cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
4425                                cpuc->excl_cntrs = c;
4426                                if (!sibling->excl_thread_id)
4427                                        cpuc->excl_thread_id = 1;
4428                                break;
4429                        }
4430                }
4431                cpuc->excl_cntrs->core_id = core_id;
4432                cpuc->excl_cntrs->refcnt++;
4433        }
4434}
4435
4436static void free_excl_cntrs(struct cpu_hw_events *cpuc)
4437{
4438        struct intel_excl_cntrs *c;
4439
4440        c = cpuc->excl_cntrs;
4441        if (c) {
4442                if (c->core_id == -1 || --c->refcnt == 0)
4443                        kfree(c);
4444                cpuc->excl_cntrs = NULL;
4445        }
4446
4447        kfree(cpuc->constraint_list);
4448        cpuc->constraint_list = NULL;
4449}
4450
4451static void intel_pmu_cpu_dying(int cpu)
4452{
4453        fini_debug_store_on_cpu(cpu);
4454}
4455
4456void intel_cpuc_finish(struct cpu_hw_events *cpuc)
4457{
4458        struct intel_shared_regs *pc;
4459
4460        pc = cpuc->shared_regs;
4461        if (pc) {
4462                if (pc->core_id == -1 || --pc->refcnt == 0)
4463                        kfree(pc);
4464                cpuc->shared_regs = NULL;
4465        }
4466
4467        free_excl_cntrs(cpuc);
4468}
4469
4470static void intel_pmu_cpu_dead(int cpu)
4471{
4472        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4473
4474        intel_cpuc_finish(cpuc);
4475
4476        if (is_hybrid() && cpuc->pmu)
4477                cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus);
4478}
4479
4480static void intel_pmu_sched_task(struct perf_event_context *ctx,
4481                                 bool sched_in)
4482{
4483        intel_pmu_pebs_sched_task(ctx, sched_in);
4484        intel_pmu_lbr_sched_task(ctx, sched_in);
4485}
4486
4487static void intel_pmu_swap_task_ctx(struct perf_event_context *prev,
4488                                    struct perf_event_context *next)
4489{
4490        intel_pmu_lbr_swap_task_ctx(prev, next);
4491}
4492
4493static int intel_pmu_check_period(struct perf_event *event, u64 value)
4494{
4495        return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
4496}
4497
4498static int intel_pmu_aux_output_match(struct perf_event *event)
4499{
4500        if (!x86_pmu.intel_cap.pebs_output_pt_available)
4501                return 0;
4502
4503        return is_intel_pt_event(event);
4504}
4505
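    /*
     * On a hybrid system an event is only valid on the CPUs that belong to
     * its PMU's core type; reject it everywhere else.
     */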
4506static int intel_pmu_filter_match(struct perf_event *event)
4507{
4508        struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4509        unsigned int cpu = smp_processor_id();
4510
4511        return cpumask_test_cpu(cpu, &pmu->supported_cpus);
4512}
4513
4514PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
4515
4516PMU_FORMAT_ATTR(ldlat, "config1:0-15");
4517
4518PMU_FORMAT_ATTR(frontend, "config1:0-23");
4519
4520static struct attribute *intel_arch3_formats_attr[] = {
4521        &format_attr_event.attr,
4522        &format_attr_umask.attr,
4523        &format_attr_edge.attr,
4524        &format_attr_pc.attr,
4525        &format_attr_any.attr,
4526        &format_attr_inv.attr,
4527        &format_attr_cmask.attr,
4528        NULL,
4529};
4530
4531static struct attribute *hsw_format_attr[] = {
4532        &format_attr_in_tx.attr,
4533        &format_attr_in_tx_cp.attr,
4534        &format_attr_offcore_rsp.attr,
4535        &format_attr_ldlat.attr,
4536        NULL
4537};
4538
4539static struct attribute *nhm_format_attr[] = {
4540        &format_attr_offcore_rsp.attr,
4541        &format_attr_ldlat.attr,
4542        NULL
4543};
4544
4545static struct attribute *slm_format_attr[] = {
4546        &format_attr_offcore_rsp.attr,
4547        NULL
4548};
4549
4550static struct attribute *skl_format_attr[] = {
4551        &format_attr_frontend.attr,
4552        NULL,
4553};
4554
4555static __initconst const struct x86_pmu core_pmu = {
4556        .name                   = "core",
4557        .handle_irq             = x86_pmu_handle_irq,
4558        .disable_all            = x86_pmu_disable_all,
4559        .enable_all             = core_pmu_enable_all,
4560        .enable                 = core_pmu_enable_event,
4561        .disable                = x86_pmu_disable_event,
4562        .hw_config              = core_pmu_hw_config,
4563        .schedule_events        = x86_schedule_events,
4564        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
4565        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
4566        .event_map              = intel_pmu_event_map,
4567        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
4568        .apic                   = 1,
4569        .large_pebs_flags       = LARGE_PEBS_FLAGS,
4570
4571        /*
4572         * Intel PMCs cannot be accessed sanely above 32-bit width,
4573         * so we install an artificial 1<<31 period regardless of
4574         * the generic event period:
4575         */
4576        .max_period             = (1ULL<<31) - 1,
4577        .get_event_constraints  = intel_get_event_constraints,
4578        .put_event_constraints  = intel_put_event_constraints,
4579        .event_constraints      = intel_core_event_constraints,
4580        .guest_get_msrs         = core_guest_get_msrs,
4581        .format_attrs           = intel_arch_formats_attr,
4582        .events_sysfs_show      = intel_event_sysfs_show,
4583
4584        /*
4585         * A virtual (or funny metal) CPU can define x86_pmu.extra_regs
4586         * together with PMU version 1 and thus use core_pmu with
4587         * shared_regs. We need the following callbacks here to allocate
4588         * it properly.
4589         */
4590        .cpu_prepare            = intel_pmu_cpu_prepare,
4591        .cpu_starting           = intel_pmu_cpu_starting,
4592        .cpu_dying              = intel_pmu_cpu_dying,
4593        .cpu_dead               = intel_pmu_cpu_dead,
4594
4595        .check_period           = intel_pmu_check_period,
4596
4597        .lbr_reset              = intel_pmu_lbr_reset_64,
4598        .lbr_read               = intel_pmu_lbr_read_64,
4599        .lbr_save               = intel_pmu_lbr_save,
4600        .lbr_restore            = intel_pmu_lbr_restore,
4601};
4602
4603static __initconst const struct x86_pmu intel_pmu = {
4604        .name                   = "Intel",
4605        .handle_irq             = intel_pmu_handle_irq,
4606        .disable_all            = intel_pmu_disable_all,
4607        .enable_all             = intel_pmu_enable_all,
4608        .enable                 = intel_pmu_enable_event,
4609        .disable                = intel_pmu_disable_event,
4610        .add                    = intel_pmu_add_event,
4611        .del                    = intel_pmu_del_event,
4612        .read                   = intel_pmu_read_event,
4613        .hw_config              = intel_pmu_hw_config,
4614        .schedule_events        = x86_schedule_events,
4615        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
4616        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
4617        .event_map              = intel_pmu_event_map,
4618        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
4619        .apic                   = 1,
4620        .large_pebs_flags       = LARGE_PEBS_FLAGS,
4621        /*
4622         * Intel PMCs cannot be accessed sanely above 32 bit width,
4623         * so we install an artificial 1<<31 period regardless of
4624         * the generic event period:
4625         */
4626        .max_period             = (1ULL << 31) - 1,
4627        .get_event_constraints  = intel_get_event_constraints,
4628        .put_event_constraints  = intel_put_event_constraints,
4629        .pebs_aliases           = intel_pebs_aliases_core2,
4630
4631        .format_attrs           = intel_arch3_formats_attr,
4632        .events_sysfs_show      = intel_event_sysfs_show,
4633
4634        .cpu_prepare            = intel_pmu_cpu_prepare,
4635        .cpu_starting           = intel_pmu_cpu_starting,
4636        .cpu_dying              = intel_pmu_cpu_dying,
4637        .cpu_dead               = intel_pmu_cpu_dead,
4638
4639        .guest_get_msrs         = intel_guest_get_msrs,
4640        .sched_task             = intel_pmu_sched_task,
4641        .swap_task_ctx          = intel_pmu_swap_task_ctx,
4642
4643        .check_period           = intel_pmu_check_period,
4644
4645        .aux_output_match       = intel_pmu_aux_output_match,
4646
4647        .lbr_reset              = intel_pmu_lbr_reset_64,
4648        .lbr_read               = intel_pmu_lbr_read_64,
4649        .lbr_save               = intel_pmu_lbr_save,
4650        .lbr_restore            = intel_pmu_lbr_restore,
4651};
4652
4653static __init void intel_clovertown_quirk(void)
4654{
4655        /*
4656         * PEBS is unreliable due to:
4657         *
4658         *   AJ67  - PEBS may experience CPL leaks
4659         *   AJ68  - PEBS PMI may be delayed by one event
4660         *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
4661         *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
4662         *
4663         * AJ67 could be worked around by restricting the OS/USR flags.
4664         * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
4665         *
4666         * AJ106 could possibly be worked around by not allowing LBR
4667         *       usage from PEBS, including the fixup.
4668         * AJ68  could possibly be worked around by always programming
4669         *       a pebs_event_reset[0] value and coping with the lost events.
4670         *
4671         * But taken together it might just make sense to not enable PEBS on
4672         * these chips.
4673         */
4674        pr_warn("PEBS disabled due to CPU errata\n");
4675        x86_pmu.pebs = 0;
4676        x86_pmu.pebs_constraints = NULL;
4677}
4678
4679static const struct x86_cpu_desc isolation_ucodes[] = {
4680        INTEL_CPU_DESC(INTEL_FAM6_HASWELL,               3, 0x0000001f),
4681        INTEL_CPU_DESC(INTEL_FAM6_HASWELL_L,             1, 0x0000001e),
4682        INTEL_CPU_DESC(INTEL_FAM6_HASWELL_G,             1, 0x00000015),
4683        INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X,             2, 0x00000037),
4684        INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X,             4, 0x0000000a),
4685        INTEL_CPU_DESC(INTEL_FAM6_BROADWELL,             4, 0x00000023),
4686        INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_G,           1, 0x00000014),
4687        INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D,           2, 0x00000010),
4688        INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D,           3, 0x07000009),
4689        INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D,           4, 0x0f000009),
4690        INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D,           5, 0x0e000002),
4691        INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X,           1, 0x0b000014),
4692        INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,             3, 0x00000021),
4693        INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,             4, 0x00000000),
4694        INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,             5, 0x00000000),
4695        INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,             6, 0x00000000),
4696        INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,             7, 0x00000000),
4697        INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L,             3, 0x0000007c),
4698        INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE,               3, 0x0000007c),
4699        INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE,              9, 0x0000004e),
4700        INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L,            9, 0x0000004e),
4701        INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L,           10, 0x0000004e),
4702        INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L,           11, 0x0000004e),
4703        INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L,           12, 0x0000004e),
4704        INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE,             10, 0x0000004e),
4705        INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE,             11, 0x0000004e),
4706        INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE,             12, 0x0000004e),
4707        INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE,             13, 0x0000004e),
4708        {}
4709};
4710
4711static void intel_check_pebs_isolation(void)
4712{
4713        x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes);
4714}
4715
4716static __init void intel_pebs_isolation_quirk(void)
4717{
4718        WARN_ON_ONCE(x86_pmu.check_microcode);
4719        x86_pmu.check_microcode = intel_check_pebs_isolation;
4720        intel_check_pebs_isolation();
4721}
4722
4723static const struct x86_cpu_desc pebs_ucodes[] = {
4724        INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE,          7, 0x00000028),
4725        INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X,        6, 0x00000618),
4726        INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X,        7, 0x0000070c),
4727        {}
4728};
4729
4730static bool intel_snb_pebs_broken(void)
4731{
4732        return !x86_cpu_has_min_microcode_rev(pebs_ucodes);
4733}
4734
4735static void intel_snb_check_microcode(void)
4736{
4737        if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
4738                return;
4739
4740        /*
4741         * Serialized by the microcode lock.
4742         */
4743        if (x86_pmu.pebs_broken) {
4744                pr_info("PEBS enabled due to microcode update\n");
4745                x86_pmu.pebs_broken = 0;
4746        } else {
4747                pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
4748                x86_pmu.pebs_broken = 1;
4749        }
4750}
4751
4752static bool is_lbr_from(unsigned long msr)
4753{
4754        unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
4755
4756        return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
4757}
4758
4759/*
4760 * Under certain circumstances, accessing certain MSRs may cause a #GP fault.
4761 * This function tests whether the given MSR can be safely accessed.
4762 */
4763static bool check_msr(unsigned long msr, u64 mask)
4764{
4765        u64 val_old, val_new, val_tmp;
4766
4767        /*
4768         * Disable the check for real HW, so we don't
4769         * mess with potentially enabled registers:
4770         */
4771        if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
4772                return true;
4773
4774        /*
4775         * Read the current value, change it and read it back to see if it
4776         * matches; this is needed to detect certain hardware emulators
4777         * (qemu/kvm) that don't trap on the MSR access and always return 0s.
4778         */
4779        if (rdmsrl_safe(msr, &val_old))
4780                return false;
4781
4782        /*
4783         * Only change the bits which can be updated by wrmsrl.
4784         */
4785        val_tmp = val_old ^ mask;
4786
4787        if (is_lbr_from(msr))
4788                val_tmp = lbr_from_signext_quirk_wr(val_tmp);
4789
4790        if (wrmsrl_safe(msr, val_tmp) ||
4791            rdmsrl_safe(msr, &val_new))
4792                return false;
4793
4794        /*
4795         * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
4796         * should equal rdmsrl()'s even with the quirk.
4797         */
4798        if (val_new != val_tmp)
4799                return false;
4800
4801        if (is_lbr_from(msr))
4802                val_old = lbr_from_signext_quirk_wr(val_old);
4803
4804        /* At this point it is certain that the MSR can be safely accessed;
4805         * restore the old value and return.
4806         */
4807        wrmsrl(msr, val_old);
4808
4809        return true;
4810}
4811
4812static __init void intel_sandybridge_quirk(void)
4813{
4814        x86_pmu.check_microcode = intel_snb_check_microcode;
4815        cpus_read_lock();
4816        intel_snb_check_microcode();
4817        cpus_read_unlock();
4818}
4819
4820static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
4821        { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
4822        { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
4823        { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
4824        { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
4825        { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
4826        { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
4827        { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
4828};
4829
4830static __init void intel_arch_events_quirk(void)
4831{
4832        int bit;
4833
4834        /* disable events that CPUID reports as not present */
4835        for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
4836                intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
4837                pr_warn("CPUID marked event: \'%s\' unavailable\n",
4838                        intel_arch_events_map[bit].name);
4839        }
4840}
4841
4842static __init void intel_nehalem_quirk(void)
4843{
4844        union cpuid10_ebx ebx;
4845
4846        ebx.full = x86_pmu.events_maskl;
4847        if (ebx.split.no_branch_misses_retired) {
4848                /*
4849                 * Erratum AAJ80 detected, we work it around by using
4850                 * the BR_MISP_EXEC.ANY event. This will over-count
4851                 * branch-misses, but it's still much better than the
4852                 * architectural event which is often completely bogus:
4853                 */
4854                intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
4855                ebx.split.no_branch_misses_retired = 0;
4856                x86_pmu.events_maskl = ebx.full;
4857                pr_info("CPU erratum AAJ80 worked around\n");
4858        }
4859}
4860
4861/*
4862 * enable software workaround for errata:
4863 * SNB: BJ122
4864 * IVB: BV98
4865 * HSW: HSD29
4866 *
4867 * Only needed when HT is enabled. However, detecting whether HT is
4868 * enabled is difficult (it is model specific), so instead we enable
4869 * the workaround during early boot and verify whether it is needed
4870 * in a later initcall phase, once valid topology information is
4871 * available to check whether HT is actually enabled.
4872 */
4873static __init void intel_ht_bug(void)
4874{
4875        x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
4876
4877        x86_pmu.start_scheduling = intel_start_scheduling;
4878        x86_pmu.commit_scheduling = intel_commit_scheduling;
4879        x86_pmu.stop_scheduling = intel_stop_scheduling;
4880}
4881
4882EVENT_ATTR_STR(mem-loads,       mem_ld_hsw,     "event=0xcd,umask=0x1,ldlat=3");
4883EVENT_ATTR_STR(mem-stores,      mem_st_hsw,     "event=0xd0,umask=0x82");
4884
4885/* Haswell special events */
4886EVENT_ATTR_STR(tx-start,        tx_start,       "event=0xc9,umask=0x1");
4887EVENT_ATTR_STR(tx-commit,       tx_commit,      "event=0xc9,umask=0x2");
4888EVENT_ATTR_STR(tx-abort,        tx_abort,       "event=0xc9,umask=0x4");
4889EVENT_ATTR_STR(tx-capacity,     tx_capacity,    "event=0x54,umask=0x2");
4890EVENT_ATTR_STR(tx-conflict,     tx_conflict,    "event=0x54,umask=0x1");
4891EVENT_ATTR_STR(el-start,        el_start,       "event=0xc8,umask=0x1");
4892EVENT_ATTR_STR(el-commit,       el_commit,      "event=0xc8,umask=0x2");
4893EVENT_ATTR_STR(el-abort,        el_abort,       "event=0xc8,umask=0x4");
4894EVENT_ATTR_STR(el-capacity,     el_capacity,    "event=0x54,umask=0x2");
4895EVENT_ATTR_STR(el-conflict,     el_conflict,    "event=0x54,umask=0x1");
4896EVENT_ATTR_STR(cycles-t,        cycles_t,       "event=0x3c,in_tx=1");
4897EVENT_ATTR_STR(cycles-ct,       cycles_ct,      "event=0x3c,in_tx=1,in_tx_cp=1");
4898
4899static struct attribute *hsw_events_attrs[] = {
4900        EVENT_PTR(td_slots_issued),
4901        EVENT_PTR(td_slots_retired),
4902        EVENT_PTR(td_fetch_bubbles),
4903        EVENT_PTR(td_total_slots),
4904        EVENT_PTR(td_total_slots_scale),
4905        EVENT_PTR(td_recovery_bubbles),
4906        EVENT_PTR(td_recovery_bubbles_scale),
4907        NULL
4908};
4909
4910static struct attribute *hsw_mem_events_attrs[] = {
4911        EVENT_PTR(mem_ld_hsw),
4912        EVENT_PTR(mem_st_hsw),
4913        NULL,
4914};
4915
4916static struct attribute *hsw_tsx_events_attrs[] = {
4917        EVENT_PTR(tx_start),
4918        EVENT_PTR(tx_commit),
4919        EVENT_PTR(tx_abort),
4920        EVENT_PTR(tx_capacity),
4921        EVENT_PTR(tx_conflict),
4922        EVENT_PTR(el_start),
4923        EVENT_PTR(el_commit),
4924        EVENT_PTR(el_abort),
4925        EVENT_PTR(el_capacity),
4926        EVENT_PTR(el_conflict),
4927        EVENT_PTR(cycles_t),
4928        EVENT_PTR(cycles_ct),
4929        NULL
4930};
4931
4932EVENT_ATTR_STR(tx-capacity-read,  tx_capacity_read,  "event=0x54,umask=0x80");
4933EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
4934EVENT_ATTR_STR(el-capacity-read,  el_capacity_read,  "event=0x54,umask=0x80");
4935EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
4936
4937static struct attribute *icl_events_attrs[] = {
4938        EVENT_PTR(mem_ld_hsw),
4939        EVENT_PTR(mem_st_hsw),
4940        NULL,
4941};
4942
4943static struct attribute *icl_td_events_attrs[] = {
4944        EVENT_PTR(slots),
4945        EVENT_PTR(td_retiring),
4946        EVENT_PTR(td_bad_spec),
4947        EVENT_PTR(td_fe_bound),
4948        EVENT_PTR(td_be_bound),
4949        NULL,
4950};
4951
4952static struct attribute *icl_tsx_events_attrs[] = {
4953        EVENT_PTR(tx_start),
4954        EVENT_PTR(tx_abort),
4955        EVENT_PTR(tx_commit),
4956        EVENT_PTR(tx_capacity_read),
4957        EVENT_PTR(tx_capacity_write),
4958        EVENT_PTR(tx_conflict),
4959        EVENT_PTR(el_start),
4960        EVENT_PTR(el_abort),
4961        EVENT_PTR(el_commit),
4962        EVENT_PTR(el_capacity_read),
4963        EVENT_PTR(el_capacity_write),
4964        EVENT_PTR(el_conflict),
4965        EVENT_PTR(cycles_t),
4966        EVENT_PTR(cycles_ct),
4967        NULL,
4968};
4969
4970
4971EVENT_ATTR_STR(mem-stores,      mem_st_spr,     "event=0xcd,umask=0x2");
4972EVENT_ATTR_STR(mem-loads-aux,   mem_ld_aux,     "event=0x03,umask=0x82");
4973
4974static struct attribute *spr_events_attrs[] = {
4975        EVENT_PTR(mem_ld_hsw),
4976        EVENT_PTR(mem_st_spr),
4977        EVENT_PTR(mem_ld_aux),
4978        NULL,
4979};
4980
4981static struct attribute *spr_td_events_attrs[] = {
4982        EVENT_PTR(slots),
4983        EVENT_PTR(td_retiring),
4984        EVENT_PTR(td_bad_spec),
4985        EVENT_PTR(td_fe_bound),
4986        EVENT_PTR(td_be_bound),
4987        EVENT_PTR(td_heavy_ops),
4988        EVENT_PTR(td_br_mispredict),
4989        EVENT_PTR(td_fetch_lat),
4990        EVENT_PTR(td_mem_bound),
4991        NULL,
4992};
4993
4994static struct attribute *spr_tsx_events_attrs[] = {
4995        EVENT_PTR(tx_start),
4996        EVENT_PTR(tx_abort),
4997        EVENT_PTR(tx_commit),
4998        EVENT_PTR(tx_capacity_read),
4999        EVENT_PTR(tx_capacity_write),
5000        EVENT_PTR(tx_conflict),
5001        EVENT_PTR(cycles_t),
5002        EVENT_PTR(cycles_ct),
5003        NULL,
5004};
5005
5006static ssize_t freeze_on_smi_show(struct device *cdev,
5007                                  struct device_attribute *attr,
5008                                  char *buf)
5009{
5010        return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
5011}
5012
5013static DEFINE_MUTEX(freeze_on_smi_mutex);
5014
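    /*
     * Parse the new freeze_on_smi value and, if it changed, propagate it to
     * the FREEZE_IN_SMM bit on every online CPU, serialized by
     * freeze_on_smi_mutex.
     */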
5015static ssize_t freeze_on_smi_store(struct device *cdev,
5016                                   struct device_attribute *attr,
5017                                   const char *buf, size_t count)
5018{
5019        unsigned long val;
5020        ssize_t ret;
5021
5022        ret = kstrtoul(buf, 0, &val);
5023        if (ret)
5024                return ret;
5025
5026        if (val > 1)
5027                return -EINVAL;
5028
5029        mutex_lock(&freeze_on_smi_mutex);
5030
5031        if (x86_pmu.attr_freeze_on_smi == val)
5032                goto done;
5033
5034        x86_pmu.attr_freeze_on_smi = val;
5035
5036        cpus_read_lock();
5037        on_each_cpu(flip_smm_bit, &val, 1);
5038        cpus_read_unlock();
5039done:
5040        mutex_unlock(&freeze_on_smi_mutex);
5041
5042        return count;
5043}
5044
5045static void update_tfa_sched(void *ignored)
5046{
5047        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
5048
5049        /*
5050         * Check whether PMC3 is in use and, if so, force a reschedule of
5051         * all event types in all contexts.
5052         */
5053        if (test_bit(3, cpuc->active_mask))
5054                perf_pmu_resched(x86_get_pmu(smp_processor_id()));
5055}
5056
5057static ssize_t show_sysctl_tfa(struct device *cdev,
5058                              struct device_attribute *attr,
5059                              char *buf)
5060{
5061        return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
5062}
5063
5064static ssize_t set_sysctl_tfa(struct device *cdev,
5065                              struct device_attribute *attr,
5066                              const char *buf, size_t count)
5067{
5068        bool val;
5069        ssize_t ret;
5070
5071        ret = kstrtobool(buf, &val);
5072        if (ret)
5073                return ret;
5074
5075        /* no change */
5076        if (val == allow_tsx_force_abort)
5077                return count;
5078
5079        allow_tsx_force_abort = val;
5080
5081        cpus_read_lock();
5082        on_each_cpu(update_tfa_sched, NULL, 1);
5083        cpus_read_unlock();
5084
5085        return count;
5086}
5087
5088
5089static DEVICE_ATTR_RW(freeze_on_smi);
5090
5091static ssize_t branches_show(struct device *cdev,
5092                             struct device_attribute *attr,
5093                             char *buf)
5094{
5095        return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
5096}
5097
5098static DEVICE_ATTR_RO(branches);
5099
5100static struct attribute *lbr_attrs[] = {
5101        &dev_attr_branches.attr,
5102        NULL
5103};
5104
5105static char pmu_name_str[30];
5106
5107static ssize_t pmu_name_show(struct device *cdev,
5108                             struct device_attribute *attr,
5109                             char *buf)
5110{
5111        return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str);
5112}
5113
5114static DEVICE_ATTR_RO(pmu_name);
5115
5116static struct attribute *intel_pmu_caps_attrs[] = {
5117       &dev_attr_pmu_name.attr,
5118       NULL
5119};
5120
5121static DEVICE_ATTR(allow_tsx_force_abort, 0644,
5122                   show_sysctl_tfa,
5123                   set_sysctl_tfa);
5124
5125static struct attribute *intel_pmu_attrs[] = {
5126        &dev_attr_freeze_on_smi.attr,
5127        &dev_attr_allow_tsx_force_abort.attr,
5128        NULL,
5129};
5130
5131static umode_t
5132tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5133{
5134        return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0;
5135}
5136
5137static umode_t
5138pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5139{
5140        return x86_pmu.pebs ? attr->mode : 0;
5141}
5142
5143static umode_t
5144lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5145{
5146        return x86_pmu.lbr_nr ? attr->mode : 0;
5147}
5148
5149static umode_t
5150exra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5151{
5152        return x86_pmu.version >= 2 ? attr->mode : 0;
5153}
5154
5155static umode_t
5156default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
5157{
5158        if (attr == &dev_attr_allow_tsx_force_abort.attr)
5159                return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;
5160
5161        return attr->mode;
5162}
5163
5164static struct attribute_group group_events_td  = {
5165        .name = "events",
5166};
5167
5168static struct attribute_group group_events_mem = {
5169        .name       = "events",
5170        .is_visible = pebs_is_visible,
5171};
5172
5173static struct attribute_group group_events_tsx = {
5174        .name       = "events",
5175        .is_visible = tsx_is_visible,
5176};
5177
5178static struct attribute_group group_caps_gen = {
5179        .name  = "caps",
5180        .attrs = intel_pmu_caps_attrs,
5181};
5182
5183static struct attribute_group group_caps_lbr = {
5184        .name       = "caps",
5185        .attrs      = lbr_attrs,
5186        .is_visible = lbr_is_visible,
5187};
5188
5189static struct attribute_group group_format_extra = {
5190        .name       = "format",
5191        .is_visible = exra_is_visible,
5192};
5193
5194static struct attribute_group group_format_extra_skl = {
5195        .name       = "format",
5196        .is_visible = exra_is_visible,
5197};
5198
5199static struct attribute_group group_default = {
5200        .attrs      = intel_pmu_attrs,
5201        .is_visible = default_is_visible,
5202};
5203
5204static const struct attribute_group *attr_update[] = {
5205        &group_events_td,
5206        &group_events_mem,
5207        &group_events_tsx,
5208        &group_caps_gen,
5209        &group_caps_lbr,
5210        &group_format_extra,
5211        &group_format_extra_skl,
5212        &group_default,
5213        NULL,
5214};
5215
5216EVENT_ATTR_STR_HYBRID(slots,                 slots_adl,        "event=0x00,umask=0x4",                       hybrid_big);
5217EVENT_ATTR_STR_HYBRID(topdown-retiring,      td_retiring_adl,  "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small);
5218EVENT_ATTR_STR_HYBRID(topdown-bad-spec,      td_bad_spec_adl,  "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small);
5219EVENT_ATTR_STR_HYBRID(topdown-fe-bound,      td_fe_bound_adl,  "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small);
5220EVENT_ATTR_STR_HYBRID(topdown-be-bound,      td_be_bound_adl,  "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small);
5221EVENT_ATTR_STR_HYBRID(topdown-heavy-ops,     td_heavy_ops_adl, "event=0x00,umask=0x84",                      hybrid_big);
5222EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl,    "event=0x00,umask=0x85",                      hybrid_big);
5223EVENT_ATTR_STR_HYBRID(topdown-fetch-lat,     td_fetch_lat_adl, "event=0x00,umask=0x86",                      hybrid_big);
5224EVENT_ATTR_STR_HYBRID(topdown-mem-bound,     td_mem_bound_adl, "event=0x00,umask=0x87",                      hybrid_big);
5225
5226static struct attribute *adl_hybrid_events_attrs[] = {
5227        EVENT_PTR(slots_adl),
5228        EVENT_PTR(td_retiring_adl),
5229        EVENT_PTR(td_bad_spec_adl),
5230        EVENT_PTR(td_fe_bound_adl),
5231        EVENT_PTR(td_be_bound_adl),
5232        EVENT_PTR(td_heavy_ops_adl),
5233        EVENT_PTR(td_br_mis_adl),
5234        EVENT_PTR(td_fetch_lat_adl),
5235        EVENT_PTR(td_mem_bound_adl),
5236        NULL,
5237};
5238
5239/* Must be in IDX order */
5240EVENT_ATTR_STR_HYBRID(mem-loads,     mem_ld_adl,     "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small);
5241EVENT_ATTR_STR_HYBRID(mem-stores,    mem_st_adl,     "event=0xd0,umask=0x6;event=0xcd,umask=0x2",                 hybrid_big_small);
5242EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82",                                     hybrid_big);
5243
5244static struct attribute *adl_hybrid_mem_attrs[] = {
5245        EVENT_PTR(mem_ld_adl),
5246        EVENT_PTR(mem_st_adl),
5247        EVENT_PTR(mem_ld_aux_adl),
5248        NULL,
5249};
5250
5251EVENT_ATTR_STR_HYBRID(tx-start,          tx_start_adl,          "event=0xc9,umask=0x1",          hybrid_big);
5252EVENT_ATTR_STR_HYBRID(tx-commit,         tx_commit_adl,         "event=0xc9,umask=0x2",          hybrid_big);
5253EVENT_ATTR_STR_HYBRID(tx-abort,          tx_abort_adl,          "event=0xc9,umask=0x4",          hybrid_big);
5254EVENT_ATTR_STR_HYBRID(tx-conflict,       tx_conflict_adl,       "event=0x54,umask=0x1",          hybrid_big);
5255EVENT_ATTR_STR_HYBRID(cycles-t,          cycles_t_adl,          "event=0x3c,in_tx=1",            hybrid_big);
5256EVENT_ATTR_STR_HYBRID(cycles-ct,         cycles_ct_adl,         "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big);
5257EVENT_ATTR_STR_HYBRID(tx-capacity-read,  tx_capacity_read_adl,  "event=0x54,umask=0x80",         hybrid_big);
5258EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2",          hybrid_big);
5259
5260static struct attribute *adl_hybrid_tsx_attrs[] = {
5261        EVENT_PTR(tx_start_adl),
5262        EVENT_PTR(tx_abort_adl),
5263        EVENT_PTR(tx_commit_adl),
5264        EVENT_PTR(tx_capacity_read_adl),
5265        EVENT_PTR(tx_capacity_write_adl),
5266        EVENT_PTR(tx_conflict_adl),
5267        EVENT_PTR(cycles_t_adl),
5268        EVENT_PTR(cycles_ct_adl),
5269        NULL,
5270};
5271
5272FORMAT_ATTR_HYBRID(in_tx,       hybrid_big);
5273FORMAT_ATTR_HYBRID(in_tx_cp,    hybrid_big);
5274FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small);
5275FORMAT_ATTR_HYBRID(ldlat,       hybrid_big_small);
5276FORMAT_ATTR_HYBRID(frontend,    hybrid_big);
5277
5278static struct attribute *adl_hybrid_extra_attr_rtm[] = {
5279        FORMAT_HYBRID_PTR(in_tx),
5280        FORMAT_HYBRID_PTR(in_tx_cp),
5281        FORMAT_HYBRID_PTR(offcore_rsp),
5282        FORMAT_HYBRID_PTR(ldlat),
5283        FORMAT_HYBRID_PTR(frontend),
5284        NULL,
5285};
5286
5287static struct attribute *adl_hybrid_extra_attr[] = {
5288        FORMAT_HYBRID_PTR(offcore_rsp),
5289        FORMAT_HYBRID_PTR(ldlat),
5290        FORMAT_HYBRID_PTR(frontend),
5291        NULL,
5292};
5293
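    /*
     * A hybrid event/format attribute carries a mask of the PMU types it
     * applies to; expose it only when that mask includes this PMU's cpu_type.
     */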
5294static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr)
5295{
5296        struct device *dev = kobj_to_dev(kobj);
5297        struct x86_hybrid_pmu *pmu =
5298                container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5299        struct perf_pmu_events_hybrid_attr *pmu_attr =
5300                container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr);
5301
5302        return pmu->cpu_type & pmu_attr->pmu_type;
5303}
5304
5305static umode_t hybrid_events_is_visible(struct kobject *kobj,
5306                                        struct attribute *attr, int i)
5307{
5308        return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0;
5309}
5310
5311static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu)
5312{
5313        int cpu = cpumask_first(&pmu->supported_cpus);
5314
5315        return (cpu >= nr_cpu_ids) ? -1 : cpu;
5316}
5317
5318static umode_t hybrid_tsx_is_visible(struct kobject *kobj,
5319                                     struct attribute *attr, int i)
5320{
5321        struct device *dev = kobj_to_dev(kobj);
5322        struct x86_hybrid_pmu *pmu =
5323                 container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5324        int cpu = hybrid_find_supported_cpu(pmu);
5325
5326        return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0;
5327}
5328
5329static umode_t hybrid_format_is_visible(struct kobject *kobj,
5330                                        struct attribute *attr, int i)
5331{
5332        struct device *dev = kobj_to_dev(kobj);
5333        struct x86_hybrid_pmu *pmu =
5334                container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5335        struct perf_pmu_format_hybrid_attr *pmu_attr =
5336                container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr);
5337        int cpu = hybrid_find_supported_cpu(pmu);
5338
5339        return (cpu >= 0) && (pmu->cpu_type & pmu_attr->pmu_type) ? attr->mode : 0;
5340}
5341
5342static struct attribute_group hybrid_group_events_td  = {
5343        .name           = "events",
5344        .is_visible     = hybrid_events_is_visible,
5345};
5346
5347static struct attribute_group hybrid_group_events_mem = {
5348        .name           = "events",
5349        .is_visible     = hybrid_events_is_visible,
5350};
5351
5352static struct attribute_group hybrid_group_events_tsx = {
5353        .name           = "events",
5354        .is_visible     = hybrid_tsx_is_visible,
5355};
5356
5357static struct attribute_group hybrid_group_format_extra = {
5358        .name           = "format",
5359        .is_visible     = hybrid_format_is_visible,
5360};
5361
5362static ssize_t intel_hybrid_get_attr_cpus(struct device *dev,
5363                                          struct device_attribute *attr,
5364                                          char *buf)
5365{
5366        struct x86_hybrid_pmu *pmu =
5367                container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
5368
5369        return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus);
5370}
5371
5372static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL);
5373static struct attribute *intel_hybrid_cpus_attrs[] = {
5374        &dev_attr_cpus.attr,
5375        NULL,
5376};
5377
5378static struct attribute_group hybrid_group_cpus = {
5379        .attrs          = intel_hybrid_cpus_attrs,
5380};
5381
5382static const struct attribute_group *hybrid_attr_update[] = {
5383        &hybrid_group_events_td,
5384        &hybrid_group_events_mem,
5385        &hybrid_group_events_tsx,
5386        &group_caps_gen,
5387        &group_caps_lbr,
5388        &hybrid_group_format_extra,
5389        &group_default,
5390        &hybrid_group_cpus,
5391        NULL,
5392};
5393
5394static struct attribute *empty_attrs;
5395
5396static void intel_pmu_check_num_counters(int *num_counters,
5397                                         int *num_counters_fixed,
5398                                         u64 *intel_ctrl, u64 fixed_mask)
5399{
5400        if (*num_counters > INTEL_PMC_MAX_GENERIC) {
5401                WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
5402                     *num_counters, INTEL_PMC_MAX_GENERIC);
5403                *num_counters = INTEL_PMC_MAX_GENERIC;
5404        }
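            /*
             * Build the GLOBAL_CTRL-style enable mask: one bit per
             * general-purpose counter in the low bits, with the fixed-counter
             * bits OR'ed in at INTEL_PMC_IDX_FIXED below.
             */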
5405        *intel_ctrl = (1ULL << *num_counters) - 1;
5406
5407        if (*num_counters_fixed > INTEL_PMC_MAX_FIXED) {
5408                WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
5409                     *num_counters_fixed, INTEL_PMC_MAX_FIXED);
5410                *num_counters_fixed = INTEL_PMC_MAX_FIXED;
5411        }
5412
5413        *intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED;
5414}
5415
5416static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
5417                                              int num_counters,
5418                                              int num_counters_fixed,
5419                                              u64 intel_ctrl)
5420{
5421        struct event_constraint *c;
5422
5423        if (!event_constraints)
5424                return;
5425
5426        /*
5427         * The event on fixed counter 2 (REF_CYCLES) only works on that
5428         * counter, so do not extend its mask to the generic counters.
5429         */
5430        for_each_event_constraint(c, event_constraints) {
5431                /*
5432                 * Don't extend the topdown slots and metrics
5433                 * events to the generic counters.
5434                 */
5435                if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
5436                        /*
5437                         * Disable topdown slots and metrics events,
5438                         * if slots event is not in CPUID.
5439                         */
5440                        if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl))
5441                                c->idxmsk64 = 0;
5442                        c->weight = hweight64(c->idxmsk64);
5443                        continue;
5444                }
5445
5446                if (c->cmask == FIXED_EVENT_FLAGS) {
5447                        /* Disable fixed counters which are not enumerated in CPUID */
5448                        c->idxmsk64 &= intel_ctrl;
5449
5450                        if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES)
5451                                c->idxmsk64 |= (1ULL << num_counters) - 1;
5452                }
5453                c->idxmsk64 &=
5454                        ~(~0ULL << (INTEL_PMC_IDX_FIXED + num_counters_fixed));
5455                c->weight = hweight64(c->idxmsk64);
5456        }
5457}
5458
5459static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
5460{
5461        struct extra_reg *er;
5462
5463        /*
5464         * Accessing an extra MSR may cause a #GP under certain circumstances,
5465         * e.g. KVM doesn't support the offcore event MSRs.
5466         * Check all extra_regs here.
5467         */
5468        if (!extra_regs)
5469                return;
5470
5471        for (er = extra_regs; er->msr; er++) {
5472                er->extra_msr_access = check_msr(er->msr, 0x11UL);
5473                /* Disable LBR select mapping */
5474                if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
5475                        x86_pmu.lbr_sel_map = NULL;
5476        }
5477}
5478
5479static void intel_pmu_check_hybrid_pmus(u64 fixed_mask)
5480{
5481        struct x86_hybrid_pmu *pmu;
5482        int i;
5483
5484        for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
5485                pmu = &x86_pmu.hybrid_pmu[i];
5486
5487                intel_pmu_check_num_counters(&pmu->num_counters,
5488                                             &pmu->num_counters_fixed,
5489                                             &pmu->intel_ctrl,
5490                                             fixed_mask);
5491
5492                if (pmu->intel_cap.perf_metrics) {
5493                        pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
5494                        pmu->intel_ctrl |= INTEL_PMC_MSK_FIXED_SLOTS;
5495                }
5496
5497                if (pmu->intel_cap.pebs_output_pt_available)
5498                        pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
5499
5500                intel_pmu_check_event_constraints(pmu->event_constraints,
5501                                                  pmu->num_counters,
5502                                                  pmu->num_counters_fixed,
5503                                                  pmu->intel_ctrl);
5504
5505                intel_pmu_check_extra_regs(pmu->extra_regs);
5506        }
5507}
5508
5509__init int intel_pmu_init(void)
5510{
5511        struct attribute **extra_skl_attr = &empty_attrs;
5512        struct attribute **extra_attr = &empty_attrs;
5513        struct attribute **td_attr    = &empty_attrs;
5514        struct attribute **mem_attr   = &empty_attrs;
5515        struct attribute **tsx_attr   = &empty_attrs;
5516        union cpuid10_edx edx;
5517        union cpuid10_eax eax;
5518        union cpuid10_ebx ebx;
5519        unsigned int fixed_mask;
5520        bool pmem = false;
5521        int version, i;
5522        char *name;
5523        struct x86_hybrid_pmu *pmu;
5524
5525        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
5526                switch (boot_cpu_data.x86) {
5527                case 0x6:
5528                        return p6_pmu_init();
5529                case 0xb:
5530                        return knc_pmu_init();
5531                case 0xf:
5532                        return p4_pmu_init();
5533                }
5534                return -ENODEV;
5535        }
5536
5537        /*
5538         * Check whether the Architectural PerfMon supports
5539         * Branch Misses Retired hw_event or not.
5540         */
5541        cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
5542        if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
5543                return -ENODEV;
5544
5545        version = eax.split.version_id;
5546        if (version < 2)
5547                x86_pmu = core_pmu;
5548        else
5549                x86_pmu = intel_pmu;
5550
5551        x86_pmu.version                 = version;
5552        x86_pmu.num_counters            = eax.split.num_counters;
5553        x86_pmu.cntval_bits             = eax.split.bit_width;
5554        x86_pmu.cntval_mask             = (1ULL << eax.split.bit_width) - 1;
5555
5556        x86_pmu.events_maskl            = ebx.full;
5557        x86_pmu.events_mask_len         = eax.split.mask_length;
5558
5559        x86_pmu.max_pebs_events         = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
5560
5561        /*
5562         * Quirk: v2 perfmon does not report fixed-purpose events, so
5563         * assume at least 3 events, when not running in a hypervisor:
5564         */
5565        if (version > 1 && version < 5) {
5566                int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
5567
5568                x86_pmu.num_counters_fixed =
5569                        max((int)edx.split.num_counters_fixed, assume);
5570
5571                fixed_mask = (1L << x86_pmu.num_counters_fixed) - 1;
5572        } else if (version >= 5)
5573                x86_pmu.num_counters_fixed = fls(fixed_mask);
5574
5575        if (boot_cpu_has(X86_FEATURE_PDCM)) {
5576                u64 capabilities;
5577
5578                rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
5579                x86_pmu.intel_cap.capabilities = capabilities;
5580        }
5581
5582        if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) {
5583                x86_pmu.lbr_reset = intel_pmu_lbr_reset_32;
5584                x86_pmu.lbr_read = intel_pmu_lbr_read_32;
5585        }
5586
5587        if (boot_cpu_has(X86_FEATURE_ARCH_LBR))
5588                intel_pmu_arch_lbr_init();
5589
5590        intel_ds_init();
5591
5592        x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
5593
5594        if (version >= 5) {
5595                x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated;
5596                if (x86_pmu.intel_cap.anythread_deprecated)
5597                        pr_cont(" AnyThread deprecated, ");
5598        }
5599
5600        /*
5601         * Install the hw-cache-events table:
5602         */
5603        switch (boot_cpu_data.x86_model) {
5604        case INTEL_FAM6_CORE_YONAH:
5605                pr_cont("Core events, ");
5606                name = "core";
5607                break;
5608
5609        case INTEL_FAM6_CORE2_MEROM:
5610                x86_add_quirk(intel_clovertown_quirk);
5611                fallthrough;
5612
5613        case INTEL_FAM6_CORE2_MEROM_L:
5614        case INTEL_FAM6_CORE2_PENRYN:
5615        case INTEL_FAM6_CORE2_DUNNINGTON:
5616                memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
5617                       sizeof(hw_cache_event_ids));
5618
5619                intel_pmu_lbr_init_core();
5620
5621                x86_pmu.event_constraints = intel_core2_event_constraints;
5622                x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
5623                pr_cont("Core2 events, ");
5624                name = "core2";
5625                break;
5626
5627        case INTEL_FAM6_NEHALEM:
5628        case INTEL_FAM6_NEHALEM_EP:
5629        case INTEL_FAM6_NEHALEM_EX:
5630                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
5631                       sizeof(hw_cache_event_ids));
5632                memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
5633                       sizeof(hw_cache_extra_regs));
5634
5635                intel_pmu_lbr_init_nhm();
5636
5637                x86_pmu.event_constraints = intel_nehalem_event_constraints;
5638                x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
5639                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
5640                x86_pmu.extra_regs = intel_nehalem_extra_regs;
5641                x86_pmu.limit_period = nhm_limit_period;
5642
5643                mem_attr = nhm_mem_events_attrs;
5644
5645                /* UOPS_ISSUED.STALLED_CYCLES */
5646                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
5647                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
5648                /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
5649                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
5650                        X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
5651
5652                intel_pmu_pebs_data_source_nhm();
5653                x86_add_quirk(intel_nehalem_quirk);
5654                x86_pmu.pebs_no_tlb = 1;
5655                extra_attr = nhm_format_attr;
5656
5657                pr_cont("Nehalem events, ");
5658                name = "nehalem";
5659                break;
5660
5661        case INTEL_FAM6_ATOM_BONNELL:
5662        case INTEL_FAM6_ATOM_BONNELL_MID:
5663        case INTEL_FAM6_ATOM_SALTWELL:
5664        case INTEL_FAM6_ATOM_SALTWELL_MID:
5665        case INTEL_FAM6_ATOM_SALTWELL_TABLET:
5666                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
5667                       sizeof(hw_cache_event_ids));
5668
5669                intel_pmu_lbr_init_atom();
5670
5671                x86_pmu.event_constraints = intel_gen_event_constraints;
5672                x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
5673                x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
5674                pr_cont("Atom events, ");
5675                name = "bonnell";
5676                break;
5677
5678        case INTEL_FAM6_ATOM_SILVERMONT:
5679        case INTEL_FAM6_ATOM_SILVERMONT_D:
5680        case INTEL_FAM6_ATOM_SILVERMONT_MID:
5681        case INTEL_FAM6_ATOM_AIRMONT:
5682        case INTEL_FAM6_ATOM_AIRMONT_MID:
5683                memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
5684                        sizeof(hw_cache_event_ids));
5685                memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
5686                       sizeof(hw_cache_extra_regs));
5687
5688                intel_pmu_lbr_init_slm();
5689
5690                x86_pmu.event_constraints = intel_slm_event_constraints;
5691                x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
5692                x86_pmu.extra_regs = intel_slm_extra_regs;
5693                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5694                td_attr = slm_events_attrs;
5695                extra_attr = slm_format_attr;
5696                pr_cont("Silvermont events, ");
5697                name = "silvermont";
5698                break;
5699
5700        case INTEL_FAM6_ATOM_GOLDMONT:
5701        case INTEL_FAM6_ATOM_GOLDMONT_D:
5702                memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
5703                       sizeof(hw_cache_event_ids));
5704                memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
5705                       sizeof(hw_cache_extra_regs));
5706
5707                intel_pmu_lbr_init_skl();
5708
5709                x86_pmu.event_constraints = intel_slm_event_constraints;
5710                x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
5711                x86_pmu.extra_regs = intel_glm_extra_regs;
5712                /*
5713                 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
5714                 * for precise cycles.
5715                 * :pp is identical to :ppp
5716                 */
5717                x86_pmu.pebs_aliases = NULL;
5718                x86_pmu.pebs_prec_dist = true;
5719                x86_pmu.lbr_pt_coexist = true;
5720                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5721                td_attr = glm_events_attrs;
5722                extra_attr = slm_format_attr;
5723                pr_cont("Goldmont events, ");
5724                name = "goldmont";
5725                break;
5726
5727        case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
5728                memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
5729                       sizeof(hw_cache_event_ids));
5730                memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
5731                       sizeof(hw_cache_extra_regs));
5732
5733                intel_pmu_lbr_init_skl();
5734
5735                x86_pmu.event_constraints = intel_slm_event_constraints;
5736                x86_pmu.extra_regs = intel_glm_extra_regs;
5737                /*
5738                 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
5739                 * for precise cycles.
5740                 */
5741                x86_pmu.pebs_aliases = NULL;
5742                x86_pmu.pebs_prec_dist = true;
5743                x86_pmu.lbr_pt_coexist = true;
5744                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5745                x86_pmu.flags |= PMU_FL_PEBS_ALL;
5746                x86_pmu.get_event_constraints = glp_get_event_constraints;
5747                td_attr = glm_events_attrs;
5748                /* Goldmont Plus has a 4-wide pipeline */
5749                event_attr_td_total_slots_scale_glm.event_str = "4";
5750                extra_attr = slm_format_attr;
5751                pr_cont("Goldmont plus events, ");
5752                name = "goldmont_plus";
5753                break;
5754
5755        case INTEL_FAM6_ATOM_TREMONT_D:
5756        case INTEL_FAM6_ATOM_TREMONT:
5757        case INTEL_FAM6_ATOM_TREMONT_L:
5758                x86_pmu.late_ack = true;
5759                memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
5760                       sizeof(hw_cache_event_ids));
5761                memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
5762                       sizeof(hw_cache_extra_regs));
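                    /* Mark the generic ITLB read-access cache event as unsupported (-1). */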
5763                hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
5764
5765                intel_pmu_lbr_init_skl();
5766
5767                x86_pmu.event_constraints = intel_slm_event_constraints;
5768                x86_pmu.extra_regs = intel_tnt_extra_regs;
5769                /*
5770                 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
5771                 * for precise cycles.
5772                 */
5773                x86_pmu.pebs_aliases = NULL;
5774                x86_pmu.pebs_prec_dist = true;
5775                x86_pmu.lbr_pt_coexist = true;
5776                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5777                x86_pmu.get_event_constraints = tnt_get_event_constraints;
5778                td_attr = tnt_events_attrs;
5779                extra_attr = slm_format_attr;
5780                pr_cont("Tremont events, ");
5781                name = "Tremont";
5782                break;
5783
5784        case INTEL_FAM6_WESTMERE:
5785        case INTEL_FAM6_WESTMERE_EP:
5786        case INTEL_FAM6_WESTMERE_EX:
5787                memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
5788                       sizeof(hw_cache_event_ids));
5789                memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
5790                       sizeof(hw_cache_extra_regs));
5791
5792                intel_pmu_lbr_init_nhm();
5793
5794                x86_pmu.event_constraints = intel_westmere_event_constraints;
5795                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
5796                x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
5797                x86_pmu.extra_regs = intel_westmere_extra_regs;
5798                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5799
5800                mem_attr = nhm_mem_events_attrs;
5801
5802                /* UOPS_ISSUED.STALLED_CYCLES */
5803                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
5804                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
5805                /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
5806                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
5807                        X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
5808
5809                intel_pmu_pebs_data_source_nhm();
5810                extra_attr = nhm_format_attr;
5811                pr_cont("Westmere events, ");
5812                name = "westmere";
5813                break;
5814
5815        case INTEL_FAM6_SANDYBRIDGE:
5816        case INTEL_FAM6_SANDYBRIDGE_X:
5817                x86_add_quirk(intel_sandybridge_quirk);
5818                x86_add_quirk(intel_ht_bug);
5819                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
5820                       sizeof(hw_cache_event_ids));
5821                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
5822                       sizeof(hw_cache_extra_regs));
5823
5824                intel_pmu_lbr_init_snb();
5825
5826                x86_pmu.event_constraints = intel_snb_event_constraints;
5827                x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
5828                x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
5829                if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
5830                        x86_pmu.extra_regs = intel_snbep_extra_regs;
5831                else
5832                        x86_pmu.extra_regs = intel_snb_extra_regs;
5833
5834
5835                /* all extra regs are per-cpu when HT is on */
5836                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5837                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5838
5839                td_attr  = snb_events_attrs;
5840                mem_attr = snb_mem_events_attrs;
5841
5842                /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
5843                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
5844                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
5845                /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
5846                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
5847                        X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
5848
5849                extra_attr = nhm_format_attr;
5850
5851                pr_cont("SandyBridge events, ");
5852                name = "sandybridge";
5853                break;
5854
5855        case INTEL_FAM6_IVYBRIDGE:
5856        case INTEL_FAM6_IVYBRIDGE_X:
5857                x86_add_quirk(intel_ht_bug);
5858                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
5859                       sizeof(hw_cache_event_ids));
5860                /* dTLB-load-misses on IVB is different than SNB */
5861                hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
5862
5863                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
5864                       sizeof(hw_cache_extra_regs));
5865
5866                intel_pmu_lbr_init_snb();
5867
5868                x86_pmu.event_constraints = intel_ivb_event_constraints;
5869                x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
5870                x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
5871                x86_pmu.pebs_prec_dist = true;
5872                if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
5873                        x86_pmu.extra_regs = intel_snbep_extra_regs;
5874                else
5875                        x86_pmu.extra_regs = intel_snb_extra_regs;
5876                /* all extra regs are per-cpu when HT is on */
5877                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5878                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5879
5880                td_attr  = snb_events_attrs;
5881                mem_attr = snb_mem_events_attrs;
5882
5883                /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
5884                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
5885                        X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
5886
5887                extra_attr = nhm_format_attr;
5888
5889                pr_cont("IvyBridge events, ");
5890                name = "ivybridge";
5891                break;
5892
5893
5894        case INTEL_FAM6_HASWELL:
5895        case INTEL_FAM6_HASWELL_X:
5896        case INTEL_FAM6_HASWELL_L:
5897        case INTEL_FAM6_HASWELL_G:
5898                x86_add_quirk(intel_ht_bug);
5899                x86_add_quirk(intel_pebs_isolation_quirk);
5900                x86_pmu.late_ack = true;
5901                memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
5902                memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
5903
5904                intel_pmu_lbr_init_hsw();
5905
5906                x86_pmu.event_constraints = intel_hsw_event_constraints;
5907                x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
5908                x86_pmu.extra_regs = intel_snbep_extra_regs;
5909                x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
5910                x86_pmu.pebs_prec_dist = true;
5911                /* all extra regs are per-cpu when HT is on */
5912                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5913                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5914
5915                x86_pmu.hw_config = hsw_hw_config;
5916                x86_pmu.get_event_constraints = hsw_get_event_constraints;
5917                x86_pmu.lbr_double_abort = true;
5918                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
5919                        hsw_format_attr : nhm_format_attr;
5920                td_attr  = hsw_events_attrs;
5921                mem_attr = hsw_mem_events_attrs;
5922                tsx_attr = hsw_tsx_events_attrs;
5923                pr_cont("Haswell events, ");
5924                name = "haswell";
5925                break;
5926
5927        case INTEL_FAM6_BROADWELL:
5928        case INTEL_FAM6_BROADWELL_D:
5929        case INTEL_FAM6_BROADWELL_G:
5930        case INTEL_FAM6_BROADWELL_X:
5931                x86_add_quirk(intel_pebs_isolation_quirk);
5932                x86_pmu.late_ack = true;
5933                memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
5934                memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
5935
5936                /* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
5937                hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
5938                                                                         BDW_L3_MISS|HSW_SNOOP_DRAM;
5939                hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
5940                                                                          HSW_SNOOP_DRAM;
5941                hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
5942                                                                             BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
5943                hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
5944                                                                              BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
5945
5946                intel_pmu_lbr_init_hsw();
5947
5948                x86_pmu.event_constraints = intel_bdw_event_constraints;
5949                x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
5950                x86_pmu.extra_regs = intel_snbep_extra_regs;
5951                x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
5952                x86_pmu.pebs_prec_dist = true;
5953                /* all extra regs are per-cpu when HT is on */
5954                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5955                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5956
5957                x86_pmu.hw_config = hsw_hw_config;
5958                x86_pmu.get_event_constraints = hsw_get_event_constraints;
5959                x86_pmu.limit_period = bdw_limit_period;
5960                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
5961                        hsw_format_attr : nhm_format_attr;
5962                td_attr  = hsw_events_attrs;
5963                mem_attr = hsw_mem_events_attrs;
5964                tsx_attr = hsw_tsx_events_attrs;
5965                pr_cont("Broadwell events, ");
5966                name = "broadwell";
5967                break;
5968
5969        case INTEL_FAM6_XEON_PHI_KNL:
5970        case INTEL_FAM6_XEON_PHI_KNM:
5971                memcpy(hw_cache_event_ids,
5972                       slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
5973                memcpy(hw_cache_extra_regs,
5974                       knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
5975                intel_pmu_lbr_init_knl();
5976
5977                x86_pmu.event_constraints = intel_slm_event_constraints;
5978                x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
5979                x86_pmu.extra_regs = intel_knl_extra_regs;
5980
5981                /* all extra regs are per-cpu when HT is on */
5982                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5983                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5984                extra_attr = slm_format_attr;
5985                pr_cont("Knights Landing/Mill events, ");
5986                name = "knights-landing";
5987                break;
5988
5989        case INTEL_FAM6_SKYLAKE_X:
5990                pmem = true;
5991                fallthrough;
5992        case INTEL_FAM6_SKYLAKE_L:
5993        case INTEL_FAM6_SKYLAKE:
5994        case INTEL_FAM6_KABYLAKE_L:
5995        case INTEL_FAM6_KABYLAKE:
5996        case INTEL_FAM6_COMETLAKE_L:
5997        case INTEL_FAM6_COMETLAKE:
5998                x86_add_quirk(intel_pebs_isolation_quirk);
5999                x86_pmu.late_ack = true;
6000                memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6001                memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6002                intel_pmu_lbr_init_skl();
6003
6004                /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
6005                event_attr_td_recovery_bubbles.event_str_noht =
6006                        "event=0xd,umask=0x1,cmask=1";
6007                event_attr_td_recovery_bubbles.event_str_ht =
6008                        "event=0xd,umask=0x1,cmask=1,any=1";
6009
6010                x86_pmu.event_constraints = intel_skl_event_constraints;
6011                x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
6012                x86_pmu.extra_regs = intel_skl_extra_regs;
6013                x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
6014                x86_pmu.pebs_prec_dist = true;
6015                /* all extra regs are per-cpu when HT is on */
6016                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6017                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6018
6019                x86_pmu.hw_config = hsw_hw_config;
6020                x86_pmu.get_event_constraints = hsw_get_event_constraints;
6021                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6022                        hsw_format_attr : nhm_format_attr;
6023                extra_skl_attr = skl_format_attr;
6024                td_attr  = hsw_events_attrs;
6025                mem_attr = hsw_mem_events_attrs;
6026                tsx_attr = hsw_tsx_events_attrs;
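                    /*
                     * pmem selects persistent-memory (rather than L4 cache) encodings
                     * in the PEBS memory data-source table.
                     */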
6027                intel_pmu_pebs_data_source_skl(pmem);
6028
6029                /*
6030                 * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default.
6031                 * TSX force abort hooks are not required on these systems. Only deploy the
6032                 * workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT.
6033                 */
6034                if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) &&
6035                   !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
6036                        x86_pmu.flags |= PMU_FL_TFA;
6037                        x86_pmu.get_event_constraints = tfa_get_event_constraints;
6038                        x86_pmu.enable_all = intel_tfa_pmu_enable_all;
6039                        x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
6040                }
6041
6042                pr_cont("Skylake events, ");
6043                name = "skylake";
6044                break;
6045
6046        case INTEL_FAM6_ICELAKE_X:
6047        case INTEL_FAM6_ICELAKE_D:
6048                pmem = true;
6049                fallthrough;
6050        case INTEL_FAM6_ICELAKE_L:
6051        case INTEL_FAM6_ICELAKE:
6052        case INTEL_FAM6_TIGERLAKE_L:
6053        case INTEL_FAM6_TIGERLAKE:
6054        case INTEL_FAM6_ROCKETLAKE:
6055                x86_pmu.late_ack = true;
6056                memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6057                memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6058                hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
6059                intel_pmu_lbr_init_skl();
6060
6061                x86_pmu.event_constraints = intel_icl_event_constraints;
6062                x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
6063                x86_pmu.extra_regs = intel_icl_extra_regs;
6064                x86_pmu.pebs_aliases = NULL;
6065                x86_pmu.pebs_prec_dist = true;
6066                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6067                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6068
6069                x86_pmu.hw_config = hsw_hw_config;
6070                x86_pmu.get_event_constraints = icl_get_event_constraints;
6071                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6072                        hsw_format_attr : nhm_format_attr;
6073                extra_skl_attr = skl_format_attr;
6074                mem_attr = icl_events_attrs;
6075                td_attr = icl_td_events_attrs;
6076                tsx_attr = icl_tsx_events_attrs;
6077                x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
6078                x86_pmu.lbr_pt_coexist = true;
6079                intel_pmu_pebs_data_source_skl(pmem);
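                    /* Ice Lake reads the four level-1 TopDown metrics from the PERF_METRICS MSR. */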
6080                x86_pmu.num_topdown_events = 4;
6081                x86_pmu.update_topdown_event = icl_update_topdown_event;
6082                x86_pmu.set_topdown_event_period = icl_set_topdown_event_period;
6083                pr_cont("Icelake events, ");
6084                name = "icelake";
6085                break;
6086
6087        case INTEL_FAM6_SAPPHIRERAPIDS_X:
6088                pmem = true;
6089                x86_pmu.late_ack = true;
6090                memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6091                memcpy(hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6092
6093                x86_pmu.event_constraints = intel_spr_event_constraints;
6094                x86_pmu.pebs_constraints = intel_spr_pebs_event_constraints;
6095                x86_pmu.extra_regs = intel_spr_extra_regs;
6096                x86_pmu.limit_period = spr_limit_period;
6097                x86_pmu.pebs_aliases = NULL;
6098                x86_pmu.pebs_prec_dist = true;
6099                x86_pmu.pebs_block = true;
6100                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6101                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6102                x86_pmu.flags |= PMU_FL_PEBS_ALL;
6103                x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
6104                x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
6105
6106                x86_pmu.hw_config = hsw_hw_config;
6107                x86_pmu.get_event_constraints = spr_get_event_constraints;
6108                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6109                        hsw_format_attr : nhm_format_attr;
6110                extra_skl_attr = skl_format_attr;
6111                mem_attr = spr_events_attrs;
6112                td_attr = spr_td_events_attrs;
6113                tsx_attr = spr_tsx_events_attrs;
6114                x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
6115                x86_pmu.lbr_pt_coexist = true;
6116                intel_pmu_pebs_data_source_skl(pmem);
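                    /* Sapphire Rapids extends PERF_METRICS to eight metrics (level 1 + level 2). */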
6117                x86_pmu.num_topdown_events = 8;
6118                x86_pmu.update_topdown_event = icl_update_topdown_event;
6119                x86_pmu.set_topdown_event_period = icl_set_topdown_event_period;
6120                pr_cont("Sapphire Rapids events, ");
6121                name = "sapphire_rapids";
6122                break;
6123
6124        case INTEL_FAM6_ALDERLAKE:
6125        case INTEL_FAM6_ALDERLAKE_L:
6126                /*
6127                 * Alder Lake has two CPU types, core and atom.
6128                 *
6129                 * Initialize the common PerfMon capabilities here.
6130                 */
6131                x86_pmu.hybrid_pmu = kcalloc(X86_HYBRID_NUM_PMUS,
6132                                             sizeof(struct x86_hybrid_pmu),
6133                                             GFP_KERNEL);
6134                if (!x86_pmu.hybrid_pmu)
6135                        return -ENOMEM;
6136                static_branch_enable(&perf_is_hybrid);
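                    /* is_hybrid() keys off this static branch from now on. */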
6137                x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS;
6138
6139                x86_pmu.pebs_aliases = NULL;
6140                x86_pmu.pebs_prec_dist = true;
6141                x86_pmu.pebs_block = true;
6142                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6143                x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6144                x86_pmu.flags |= PMU_FL_PEBS_ALL;
6145                x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
6146                x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
6147                x86_pmu.lbr_pt_coexist = true;
6148                intel_pmu_pebs_data_source_skl(false);
6149                x86_pmu.num_topdown_events = 8;
6150                x86_pmu.update_topdown_event = adl_update_topdown_event;
6151                x86_pmu.set_topdown_event_period = adl_set_topdown_event_period;
6152
6153                x86_pmu.filter_match = intel_pmu_filter_match;
6154                x86_pmu.get_event_constraints = adl_get_event_constraints;
6155                x86_pmu.hw_config = adl_hw_config;
6156                x86_pmu.limit_period = spr_limit_period;
6157                x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;
6158                /*
6159                 * The rtm_abort_event is used to check whether to enable GPRs
6160                 * for the RTM abort event. Atom doesn't have the RTM abort
6161                 * event. There is no harm in setting it in the common
6162                 * x86_pmu.rtm_abort_event.
6163                 */
6164                x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
6165
6166                td_attr = adl_hybrid_events_attrs;
6167                mem_attr = adl_hybrid_mem_attrs;
6168                tsx_attr = adl_hybrid_tsx_attrs;
6169                extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6170                        adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr;
6171
6172                /* Initialize big core specific PerfMon capabilities. */
6173                pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
6174                pmu->name = "cpu_core";
6175                pmu->cpu_type = hybrid_big;
6176                pmu->late_ack = true;
6177                if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
6178                        pmu->num_counters = x86_pmu.num_counters + 2;
6179                        pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
6180                } else {
6181                        pmu->num_counters = x86_pmu.num_counters;
6182                        pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
6183                }
6184                pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
6185                pmu->unconstrained = (struct event_constraint)
6186                                        __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
6187                                                           0, pmu->num_counters, 0, 0);
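                    /* "unconstrained" lets an event be scheduled on any of this PMU's counters. */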
6188                pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
6189                pmu->intel_cap.perf_metrics = 1;
6190                pmu->intel_cap.pebs_output_pt_available = 0;
6191
6192                memcpy(pmu->hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids));
6193                memcpy(pmu->hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs));
6194                pmu->event_constraints = intel_spr_event_constraints;
6195                pmu->pebs_constraints = intel_spr_pebs_event_constraints;
6196                pmu->extra_regs = intel_spr_extra_regs;
6197
6198                /* Initialize Atom core specific PerfMon capabilities. */
6199                pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
6200                pmu->name = "cpu_atom";
6201                pmu->cpu_type = hybrid_small;
6202                pmu->mid_ack = true;
6203                pmu->num_counters = x86_pmu.num_counters;
6204                pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
6205                pmu->max_pebs_events = x86_pmu.max_pebs_events;
6206                pmu->unconstrained = (struct event_constraint)
6207                                        __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
6208                                                           0, pmu->num_counters, 0, 0);
6209                pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
6210                pmu->intel_cap.perf_metrics = 0;
6211                pmu->intel_cap.pebs_output_pt_available = 1;
6212
6213                memcpy(pmu->hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids));
6214                memcpy(pmu->hw_cache_extra_regs, tnt_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs));
6215                pmu->hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
6216                pmu->event_constraints = intel_slm_event_constraints;
6217                pmu->pebs_constraints = intel_grt_pebs_event_constraints;
6218                pmu->extra_regs = intel_grt_extra_regs;
6219                pr_cont("Alderlake Hybrid events, ");
6220                name = "alderlake_hybrid";
6221                break;
6222
6223        default:
6224                switch (x86_pmu.version) {
6225                case 1:
6226                        x86_pmu.event_constraints = intel_v1_event_constraints;
6227                        pr_cont("generic architected perfmon v1, ");
6228                        name = "generic_arch_v1";
6229                        break;
6230                default:
6231                        /*
6232                         * default constraints for v2 and up
6233                         */
6234                        x86_pmu.event_constraints = intel_gen_event_constraints;
6235                        pr_cont("generic architected perfmon, ");
6236                        name = "generic_arch_v2+";
6237                        break;
6238                }
6239        }
6240
6241        snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
6242
6243        if (!is_hybrid()) {
6244                group_events_td.attrs  = td_attr;
6245                group_events_mem.attrs = mem_attr;
6246                group_events_tsx.attrs = tsx_attr;
6247                group_format_extra.attrs = extra_attr;
6248                group_format_extra_skl.attrs = extra_skl_attr;
6249
6250                x86_pmu.attr_update = attr_update;
6251        } else {
6252                hybrid_group_events_td.attrs  = td_attr;
6253                hybrid_group_events_mem.attrs = mem_attr;
6254                hybrid_group_events_tsx.attrs = tsx_attr;
6255                hybrid_group_format_extra.attrs = extra_attr;
6256
6257                x86_pmu.attr_update = hybrid_attr_update;
6258        }
6259
6260        intel_pmu_check_num_counters(&x86_pmu.num_counters,
6261                                     &x86_pmu.num_counters_fixed,
6262                                     &x86_pmu.intel_ctrl,
6263                                     (u64)fixed_mask);
6264
6265        /* AnyThread may be deprecated on arch perfmon v5 or later */
6266        if (x86_pmu.intel_cap.anythread_deprecated)
6267                x86_pmu.format_attrs = intel_arch_formats_attr;
6268
6269        intel_pmu_check_event_constraints(x86_pmu.event_constraints,
6270                                          x86_pmu.num_counters,
6271                                          x86_pmu.num_counters_fixed,
6272                                          x86_pmu.intel_ctrl);
6273        /*
6274         * Accessing the LBR MSRs may cause a #GP under certain circumstances,
6275         * e.g. KVM doesn't support the LBR MSRs.
6276         * Check all LBR MSRs here and disable LBR access if any of them
6277         * cannot be accessed.
6278         */
6279        if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
6280                x86_pmu.lbr_nr = 0;
6281        for (i = 0; i < x86_pmu.lbr_nr; i++) {
6282                if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
6283                      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
6284                        x86_pmu.lbr_nr = 0;
6285        }
6286
6287        if (x86_pmu.lbr_nr)
6288                pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
6289
6290        intel_pmu_check_extra_regs(x86_pmu.extra_regs);
6291
6292        /* Support full width counters using alternative MSR range */
6293        if (x86_pmu.intel_cap.full_width_write) {
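                    /*
                     * The PMCx alias MSRs accept writes of the full counter width, unlike
                     * the legacy PERFCTRx MSRs, which only take a sign-extended 32-bit value.
                     */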
6294                x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
6295                x86_pmu.perfctr = MSR_IA32_PMC0;
6296                pr_cont("full-width counters, ");
6297        }
6298
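            /* Advertise the PERF_METRICS (TopDown metrics) enable bit in the global control mask. */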
6299        if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
6300                x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
6301
6302        if (is_hybrid())
6303                intel_pmu_check_hybrid_pmus((u64)fixed_mask);
6304
6305        return 0;
6306}
6307
6308/*
6309 * HT bug: phase 2 init
6310 * Called once we have valid topology information to check
6311 * whether or not HT is enabled.
6312 * If HT is off, then we disable the workaround.
6313 */
6314static __init int fixup_ht_bug(void)
6315{
6316        int c;
6317        /*
6318         * problem not present on this CPU model, nothing to do
6319         */
6320        if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
6321                return 0;
6322
6323        if (topology_max_smt_threads() > 1) {
6324                pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
6325                return 0;
6326        }
6327
6328        cpus_read_lock();
6329
6330        hardlockup_detector_perf_stop();
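            /* Pause the perf-based NMI watchdog while the scheduling hooks are removed. */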
6331
6332        x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
6333
6334        x86_pmu.start_scheduling = NULL;
6335        x86_pmu.commit_scheduling = NULL;
6336        x86_pmu.stop_scheduling = NULL;
6337
6338        hardlockup_detector_perf_restart();
6339
6340        for_each_online_cpu(c)
6341                free_excl_cntrs(&per_cpu(cpu_hw_events, c));
6342
6343        cpus_read_unlock();
6344        pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
6345        return 0;
6346}
6347subsys_initcall(fixup_ht_bug)
6348