// SPDX-License-Identifier: GPL-2.0
/* SandyBridge-EP/IvyTown uncore support */
#include "uncore.h"
#include "uncore_discovery.h"

   6/* SNB-EP pci bus to socket mapping */
   7#define SNBEP_CPUNODEID                 0x40
   8#define SNBEP_GIDNIDMAP                 0x54
   9
  10/* SNB-EP Box level control */
  11#define SNBEP_PMON_BOX_CTL_RST_CTRL     (1 << 0)
  12#define SNBEP_PMON_BOX_CTL_RST_CTRS     (1 << 1)
  13#define SNBEP_PMON_BOX_CTL_FRZ          (1 << 8)
  14#define SNBEP_PMON_BOX_CTL_FRZ_EN       (1 << 16)
  15#define SNBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
  16                                         SNBEP_PMON_BOX_CTL_RST_CTRS | \
  17                                         SNBEP_PMON_BOX_CTL_FRZ_EN)
  18/* SNB-EP event control */
  19#define SNBEP_PMON_CTL_EV_SEL_MASK      0x000000ff
  20#define SNBEP_PMON_CTL_UMASK_MASK       0x0000ff00
  21#define SNBEP_PMON_CTL_RST              (1 << 17)
  22#define SNBEP_PMON_CTL_EDGE_DET         (1 << 18)
  23#define SNBEP_PMON_CTL_EV_SEL_EXT       (1 << 21)
  24#define SNBEP_PMON_CTL_EN               (1 << 22)
  25#define SNBEP_PMON_CTL_INVERT           (1 << 23)
  26#define SNBEP_PMON_CTL_TRESH_MASK       0xff000000
  27#define SNBEP_PMON_RAW_EVENT_MASK       (SNBEP_PMON_CTL_EV_SEL_MASK | \
  28                                         SNBEP_PMON_CTL_UMASK_MASK | \
  29                                         SNBEP_PMON_CTL_EDGE_DET | \
  30                                         SNBEP_PMON_CTL_INVERT | \
  31                                         SNBEP_PMON_CTL_TRESH_MASK)
  32
  33/* SNB-EP Ubox event control */
  34#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK         0x1f000000
  35#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK         \
  36                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
  37                                 SNBEP_PMON_CTL_UMASK_MASK | \
  38                                 SNBEP_PMON_CTL_EDGE_DET | \
  39                                 SNBEP_PMON_CTL_INVERT | \
  40                                 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
  41
  42#define SNBEP_CBO_PMON_CTL_TID_EN               (1 << 19)
  43#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK       (SNBEP_PMON_RAW_EVENT_MASK | \
  44                                                 SNBEP_CBO_PMON_CTL_TID_EN)
  45
  46/* SNB-EP PCU event control */
  47#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK     0x0000c000
  48#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK       0x1f000000
  49#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT       (1 << 30)
  50#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET     (1 << 31)
  51#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
  52                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
  53                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
  54                                 SNBEP_PMON_CTL_EDGE_DET | \
  55                                 SNBEP_PMON_CTL_INVERT | \
  56                                 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
  57                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
  58                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
  59
  60#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
  61                                (SNBEP_PMON_RAW_EVENT_MASK | \
  62                                 SNBEP_PMON_CTL_EV_SEL_EXT)
  63
  64/* SNB-EP pci control register */
  65#define SNBEP_PCI_PMON_BOX_CTL                  0xf4
  66#define SNBEP_PCI_PMON_CTL0                     0xd8
  67/* SNB-EP pci counter register */
  68#define SNBEP_PCI_PMON_CTR0                     0xa0
  69
  70/* SNB-EP home agent register */
  71#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0        0x40
  72#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1        0x44
  73#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH       0x48
  74/* SNB-EP memory controller register */
  75#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL         0xf0
  76#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR         0xd0
  77/* SNB-EP QPI register */
  78#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0          0x228
  79#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1          0x22c
  80#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0           0x238
  81#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1           0x23c
  82
  83/* SNB-EP Ubox register */
  84#define SNBEP_U_MSR_PMON_CTR0                   0xc16
  85#define SNBEP_U_MSR_PMON_CTL0                   0xc10
  86
  87#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL         0xc08
  88#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR         0xc09
  89
  90/* SNB-EP Cbo register */
  91#define SNBEP_C0_MSR_PMON_CTR0                  0xd16
  92#define SNBEP_C0_MSR_PMON_CTL0                  0xd10
  93#define SNBEP_C0_MSR_PMON_BOX_CTL               0xd04
  94#define SNBEP_C0_MSR_PMON_BOX_FILTER            0xd14
  95#define SNBEP_CBO_MSR_OFFSET                    0x20
  96
  97#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID       0x1f
  98#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID       0x3fc00
  99#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE     0x7c0000
 100#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC       0xff800000
 101
 102#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {    \
 103        .event = (e),                           \
 104        .msr = SNBEP_C0_MSR_PMON_BOX_FILTER,    \
 105        .config_mask = (m),                     \
 106        .idx = (i)                              \
 107}
 108
 109/* SNB-EP PCU register */
 110#define SNBEP_PCU_MSR_PMON_CTR0                 0xc36
 111#define SNBEP_PCU_MSR_PMON_CTL0                 0xc30
 112#define SNBEP_PCU_MSR_PMON_BOX_CTL              0xc24
 113#define SNBEP_PCU_MSR_PMON_BOX_FILTER           0xc34
 114#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK      0xffffffff
 115#define SNBEP_PCU_MSR_CORE_C3_CTR               0x3fc
 116#define SNBEP_PCU_MSR_CORE_C6_CTR               0x3fd
 117
 118/* IVBEP event control */
 119#define IVBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
 120                                         SNBEP_PMON_BOX_CTL_RST_CTRS)
 121#define IVBEP_PMON_RAW_EVENT_MASK               (SNBEP_PMON_CTL_EV_SEL_MASK | \
 122                                         SNBEP_PMON_CTL_UMASK_MASK | \
 123                                         SNBEP_PMON_CTL_EDGE_DET | \
 124                                         SNBEP_PMON_CTL_TRESH_MASK)
 125/* IVBEP Ubox */
 126#define IVBEP_U_MSR_PMON_GLOBAL_CTL             0xc00
 127#define IVBEP_U_PMON_GLOBAL_FRZ_ALL             (1 << 31)
 128#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL           (1 << 29)
 129
 130#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
 131                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
 132                                 SNBEP_PMON_CTL_UMASK_MASK | \
 133                                 SNBEP_PMON_CTL_EDGE_DET | \
 134                                 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
 135/* IVBEP Cbo */
 136#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK               (IVBEP_PMON_RAW_EVENT_MASK | \
 137                                                 SNBEP_CBO_PMON_CTL_TID_EN)
 138
 139#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x1fULL << 0)
 140#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 5)
 141#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x3fULL << 17)
 142#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
 143#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
 144#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
 145#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
 146#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)
 147
 148/* IVBEP home agent */
 149#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST         (1 << 16)
 150#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK                \
 151                                (IVBEP_PMON_RAW_EVENT_MASK | \
 152                                 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
 153/* IVBEP PCU */
 154#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
 155                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
 156                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
 157                                 SNBEP_PMON_CTL_EDGE_DET | \
 158                                 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
 159                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
 160                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
 161/* IVBEP QPI */
 162#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
 163                                (IVBEP_PMON_RAW_EVENT_MASK | \
 164                                 SNBEP_PMON_CTL_EV_SEL_EXT)
 165
 166#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
 167                                ((1ULL << (n)) - 1)))
 168
 169/* Haswell-EP Ubox */
 170#define HSWEP_U_MSR_PMON_CTR0                   0x709
 171#define HSWEP_U_MSR_PMON_CTL0                   0x705
 172#define HSWEP_U_MSR_PMON_FILTER                 0x707
 173
 174#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL         0x703
 175#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR         0x704
 176
 177#define HSWEP_U_MSR_PMON_BOX_FILTER_TID         (0x1 << 0)
 178#define HSWEP_U_MSR_PMON_BOX_FILTER_CID         (0x1fULL << 1)
 179#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
 180                                        (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
 181                                         HSWEP_U_MSR_PMON_BOX_FILTER_CID)
 182
 183/* Haswell-EP CBo */
 184#define HSWEP_C0_MSR_PMON_CTR0                  0xe08
 185#define HSWEP_C0_MSR_PMON_CTL0                  0xe01
 186#define HSWEP_C0_MSR_PMON_BOX_CTL                       0xe00
 187#define HSWEP_C0_MSR_PMON_BOX_FILTER0           0xe05
 188#define HSWEP_CBO_MSR_OFFSET                    0x10
 189
 190
 191#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x3fULL << 0)
 192#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 6)
 193#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x7fULL << 17)
 194#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
 195#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
 196#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
 197#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
 198#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)
 199
 200
 201/* Haswell-EP Sbox */
 202#define HSWEP_S0_MSR_PMON_CTR0                  0x726
 203#define HSWEP_S0_MSR_PMON_CTL0                  0x721
 204#define HSWEP_S0_MSR_PMON_BOX_CTL                       0x720
 205#define HSWEP_SBOX_MSR_OFFSET                   0xa
 206#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
 207                                                 SNBEP_CBO_PMON_CTL_TID_EN)
 208
 209/* Haswell-EP PCU */
 210#define HSWEP_PCU_MSR_PMON_CTR0                 0x717
 211#define HSWEP_PCU_MSR_PMON_CTL0                 0x711
 212#define HSWEP_PCU_MSR_PMON_BOX_CTL              0x710
 213#define HSWEP_PCU_MSR_PMON_BOX_FILTER           0x715
 214
 215/* KNL Ubox */
 216#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
 217                                        (SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
 218                                                SNBEP_CBO_PMON_CTL_TID_EN)
 219/* KNL CHA */
 220#define KNL_CHA_MSR_OFFSET                      0xc
 221#define KNL_CHA_MSR_PMON_CTL_QOR                (1 << 16)
 222#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
 223                                        (SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
 224                                         KNL_CHA_MSR_PMON_CTL_QOR)
 225#define KNL_CHA_MSR_PMON_BOX_FILTER_TID         0x1ff
 226#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE       (7 << 18)
 227#define KNL_CHA_MSR_PMON_BOX_FILTER_OP          (0xfffffe2aULL << 32)
 228#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE (0x1ULL << 32)
 229#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE  (0x1ULL << 33)
 230#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC         (0x1ULL << 37)
 231
 232/* KNL EDC/MC UCLK */
 233#define KNL_UCLK_MSR_PMON_CTR0_LOW              0x400
 234#define KNL_UCLK_MSR_PMON_CTL0                  0x420
 235#define KNL_UCLK_MSR_PMON_BOX_CTL               0x430
 236#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW        0x44c
 237#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL        0x454
 238#define KNL_PMON_FIXED_CTL_EN                   0x1
 239
 240/* KNL EDC */
 241#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW         0xa00
 242#define KNL_EDC0_ECLK_MSR_PMON_CTL0             0xa20
 243#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL          0xa30
 244#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW   0xa3c
 245#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL   0xa44
 246
 247/* KNL MC */
 248#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW           0xb00
 249#define KNL_MC0_CH0_MSR_PMON_CTL0               0xb20
 250#define KNL_MC0_CH0_MSR_PMON_BOX_CTL            0xb30
 251#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW          0xb3c
 252#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL          0xb44
 253
 254/* KNL IRP */
 255#define KNL_IRP_PCI_PMON_BOX_CTL                0xf0
 256#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
 257                                                 KNL_CHA_MSR_PMON_CTL_QOR)
 258/* KNL PCU */
 259#define KNL_PCU_PMON_CTL_EV_SEL_MASK            0x0000007f
 260#define KNL_PCU_PMON_CTL_USE_OCC_CTR            (1 << 7)
 261#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK         0x3f000000
 262#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
 263                                (KNL_PCU_PMON_CTL_EV_SEL_MASK | \
 264                                 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
 265                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
 266                                 SNBEP_PMON_CTL_EDGE_DET | \
 267                                 SNBEP_CBO_PMON_CTL_TID_EN | \
 268                                 SNBEP_PMON_CTL_INVERT | \
 269                                 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
 270                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
 271                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
 272
 273/* SKX pci bus to socket mapping */
 274#define SKX_CPUNODEID                   0xc0
 275#define SKX_GIDNIDMAP                   0xd4
 276
 277/*
 278 * The CPU_BUS_NUMBER MSR returns the values of the respective CPUBUSNO CSR
 279 * that BIOS programmed. MSR has package scope.
 280 * |  Bit  |  Default  |  Description
 281 * | [63]  |    00h    | VALID - When set, indicates the CPU bus
 282 *                       numbers have been initialized. (RO)
 283 * |[62:48]|    ---    | Reserved
 284 * |[47:40]|    00h    | BUS_NUM_5 - Return the bus number BIOS assigned
 285 *                       CPUBUSNO(5). (RO)
 286 * |[39:32]|    00h    | BUS_NUM_4 - Return the bus number BIOS assigned
 287 *                       CPUBUSNO(4). (RO)
 288 * |[31:24]|    00h    | BUS_NUM_3 - Return the bus number BIOS assigned
 289 *                       CPUBUSNO(3). (RO)
 290 * |[23:16]|    00h    | BUS_NUM_2 - Return the bus number BIOS assigned
 291 *                       CPUBUSNO(2). (RO)
 292 * |[15:8] |    00h    | BUS_NUM_1 - Return the bus number BIOS assigned
 293 *                       CPUBUSNO(1). (RO)
 294 * | [7:0] |    00h    | BUS_NUM_0 - Return the bus number BIOS assigned
 295 *                       CPUBUSNO(0). (RO)
 296 */
 297#define SKX_MSR_CPU_BUS_NUMBER          0x300
 298#define SKX_MSR_CPU_BUS_VALID_BIT       (1ULL << 63)
 299#define BUS_NUM_STRIDE                  8
 300
 301/* SKX CHA */
 302#define SKX_CHA_MSR_PMON_BOX_FILTER_TID         (0x1ffULL << 0)
 303#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK        (0xfULL << 9)
 304#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE       (0x3ffULL << 17)
 305#define SKX_CHA_MSR_PMON_BOX_FILTER_REM         (0x1ULL << 32)
 306#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC         (0x1ULL << 33)
 307#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC     (0x1ULL << 35)
 308#define SKX_CHA_MSR_PMON_BOX_FILTER_NM          (0x1ULL << 36)
 309#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM      (0x1ULL << 37)
 310#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0        (0x3ffULL << 41)
 311#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1        (0x3ffULL << 51)
 312#define SKX_CHA_MSR_PMON_BOX_FILTER_C6          (0x1ULL << 61)
 313#define SKX_CHA_MSR_PMON_BOX_FILTER_NC          (0x1ULL << 62)
 314#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC        (0x1ULL << 63)
 315
 316/* SKX IIO */
 317#define SKX_IIO0_MSR_PMON_CTL0          0xa48
 318#define SKX_IIO0_MSR_PMON_CTR0          0xa41
 319#define SKX_IIO0_MSR_PMON_BOX_CTL       0xa40
 320#define SKX_IIO_MSR_OFFSET              0x20
 321
 322#define SKX_PMON_CTL_TRESH_MASK         (0xff << 24)
 323#define SKX_PMON_CTL_TRESH_MASK_EXT     (0xf)
 324#define SKX_PMON_CTL_CH_MASK            (0xff << 4)
 325#define SKX_PMON_CTL_FC_MASK            (0x7 << 12)
 326#define SKX_IIO_PMON_RAW_EVENT_MASK     (SNBEP_PMON_CTL_EV_SEL_MASK | \
 327                                         SNBEP_PMON_CTL_UMASK_MASK | \
 328                                         SNBEP_PMON_CTL_EDGE_DET | \
 329                                         SNBEP_PMON_CTL_INVERT | \
 330                                         SKX_PMON_CTL_TRESH_MASK)
 331#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT (SKX_PMON_CTL_TRESH_MASK_EXT | \
 332                                         SKX_PMON_CTL_CH_MASK | \
 333                                         SKX_PMON_CTL_FC_MASK)
 334
 335/* SKX IRP */
 336#define SKX_IRP0_MSR_PMON_CTL0          0xa5b
 337#define SKX_IRP0_MSR_PMON_CTR0          0xa59
 338#define SKX_IRP0_MSR_PMON_BOX_CTL       0xa58
 339#define SKX_IRP_MSR_OFFSET              0x20
 340
 341/* SKX UPI */
 342#define SKX_UPI_PCI_PMON_CTL0           0x350
 343#define SKX_UPI_PCI_PMON_CTR0           0x318
 344#define SKX_UPI_PCI_PMON_BOX_CTL        0x378
 345#define SKX_UPI_CTL_UMASK_EXT           0xffefff
 346
 347/* SKX M2M */
 348#define SKX_M2M_PCI_PMON_CTL0           0x228
 349#define SKX_M2M_PCI_PMON_CTR0           0x200
 350#define SKX_M2M_PCI_PMON_BOX_CTL        0x258
 351
 352/* Memory Map registers device ID */
 353#define SNR_ICX_MESH2IIO_MMAP_DID               0x9a2
 354#define SNR_ICX_SAD_CONTROL_CFG         0x3f4
 355
 356/* Getting I/O stack id in SAD_COTROL_CFG notation */
 357#define SAD_CONTROL_STACK_ID(data)              (((data) >> 4) & 0x7)
 358
 359/* SNR Ubox */
 360#define SNR_U_MSR_PMON_CTR0                     0x1f98
 361#define SNR_U_MSR_PMON_CTL0                     0x1f91
 362#define SNR_U_MSR_PMON_UCLK_FIXED_CTL           0x1f93
 363#define SNR_U_MSR_PMON_UCLK_FIXED_CTR           0x1f94
 364
 365/* SNR CHA */
 366#define SNR_CHA_RAW_EVENT_MASK_EXT              0x3ffffff
 367#define SNR_CHA_MSR_PMON_CTL0                   0x1c01
 368#define SNR_CHA_MSR_PMON_CTR0                   0x1c08
 369#define SNR_CHA_MSR_PMON_BOX_CTL                0x1c00
 370#define SNR_C0_MSR_PMON_BOX_FILTER0             0x1c05
 371
 372
 373/* SNR IIO */
 374#define SNR_IIO_MSR_PMON_CTL0                   0x1e08
 375#define SNR_IIO_MSR_PMON_CTR0                   0x1e01
 376#define SNR_IIO_MSR_PMON_BOX_CTL                0x1e00
 377#define SNR_IIO_MSR_OFFSET                      0x10
 378#define SNR_IIO_PMON_RAW_EVENT_MASK_EXT         0x7ffff
 379
 380/* SNR IRP */
 381#define SNR_IRP0_MSR_PMON_CTL0                  0x1ea8
 382#define SNR_IRP0_MSR_PMON_CTR0                  0x1ea1
 383#define SNR_IRP0_MSR_PMON_BOX_CTL               0x1ea0
 384#define SNR_IRP_MSR_OFFSET                      0x10
 385
 386/* SNR M2PCIE */
 387#define SNR_M2PCIE_MSR_PMON_CTL0                0x1e58
 388#define SNR_M2PCIE_MSR_PMON_CTR0                0x1e51
 389#define SNR_M2PCIE_MSR_PMON_BOX_CTL             0x1e50
 390#define SNR_M2PCIE_MSR_OFFSET                   0x10
 391
 392/* SNR PCU */
 393#define SNR_PCU_MSR_PMON_CTL0                   0x1ef1
 394#define SNR_PCU_MSR_PMON_CTR0                   0x1ef8
 395#define SNR_PCU_MSR_PMON_BOX_CTL                0x1ef0
 396#define SNR_PCU_MSR_PMON_BOX_FILTER             0x1efc
 397
 398/* SNR M2M */
 399#define SNR_M2M_PCI_PMON_CTL0                   0x468
 400#define SNR_M2M_PCI_PMON_CTR0                   0x440
 401#define SNR_M2M_PCI_PMON_BOX_CTL                0x438
 402#define SNR_M2M_PCI_PMON_UMASK_EXT              0xff
 403
 404/* SNR PCIE3 */
 405#define SNR_PCIE3_PCI_PMON_CTL0                 0x508
 406#define SNR_PCIE3_PCI_PMON_CTR0                 0x4e8
 407#define SNR_PCIE3_PCI_PMON_BOX_CTL              0x4e0
 408
 409/* SNR IMC */
 410#define SNR_IMC_MMIO_PMON_FIXED_CTL             0x54
 411#define SNR_IMC_MMIO_PMON_FIXED_CTR             0x38
 412#define SNR_IMC_MMIO_PMON_CTL0                  0x40
 413#define SNR_IMC_MMIO_PMON_CTR0                  0x8
 414#define SNR_IMC_MMIO_PMON_BOX_CTL               0x22800
 415#define SNR_IMC_MMIO_OFFSET                     0x4000
 416#define SNR_IMC_MMIO_SIZE                       0x4000
 417#define SNR_IMC_MMIO_BASE_OFFSET                0xd0
 418#define SNR_IMC_MMIO_BASE_MASK                  0x1FFFFFFF
 419#define SNR_IMC_MMIO_MEM0_OFFSET                0xd8
 420#define SNR_IMC_MMIO_MEM0_MASK                  0x7FF
 421
 422/* ICX CHA */
 423#define ICX_C34_MSR_PMON_CTR0                   0xb68
 424#define ICX_C34_MSR_PMON_CTL0                   0xb61
 425#define ICX_C34_MSR_PMON_BOX_CTL                0xb60
 426#define ICX_C34_MSR_PMON_BOX_FILTER0            0xb65
 427
 428/* ICX IIO */
 429#define ICX_IIO_MSR_PMON_CTL0                   0xa58
 430#define ICX_IIO_MSR_PMON_CTR0                   0xa51
 431#define ICX_IIO_MSR_PMON_BOX_CTL                0xa50
 432
 433/* ICX IRP */
 434#define ICX_IRP0_MSR_PMON_CTL0                  0xa4d
 435#define ICX_IRP0_MSR_PMON_CTR0                  0xa4b
 436#define ICX_IRP0_MSR_PMON_BOX_CTL               0xa4a
 437
 438/* ICX M2PCIE */
 439#define ICX_M2PCIE_MSR_PMON_CTL0                0xa46
 440#define ICX_M2PCIE_MSR_PMON_CTR0                0xa41
 441#define ICX_M2PCIE_MSR_PMON_BOX_CTL             0xa40
 442
 443/* ICX UPI */
 444#define ICX_UPI_PCI_PMON_CTL0                   0x350
 445#define ICX_UPI_PCI_PMON_CTR0                   0x320
 446#define ICX_UPI_PCI_PMON_BOX_CTL                0x318
 447#define ICX_UPI_CTL_UMASK_EXT                   0xffffff
 448
 449/* ICX M3UPI*/
 450#define ICX_M3UPI_PCI_PMON_CTL0                 0xd8
 451#define ICX_M3UPI_PCI_PMON_CTR0                 0xa8
 452#define ICX_M3UPI_PCI_PMON_BOX_CTL              0xa0
 453
 454/* ICX IMC */
 455#define ICX_NUMBER_IMC_CHN                      3
 456#define ICX_IMC_MEM_STRIDE                      0x4
 457
 458/* SPR */
 459#define SPR_RAW_EVENT_MASK_EXT                  0xffffff
 460
 461/* SPR CHA */
 462#define SPR_CHA_PMON_CTL_TID_EN                 (1 << 16)
 463#define SPR_CHA_PMON_EVENT_MASK                 (SNBEP_PMON_RAW_EVENT_MASK | \
 464                                                 SPR_CHA_PMON_CTL_TID_EN)
 465#define SPR_CHA_PMON_BOX_FILTER_TID             0x3ff
 466
 467#define SPR_C0_MSR_PMON_BOX_FILTER0             0x200e
 468
 469DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 470DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
 471DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
 472DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
 473DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
 474DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
 475DEFINE_UNCORE_FORMAT_ATTR(umask_ext2, umask, "config:8-15,32-57");
 476DEFINE_UNCORE_FORMAT_ATTR(umask_ext3, umask, "config:8-15,32-39");
 477DEFINE_UNCORE_FORMAT_ATTR(umask_ext4, umask, "config:8-15,32-55");
 478DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
 479DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
 480DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
 481DEFINE_UNCORE_FORMAT_ATTR(tid_en2, tid_en, "config:16");
 482DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
 483DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
 484DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
 485DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
 486DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
 487DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
 488DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
 489DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
 490DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
 491DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
 492DEFINE_UNCORE_FORMAT_ATTR(ch_mask2, ch_mask, "config:36-47");
 493DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
 494DEFINE_UNCORE_FORMAT_ATTR(fc_mask2, fc_mask, "config:48-50");
 495DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
 496DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
 497DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
 498DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
 499DEFINE_UNCORE_FORMAT_ATTR(filter_tid5, filter_tid, "config1:0-9");
 500DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
 501DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
 502DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
 503DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
 504DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
 505DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
 506DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
 507DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
 508DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
 509DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
 510DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
 511DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
 512DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
 513DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
 514DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
 515DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
 516DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
 517DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
 518DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
 519DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
 520DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
 521DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
 522DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
 523DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
 524DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
 525DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
 526DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
 527DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
 528DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
 529DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
 530DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
 531DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
 532DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
 533DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
 534DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
 535DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
 536DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
 537DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
 538DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
 539DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
 540DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
 541DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
 542DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
 543DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
 544DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
 545DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
 546DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
 547DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
 548
 549static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
 550{
 551        struct pci_dev *pdev = box->pci_dev;
 552        int box_ctl = uncore_pci_box_ctl(box);
 553        u32 config = 0;
 554
 555        if (!pci_read_config_dword(pdev, box_ctl, &config)) {
 556                config |= SNBEP_PMON_BOX_CTL_FRZ;
 557                pci_write_config_dword(pdev, box_ctl, config);
 558        }
 559}
 560
 561static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
 562{
 563        struct pci_dev *pdev = box->pci_dev;
 564        int box_ctl = uncore_pci_box_ctl(box);
 565        u32 config = 0;
 566
 567        if (!pci_read_config_dword(pdev, box_ctl, &config)) {
 568                config &= ~SNBEP_PMON_BOX_CTL_FRZ;
 569                pci_write_config_dword(pdev, box_ctl, config);
 570        }
 571}
 572
 573static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
 574{
 575        struct pci_dev *pdev = box->pci_dev;
 576        struct hw_perf_event *hwc = &event->hw;
 577
 578        pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
 579}
 580
 581static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
 582{
 583        struct pci_dev *pdev = box->pci_dev;
 584        struct hw_perf_event *hwc = &event->hw;
 585
 586        pci_write_config_dword(pdev, hwc->config_base, hwc->config);
 587}
 588
 589static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
 590{
 591        struct pci_dev *pdev = box->pci_dev;
 592        struct hw_perf_event *hwc = &event->hw;
 593        u64 count = 0;
 594
 595        pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
 596        pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
 597
 598        return count;
 599}
 600
 601static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
 602{
 603        struct pci_dev *pdev = box->pci_dev;
 604        int box_ctl = uncore_pci_box_ctl(box);
 605
 606        pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
 607}
 608
 609static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
 610{
 611        u64 config;
 612        unsigned msr;
 613
 614        msr = uncore_msr_box_ctl(box);
 615        if (msr) {
 616                rdmsrl(msr, config);
 617                config |= SNBEP_PMON_BOX_CTL_FRZ;
 618                wrmsrl(msr, config);
 619        }
 620}
 621
 622static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
 623{
 624        u64 config;
 625        unsigned msr;
 626
 627        msr = uncore_msr_box_ctl(box);
 628        if (msr) {
 629                rdmsrl(msr, config);
 630                config &= ~SNBEP_PMON_BOX_CTL_FRZ;
 631                wrmsrl(msr, config);
 632        }
 633}
 634
 635static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
 636{
 637        struct hw_perf_event *hwc = &event->hw;
 638        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
 639
 640        if (reg1->idx != EXTRA_REG_NONE)
 641                wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
 642
 643        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
 644}
 645
 646static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
 647                                        struct perf_event *event)
 648{
 649        struct hw_perf_event *hwc = &event->hw;
 650
 651        wrmsrl(hwc->config_base, hwc->config);
 652}
 653
 654static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
 655{
 656        unsigned msr = uncore_msr_box_ctl(box);
 657
 658        if (msr)
 659                wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
 660}
 661
/* Generic SNB-EP PMON event format (8-bit threshold field). */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
 670
/* Ubox format: same as generic but only a 5-bit threshold (see TRESH_MASK). */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
 679
/* Cbox format: adds tid_en plus the tid/nid/state/opc filter fields. */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};
 693
/* PCU format: occupancy select/edge/invert and the four band filters. */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
 708
/* QPI format: extended event select plus packet match/mask filter fields. */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
 735
/*
 * IMC (memory controller) event aliases.  The cas_count scale,
 * 6.103515625e-5 == 64/2^20, converts the raw count (presumably 64-byte
 * accesses — confirm against the uncore PMU guide) into MiB.
 */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
 746
/* QPI link event aliases (event codes > 0xff use the extended select bit). */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
 754
/* sysfs "format" groups exposing the per-box attribute tables above. */
static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
 779
 780#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                    \
 781        .disable_box    = snbep_uncore_msr_disable_box,         \
 782        .enable_box     = snbep_uncore_msr_enable_box,          \
 783        .disable_event  = snbep_uncore_msr_disable_event,       \
 784        .enable_event   = snbep_uncore_msr_enable_event,        \
 785        .read_counter   = uncore_msr_read_counter
 786
 787#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
 788        __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),                   \
 789        .init_box       = snbep_uncore_msr_init_box             \
 790
 791static struct intel_uncore_ops snbep_uncore_msr_ops = {
 792        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
 793};
 794
 795#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()                      \
 796        .init_box       = snbep_uncore_pci_init_box,            \
 797        .disable_box    = snbep_uncore_pci_disable_box,         \
 798        .enable_box     = snbep_uncore_pci_enable_box,          \
 799        .disable_event  = snbep_uncore_pci_disable_event,       \
 800        .read_counter   = snbep_uncore_pci_read_counter
 801
 802static struct intel_uncore_ops snbep_uncore_pci_ops = {
 803        SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
 804        .enable_event   = snbep_uncore_pci_enable_event,        \
 805};
 806
/*
 * Cbox scheduling constraints: (event code, mask of counters the event
 * may be scheduled on).
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};
 836
/* R2PCIe scheduling constraints (event code, allowed counter mask). */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};
 850
/* R3QPI scheduling constraints (event code, allowed counter mask). */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
 882
/* Ubox: one box, two 44-bit general counters plus a 48-bit fixed uclk counter. */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
 897
/*
 * Map Cbox event+umask encodings to the filter-register fields they use.
 * The last argument becomes a bitmap of required fields (matched up by
 * snbep_cbox_filter_mask: 0x1=TID, 0x2=NID, 0x4=STATE, 0x8=OPC).
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
 926
/*
 * Release the shared Cbox filter-register fields held by @event.
 * Each field owns a 6-bit reference-count slice inside er->ref.
 */
static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i;

	/* Fake boxes (validation path) never took references. */
	if (uncore_box_is_fake(box))
		return;

	/* Drop one reference for every field this event had allocated. */
	for (i = 0; i < 5; i++) {
		if (reg1->alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	reg1->alloc = 0;
}
 942
/*
 * Try to acquire the shared Cbox filter-register fields that @event
 * needs.  reg1->idx is a bitmap of required fields; each field has a
 * 6-bit reference count packed into er->ref.  A field may be shared as
 * long as every user programs the same value into it (the masked config
 * compare below).  Returns NULL on success, or the empty constraint if
 * some field is already in use with a conflicting value.
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* On a real box, skip fields this event already holds. */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/* Field is free, or already carries the same value: take a ref. */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	/* Only remember ownership on real boxes; fake ones are probes. */
	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* Roll back the references taken before the conflicting field. */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
 989
 990static u64 snbep_cbox_filter_mask(int fields)
 991{
 992        u64 mask = 0;
 993
 994        if (fields & 0x1)
 995                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
 996        if (fields & 0x2)
 997                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
 998        if (fields & 0x4)
 999                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1000        if (fields & 0x8)
1001                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1002
1003        return mask;
1004}
1005
/* Cbox constraint lookup using the SNB-EP filter field layout. */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
1011
/*
 * Collect the filter fields this event needs (from the extra_regs table)
 * and, if any, point reg1 at this Cbox's filter register with the
 * relevant bits of config1.
 */
static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* Each Cbox has its own filter register, MSR_OFFSET apart. */
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
1032
/* Cbox ops: common MSR callbacks plus filter-register management. */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
1039
/* Cbox: up to 8 boxes (trimmed to core count in snbep_uncore_cpu_init). */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
1055
1056static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
1057{
1058        struct hw_perf_event *hwc = &event->hw;
1059        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1060        u64 config = reg1->config;
1061
1062        if (new_idx > reg1->idx)
1063                config <<= 8 * (new_idx - reg1->idx);
1064        else
1065                config >>= 8 * (reg1->idx - new_idx);
1066
1067        if (modify) {
1068                hwc->config += new_idx - reg1->idx;
1069                reg1->config = config;
1070                reg1->idx = new_idx;
1071        }
1072        return config;
1073}
1074
/*
 * Acquire one byte lane of the shared PCU filter register for @event.
 * If the preferred lane conflicts, rotate through the other three lanes
 * (relocating the filter value with snbep_pcu_alter_er) before giving
 * up with the empty constraint.  Returns NULL on success.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* No filter needed, or this (real) event already holds a lane. */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* Lane is free (8-bit refcount zero) or holds the same value. */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			/* Recompute the filter value for the candidate lane. */
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* Commit the relocation and remember that we hold the lane. */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
1116
/* Drop the reference on the PCU filter byte lane held by @event. */
static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];

	/* Fake boxes never take references; nothing to do if none was taken. */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	atomic_sub(1 << (reg1->idx * 8), &er->ref);
	reg1->alloc = 0;
}
1128
1129static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1130{
1131        struct hw_perf_event *hwc = &event->hw;
1132        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1133        int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
1134
1135        if (ev_sel >= 0xb && ev_sel <= 0xe) {
1136                reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
1137                reg1->idx = ev_sel - 0xb;
1138                reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
1139        }
1140        return 0;
1141}
1142
/* PCU ops: common MSR callbacks plus filter byte-lane management. */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
1149
/* PCU (power control unit): one box, four 48-bit counters, shared filter. */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
1163
/* NULL-terminated list of the MSR-accessed uncore PMU types. */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
1170
1171void snbep_uncore_cpu_init(void)
1172{
1173        if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1174                snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1175        uncore_msr_uncores = snbep_msr_uncores;
1176}
1177
/* Slot indices into uncore_extra_pci_dev[].dev[] for QPI filter devices. */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	BDX_PCI_QPI_PORT2_FILTER,
};
1183
1184static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1185{
1186        struct hw_perf_event *hwc = &event->hw;
1187        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1188        struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1189
1190        if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1191                reg1->idx = 0;
1192                reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1193                reg1->config = event->attr.config1;
1194                reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1195                reg2->config = event->attr.config2;
1196        }
1197        return 0;
1198}
1199
/*
 * Enable a QPI event.  If the event uses packet match/mask filtering,
 * the filter registers live on a separate per-port "filter" PCI device
 * looked up via uncore_extra_pci_dev[]; program them there before
 * setting the EN bit on the counting device itself.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int die = box->dieid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[die].dev[idx];

		/* The filter device may be absent; skip filter setup then. */
		if (filter_pdev) {
			/* 64-bit match/mask values, written as two dwords each. */
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1226
/* QPI ops: common PCI callbacks with filter-aware enable_event. */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
1234
/* Register layout shared by all plain PCI-based SNB-EP uncore types. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
1242
/* HA (home agent): one box, four 48-bit counters. */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IMC: four channels, each with four general and one fixed counter. */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* QPI: two ports; uses its own ops/event mask for match/mask filtering. */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


/* R2PCIe ring-to-PCIe interface: one box, 44-bit counters. */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* R3QPI ring-to-QPI interface: two links, three counters each. */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1296
/* Indices into snbep_pci_uncores[], referenced by the PCI id table below. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};
1304
/* NULL-terminated list of the PCI-accessed uncore PMU types. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1313
/*
 * PCI device table: driver_data packs the uncore type index and the box
 * (or extra-device slot) index via UNCORE_PCI_DEV_DATA().
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1367
/* Only the id table is filled in; probing is handled by the uncore core. */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1372
#define NODE_ID_MASK	0x7	/* low 3 bits of the CPUNODEID register hold the node id */
1374
1375/*
1376 * build pci bus to socket mapping
1377 */
1378static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
1379{
1380        struct pci_dev *ubox_dev = NULL;
1381        int i, bus, nodeid, segment, die_id;
1382        struct pci2phy_map *map;
1383        int err = 0;
1384        u32 config = 0;
1385
1386        while (1) {
1387                /* find the UBOX device */
1388                ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
1389                if (!ubox_dev)
1390                        break;
1391                bus = ubox_dev->bus->number;
1392                /*
1393                 * The nodeid and idmap registers only contain enough
1394                 * information to handle 8 nodes.  On systems with more
1395                 * than 8 nodes, we need to rely on NUMA information,
1396                 * filled in from BIOS supplied information, to determine
1397                 * the topology.
1398                 */
1399                if (nr_node_ids <= 8) {
1400                        /* get the Node ID of the local register */
1401                        err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
1402                        if (err)
1403                                break;
1404                        nodeid = config & NODE_ID_MASK;
1405                        /* get the Node ID mapping */
1406                        err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
1407                        if (err)
1408                                break;
1409
1410                        segment = pci_domain_nr(ubox_dev->bus);
1411                        raw_spin_lock(&pci2phy_map_lock);
1412                        map = __find_pci2phy_map(segment);
1413                        if (!map) {
1414                                raw_spin_unlock(&pci2phy_map_lock);
1415                                err = -ENOMEM;
1416                                break;
1417                        }
1418
1419                        /*
1420                         * every three bits in the Node ID mapping register maps
1421                         * to a particular node.
1422                         */
1423                        for (i = 0; i < 8; i++) {
1424                                if (nodeid == ((config >> (3 * i)) & 0x7)) {
1425                                        if (topology_max_die_per_package() > 1)
1426                                                die_id = i;
1427                                        else
1428                                                die_id = topology_phys_to_logical_pkg(i);
1429                                        if (die_id < 0)
1430                                                die_id = -ENODEV;
1431                                        map->pbus_to_dieid[bus] = die_id;
1432                                        break;
1433                                }
1434                        }
1435                        raw_spin_unlock(&pci2phy_map_lock);
1436                } else {
1437                        int node = pcibus_to_node(ubox_dev->bus);
1438                        int cpu;
1439
1440                        segment = pci_domain_nr(ubox_dev->bus);
1441                        raw_spin_lock(&pci2phy_map_lock);
1442                        map = __find_pci2phy_map(segment);
1443                        if (!map) {
1444                                raw_spin_unlock(&pci2phy_map_lock);
1445                                err = -ENOMEM;
1446                                break;
1447                        }
1448
1449                        die_id = -1;
1450                        for_each_cpu(cpu, cpumask_of_pcibus(ubox_dev->bus)) {
1451                                struct cpuinfo_x86 *c = &cpu_data(cpu);
1452
1453                                if (c->initialized && cpu_to_node(cpu) == node) {
1454                                        map->pbus_to_dieid[bus] = die_id = c->logical_die_id;
1455                                        break;
1456                                }
1457                        }
1458                        raw_spin_unlock(&pci2phy_map_lock);
1459
1460                        if (WARN_ON_ONCE(die_id == -1)) {
1461                                err = -EINVAL;
1462                                break;
1463                        }
1464                }
1465        }
1466
1467        if (!err) {
1468                /*
1469                 * For PCI bus with no UBOX device, find the next bus
1470                 * that has UBOX device and use its mapping.
1471                 */
1472                raw_spin_lock(&pci2phy_map_lock);
1473                list_for_each_entry(map, &pci2phy_map_head, list) {
1474                        i = -1;
1475                        if (reverse) {
1476                                for (bus = 255; bus >= 0; bus--) {
1477                                        if (map->pbus_to_dieid[bus] != -1)
1478                                                i = map->pbus_to_dieid[bus];
1479                                        else
1480                                                map->pbus_to_dieid[bus] = i;
1481                                }
1482                        } else {
1483                                for (bus = 0; bus <= 255; bus++) {
1484                                        if (map->pbus_to_dieid[bus] != -1)
1485                                                i = map->pbus_to_dieid[bus];
1486                                        else
1487                                                map->pbus_to_dieid[bus] = i;
1488                                }
1489                        }
1490                }
1491                raw_spin_unlock(&pci2phy_map_lock);
1492        }
1493
1494        pci_dev_put(ubox_dev);
1495
1496        return err ? pcibios_err_to_errno(err) : 0;
1497}
1498
1499int snbep_uncore_pci_init(void)
1500{
1501        int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1502        if (ret)
1503                return ret;
1504        uncore_pci_uncores = snbep_pci_uncores;
1505        uncore_pci_driver = &snbep_uncore_pci_driver;
1506        return 0;
1507}
1508/* end of Sandy Bridge-EP uncore support */
1509
1510/* IvyTown uncore support */
1511static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1512{
1513        unsigned msr = uncore_msr_box_ctl(box);
1514        if (msr)
1515                wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1516}
1517
1518static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1519{
1520        struct pci_dev *pdev = box->pci_dev;
1521
1522        pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1523}
1524
/* Common MSR-box callbacks for IvyTown; only .init_box differs from SNB-EP */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

/* Generic MSR-based box operations for IvyTown */
static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

/* Generic PCI-based box operations for IvyTown */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* Register layout shared by the IvyTown PCI uncore PMU types */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group
1553
/* Basic event format shared by most IvyTown PMON boxes */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox format: 5-bit threshold instead of the usual 8-bit one */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* Cbox format: adds TID enable plus the box filter fields */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

/* PCU format: occupancy select/invert/edge and the four band filters */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI format: extended event select plus the match/mask filter registers */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};

static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1653
/* IvyTown Ubox: 2 GP counters (44 bit) plus a fixed 48-bit UCLK counter */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1668
/*
 * Event -> Cbox filter-field mapping.  The third argument is the bitmask of
 * filter fields the event needs; it is decoded by ivbep_cbox_filter_mask().
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1709
1710static u64 ivbep_cbox_filter_mask(int fields)
1711{
1712        u64 mask = 0;
1713
1714        if (fields & 0x1)
1715                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1716        if (fields & 0x2)
1717                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1718        if (fields & 0x4)
1719                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1720        if (fields & 0x8)
1721                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1722        if (fields & 0x10) {
1723                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1724                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1725                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1726                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1727        }
1728
1729        return mask;
1730}
1731
/* Shared SNB-EP constraint logic, parameterized with the IVT filter mask */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1737
1738static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1739{
1740        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1741        struct extra_reg *er;
1742        int idx = 0;
1743
1744        for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1745                if (er->event != (event->hw.config & er->config_mask))
1746                        continue;
1747                idx |= er->idx;
1748        }
1749
1750        if (idx) {
1751                reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1752                        SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1753                reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1754                reg1->idx = idx;
1755        }
1756        return 0;
1757}
1758
/*
 * Enable a Cbox event.  When the event uses the box filter, program the
 * filter MSR pair before setting the enable bit in the control register.
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		/* 64-bit filter value is split across two MSRs; the high
		 * half sits 6 MSRs above the low half (IVT MSR layout —
		 * NOTE(review): offset taken on trust from the hardware). */
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1772
/* Cbox ops: common MSR ops plus filter-aware enable/config/constraints */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/* Up to 15 Cboxes; trimmed to the core count in ivbep_uncore_cpu_init() */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1800
/* PCU ops: common MSR ops plus PCU-specific config and constraint hooks */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* Power Control Unit */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};

/* All MSR-based uncore PMU types on IvyTown */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1828
1829void ivbep_uncore_cpu_init(void)
1830{
1831        if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1832                ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1833        uncore_msr_uncores = ivbep_msr_uncores;
1834}
1835
/* Home Agent */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Integrated Memory Controller; also exposes a fixed counter */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* registers in IRP boxes are not properly aligned */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1859
1860static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1861{
1862        struct pci_dev *pdev = box->pci_dev;
1863        struct hw_perf_event *hwc = &event->hw;
1864
1865        pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1866                               hwc->config | SNBEP_PMON_CTL_EN);
1867}
1868
1869static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1870{
1871        struct pci_dev *pdev = box->pci_dev;
1872        struct hw_perf_event *hwc = &event->hw;
1873
1874        pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1875}
1876
1877static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1878{
1879        struct pci_dev *pdev = box->pci_dev;
1880        struct hw_perf_event *hwc = &event->hw;
1881        u64 count = 0;
1882
1883        pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1884        pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1885
1886        return count;
1887}
1888
/* IRP ops: table-driven event enable/disable/read (irregular registers) */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};

/* IIO Ring Port */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};

/* QPI ops: QPI-specific enable/hw_config handle the match/mask registers */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint = uncore_get_constraint,
	.put_constraint = uncore_put_constraint,
};

/* QPI link layer */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1934
/* Ring-to-PCIe agent */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Ring-to-QPI agent */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* Indexes into ivbep_pci_uncores[]; also encoded in .driver_data below */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};

/* All PCI-based uncore PMU types on IvyTown */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1971
/* PCI match table; .driver_data encodes (uncore type index, box index) */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
2057
/* Matches all IvyTown uncore PCI devices; hooked up via the global
 * uncore_pci_driver pointer in ivbep_uncore_pci_init(). */
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};
2062
2063int ivbep_uncore_pci_init(void)
2064{
2065        int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2066        if (ret)
2067                return ret;
2068        uncore_pci_uncores = ivbep_pci_uncores;
2069        uncore_pci_driver = &ivbep_uncore_pci_driver;
2070        return 0;
2071}
2072/* end of IvyTown uncore support */
2073
2074/* KNL uncore support */
/* KNL Ubox format: 5-bit threshold plus TID enable */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};

/* KNL Ubox reuses the HSW-EP register layout with a KNL event mask */
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};
2104
/* KNL CHA format: Cbox-style fields plus KNL-specific filter attributes */
static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};

/* These events are restricted to counter 0 (counter mask 0x1) */
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

/* Event -> filter-field mapping; third argument feeds knl_cha_filter_mask() */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};
2145
2146static u64 knl_cha_filter_mask(int fields)
2147{
2148        u64 mask = 0;
2149
2150        if (fields & 0x1)
2151                mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
2152        if (fields & 0x2)
2153                mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
2154        if (fields & 0x4)
2155                mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
2156        return mask;
2157}
2158
/* Shared SNB-EP constraint logic, parameterized with the KNL filter mask */
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
2164
/*
 * Set up the per-box CHA filter register for events that need it.
 * The union of matching extra_regs entries determines which filter
 * fields (and thus which config1 bits) the event may program.
 */
static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	/* Collect the filter-field bits of every matching extra reg. */
	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* Filter MSR of this box: base + per-box stride. */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);

		/* Unconditionally set the node and NNC filter bits. */
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
		reg1->idx = idx;
	}
	return 0;
}
2190
/* Defined with the Haswell-EP Cbox support below; the KNL CHA reuses it. */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);
2193
/* KNL CHA boxes are MSR based; event enable reuses the HSX-EP Cbox path. */
static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2205
/* KNL CHA uncore PMU: up to 38 boxes, 4 counters of 48 bits each. */
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,	/* the shared filter register */
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};
2221
/* KNL PCU event encoding fields exposed through sysfs. */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};
2239
/* KNL PCU uncore PMU: a single box with four 48-bit counters. */
static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};
2252
/* NULL-terminated list of the MSR based KNL uncore PMU types. */
static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};
2259
/* Hand the MSR based KNL uncore PMUs to the generic uncore core. */
void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
2264
2265static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
2266{
2267        struct pci_dev *pdev = box->pci_dev;
2268        int box_ctl = uncore_pci_box_ctl(box);
2269
2270        pci_write_config_dword(pdev, box_ctl, 0);
2271}
2272
2273static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2274                                        struct perf_event *event)
2275{
2276        struct pci_dev *pdev = box->pci_dev;
2277        struct hw_perf_event *hwc = &event->hw;
2278
2279        if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2280                                                        == UNCORE_FIXED_EVENT)
2281                pci_write_config_dword(pdev, hwc->config_base,
2282                                       hwc->config | KNL_PMON_FIXED_CTL_EN);
2283        else
2284                pci_write_config_dword(pdev, hwc->config_base,
2285                                       hwc->config | SNBEP_PMON_CTL_EN);
2286}
2287
/* PCI config space ops shared by the KNL IMC and EDC PMUs. */
static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};
2296
/* KNL memory controller UCLK counters, accessed through PCI config space. */
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2312
/* KNL memory controller DCLK (channel) counters, one box per channel. */
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2328
/* KNL embedded DRAM controller UCLK counters (eight EDC boxes). */
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2344
/* KNL embedded DRAM controller ECLK counters (eight EDC boxes). */
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2360
/* M2PCIe event 0x23 may only run on counters 0-1 (mask 0x3). */
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
2365
/* KNL M2PCIe PMU; register layout comes from the common SNB-EP PCI init. */
static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2374
/* KNL IRP event encoding fields exposed through sysfs. */
static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};
2389
/* KNL IRP PMU: one box with two 48-bit counters, PCI based. */
static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};
2402
/* Indices into knl_pci_uncores[] used by the PCI id table below. */
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};
2411
/* PCI based KNL uncore PMU types, indexed by the enum above. */
static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};
2421
2422/*
2423 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
 * device type. Prior to KNL, each instance of a PMU device type had a unique
2425 * device ID.
2426 *
2427 *      PCI Device ID   Uncore PMU Devices
2428 *      ----------------------------------
2429 *      0x7841          MC0 UClk, MC1 UClk
2430 *      0x7843          MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2431 *                      MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2432 *      0x7833          EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2433 *                      EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2434 *      0x7835          EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2435 *                      EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2436 *      0x7817          M2PCIe
2437 *      0x7814          IRP
 */
2439
/* driver_data encodes bus device/function and the knl_pci_uncores index. */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};
2547
/* Matching table only; no probe/remove callbacks are installed here. */
static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};
2552
/*
 * Build the PCI bus -> package mapping and register the PCI based KNL
 * uncore PMUs.  Returns 0 on success, or the error returned by
 * snb_pci2phy_map_init() on failure.
 */
int knl_uncore_pci_init(void)
{
	int ret;

	/* All KNL PCI based PMON units are on the same PCI bus except IRP */
	ret = snb_pci2phy_map_init(0x7814); /* IRP */
	if (ret)
		return ret;
	ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
	if (ret)
		return ret;
	uncore_pci_uncores = knl_pci_uncores;
	uncore_pci_driver = &knl_uncore_pci_driver;
	return 0;
}
2568
2569/* end of KNL uncore support */
2570
2571/* Haswell-EP uncore support */
/* Haswell-EP Ubox event encoding fields exposed through sysfs. */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
2587
2588static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2589{
2590        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2591        reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2592        reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2593        reg1->idx = 0;
2594        return 0;
2595}
2596
/* Ubox ops: common MSR ops plus generic arbitration of the filter reg. */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
2603
/* Haswell-EP Ubox: two 44-bit counters plus a 48-bit fixed UCLK counter. */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,	/* the shared filter register */
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
2619
/* Haswell-EP Cbox event and filter encoding fields exposed through sysfs. */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};
2641
/* Cbox events restricted to specific counters (0x1 = ctr0, 0x3 = ctr0-1). */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2652
/*
 * Cbox events that use the shared filter register.  The idx value is a
 * bit set of the filter fields the event needs; it is decoded by
 * hswep_cbox_filter_mask() in hswep_cbox_hw_config().
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	EVENT_EXTRA_END
};
2694
2695static u64 hswep_cbox_filter_mask(int fields)
2696{
2697        u64 mask = 0;
2698        if (fields & 0x1)
2699                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2700        if (fields & 0x2)
2701                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2702        if (fields & 0x4)
2703                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2704        if (fields & 0x8)
2705                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2706        if (fields & 0x10) {
2707                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2708                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2709                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2710                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2711        }
2712        return mask;
2713}
2714
/* Arbitrate the shared Cbox filter register via the common helper. */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2720
/*
 * Set up the per-box Cbox filter register for events that need it;
 * the union of matching extra_regs entries selects the usable fields.
 */
static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	/* Collect the filter-field bits of every matching extra reg. */
	for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* Filter MSR of this box: base + per-box stride. */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
2741
/*
 * Enable a Cbox event.  If it uses the shared filter, first write the
 * 64-bit filter value: low half to reg1->reg, high half to the next MSR.
 */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
2756
/* Cbox ops: standard MSR ops plus filter-aware enable and scheduling. */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2768
/* Haswell-EP Cbox: up to 18 boxes (trimmed to core count at init). */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,	/* the shared filter register */
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2784
/*
 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
 */
static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr) {
		u64 init = SNBEP_PMON_BOX_CTL_INT;
		u64 flags = 0;
		int i;

		/* Accumulate one set bit per write instead of writing all at once. */
		for_each_set_bit(i, (unsigned long *)&init, 64) {
			flags |= (1ULL << i);
			wrmsrl(msr, flags);
		}
	}
}
2803
/* SBOX ops: common MSR ops with the bit-by-bit init_box override. */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};
2808
/* Haswell-EP SBOX event encoding fields exposed through sysfs. */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};
2823
/* Haswell-EP SBOX: four boxes (reduced to two on some SKUs at cpu_init). */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2837
2838static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2839{
2840        struct hw_perf_event *hwc = &event->hw;
2841        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2842        int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
2843
2844        if (ev_sel >= 0xb && ev_sel <= 0xe) {
2845                reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
2846                reg1->idx = ev_sel - 0xb;
2847                reg1->config = event->attr.config1 & (0xff << reg1->idx);
2848        }
2849        return 0;
2850}
2851
/* PCU ops: common MSR ops plus band-filter config and arbitration. */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
2858
/* Haswell-EP PCU: one box, four 48-bit counters, shared band filter. */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,	/* the shared band filter register */
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
2872
/* NULL-terminated list of the MSR based Haswell-EP uncore PMU types. */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2880
/* PCU PCI device used to read CAPID4; CHOP is bits 7:6 of that register. */
/* Note: "OFFET" (sic) is the established name; kept for consistency. */
#define HSWEP_PCU_DID			0x2fc0
#define HSWEP_PCU_CAPID4_OFFET		0x94
#define hswep_get_chop(_cap)		(((_cap) >> 6) & 0x3)
2884
2885static bool hswep_has_limit_sbox(unsigned int device)
2886{
2887        struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
2888        u32 capid4;
2889
2890        if (!dev)
2891                return false;
2892
2893        pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
2894        if (!hswep_get_chop(capid4))
2895                return true;
2896
2897        return false;
2898}
2899
/*
 * Register the MSR based Haswell-EP uncore PMUs, trimming the Cbox
 * count to the package's core count and the SBOX count on reduced SKUs.
 */
void hswep_uncore_cpu_init(void)
{
	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (hswep_has_limit_sbox(HSWEP_PCU_DID))
		hswep_uncore_sbox.num_boxes = 2;

	uncore_msr_uncores = hswep_msr_uncores;
}
2911
/* HSW-EP home agent (HA) PMON: two boxes, four 48-bit counters each. */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2919
/*
 * Named IMC events exposed via sysfs.  The CAS count scale
 * 6.103515625e-5 == 64/2^20: each CAS moves a 64-byte line, so the
 * scaled value reads out in MiB (units given by the .unit entries).
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
2930
/*
 * HSW-EP IMC (memory controller) PMON: up to 8 channels, each with four
 * generic counters plus a fixed 48-bit DCLK counter.
 */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2942
/* PCI config-space offsets of the four IRP counters, indexed by hwc->idx. */
static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2944
2945static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2946{
2947        struct pci_dev *pdev = box->pci_dev;
2948        struct hw_perf_event *hwc = &event->hw;
2949        u64 count = 0;
2950
2951        pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2952        pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2953
2954        return count;
2955}
2956
/*
 * IRP ops: common SNBEP PCI box control, IVB-EP style event enable/disable,
 * but a custom read_counter for the IRP's non-standard counter offsets.
 */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};
2965
/*
 * HSW-EP IRP PMON.  No .perf_ctr/.event_ctl here: counter access goes
 * through hswep_uncore_irp_ops (see hswep_uncore_irp_ctrs).
 */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2976
/*
 * HSW-EP QPI PMON: three link boxes.  One shared reg backs the QPI match/
 * mask filter registers managed by snbep_uncore_qpi_ops.
 */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2990
/*
 * R2PCIe events restricted to a subset of counters; the second argument
 * is the allowed-counter bitmask (0x1 = counter 0 only, 0x3 = 0-1).
 */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};
3012
/* HSW-EP R2PCIe PMON: one box, counter placement limited by constraints. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3021
/*
 * R3QPI counter-placement constraints (event id, allowed-counter mask).
 */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
3058
/* HSW-EP R3QPI PMON: three boxes of three 44-bit counters. */
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3067
/* Indices into hswep_pci_uncores[], matched by the PCI ID driver_data. */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};
3076
/* NULL-terminated list of HSW-EP PCI-based uncore PMU types, indexed above. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,
};
3086
/*
 * HSW-EP uncore PCI device IDs.  driver_data packs the uncore type index
 * and the box number; UNCORE_EXTRA_PCI_DEV entries are helper devices
 * (QPI port filters) used by other boxes rather than PMUs themselves.
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
3172
/* ID-table-only driver; the generic uncore PCI code does the probing. */
static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
3177
3178int hswep_uncore_pci_init(void)
3179{
3180        int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3181        if (ret)
3182                return ret;
3183        uncore_pci_uncores = hswep_pci_uncores;
3184        uncore_pci_driver = &hswep_uncore_pci_driver;
3185        return 0;
3186}
3187/* end of Haswell-EP uncore support */
3188
3189/* BDX uncore support */
3190
/*
 * BDX UBOX PMON: two generic counters plus a fixed UCLK counter, reusing
 * the HSW-EP register layout and IVB-EP MSR ops.
 */
static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3206
/* BDX CBOX events restricted to specific counters (event id, counter mask). */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
3214
/*
 * BDX CBOX PMON: one box per core (up to 24; clamped to the real core
 * count in bdx_uncore_cpu_init()), HSW-EP register layout and ops.
 */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
3230
/*
 * BDX SBOX PMON.  The whole entry is removed from bdx_msr_uncores[] at
 * init time on parts without SBOXes (see bdx_uncore_cpu_init()).
 */
static struct intel_uncore_type bdx_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
3244
/* Index of the SBOX entry in bdx_msr_uncores[] so bdx_uncore_cpu_init()
 * can NULL it out on parts without SBOXes. */
#define BDX_MSR_UNCORE_SBOX	3

/* NULL-terminated list of BDX MSR-based uncore PMU types (SBOX last). */
static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	&bdx_uncore_sbox,
	NULL,
};
3254
/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
static struct event_constraint bdx_uncore_pcu_constraints[] = {
	/* Events with the occupancy bit (0x80) set may only use counters 1-3. */
	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
	EVENT_CONSTRAINT_END
};
3260
#define BDX_PCU_DID			0x6fc0

/*
 * Register the BDX MSR-based uncore PMUs.  Clamps the CBOX count to the
 * number of cores and drops the SBOX entry entirely on parts without
 * SBOXes: model 86 (presumably Broadwell-DE -- TODO confirm) or any part
 * whose PCU CAPID4 chop field reads zero.
 */
void bdx_uncore_cpu_init(void)
{
	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	uncore_msr_uncores = bdx_msr_uncores;

	/* Detect systems with no SBOXes */
	if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;

	/* BDX needs the counter-0 occupancy constraint on the shared PCU type. */
	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}
3275
/* BDX home agent (HA) PMON: two boxes, four 48-bit counters each. */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3283
/* BDX IMC PMON: same layout and event descriptions as HSW-EP. */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3295
/* BDX IRP PMON: counters accessed through hswep_uncore_irp_ops. */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
3306
/* BDX QPI PMON: three link boxes with shared match/mask filter regs. */
static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
3320
/* BDX R2PCIe counter-placement constraints (event id, counter mask). */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};
3333
/* BDX R2PCIe PMON: one box, placement limited by constraints above. */
static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3342
/* BDX R3QPI counter-placement constraints (event id, counter mask). */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
3376
/* BDX R3QPI PMON: three boxes of three 48-bit counters. */
static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3385
/* Indices into bdx_pci_uncores[], matched by the PCI ID driver_data. */
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};
3394
/* NULL-terminated list of BDX PCI-based uncore PMU types, indexed above. */
static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE] = &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};
3404
/*
 * BDX uncore PCI device IDs.  driver_data packs the uncore type index
 * and the box number; UNCORE_EXTRA_PCI_DEV entries are helper devices
 * (QPI port filters) used by other boxes rather than PMUs themselves.
 */
static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* QPI Port 2 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   BDX_PCI_QPI_PORT2_FILTER),
	},
	{ /* end: all zeroes */ }
};
3495
/* ID-table-only driver; the generic uncore PCI code does the probing. */
static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};
3500
3501int bdx_uncore_pci_init(void)
3502{
3503        int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3504
3505        if (ret)
3506                return ret;
3507        uncore_pci_uncores = bdx_pci_uncores;
3508        uncore_pci_driver = &bdx_uncore_pci_driver;
3509        return 0;
3510}
3511
3512/* end of BDX uncore support */
3513
3514/* SKX uncore support */
3515
/*
 * SKX UBOX PMON: two generic counters plus a fixed UCLK counter, reusing
 * the HSW-EP register layout and IVB-EP MSR ops (no shared regs here).
 */
static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3530
/* sysfs format attributes for SKX CHA events, including the filter fields. */
static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
3551
/* "format" sysfs group for the SKX CHA PMU. */
static const struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};
3556
/* SKX CHA events restricted to counter 0 (event id, counter mask). */
static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};
3562
/*
 * Events that need an extra (filter) register on SKX CHA: each entry is
 * (event+umask encoding, config mask, filter-field idx bits picked up by
 * skx_cha_hw_config()).
 */
static struct extra_reg skx_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
	EVENT_EXTRA_END
};
3575
3576static u64 skx_cha_filter_mask(int fields)
3577{
3578        u64 mask = 0;
3579
3580        if (fields & 0x1)
3581                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3582        if (fields & 0x2)
3583                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3584        if (fields & 0x4)
3585                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3586        if (fields & 0x8) {
3587                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3588                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3589                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3590                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3591                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3592                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3593                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3594                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3595                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3596        }
3597        return mask;
3598}
3599
/* Delegate to the common CBOX constraint logic with the SKX filter mask. */
static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}
3605
/*
 * Build the extra (filter) register setup for a SKX CHA event: collect
 * the filter-field selector bits from tid_en and from any matching
 * skx_uncore_cha_extra_regs entry, then program reg1 with the per-box
 * filter MSR address and the user-supplied filter value (config1),
 * masked to the selected fields.  Always returns 0.
 */
static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;
	/* Any of the CHA events may be filtered by Thread/Core-ID.*/
	if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN)
		idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID;

	/* OR in the selector bits of every extra-reg entry this event matches. */
	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* Filter MSRs are laid out per box at a fixed stride. */
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
3629
static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	/* SKX-specific filter-register handling: */
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
3642
/*
 * SKX CHA PMON.  num_boxes is not set here; presumably filled in at init
 * time from the detected CHA count -- the code doing so is outside this
 * view (TODO confirm).
 */
static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};
3657
/* sysfs "format" attributes exposed for the SKX IIO PMU. */
static struct attribute *skx_uncore_iio_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh9.attr,
        &format_attr_ch_mask.attr,
        &format_attr_fc_mask.attr,
        NULL,
};

static const struct attribute_group skx_uncore_iio_format_group = {
        .name = "format",
        .attrs = skx_uncore_iio_formats_attr,
};
3673
/*
 * Counter constraints for SKX IIO events: each entry restricts an event
 * code to a bitmask of usable counters (0x3 = ctr0/1, 0xc = ctr2/3).
 */
static struct event_constraint skx_uncore_iio_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
        UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
        UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
        UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
        UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
        EVENT_CONSTRAINT_END
};
3684
3685static void skx_iio_enable_event(struct intel_uncore_box *box,
3686                                 struct perf_event *event)
3687{
3688        struct hw_perf_event *hwc = &event->hw;
3689
3690        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3691}
3692
/* PMON access ops for the SKX IIO boxes (MSR-based). */
static struct intel_uncore_ops skx_uncore_iio_ops = {
        .init_box               = ivbep_uncore_msr_init_box,
        .disable_box            = snbep_uncore_msr_disable_box,
        .enable_box             = snbep_uncore_msr_enable_box,
        .disable_event          = snbep_uncore_msr_disable_event,
        .enable_event           = skx_iio_enable_event,
        .read_counter           = uncore_msr_read_counter,
};
3701
3702static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
3703{
3704        return pmu->type->topology[die].configuration >>
3705               (pmu->pmu_idx * BUS_NUM_STRIDE);
3706}
3707
3708static umode_t
3709pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
3710                         int die, int zero_bus_pmu)
3711{
3712        struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
3713
3714        return (!skx_iio_stack(pmu, die) && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
3715}
3716
/* SKX wrapper: only PMU instance 0 may legitimately map to root bus 0x00. */
static umode_t
skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
        /* Root bus 0x00 is valid only for pmu_idx = 0. */
        return pmu_iio_mapping_visible(kobj, attr, die, 0);
}
3723
/*
 * sysfs show handler for a "dieN" mapping attribute: prints the
 * "<segment>:<bus>" location of this PMU's IIO stack on that die.
 * The die index was stashed in ea->var by pmu_iio_set_mapping().
 */
static ssize_t skx_iio_mapping_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
        struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
        long die = (long)ea->var;

        return sprintf(buf, "%04x:%02x\n", pmu->type->topology[die].segment,
                                           skx_iio_stack(pmu, die));
}
3734
3735static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
3736{
3737        u64 msr_value;
3738
3739        if (rdmsrl_on_cpu(cpu, SKX_MSR_CPU_BUS_NUMBER, &msr_value) ||
3740                        !(msr_value & SKX_MSR_CPU_BUS_VALID_BIT))
3741                return -ENXIO;
3742
3743        *topology = msr_value;
3744
3745        return 0;
3746}
3747
3748static int die_to_cpu(int die)
3749{
3750        int res = 0, cpu, current_die;
3751        /*
3752         * Using cpus_read_lock() to ensure cpu is not going down between
3753         * looking at cpu_online_mask.
3754         */
3755        cpus_read_lock();
3756        for_each_online_cpu(cpu) {
3757                current_die = topology_logical_die_id(cpu);
3758                if (current_die == die) {
3759                        res = cpu;
3760                        break;
3761                }
3762        }
3763        cpus_read_unlock();
3764        return res;
3765}
3766
/*
 * Populate type->topology for every die: the raw CPU-bus MSR value goes
 * into .configuration and the PCI segment into .segment.
 *
 * NOTE: on full success this returns the (non-negative) segment number
 * of the last die, not 0 — callers must check for failure with
 * "ret < 0", never "ret != 0". On failure the partially filled
 * topology array is freed and the pointer cleared.
 */
static int skx_iio_get_topology(struct intel_uncore_type *type)
{
        int die, ret = -EPERM;

        type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
                                 GFP_KERNEL);
        if (!type->topology)
                return -ENOMEM;

        for (die = 0; die < uncore_max_dies(); die++) {
                ret = skx_msr_cpu_bus_read(die_to_cpu(die),
                                           &type->topology[die].configuration);
                if (ret)
                        break;

                ret = uncore_die_to_segment(die);
                if (ret < 0)
                        break;

                type->topology[die].segment = ret;
        }

        if (ret < 0) {
                kfree(type->topology);
                type->topology = NULL;
        }

        return ret;
}
3796
/*
 * Attribute group for the per-die IIO mapping files; .attrs is filled
 * in at runtime by pmu_iio_set_mapping().
 */
static struct attribute_group skx_iio_mapping_group = {
        .is_visible     = skx_iio_mapping_visible,
};

static const struct attribute_group *skx_iio_attr_update[] = {
        &skx_iio_mapping_group,
        NULL,
};
3805
3806static int
3807pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
3808{
3809        char buf[64];
3810        int ret;
3811        long die = -1;
3812        struct attribute **attrs = NULL;
3813        struct dev_ext_attribute *eas = NULL;
3814
3815        ret = type->get_topology(type);
3816        if (ret < 0)
3817                goto clear_attr_update;
3818
3819        ret = -ENOMEM;
3820
3821        /* One more for NULL. */
3822        attrs = kcalloc((uncore_max_dies() + 1), sizeof(*attrs), GFP_KERNEL);
3823        if (!attrs)
3824                goto clear_topology;
3825
3826        eas = kcalloc(uncore_max_dies(), sizeof(*eas), GFP_KERNEL);
3827        if (!eas)
3828                goto clear_attrs;
3829
3830        for (die = 0; die < uncore_max_dies(); die++) {
3831                sprintf(buf, "die%ld", die);
3832                sysfs_attr_init(&eas[die].attr.attr);
3833                eas[die].attr.attr.name = kstrdup(buf, GFP_KERNEL);
3834                if (!eas[die].attr.attr.name)
3835                        goto err;
3836                eas[die].attr.attr.mode = 0444;
3837                eas[die].attr.show = skx_iio_mapping_show;
3838                eas[die].attr.store = NULL;
3839                eas[die].var = (void *)die;
3840                attrs[die] = &eas[die].attr.attr;
3841        }
3842        ag->attrs = attrs;
3843
3844        return 0;
3845err:
3846        for (; die >= 0; die--)
3847                kfree(eas[die].attr.attr.name);
3848        kfree(eas);
3849clear_attrs:
3850        kfree(attrs);
3851clear_topology:
3852        kfree(type->topology);
3853clear_attr_update:
3854        type->attr_update = NULL;
3855        return ret;
3856}
3857
3858static void
3859pmu_iio_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
3860{
3861        struct attribute **attr = ag->attrs;
3862
3863        if (!attr)
3864                return;
3865
3866        for (; *attr; attr++)
3867                kfree((*attr)->name);
3868        kfree(attr_to_ext_attr(*ag->attrs));
3869        kfree(ag->attrs);
3870        ag->attrs = NULL;
3871        kfree(type->topology);
3872}
3873
/* SKX thin wrappers binding the generic IIO mapping helpers to the
 * SKX-specific attribute group.
 */
static int skx_iio_set_mapping(struct intel_uncore_type *type)
{
        return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
}

static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
{
        pmu_iio_cleanup_mapping(type, &skx_iio_mapping_group);
}
3883
/* SKX IIO uncore type: six MSR-based boxes with per-die sysfs mapping. */
static struct intel_uncore_type skx_uncore_iio = {
        .name                   = "iio",
        .num_counters           = 4,
        .num_boxes              = 6,
        .perf_ctr_bits          = 48,
        .event_ctl              = SKX_IIO0_MSR_PMON_CTL0,
        .perf_ctr               = SKX_IIO0_MSR_PMON_CTR0,
        .event_mask             = SKX_IIO_PMON_RAW_EVENT_MASK,
        .event_mask_ext         = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
        .box_ctl                = SKX_IIO0_MSR_PMON_BOX_CTL,
        .msr_offset             = SKX_IIO_MSR_OFFSET,
        .constraints            = skx_uncore_iio_constraints,
        .ops                    = &skx_uncore_iio_ops,
        .format_group           = &skx_uncore_iio_format_group,
        .attr_update            = skx_iio_attr_update,
        .get_topology           = skx_iio_get_topology,
        .set_mapping            = skx_iio_set_mapping,
        .cleanup_mapping        = skx_iio_cleanup_mapping,
};
3903
/* Free-running counter groups provided by the SKX IIO boxes. */
enum perf_uncore_iio_freerunning_type_id {
        SKX_IIO_MSR_IOCLK                       = 0,
        SKX_IIO_MSR_BW                          = 1,
        SKX_IIO_MSR_UTIL                        = 2,

        SKX_IIO_FREERUNNING_TYPE_MAX,
};


/*
 * Per-group free-running counter parameters. Initializer order follows
 * struct freerunning_counters: base MSR, per-box offset, per-counter
 * offset, number of counters, counter width in bits — NOTE(review):
 * confirm field order against the struct definition in uncore.h.
 */
static struct freerunning_counters skx_iio_freerunning[] = {
        [SKX_IIO_MSR_IOCLK]     = { 0xa45, 0x1, 0x20, 1, 36 },
        [SKX_IIO_MSR_BW]        = { 0xb00, 0x1, 0x10, 8, 36 },
        [SKX_IIO_MSR_UTIL]      = { 0xb08, 0x1, 0x10, 8, 36 },
};
3918
/*
 * Event aliases for the SKX IIO free-running counters. All use the
 * pseudo event 0xff; the umask selects the counter group and port.
 * Bandwidth counters are scaled to MiB (4/2^20 = 3.814697266e-6).
 */
static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
        /* Free-Running IO CLOCKS Counter */
        INTEL_UNCORE_EVENT_DESC(ioclk,                  "event=0xff,umask=0x10"),
        /* Free-Running IIO BANDWIDTH Counters */
        INTEL_UNCORE_EVENT_DESC(bw_in_port0,            "event=0xff,umask=0x20"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port1,            "event=0xff,umask=0x21"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port2,            "event=0xff,umask=0x22"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port3,            "event=0xff,umask=0x23"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,      "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,       "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port0,           "event=0xff,umask=0x24"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,     "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,      "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port1,           "event=0xff,umask=0x25"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,     "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,      "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port2,           "event=0xff,umask=0x26"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,     "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,      "MiB"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port3,           "event=0xff,umask=0x27"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,     "3.814697266e-6"),
        INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,      "MiB"),
        /* Free-running IIO UTILIZATION Counters */
        INTEL_UNCORE_EVENT_DESC(util_in_port0,          "event=0xff,umask=0x30"),
        INTEL_UNCORE_EVENT_DESC(util_out_port0,         "event=0xff,umask=0x31"),
        INTEL_UNCORE_EVENT_DESC(util_in_port1,          "event=0xff,umask=0x32"),
        INTEL_UNCORE_EVENT_DESC(util_out_port1,         "event=0xff,umask=0x33"),
        INTEL_UNCORE_EVENT_DESC(util_in_port2,          "event=0xff,umask=0x34"),
        INTEL_UNCORE_EVENT_DESC(util_out_port2,         "event=0xff,umask=0x35"),
        INTEL_UNCORE_EVENT_DESC(util_in_port3,          "event=0xff,umask=0x36"),
        INTEL_UNCORE_EVENT_DESC(util_out_port3,         "event=0xff,umask=0x37"),
        { /* end: all zeroes */ },
};
3958
/*
 * Free-running counters cannot be started, stopped or reprogrammed, so
 * only read and hw_config ops are provided.
 */
static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
        .read_counter           = uncore_msr_read_counter,
        .hw_config              = uncore_freerunning_hw_config,
};

static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        NULL,
};

static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
        .name = "format",
        .attrs = skx_uncore_iio_freerunning_formats_attr,
};
3974
/*
 * Pseudo uncore type exposing the IIO free-running counters
 * (1 ioclk + 8 bandwidth + 8 utilization = 17 per box).
 */
static struct intel_uncore_type skx_uncore_iio_free_running = {
        .name                   = "iio_free_running",
        .num_counters           = 17,
        .num_boxes              = 6,
        .num_freerunning_types  = SKX_IIO_FREERUNNING_TYPE_MAX,
        .freerunning            = skx_iio_freerunning,
        .ops                    = &skx_uncore_iio_freerunning_ops,
        .event_descs            = skx_uncore_iio_freerunning_events,
        .format_group           = &skx_uncore_iio_freerunning_format_group,
};
3985
/* Generic SKX format attributes shared by several box types below. */
static struct attribute *skx_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static const struct attribute_group skx_uncore_format_group = {
        .name = "format",
        .attrs = skx_uncore_formats_attr,
};
3999
/* SKX IRP uncore type (MSR-based; shares the IIO access ops). */
static struct intel_uncore_type skx_uncore_irp = {
        .name                   = "irp",
        .num_counters           = 2,
        .num_boxes              = 6,
        .perf_ctr_bits          = 48,
        .event_ctl              = SKX_IRP0_MSR_PMON_CTL0,
        .perf_ctr               = SKX_IRP0_MSR_PMON_CTR0,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl                = SKX_IRP0_MSR_PMON_BOX_CTL,
        .msr_offset             = SKX_IRP_MSR_OFFSET,
        .ops                    = &skx_uncore_iio_ops,
        .format_group           = &skx_uncore_format_group,
};
4013
/* PCU format attributes, including occupancy and frequency-band filters. */
static struct attribute *skx_uncore_pcu_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_occ_invert.attr,
        &format_attr_occ_edge_det.attr,
        &format_attr_filter_band0.attr,
        &format_attr_filter_band1.attr,
        &format_attr_filter_band2.attr,
        &format_attr_filter_band3.attr,
        NULL,
};

static struct attribute_group skx_uncore_pcu_format_group = {
        .name = "format",
        .attrs = skx_uncore_pcu_formats_attr,
};
4033
/* PCU ops: common IVBEP MSR ops plus HSWEP/SNBEP filter-reg handling. */
static struct intel_uncore_ops skx_uncore_pcu_ops = {
        IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = hswep_pcu_hw_config,
        .get_constraint         = snbep_pcu_get_constraint,
        .put_constraint         = snbep_pcu_put_constraint,
};
4040
/* SKX power control unit (PCU) uncore type; one box per package. */
static struct intel_uncore_type skx_uncore_pcu = {
        .name                   = "pcu",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .perf_ctr               = HSWEP_PCU_MSR_PMON_CTR0,
        .event_ctl              = HSWEP_PCU_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = HSWEP_PCU_MSR_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &skx_uncore_pcu_ops,
        .format_group           = &skx_uncore_pcu_format_group,
};
4054
4055static struct intel_uncore_type *skx_msr_uncores[] = {
4056        &skx_uncore_ubox,
4057        &skx_uncore_chabox,
4058        &skx_uncore_iio,
4059        &skx_uncore_iio_free_running,
4060        &skx_uncore_irp,
4061        &skx_uncore_pcu,
4062        NULL,
4063};
4064
4065/*
4066 * To determine the number of CHAs, it should read bits 27:0 in the CAPID6
 * register which is located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083.
4068 */
4069#define SKX_CAPID6              0x9c
4070#define SKX_CHA_BIT_MASK        GENMASK(27, 0)
4071
4072static int skx_count_chabox(void)
4073{
4074        struct pci_dev *dev = NULL;
4075        u32 val = 0;
4076
4077        dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
4078        if (!dev)
4079                goto out;
4080
4081        pci_read_config_dword(dev, SKX_CAPID6, &val);
4082        val &= SKX_CHA_BIT_MASK;
4083out:
4084        pci_dev_put(dev);
4085        return hweight32(val);
4086}
4087
/*
 * Register the SKX MSR-based uncore PMUs; the CHA box count is probed
 * at runtime since it varies with the SKU.
 */
void skx_uncore_cpu_init(void)
{
        skx_uncore_chabox.num_boxes = skx_count_chabox();
        uncore_msr_uncores = skx_msr_uncores;
}
4093
/* SKX IMC (memory controller channel) uncore type, PCI-based. */
static struct intel_uncore_type skx_uncore_imc = {
        .name           = "imc",
        .num_counters   = 4,
        .num_boxes      = 6,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
        .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
        .event_descs    = hswep_uncore_imc_events,
        .perf_ctr       = SNBEP_PCI_PMON_CTR0,
        .event_ctl      = SNBEP_PCI_PMON_CTL0,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,
        .ops            = &ivbep_uncore_pci_ops,
        .format_group   = &skx_uncore_format_group,
};
4110
/* UPI format attributes; umask_ext covers the extended umask bits. */
static struct attribute *skx_upi_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask_ext.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};

static const struct attribute_group skx_upi_uncore_format_group = {
        .name = "format",
        .attrs = skx_upi_uncore_formats_attr,
};
4124
4125static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
4126{
4127        struct pci_dev *pdev = box->pci_dev;
4128
4129        __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4130        pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4131}
4132
/* PCI access ops for UPI boxes; only init_box is UPI-specific. */
static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
        .init_box       = skx_upi_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = snbep_uncore_pci_disable_event,
        .enable_event   = snbep_uncore_pci_enable_event,
        .read_counter   = snbep_uncore_pci_read_counter,
};
4141
/* SKX UPI (inter-socket link) uncore type, PCI-based. */
static struct intel_uncore_type skx_uncore_upi = {
        .name           = "upi",
        .num_counters   = 4,
        .num_boxes      = 3,
        .perf_ctr_bits  = 48,
        .perf_ctr       = SKX_UPI_PCI_PMON_CTR0,
        .event_ctl      = SKX_UPI_PCI_PMON_CTL0,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
        .event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
        .box_ctl        = SKX_UPI_PCI_PMON_BOX_CTL,
        .ops            = &skx_upi_uncore_pci_ops,
        .format_group   = &skx_upi_uncore_format_group,
};
4155
4156static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
4157{
4158        struct pci_dev *pdev = box->pci_dev;
4159
4160        __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
4161        pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
4162}
4163
/* PCI access ops for M2M boxes; only init_box is M2M-specific. */
static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
        .init_box       = skx_m2m_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = snbep_uncore_pci_disable_event,
        .enable_event   = snbep_uncore_pci_enable_event,
        .read_counter   = snbep_uncore_pci_read_counter,
};
4172
/* SKX M2M (mesh-to-memory) uncore type, PCI-based. */
static struct intel_uncore_type skx_uncore_m2m = {
        .name           = "m2m",
        .num_counters   = 4,
        .num_boxes      = 2,
        .perf_ctr_bits  = 48,
        .perf_ctr       = SKX_M2M_PCI_PMON_CTR0,
        .event_ctl      = SKX_M2M_PCI_PMON_CTL0,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl        = SKX_M2M_PCI_PMON_BOX_CTL,
        .ops            = &skx_m2m_uncore_pci_ops,
        .format_group   = &skx_uncore_format_group,
};
4185
/* M2PCIe constraint: event 0x23 is limited to counters 0-1 (mask 0x3). */
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        EVENT_CONSTRAINT_END
};
4190
/* SKX M2PCIe uncore type, PCI-based, reusing generic SNBEP registers. */
static struct intel_uncore_type skx_uncore_m2pcie = {
        .name           = "m2pcie",
        .num_counters   = 4,
        .num_boxes      = 4,
        .perf_ctr_bits  = 48,
        .constraints    = skx_uncore_m2pcie_constraints,
        .perf_ctr       = SNBEP_PCI_PMON_CTR0,
        .event_ctl      = SNBEP_PCI_PMON_CTL0,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,
        .ops            = &ivbep_uncore_pci_ops,
        .format_group   = &skx_uncore_format_group,
};
4204
/* M3UPI counter constraints (mask 0x1 = ctr0 only, 0x7 = ctr0-2). */
static struct event_constraint skx_uncore_m3upi_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
        EVENT_CONSTRAINT_END
};
4216
/* SKX M3UPI uncore type, PCI-based. */
static struct intel_uncore_type skx_uncore_m3upi = {
        .name           = "m3upi",
        .num_counters   = 3,
        .num_boxes      = 3,
        .perf_ctr_bits  = 48,
        .constraints    = skx_uncore_m3upi_constraints,
        .perf_ctr       = SNBEP_PCI_PMON_CTR0,
        .event_ctl      = SNBEP_PCI_PMON_CTL0,
        .event_mask     = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl        = SNBEP_PCI_PMON_BOX_CTL,
        .ops            = &ivbep_uncore_pci_ops,
        .format_group   = &skx_uncore_format_group,
};
4230
/* Indices into skx_pci_uncores[], referenced from the PCI ID table. */
enum {
        SKX_PCI_UNCORE_IMC,
        SKX_PCI_UNCORE_M2M,
        SKX_PCI_UNCORE_UPI,
        SKX_PCI_UNCORE_M2PCIE,
        SKX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *skx_pci_uncores[] = {
        [SKX_PCI_UNCORE_IMC]    = &skx_uncore_imc,
        [SKX_PCI_UNCORE_M2M]    = &skx_uncore_m2m,
        [SKX_PCI_UNCORE_UPI]    = &skx_uncore_upi,
        [SKX_PCI_UNCORE_M2PCIE] = &skx_uncore_m2pcie,
        [SKX_PCI_UNCORE_M3UPI]  = &skx_uncore_m3upi,
        NULL,
};
4247
/*
 * PCI IDs of the SKX uncore PMON units. UNCORE_PCI_DEV_FULL_DATA()
 * packs (device, function, uncore type index, box id); the dev/func
 * pairs are the fixed locations of each unit — NOTE(review): confirm
 * against the SKX uncore performance monitoring documentation.
 */
static const struct pci_device_id skx_uncore_pci_ids[] = {
        { /* MC0 Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
        },
        { /* MC0 Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
        },
        { /* MC0 Channel 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
        },
        { /* MC1 Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
        },
        { /* MC1 Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
        },
        { /* MC1 Channel 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
        },
        { /* M2M0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
        },
        { /* M2M1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
        },
        { /* UPI0 Link 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
        },
        { /* UPI0 Link 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
        },
        { /* UPI1 Link 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
        },
        { /* M2PCIe 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
        },
        { /* M2PCIe 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
        },
        { /* M2PCIe 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
        },
        { /* M2PCIe 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
        },
        { /* M3UPI0 Link 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
        },
        { /* M3UPI0 Link 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
        },
        { /* M3UPI1 Link 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
        },
        { /* end: all zeroes */ }
};
4323
4324
/* PCI driver handed to the uncore core; no probe/remove of its own. */
static struct pci_driver skx_uncore_pci_driver = {
        .name           = "skx_uncore",
        .id_table       = skx_uncore_pci_ids,
};
4329
/*
 * Initialize the SKX PCI-based uncore support: build the bus-to-socket
 * map from the UBOX device (DID 0x2014), then install the type and
 * driver tables. Returns 0 on success or a negative errno.
 */
int skx_uncore_pci_init(void)
{
        /* need to double check pci address */
        int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);

        if (ret)
                return ret;

        uncore_pci_uncores = skx_pci_uncores;
        uncore_pci_driver = &skx_uncore_pci_driver;
        return 0;
}
4342
4343/* end of SKX uncore support */
4344
4345/* SNR uncore support */
4346
/* SNR UBOX uncore type: two general counters plus a fixed uclk counter. */
static struct intel_uncore_type snr_uncore_ubox = {
        .name                   = "ubox",
        .num_counters           = 2,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .fixed_ctr_bits         = 48,
        .perf_ctr               = SNR_U_MSR_PMON_CTR0,
        .event_ctl              = SNR_U_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .fixed_ctr              = SNR_U_MSR_PMON_UCLK_FIXED_CTR,
        .fixed_ctl              = SNR_U_MSR_PMON_UCLK_FIXED_CTL,
        .ops                    = &ivbep_uncore_msr_ops,
        .format_group           = &ivbep_uncore_format_group,
};
4361
/* SNR CHA format attributes, including the 5-bit TID filter. */
static struct attribute *snr_uncore_cha_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask_ext2.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid5.attr,
        NULL,
};
static const struct attribute_group snr_uncore_chabox_format_group = {
        .name = "format",
        .attrs = snr_uncore_cha_formats_attr,
};
4376
4377static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
4378{
4379        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
4380
4381        reg1->reg = SNR_C0_MSR_PMON_BOX_FILTER0 +
4382                    box->pmu->type->msr_offset * box->pmu->pmu_idx;
4383        reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
4384        reg1->idx = 0;
4385
4386        return 0;
4387}
4388
4389static void snr_cha_enable_event(struct intel_uncore_box *box,
4390                                   struct perf_event *event)
4391{
4392        struct hw_perf_event *hwc = &event->hw;
4393        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
4394
4395        if (reg1->idx != EXTRA_REG_NONE)
4396                wrmsrl(reg1->reg, reg1->config);
4397
4398        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
4399}
4400
/* SNR CHA PMON access ops (MSR-based, with filter-aware enable_event). */
static struct intel_uncore_ops snr_uncore_chabox_ops = {
        .init_box               = ivbep_uncore_msr_init_box,
        .disable_box            = snbep_uncore_msr_disable_box,
        .enable_box             = snbep_uncore_msr_enable_box,
        .disable_event          = snbep_uncore_msr_disable_event,
        .enable_event           = snr_cha_enable_event,
        .read_counter           = uncore_msr_read_counter,
        .hw_config              = snr_cha_hw_config,
};
4410
/* SNR CHA uncore type; fixed six boxes, MSR-based. */
static struct intel_uncore_type snr_uncore_chabox = {
        .name                   = "cha",
        .num_counters           = 4,
        .num_boxes              = 6,
        .perf_ctr_bits          = 48,
        .event_ctl              = SNR_CHA_MSR_PMON_CTL0,
        .perf_ctr               = SNR_CHA_MSR_PMON_CTR0,
        .box_ctl                = SNR_CHA_MSR_PMON_BOX_CTL,
        .msr_offset             = HSWEP_CBO_MSR_OFFSET,
        .event_mask             = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
        .event_mask_ext         = SNR_CHA_RAW_EVENT_MASK_EXT,
        .ops                    = &snr_uncore_chabox_ops,
        .format_group           = &snr_uncore_chabox_format_group,
};
4425
/* SNR IIO format attributes (wider ch_mask/fc_mask than SKX). */
static struct attribute *snr_uncore_iio_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh9.attr,
        &format_attr_ch_mask2.attr,
        &format_attr_fc_mask2.attr,
        NULL,
};

static const struct attribute_group snr_uncore_iio_format_group = {
        .name = "format",
        .attrs = snr_uncore_iio_formats_attr,
};
4441
/* SNR wrapper: only PMU instance 1 may legitimately map to root bus 0x00. */
static umode_t
snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
        /* Root bus 0x00 is valid only for pmu_idx = 1. */
        return pmu_iio_mapping_visible(kobj, attr, die, 1);
}
4448
4449static struct attribute_group snr_iio_mapping_group = {
4450        .is_visible     = snr_iio_mapping_visible,
4451};
4452
4453static const struct attribute_group *snr_iio_attr_update[] = {
4454        &snr_iio_mapping_group,
4455        NULL,
4456};
4457
4458static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping)
4459{
4460        u32 sad_cfg;
4461        int die, stack_id, ret = -EPERM;
4462        struct pci_dev *dev = NULL;
4463
4464        type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
4465                                 GFP_KERNEL);
4466        if (!type->topology)
4467                return -ENOMEM;
4468
4469        while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
4470                ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
4471                if (ret) {
4472                        ret = pcibios_err_to_errno(ret);
4473                        break;
4474                }
4475
4476                die = uncore_pcibus_to_dieid(dev->bus);
4477                stack_id = SAD_CONTROL_STACK_ID(sad_cfg);
4478                if (die < 0 || stack_id >= type->num_boxes) {
4479                        ret = -EPERM;
4480                        break;
4481                }
4482
4483                /* Convert stack id from SAD_CONTROL to PMON notation. */
4484                stack_id = sad_pmon_mapping[stack_id];
4485
4486                ((u8 *)&(type->topology[die].configuration))[stack_id] = dev->bus->number;
4487                type->topology[die].segment = pci_domain_nr(dev->bus);
4488        }
4489
4490        if (ret) {
4491                kfree(type->topology);
4492                type->topology = NULL;
4493        }
4494
4495        return ret;
4496}
4497
4498/*
4499 * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
4500 */
4501enum {
4502        SNR_QAT_PMON_ID,
4503        SNR_CBDMA_DMI_PMON_ID,
4504        SNR_NIS_PMON_ID,
4505        SNR_DLB_PMON_ID,
4506        SNR_PCIE_GEN3_PMON_ID
4507};
4508
4509static u8 snr_sad_pmon_mapping[] = {
4510        SNR_CBDMA_DMI_PMON_ID,
4511        SNR_PCIE_GEN3_PMON_ID,
4512        SNR_DLB_PMON_ID,
4513        SNR_NIS_PMON_ID,
4514        SNR_QAT_PMON_ID
4515};
4516
4517static int snr_iio_get_topology(struct intel_uncore_type *type)
4518{
4519        return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
4520}
4521
/* ->set_mapping hook: publish the die-to-PCI-bus mapping attributes. */
static int snr_iio_set_mapping(struct intel_uncore_type *type)
{
	return pmu_iio_set_mapping(type, &snr_iio_mapping_group);
}

/* ->cleanup_mapping hook: free the mapping attributes set up above. */
static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
{
	pmu_iio_cleanup_mapping(type, &snr_iio_mapping_group);
}
4531
/* Events restricted to a subset of the four IIO counters. */
static struct event_constraint snr_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
	EVENT_CONSTRAINT_END
};

/* SNR IIO uncore PMON: 5 boxes, 4 counters each, with topology mapping. */
static struct intel_uncore_type snr_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IIO_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SNR_IIO_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IIO_MSR_OFFSET,
	.constraints		= snr_uncore_iio_constraints,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= snr_iio_attr_update,
	.get_topology		= snr_iio_get_topology,
	.set_mapping		= snr_iio_set_mapping,
	.cleanup_mapping	= snr_iio_cleanup_mapping,
};
4558
/* SNR IRP uncore PMON: 5 boxes, 2 counters each. */
static struct intel_uncore_type snr_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 5,
	.perf_ctr_bits		= 48,
	.event_ctl		= SNR_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SNR_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNR_IRP_MSR_OFFSET,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};

/* SNR M2PCIe uncore PMON: 5 boxes, 4 counters each. */
static struct intel_uncore_type snr_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 5,
	.perf_ctr_bits	= 48,
	.event_ctl	= SNR_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= SNR_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= SNR_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offset	= SNR_M2PCIE_MSR_OFFSET,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
4586
/*
 * Route PCU band events (0xb-0xe) through the PCU filter register, taking
 * the filter value from the event's config1.
 */
static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = SNR_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		/*
		 * NOTE(review): snbep/hswep mask config1 with
		 * 0xff << (idx * 8) to select an 8-bit filter band; here the
		 * shift is idx bits only — confirm against the SNR PCU
		 * filter register layout.
		 */
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}
	return 0;
}
4600
/* PCU ops: common IVB-EP MSR ops plus filter hw_config and shared-reg constraints. */
static struct intel_uncore_ops snr_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snr_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* SNR PCU uncore PMON: single box, 4 counters, one shared filter register. */
static struct intel_uncore_type snr_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNR_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNR_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNR_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snr_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
4621
/* Free-running counter types available in the SNR IIO boxes. */
enum perf_uncore_snr_iio_freerunning_type_id {
	SNR_IIO_MSR_IOCLK,
	SNR_IIO_MSR_BW_IN,

	SNR_IIO_FREERUNNING_TYPE_MAX,
};

/* { first counter MSR, box offset, counter offset, num counters, bits } */
static struct freerunning_counters snr_iio_freerunning[] = {
	[SNR_IIO_MSR_IOCLK]	= { 0x1eac, 0x1, 0x10, 1, 48 },
	[SNR_IIO_MSR_BW_IN]	= { 0x1f00, 0x1, 0x10, 8, 48 },
};
4633
/* Event descriptions (with scale/unit) for the SNR IIO free-running counters. */
static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
4664
/* Pseudo-type exposing the SNR IIO free-running counters as events. */
static struct intel_uncore_type snr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 5,
	.num_freerunning_types	= SNR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= snr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= snr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

/* All MSR-accessed SNR uncore PMU types; NULL-terminated. */
static struct intel_uncore_type *snr_msr_uncores[] = {
	&snr_uncore_ubox,
	&snr_uncore_chabox,
	&snr_uncore_iio,
	&snr_uncore_irp,
	&snr_uncore_m2pcie,
	&snr_uncore_pcu,
	&snr_uncore_iio_free_running,
	NULL,
};

/* Register the SNR MSR uncore types with the core uncore driver. */
void snr_uncore_cpu_init(void)
{
	uncore_msr_uncores = snr_msr_uncores;
}
4691
/*
 * Initialize an SNR M2M PCI PMON box: mark it as using 8-byte control
 * register spacing and reset counters/control via the box control register.
 */
static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	__set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
	pci_write_config_dword(pdev, box_ctl, IVBEP_PMON_BOX_CTL_INT);
}

/* PCI config-space PMON ops for the SNR M2M box. */
static struct intel_uncore_ops snr_m2m_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
4709
/* sysfs "format" attributes for SNR M2M events (extended umask). */
static struct attribute *snr_m2m_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext3.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group snr_m2m_uncore_format_group = {
	.name = "format",
	.attrs = snr_m2m_uncore_formats_attr,
};

/* SNR M2M (mesh-to-memory) uncore PMON: single box, PCI accessed. */
static struct intel_uncore_type snr_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};
4737
/*
 * Enable an event whose control register is wider than 32 bits: write the
 * low dword (with the enable bit) first, then the high dword of the config.
 */
static void snr_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, (u32)(hwc->config | SNBEP_PMON_CTL_EN));
	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
}
4746
/* PCI PMON ops for the SNR PCIe3 box; uses the 64-bit enable_event above. */
static struct intel_uncore_ops snr_pcie3_uncore_pci_ops = {
	.init_box	= snr_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snr_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};

/* SNR PCIe gen3 uncore PMON: single box, reuses the SKX IIO event format. */
static struct intel_uncore_type snr_uncore_pcie3 = {
	.name		= "pcie3",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_PCIE3_PCI_PMON_CTR0,
	.event_ctl	= SNR_PCIE3_PCI_PMON_CTL0,
	.event_mask	= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl	= SNR_PCIE3_PCI_PMON_BOX_CTL,
	.ops		= &snr_pcie3_uncore_pci_ops,
	.format_group	= &skx_uncore_iio_format_group,
};
4769
/* Indices into snr_pci_uncores[], referenced from the PCI ID tables below. */
enum {
	SNR_PCI_UNCORE_M2M,
	SNR_PCI_UNCORE_PCIE3,
};

static struct intel_uncore_type *snr_pci_uncores[] = {
	[SNR_PCI_UNCORE_M2M]		= &snr_uncore_m2m,
	[SNR_PCI_UNCORE_PCIE3]		= &snr_uncore_pcie3,
	NULL,
};

/* Devices claimed directly by the uncore PCI driver. */
static const struct pci_device_id snr_uncore_pci_ids[] = {
	{ /* M2M */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, SNR_PCI_UNCORE_M2M, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snr_uncore_pci_driver = {
	.name		= "snr_uncore",
	.id_table	= snr_uncore_pci_ids,
};

/* Devices matched by the sub-driver (already owned by another driver). */
static const struct pci_device_id snr_uncore_pci_sub_ids[] = {
	{ /* PCIe3 RP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x334a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 0, SNR_PCI_UNCORE_PCIE3, 0),
	},
	{ /* end: all zeroes */ }
};

static struct pci_driver snr_uncore_pci_sub_driver = {
	.name		= "snr_uncore_sub",
	.id_table	= snr_uncore_pci_sub_ids,
};
4806
/*
 * Initialize SNR PCI uncore support: build the PCI-bus-to-die mapping from
 * the UBOX device, then register the PCI uncore types and drivers.
 * Returns 0 on success or a negative errno from the mapping setup.
 */
int snr_uncore_pci_init(void)
{
	/* SNR UBOX DID */
	int ret = snbep_pci2phy_map_init(0x3460, SKX_CPUNODEID,
					 SKX_GIDNIDMAP, true);

	if (ret)
		return ret;

	uncore_pci_uncores = snr_pci_uncores;
	uncore_pci_driver = &snr_uncore_pci_driver;
	uncore_pci_sub_driver = &snr_uncore_pci_sub_driver;
	return 0;
}
4821
4822#define SNR_MC_DEVICE_ID        0x3451
4823
4824static struct pci_dev *snr_uncore_get_mc_dev(unsigned int device, int id)
4825{
4826        struct pci_dev *mc_dev = NULL;
4827        int pkg;
4828
4829        while (1) {
4830                mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, mc_dev);
4831                if (!mc_dev)
4832                        break;
4833                pkg = uncore_pcibus_to_dieid(mc_dev->bus);
4834                if (pkg == id)
4835                        break;
4836        }
4837        return mc_dev;
4838}
4839
/*
 * Map the MMIO PMON registers of @box: read the base and memory-region
 * registers from the die's memory controller device, compose the physical
 * address, and ioremap it.  Returns 0 on success, -ENODEV if the MC device
 * is missing, -EINVAL if the ioremap fails.
 */
static int snr_uncore_mmio_map(struct intel_uncore_box *box,
			       unsigned int box_ctl, int mem_offset,
			       unsigned int device)
{
	struct pci_dev *pdev = snr_uncore_get_mc_dev(device, box->dieid);
	struct intel_uncore_type *type = box->pmu->type;
	resource_size_t addr;
	u32 pci_dword;

	if (!pdev)
		return -ENODEV;

	/* Base address bits come from the MMIO_BASE register, shifted up 23. */
	pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
	addr = ((resource_size_t)pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;

	/* Memory-region bits from @mem_offset fill in below, shifted up 12. */
	pci_read_config_dword(pdev, mem_offset, &pci_dword);
	addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;

	addr += box_ctl;

	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr) {
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
		return -EINVAL;
	}

	return 0;
}
4868
/* Map the box's MMIO registers and, on success, reset the PMON box. */
static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
				       unsigned int box_ctl, int mem_offset,
				       unsigned int device)
{
	if (!snr_uncore_mmio_map(box, box_ctl, mem_offset, device))
		writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
}

/* ->init_box hook for the SNR IMC MMIO boxes. */
static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
{
	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
				   SNR_IMC_MMIO_MEM0_OFFSET,
				   SNR_MC_DEVICE_ID);
}
4883
4884static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
4885{
4886        u32 config;
4887
4888        if (!box->io_addr)
4889                return;
4890
4891        config = readl(box->io_addr);
4892        config |= SNBEP_PMON_BOX_CTL_FRZ;
4893        writel(config, box->io_addr);
4894}
4895
4896static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
4897{
4898        u32 config;
4899
4900        if (!box->io_addr)
4901                return;
4902
4903        config = readl(box->io_addr);
4904        config &= ~SNBEP_PMON_BOX_CTL_FRZ;
4905        writel(config, box->io_addr);
4906}
4907
4908static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
4909                                           struct perf_event *event)
4910{
4911        struct hw_perf_event *hwc = &event->hw;
4912
4913        if (!box->io_addr)
4914                return;
4915
4916        if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4917                return;
4918
4919        writel(hwc->config | SNBEP_PMON_CTL_EN,
4920               box->io_addr + hwc->config_base);
4921}
4922
4923static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
4924                                            struct perf_event *event)
4925{
4926        struct hw_perf_event *hwc = &event->hw;
4927
4928        if (!box->io_addr)
4929                return;
4930
4931        if (!uncore_mmio_is_valid_offset(box, hwc->config_base))
4932                return;
4933
4934        writel(hwc->config, box->io_addr + hwc->config_base);
4935}
4936
/* MMIO PMON ops for the SNR IMC boxes. */
static struct intel_uncore_ops snr_uncore_mmio_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

/* IMC event descriptions; CAS counts are scaled to MiB (64B per CAS). */
static struct uncore_event_desc snr_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
4957
/* SNR IMC uncore PMON: 2 boxes, 4 counters plus a fixed counter, MMIO accessed. */
static struct intel_uncore_type snr_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &snr_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
4976
/* Free-running counter types in the SNR IMC. */
enum perf_uncore_snr_imc_freerunning_type_id {
	SNR_IMC_DCLK,
	SNR_IMC_DDR,

	SNR_IMC_FREERUNNING_TYPE_MAX,
};

/* { first counter offset, box offset, counter offset, num counters, bits } */
static struct freerunning_counters snr_imc_freerunning[] = {
	[SNR_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[SNR_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
};

/* Event descriptions for the IMC free-running counters (bytes scaled to MiB). */
static struct uncore_event_desc snr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,		"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
5000
/* Ops for the free-running IMC counters: read-only, no enable/disable. */
static struct intel_uncore_ops snr_uncore_imc_freerunning_ops = {
	.init_box	= snr_uncore_mmio_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

/* Pseudo-type exposing the SNR IMC free-running counters as events. */
static struct intel_uncore_type snr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 1,
	.num_freerunning_types	= SNR_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= snr_imc_freerunning,
	.ops			= &snr_uncore_imc_freerunning_ops,
	.event_descs		= snr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};

/* All MMIO-accessed SNR uncore PMU types; NULL-terminated. */
static struct intel_uncore_type *snr_mmio_uncores[] = {
	&snr_uncore_imc,
	&snr_uncore_imc_free_running,
	NULL,
};

/* Register the SNR MMIO uncore types with the core uncore driver. */
void snr_uncore_mmio_init(void)
{
	uncore_mmio_uncores = snr_mmio_uncores;
}
5030
5031/* end of SNR uncore support */
5032
5033/* ICX uncore support */
5034
/* Per-box MSR offsets for the ICX CHA boxes, indexed by pmu_idx. */
static unsigned icx_cha_msr_offsets[] = {
	0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310,
	0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e,
	0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a,
	0x428, 0x436, 0x444, 0x452, 0x460, 0x46e, 0x47c, 0x0,   0xe,
	0x1c,  0x2a,  0x38,  0x46,
};

/*
 * When TID filtering is enabled on an ICX CHA event, route the TID filter
 * value from config1 into the box's FILTER0 register.
 */
static int icx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	bool tie_en = !!(event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN);

	if (tie_en) {
		reg1->reg = ICX_C34_MSR_PMON_BOX_FILTER0 +
			    icx_cha_msr_offsets[box->pmu->pmu_idx];
		reg1->config = event->attr.config1 & SKX_CHA_MSR_PMON_BOX_FILTER_TID;
		reg1->idx = 0;
	}

	return 0;
}
5057
/* MSR-based PMON ops for ICX CHA boxes; shares the SNR enable hook. */
static struct intel_uncore_ops icx_uncore_chabox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= snr_cha_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= icx_cha_hw_config,
};

/*
 * ICX CHA uncore PMON.  Uses per-box msr_offsets (boxes are not uniformly
 * spaced); note no .num_boxes here — presumably filled in at init time from
 * the actual core count (TODO confirm against icx_uncore_cpu_init).
 */
static struct intel_uncore_type icx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_C34_MSR_PMON_CTL0,
	.perf_ctr		= ICX_C34_MSR_PMON_CTR0,
	.box_ctl		= ICX_C34_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_cha_msr_offsets,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_CHA_RAW_EVENT_MASK_EXT,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &icx_uncore_chabox_ops,
	.format_group		= &snr_uncore_chabox_format_group,
};
5082
/* Per-box MSR offsets shared by the ICX IIO, IRP and M2PCIe boxes. */
static unsigned icx_msr_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

/* ICX IIO events restricted to a subset of the four counters. */
static struct event_constraint icx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x03, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
	EVENT_CONSTRAINT_END
};
5097
/*
 * Decide whether an IIO-to-PCI-bus mapping attribute is visible for @die.
 */
static umode_t
icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
{
	/* Root bus 0x00 is valid only for pmu_idx = 5. */
	return pmu_iio_mapping_visible(kobj, attr, die, 5);
}

/* Attribute group for the per-die IIO mapping files (attrs filled at runtime). */
static struct attribute_group icx_iio_mapping_group = {
	.is_visible	= icx_iio_mapping_visible,
};

static const struct attribute_group *icx_iio_attr_update[] = {
	&icx_iio_mapping_group,
	NULL,
};
5113
5114/*
5115 * ICX has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
5116 */
5117enum {
5118        ICX_PCIE1_PMON_ID,
5119        ICX_PCIE2_PMON_ID,
5120        ICX_PCIE3_PMON_ID,
5121        ICX_PCIE4_PMON_ID,
5122        ICX_PCIE5_PMON_ID,
5123        ICX_CBDMA_DMI_PMON_ID
5124};
5125
5126static u8 icx_sad_pmon_mapping[] = {
5127        ICX_CBDMA_DMI_PMON_ID,
5128        ICX_PCIE1_PMON_ID,
5129        ICX_PCIE2_PMON_ID,
5130        ICX_PCIE3_PMON_ID,
5131        ICX_PCIE4_PMON_ID,
5132        ICX_PCIE5_PMON_ID,
5133};
5134
5135static int icx_iio_get_topology(struct intel_uncore_type *type)
5136{
5137        return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
5138}
5139
5140static int icx_iio_set_mapping(struct intel_uncore_type *type)
5141{
5142        return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
5143}
5144
5145static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
5146{
5147        pmu_iio_cleanup_mapping(type, &icx_iio_mapping_group);
5148}
5149
/* ICX IIO uncore PMON: 6 boxes, 4 counters each, with topology mapping. */
static struct intel_uncore_type icx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IIO_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IIO_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= ICX_IIO_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.constraints		= icx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= icx_iio_attr_update,
	.get_topology		= icx_iio_get_topology,
	.set_mapping		= icx_iio_set_mapping,
	.cleanup_mapping	= icx_iio_cleanup_mapping,
};

/* ICX IRP uncore PMON: 6 boxes, 2 counters each. */
static struct intel_uncore_type icx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= ICX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= ICX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= ICX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offsets		= icx_msr_offsets,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_format_group,
};
5183
/* ICX M2PCIe events restricted to the first two counters. */
static struct event_constraint icx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

/* ICX M2PCIe uncore PMON: 6 boxes, 4 counters each. */
static struct intel_uncore_type icx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.event_ctl	= ICX_M2PCIE_MSR_PMON_CTL0,
	.perf_ctr	= ICX_M2PCIE_MSR_PMON_CTR0,
	.box_ctl	= ICX_M2PCIE_MSR_PMON_BOX_CTL,
	.msr_offsets	= icx_msr_offsets,
	.constraints	= icx_uncore_m2pcie_constraints,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_format_group,
};
5205
/* Free-running counter types available in the ICX IIO boxes. */
enum perf_uncore_icx_iio_freerunning_type_id {
	ICX_IIO_MSR_IOCLK,
	ICX_IIO_MSR_BW_IN,

	ICX_IIO_FREERUNNING_TYPE_MAX,
};

/* Per-box MSR offsets for the IOCLK free-running counters. */
static unsigned icx_iio_clk_freerunning_box_offsets[] = {
	0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0,
};

/* Per-box MSR offsets for the BW_IN free-running counters. */
static unsigned icx_iio_bw_freerunning_box_offsets[] = {
	0x0, 0x10, 0x20, 0x90, 0xa0, 0xb0,
};

/* { base MSR, box offset, counter offset, num counters, bits, box offsets } */
static struct freerunning_counters icx_iio_freerunning[] = {
	[ICX_IIO_MSR_IOCLK]	= { 0xa55, 0x1, 0x20, 1, 48, icx_iio_clk_freerunning_box_offsets },
	[ICX_IIO_MSR_BW_IN]	= { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};
5225
/* Event descriptions (with scale/unit) for the ICX IIO free-running counters. */
static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
5256
/* Pseudo uncore type wrapping the ICX IIO free-running counters (9 = 1 clk + 8 bw_in). */
static struct intel_uncore_type icx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 9,
	.num_boxes		= 6,
	.num_freerunning_types	= ICX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= icx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,	/* SKX ops are reused as-is */
	.event_descs		= icx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
5267
/* All MSR-accessed uncore PMU types on ICX; NULL-terminated. */
static struct intel_uncore_type *icx_msr_uncores[] = {
	&skx_uncore_ubox,
	&icx_uncore_chabox,
	&icx_uncore_iio,
	&icx_uncore_irp,
	&icx_uncore_m2pcie,
	&skx_uncore_pcu,
	&icx_uncore_iio_free_running,
	NULL,
};
5278
5279/*
5280 * To determine the number of CHAs, it should read CAPID6(Low) and CAPID7 (High)
 * registers, which are located at Device 30, Function 3
5282 */
5283#define ICX_CAPID6              0x9c
5284#define ICX_CAPID7              0xa0
5285
5286static u64 icx_count_chabox(void)
5287{
5288        struct pci_dev *dev = NULL;
5289        u64 caps = 0;
5290
5291        dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x345b, dev);
5292        if (!dev)
5293                goto out;
5294
5295        pci_read_config_dword(dev, ICX_CAPID6, (u32 *)&caps);
5296        pci_read_config_dword(dev, ICX_CAPID7, (u32 *)&caps + 1);
5297out:
5298        pci_dev_put(dev);
5299        return hweight64(caps);
5300}
5301
/* Register the ICX MSR uncores; the CHA box count is probed at runtime. */
void icx_uncore_cpu_init(void)
{
	u64 num_boxes = icx_count_chabox();

	/* icx_cha_msr_offsets bounds the per-box offsets we can address. */
	if (WARN_ON(num_boxes > ARRAY_SIZE(icx_cha_msr_offsets)))
		return;
	icx_uncore_chabox.num_boxes = num_boxes;
	uncore_msr_uncores = icx_msr_uncores;
}
5311
/* ICX M2M (mesh-to-memory) boxes, accessed via PCI config space; SNR register layout. */
static struct intel_uncore_type icx_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNR_M2M_PCI_PMON_CTR0,
	.event_ctl	= SNR_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= SNR_M2M_PCI_PMON_UMASK_EXT,
	.box_ctl	= SNR_M2M_PCI_PMON_BOX_CTL,
	.ops		= &snr_m2m_uncore_pci_ops,
	.format_group	= &snr_m2m_uncore_format_group,
};
5325
/* sysfs "format" attributes for the ICX UPI boxes. */
static struct attribute *icx_upi_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group icx_upi_uncore_format_group = {
	.name = "format",
	.attrs = icx_upi_uncore_formats_attr,
};
5339
/* ICX UPI link-layer boxes (3 links), accessed via PCI config space. */
static struct intel_uncore_type icx_uncore_upi = {
	.name		= "upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext	= ICX_UPI_CTL_UMASK_EXT,
	.box_ctl	= ICX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &icx_upi_uncore_format_group,
};
5353
/* Counter constraints for ICX M3UPI: {event code, allowed counter bitmask}. */
static struct event_constraint icx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1c, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	EVENT_CONSTRAINT_END
};
5365
/* ICX M3UPI (UPI mesh interface) boxes, one per UPI link. */
static struct intel_uncore_type icx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= ICX_M3UPI_PCI_PMON_CTR0,
	.event_ctl	= ICX_M3UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= ICX_M3UPI_PCI_PMON_BOX_CTL,
	.constraints	= icx_uncore_m3upi_constraints,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
5379
/* Indices into icx_pci_uncores[], also encoded in the PCI id table below. */
enum {
	ICX_PCI_UNCORE_M2M,
	ICX_PCI_UNCORE_UPI,
	ICX_PCI_UNCORE_M3UPI,
};

static struct intel_uncore_type *icx_pci_uncores[] = {
	[ICX_PCI_UNCORE_M2M]		= &icx_uncore_m2m,
	[ICX_PCI_UNCORE_UPI]		= &icx_uncore_upi,
	[ICX_PCI_UNCORE_M3UPI]		= &icx_uncore_m3upi,
	NULL,
};
5392
/*
 * PCI id table for the ICX PCI uncores. driver_data encodes
 * (device, function, uncore type index, box index) per the existing
 * UNCORE_PCI_DEV_FULL_DATA usage in this file.
 */
static const struct pci_device_id icx_uncore_pci_ids[] = {
	{ /* M2M 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 0, ICX_PCI_UNCORE_M2M, 0),
	},
	{ /* M2M 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 0, ICX_PCI_UNCORE_M2M, 1),
	},
	{ /* M2M 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, ICX_PCI_UNCORE_M2M, 2),
	},
	{ /* M2M 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x344a),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, ICX_PCI_UNCORE_M2M, 3),
	},
	{ /* UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(2, 1, ICX_PCI_UNCORE_UPI, 0),
	},
	{ /* UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(3, 1, ICX_PCI_UNCORE_UPI, 1),
	},
	{ /* UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3441),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(4, 1, ICX_PCI_UNCORE_UPI, 2),
	},
	{ /* M3UPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(5, 1, ICX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(6, 1, ICX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3446),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(7, 1, ICX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};
5436
/* Skeleton pci_driver: only the id table is used for device matching. */
static struct pci_driver icx_uncore_pci_driver = {
	.name		= "icx_uncore",
	.id_table	= icx_uncore_pci_ids,
};
5441
5442int icx_uncore_pci_init(void)
5443{
5444        /* ICX UBOX DID */
5445        int ret = snbep_pci2phy_map_init(0x3450, SKX_CPUNODEID,
5446                                         SKX_GIDNIDMAP, true);
5447
5448        if (ret)
5449                return ret;
5450
5451        uncore_pci_uncores = icx_pci_uncores;
5452        uncore_pci_driver = &icx_uncore_pci_driver;
5453        return 0;
5454}
5455
5456static void icx_uncore_imc_init_box(struct intel_uncore_box *box)
5457{
5458        unsigned int box_ctl = box->pmu->type->box_ctl +
5459                               box->pmu->type->mmio_offset * (box->pmu->pmu_idx % ICX_NUMBER_IMC_CHN);
5460        int mem_offset = (box->pmu->pmu_idx / ICX_NUMBER_IMC_CHN) * ICX_IMC_MEM_STRIDE +
5461                         SNR_IMC_MMIO_MEM0_OFFSET;
5462
5463        __snr_uncore_mmio_init_box(box, box_ctl, mem_offset,
5464                                   SNR_MC_DEVICE_ID);
5465}
5466
/* ICX IMC ops: box mapping is ICX-specific, the rest reuses SNR/MMIO helpers. */
static struct intel_uncore_ops icx_uncore_mmio_ops = {
	.init_box	= icx_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= snr_uncore_mmio_disable_box,
	.enable_box	= snr_uncore_mmio_enable_box,
	.disable_event	= snr_uncore_mmio_disable_event,
	.enable_event	= snr_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};
5476
/* ICX IMC boxes (12 channels), MMIO-accessed; register layout shared with SNR. */
static struct intel_uncore_type icx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 12,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl	= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.event_descs	= snr_uncore_imc_events,
	.perf_ctr	= SNR_IMC_MMIO_PMON_CTR0,
	.event_ctl	= SNR_IMC_MMIO_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNR_IMC_MMIO_PMON_BOX_CTL,
	.mmio_offset	= SNR_IMC_MMIO_OFFSET,
	.mmio_map_size	= SNR_IMC_MMIO_SIZE,
	.ops		= &icx_uncore_mmio_ops,
	.format_group	= &skx_uncore_format_group,
};
5495
/* Free-running counter groups exposed by the ICX IMC boxes. */
enum perf_uncore_icx_imc_freerunning_type_id {
	ICX_IMC_DCLK,
	ICX_IMC_DDR,
	ICX_IMC_DDRT,

	ICX_IMC_FREERUNNING_TYPE_MAX,
};

/* { counter_base, counter_offset, box_offset, num_counters, bits } */
static struct freerunning_counters icx_imc_freerunning[] = {
	[ICX_IMC_DCLK]	= { 0x22b0, 0x0, 0, 1, 48 },
	[ICX_IMC_DDR]	= { 0x2290, 0x8, 0, 2, 48 },
	[ICX_IMC_DDRT]	= { 0x22a0, 0x8, 0, 2, 48 },
};
5509
/*
 * Event aliases for the ICX IMC free-running counters.
 * scale 6.103515625e-5 == 64/2^20: counts appear to be in 64-byte units,
 * reported in MiB — TODO confirm against the uncore PMU reference.
 */
static struct uncore_event_desc icx_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(read,			"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(read.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(write,			"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(write.scale,		"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(write.unit,		"MiB"),

	INTEL_UNCORE_EVENT_DESC(ddrt_read,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_read.unit,		"MiB"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.scale,	"6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ddrt_write.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
5528
/* Map the MMIO region for an IMC free-running box; one box per memory controller. */
static void icx_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE +
			 SNR_IMC_MMIO_MEM0_OFFSET;

	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
			    mem_offset, SNR_MC_DEVICE_ID);
}

/* Free-running counters are read-only: no enable/disable callbacks needed. */
static struct intel_uncore_ops icx_uncore_imc_freerunning_ops = {
	.init_box	= icx_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};
5544
/* Pseudo uncore type wrapping the ICX IMC free-running counters (1 dclk + 2 ddr + 2 ddrt). */
static struct intel_uncore_type icx_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 5,
	.num_boxes		= 4,
	.num_freerunning_types	= ICX_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.freerunning		= icx_imc_freerunning,
	.ops			= &icx_uncore_imc_freerunning_ops,
	.event_descs		= icx_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
5556
/* All MMIO-accessed uncore PMU types on ICX; NULL-terminated. */
static struct intel_uncore_type *icx_mmio_uncores[] = {
	&icx_uncore_imc,
	&icx_uncore_imc_free_running,
	NULL,
};

/* Register the ICX MMIO uncores with the core uncore driver. */
void icx_uncore_mmio_init(void)
{
	uncore_mmio_uncores = icx_mmio_uncores;
}
5567
5568/* end of ICX uncore support */
5569
5570/* SPR uncore support */
5571
/*
 * Enable an SPR MSR-based event. The extra (filter) register, if the event
 * uses one, is programmed before the event control MSR so the filter is in
 * place when counting starts.
 */
static void spr_uncore_msr_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, reg1->config);

	wrmsrl(hwc->config_base, hwc->config);
}
5583
/* Disable an SPR MSR-based event: clear the filter register (if any) and the control MSR. */
static void spr_uncore_msr_disable_event(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, 0);

	wrmsrl(hwc->config_base, 0);
}
5595
/*
 * Per-event setup for SPR CHA. When the event enables TID filtering
 * (tid_en bit in the event config), route the TID filter value from
 * attr.config1 into this box's FILTER0 MSR via the extra_reg mechanism.
 */
static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	bool tie_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN);
	struct intel_uncore_type *type = box->pmu->type;

	if (tie_en) {
		/* box_ids[] maps the logical pmu_idx to the physical CHA id. */
		reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * type->box_ids[box->pmu->pmu_idx];
		reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID;
		reg1->idx = 0;
	}

	return 0;
}
5611
/* SPR CHA ops: generic discovery-based MSR access plus filter-aware (dis)enable. */
static struct intel_uncore_ops spr_uncore_chabox_ops = {
	.init_box		= intel_generic_uncore_msr_init_box,
	.disable_box		= intel_generic_uncore_msr_disable_box,
	.enable_box		= intel_generic_uncore_msr_enable_box,
	.disable_event		= spr_uncore_msr_disable_event,
	.enable_event		= spr_uncore_msr_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= spr_cha_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
5623
/* sysfs "format" attributes for the SPR CHA boxes (includes TID filtering). */
static struct attribute *spr_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_tid_en2.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid5.attr,
	NULL,
};
static const struct attribute_group spr_uncore_chabox_format_group = {
	.name = "format",
	.attrs = spr_uncore_cha_formats_attr,
};
5638
/* sysfs show handler: print the PMU's alias name for this uncore device. */
static ssize_t alias_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
	char pmu_name[UNCORE_PMU_NAME_LEN];

	uncore_get_alias_name(pmu_name, pmu);
	return sysfs_emit(buf, "%s\n", pmu_name);
}
5649
/* Read-only "alias" device attribute, packaged as an attribute group. */
static DEVICE_ATTR_RO(alias);

static struct attribute *uncore_alias_attrs[] = {
	&dev_attr_alias.attr,
	NULL
};

/* Generates uncore_alias_groups[] for use as an attr_update below. */
ATTRIBUTE_GROUPS(uncore_alias);
5658
/*
 * SPR CHA customization; counter layout/box count come from the
 * discovery tables and are merged in by uncore_type_customized_copy().
 */
static struct intel_uncore_type spr_uncore_chabox = {
	.name			= "cha",
	.event_mask		= SPR_CHA_PMON_EVENT_MASK,
	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &spr_uncore_chabox_ops,
	.format_group		= &spr_uncore_chabox_format_group,
	.attr_update		= uncore_alias_groups,
};
5669
/* SPR IIO customization (merged over the discovery-generated type). */
static struct intel_uncore_type spr_uncore_iio = {
	.name			= "iio",
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
	.format_group		= &snr_uncore_iio_format_group,
	.attr_update		= uncore_alias_groups,
	.constraints		= icx_uncore_iio_constraints,
};
5678
/* Default sysfs "format" attributes shared by most SPR uncore types. */
static struct attribute *spr_uncore_raw_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask_ext4.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group spr_uncore_raw_format_group = {
	.name			= "format",
	.attrs			= spr_uncore_raw_formats_attr,
};

/* Common field initializers for SPR uncore types using the raw format. */
#define SPR_UNCORE_COMMON_FORMAT()				\
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,	\
	.event_mask_ext		= SPR_RAW_EVENT_MASK_EXT,	\
	.format_group		= &spr_uncore_raw_format_group,	\
	.attr_update		= uncore_alias_groups
5698
5699static struct intel_uncore_type spr_uncore_irp = {
5700        SPR_UNCORE_COMMON_FORMAT(),
5701        .name                   = "irp",
5702
5703};
5704
/* Counter constraints for SPR M2PCIe: {event code, allowed counter bitmask}. */
static struct event_constraint spr_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type spr_uncore_m2pcie = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "m2pcie",
	.constraints		= spr_uncore_m2pcie_constraints,
};
5716
/* SPR PCU customization: only name and alias attribute; rest from discovery. */
static struct intel_uncore_type spr_uncore_pcu = {
	.name			= "pcu",
	.attr_update		= uncore_alias_groups,
};
5721
/*
 * Enable an SPR MMIO-based event. The fixed counter's control register
 * only takes the enable bit; generic counters get the full event config.
 */
static void spr_uncore_mmio_enable_event(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Box may have failed to map; silently skip in that case. */
	if (!box->io_addr)
		return;

	if (uncore_pmc_fixed(hwc->idx))
		writel(SNBEP_PMON_CTL_EN, box->io_addr + hwc->config_base);
	else
		writel(hwc->config, box->io_addr + hwc->config_base);
}
5735
/* SPR MMIO ops: generic discovery-based access, SPR-specific event enable. */
static struct intel_uncore_ops spr_uncore_mmio_ops = {
	.init_box		= intel_generic_uncore_mmio_init_box,
	.exit_box		= uncore_mmio_exit_box,
	.disable_box		= intel_generic_uncore_mmio_disable_box,
	.enable_box		= intel_generic_uncore_mmio_enable_box,
	.disable_event		= intel_generic_uncore_mmio_disable_event,
	.enable_event		= spr_uncore_mmio_enable_event,
	.read_counter		= uncore_mmio_read_counter,
};
5745
/* SPR IMC customization: adds the fixed (DCLK) counter on top of the common format. */
static struct intel_uncore_type spr_uncore_imc = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "imc",
	.fixed_ctr_bits		= 48,
	.fixed_ctr		= SNR_IMC_MMIO_PMON_FIXED_CTR,
	.fixed_ctl		= SNR_IMC_MMIO_PMON_FIXED_CTL,
	.ops			= &spr_uncore_mmio_ops,
};
5754
/*
 * Enable an SPR PCI-based event. The 64-bit config is written as two
 * dwords, high half first, so the enable bit (in the low dword) is
 * written last.
 */
static void spr_uncore_pci_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base + 4, (u32)(hwc->config >> 32));
	pci_write_config_dword(pdev, hwc->config_base, (u32)hwc->config);
}
5764
/* SPR PCI ops: generic discovery-based access, SPR-specific event enable. */
static struct intel_uncore_ops spr_uncore_pci_ops = {
	.init_box		= intel_generic_uncore_pci_init_box,
	.disable_box		= intel_generic_uncore_pci_disable_box,
	.enable_box		= intel_generic_uncore_pci_enable_box,
	.disable_event		= intel_generic_uncore_pci_disable_event,
	.enable_event		= spr_uncore_pci_enable_event,
	.read_counter		= intel_generic_uncore_pci_read_counter,
};

/* Common format plus the PCI ops, for the PCI-accessed SPR types below. */
#define SPR_UNCORE_PCI_COMMON_FORMAT()			\
	SPR_UNCORE_COMMON_FORMAT(),			\
	.ops			= &spr_uncore_pci_ops
5777
/* SPR M2M customization (PCI-accessed). */
static struct intel_uncore_type spr_uncore_m2m = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "m2m",
};

/* SPR UPI customization (PCI-accessed). */
static struct intel_uncore_type spr_uncore_upi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "upi",
};

/* SPR M3UPI customization; reuses the ICX M3UPI counter constraints. */
static struct intel_uncore_type spr_uncore_m3upi = {
	SPR_UNCORE_PCI_COMMON_FORMAT(),
	.name			= "m3upi",
	.constraints		= icx_uncore_m3upi_constraints,
};

/* SPR MDF customization. */
static struct intel_uncore_type spr_uncore_mdf = {
	SPR_UNCORE_COMMON_FORMAT(),
	.name			= "mdf",
};
5798
/*
 * spr_uncores[] is indexed by the discovery type id; NULL slots have no
 * software customization and use the discovery defaults unchanged.
 */
#define UNCORE_SPR_NUM_UNCORE_TYPES		12
#define UNCORE_SPR_IIO				1
#define UNCORE_SPR_IMC				6

static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = {
	&spr_uncore_chabox,
	&spr_uncore_iio,
	&spr_uncore_irp,
	&spr_uncore_m2pcie,
	&spr_uncore_pcu,
	NULL,
	&spr_uncore_imc,
	&spr_uncore_m2m,
	&spr_uncore_upi,
	&spr_uncore_m3upi,
	NULL,
	&spr_uncore_mdf,
};
5817
/* Free-running counter groups exposed by the SPR IIO boxes. */
enum perf_uncore_spr_iio_freerunning_type_id {
	SPR_IIO_MSR_IOCLK,
	SPR_IIO_MSR_BW_IN,
	SPR_IIO_MSR_BW_OUT,

	SPR_IIO_FREERUNNING_TYPE_MAX,
};

/* { counter_base, counter_offset, box_offset, num_counters, bits } */
static struct freerunning_counters spr_iio_freerunning[] = {
	[SPR_IIO_MSR_IOCLK]	= { 0x340e, 0x1, 0x10, 1, 48 },
	[SPR_IIO_MSR_BW_IN]	= { 0x3800, 0x1, 0x10, 8, 48 },
	[SPR_IIO_MSR_BW_OUT]	= { 0x3808, 0x1, 0x10, 8, 48 },
};
5831
/*
 * Event aliases for the SPR IIO free-running counters.
 * scale 3.814697266e-6 == 4/2^20: counts appear to be in 4-byte units,
 * reported in MiB — TODO confirm against the uncore PMU reference.
 */
static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
	/* Free-Running IIO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH IN Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit,	"MiB"),
	/* Free-Running IIO BANDWIDTH OUT Counters */
	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7,		"event=0xff,umask=0x37"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit,	"MiB"),
	{ /* end: all zeroes */ },
};
5887
/* Pseudo uncore type wrapping the SPR IIO free-running counters (1 clk + 8 in + 8 out). */
static struct intel_uncore_type spr_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_freerunning_types	= SPR_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= spr_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= spr_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
5897
/* Free-running counter groups exposed by the SPR IMC boxes. */
enum perf_uncore_spr_imc_freerunning_type_id {
	SPR_IMC_DCLK,
	SPR_IMC_PQ_CYCLES,

	SPR_IMC_FREERUNNING_TYPE_MAX,
};

/* { counter_base, counter_offset, box_offset, num_counters, bits } */
static struct freerunning_counters spr_imc_freerunning[] = {
	[SPR_IMC_DCLK]		= { 0x22b0, 0x0, 0, 1, 48 },
	[SPR_IMC_PQ_CYCLES]	= { 0x2318, 0x8, 0, 2, 48 },
};

/* Event aliases for the SPR IMC free-running counters. */
static struct uncore_event_desc spr_uncore_imc_freerunning_events[] = {
	INTEL_UNCORE_EVENT_DESC(dclk,			"event=0xff,umask=0x10"),

	INTEL_UNCORE_EVENT_DESC(rpq_cycles,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(wpq_cycles,		"event=0xff,umask=0x21"),
	{ /* end: all zeroes */ },
};
5917
/* PCI DID of the SPR memory controller device used to locate the MMIO BAR. */
#define SPR_MC_DEVICE_ID	0x3251

/* Map the MMIO region for an SPR IMC free-running box; one box per memory controller. */
static void spr_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	int mem_offset = box->pmu->pmu_idx * ICX_IMC_MEM_STRIDE + SNR_IMC_MMIO_MEM0_OFFSET;

	snr_uncore_mmio_map(box, uncore_mmio_box_ctl(box),
			    mem_offset, SPR_MC_DEVICE_ID);
}
5927
/* Free-running counters are read-only: no enable/disable callbacks needed. */
static struct intel_uncore_ops spr_uncore_imc_freerunning_ops = {
	.init_box	= spr_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

/* Pseudo uncore type wrapping the SPR IMC free-running counters (1 dclk + 2 pq). */
static struct intel_uncore_type spr_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.mmio_map_size		= SNR_IMC_MMIO_SIZE,
	.num_freerunning_types	= SPR_IMC_FREERUNNING_TYPE_MAX,
	.freerunning		= spr_imc_freerunning,
	.ops			= &spr_uncore_imc_freerunning_ops,
	.event_descs		= spr_uncore_imc_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
5945
/* Extra (non-discovery) uncore types appended after the discovered ones. */
#define UNCORE_SPR_MSR_EXTRA_UNCORES		1
#define UNCORE_SPR_MMIO_EXTRA_UNCORES		1

static struct intel_uncore_type *spr_msr_uncores[UNCORE_SPR_MSR_EXTRA_UNCORES] = {
	&spr_uncore_iio_free_running,
};

static struct intel_uncore_type *spr_mmio_uncores[UNCORE_SPR_MMIO_EXTRA_UNCORES] = {
	&spr_uncore_imc_free_running,
};
5956
5957static void uncore_type_customized_copy(struct intel_uncore_type *to_type,
5958                                        struct intel_uncore_type *from_type)
5959{
5960        if (!to_type || !from_type)
5961                return;
5962
5963        if (from_type->name)
5964                to_type->name = from_type->name;
5965        if (from_type->fixed_ctr_bits)
5966                to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
5967        if (from_type->event_mask)
5968                to_type->event_mask = from_type->event_mask;
5969        if (from_type->event_mask_ext)
5970                to_type->event_mask_ext = from_type->event_mask_ext;
5971        if (from_type->fixed_ctr)
5972                to_type->fixed_ctr = from_type->fixed_ctr;
5973        if (from_type->fixed_ctl)
5974                to_type->fixed_ctl = from_type->fixed_ctl;
5975        if (from_type->fixed_ctr_bits)
5976                to_type->fixed_ctr_bits = from_type->fixed_ctr_bits;
5977        if (from_type->num_shared_regs)
5978                to_type->num_shared_regs = from_type->num_shared_regs;
5979        if (from_type->constraints)
5980                to_type->constraints = from_type->constraints;
5981        if (from_type->ops)
5982                to_type->ops = from_type->ops;
5983        if (from_type->event_descs)
5984                to_type->event_descs = from_type->event_descs;
5985        if (from_type->format_group)
5986                to_type->format_group = from_type->format_group;
5987        if (from_type->attr_update)
5988                to_type->attr_update = from_type->attr_update;
5989}
5990
5991static struct intel_uncore_type **
5992uncore_get_uncores(enum uncore_access_type type_id, int num_extra,
5993                    struct intel_uncore_type **extra)
5994{
5995        struct intel_uncore_type **types, **start_types;
5996        int i;
5997
5998        start_types = types = intel_uncore_generic_init_uncores(type_id, num_extra);
5999
6000        /* Only copy the customized features */
6001        for (; *types; types++) {
6002                if ((*types)->type_id >= UNCORE_SPR_NUM_UNCORE_TYPES)
6003                        continue;
6004                uncore_type_customized_copy(*types, spr_uncores[(*types)->type_id]);
6005        }
6006
6007        for (i = 0; i < num_extra; i++, types++)
6008                *types = extra[i];
6009
6010        return start_types;
6011}
6012
6013static struct intel_uncore_type *
6014uncore_find_type_by_id(struct intel_uncore_type **types, int type_id)
6015{
6016        for (; *types; types++) {
6017                if (type_id == (*types)->type_id)
6018                        return *types;
6019        }
6020
6021        return NULL;
6022}
6023
6024static int uncore_type_max_boxes(struct intel_uncore_type **types,
6025                                 int type_id)
6026{
6027        struct intel_uncore_type *type;
6028        int i, max = 0;
6029
6030        type = uncore_find_type_by_id(types, type_id);
6031        if (!type)
6032                return 0;
6033
6034        for (i = 0; i < type->num_boxes; i++) {
6035                if (type->box_ids[i] > max)
6036                        max = type->box_ids[i];
6037        }
6038
6039        return max + 1;
6040}
6041
6042void spr_uncore_cpu_init(void)
6043{
6044        uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
6045                                                UNCORE_SPR_MSR_EXTRA_UNCORES,
6046                                                spr_msr_uncores);
6047
6048        spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
6049}
6050
/*
 * Register the PCI-accessed uncore types.  SPR has no PCI-specific extra
 * types, so only the discovered list is used.  Always returns 0.
 */
int spr_uncore_pci_init(void)
{
        uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL);
        return 0;
}
6056
6057void spr_uncore_mmio_init(void)
6058{
6059        int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true);
6060
6061        if (ret)
6062                uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL);
6063        else {
6064                uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO,
6065                                                         UNCORE_SPR_MMIO_EXTRA_UNCORES,
6066                                                         spr_mmio_uncores);
6067
6068                spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2;
6069        }
6070}
6071
6072/* end of SPR uncore support */
6073