/* linux/arch/x86/events/intel/uncore_snbep.c */
   1/* SandyBridge-EP/IvyTown uncore support */
   2#include "uncore.h"
   3
   4/* SNB-EP pci bus to socket mapping */
   5#define SNBEP_CPUNODEID                 0x40
   6#define SNBEP_GIDNIDMAP                 0x54
   7
   8/* SNB-EP Box level control */
   9#define SNBEP_PMON_BOX_CTL_RST_CTRL     (1 << 0)
  10#define SNBEP_PMON_BOX_CTL_RST_CTRS     (1 << 1)
  11#define SNBEP_PMON_BOX_CTL_FRZ          (1 << 8)
  12#define SNBEP_PMON_BOX_CTL_FRZ_EN       (1 << 16)
  13#define SNBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
  14                                         SNBEP_PMON_BOX_CTL_RST_CTRS | \
  15                                         SNBEP_PMON_BOX_CTL_FRZ_EN)
  16/* SNB-EP event control */
  17#define SNBEP_PMON_CTL_EV_SEL_MASK      0x000000ff
  18#define SNBEP_PMON_CTL_UMASK_MASK       0x0000ff00
  19#define SNBEP_PMON_CTL_RST              (1 << 17)
  20#define SNBEP_PMON_CTL_EDGE_DET         (1 << 18)
  21#define SNBEP_PMON_CTL_EV_SEL_EXT       (1 << 21)
  22#define SNBEP_PMON_CTL_EN               (1 << 22)
  23#define SNBEP_PMON_CTL_INVERT           (1 << 23)
  24#define SNBEP_PMON_CTL_TRESH_MASK       0xff000000
  25#define SNBEP_PMON_RAW_EVENT_MASK       (SNBEP_PMON_CTL_EV_SEL_MASK | \
  26                                         SNBEP_PMON_CTL_UMASK_MASK | \
  27                                         SNBEP_PMON_CTL_EDGE_DET | \
  28                                         SNBEP_PMON_CTL_INVERT | \
  29                                         SNBEP_PMON_CTL_TRESH_MASK)
  30
  31/* SNB-EP Ubox event control */
  32#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK         0x1f000000
  33#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK         \
  34                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
  35                                 SNBEP_PMON_CTL_UMASK_MASK | \
  36                                 SNBEP_PMON_CTL_EDGE_DET | \
  37                                 SNBEP_PMON_CTL_INVERT | \
  38                                 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
  39
  40#define SNBEP_CBO_PMON_CTL_TID_EN               (1 << 19)
  41#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK       (SNBEP_PMON_RAW_EVENT_MASK | \
  42                                                 SNBEP_CBO_PMON_CTL_TID_EN)
  43
  44/* SNB-EP PCU event control */
  45#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK     0x0000c000
  46#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK       0x1f000000
  47#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT       (1 << 30)
  48#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET     (1 << 31)
  49#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
  50                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
  51                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
  52                                 SNBEP_PMON_CTL_EDGE_DET | \
  53                                 SNBEP_PMON_CTL_INVERT | \
  54                                 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
  55                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
  56                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
  57
  58#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
  59                                (SNBEP_PMON_RAW_EVENT_MASK | \
  60                                 SNBEP_PMON_CTL_EV_SEL_EXT)
  61
  62/* SNB-EP pci control register */
  63#define SNBEP_PCI_PMON_BOX_CTL                  0xf4
  64#define SNBEP_PCI_PMON_CTL0                     0xd8
  65/* SNB-EP pci counter register */
  66#define SNBEP_PCI_PMON_CTR0                     0xa0
  67
  68/* SNB-EP home agent register */
  69#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0        0x40
  70#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1        0x44
  71#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH       0x48
  72/* SNB-EP memory controller register */
  73#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL         0xf0
  74#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR         0xd0
  75/* SNB-EP QPI register */
  76#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0          0x228
  77#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1          0x22c
  78#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0           0x238
  79#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1           0x23c
  80
  81/* SNB-EP Ubox register */
  82#define SNBEP_U_MSR_PMON_CTR0                   0xc16
  83#define SNBEP_U_MSR_PMON_CTL0                   0xc10
  84
  85#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL         0xc08
  86#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR         0xc09
  87
  88/* SNB-EP Cbo register */
  89#define SNBEP_C0_MSR_PMON_CTR0                  0xd16
  90#define SNBEP_C0_MSR_PMON_CTL0                  0xd10
  91#define SNBEP_C0_MSR_PMON_BOX_CTL               0xd04
  92#define SNBEP_C0_MSR_PMON_BOX_FILTER            0xd14
  93#define SNBEP_CBO_MSR_OFFSET                    0x20
  94
  95#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID       0x1f
  96#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID       0x3fc00
  97#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE     0x7c0000
  98#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC       0xff800000
  99
 100#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {    \
 101        .event = (e),                           \
 102        .msr = SNBEP_C0_MSR_PMON_BOX_FILTER,    \
 103        .config_mask = (m),                     \
 104        .idx = (i)                              \
 105}
 106
 107/* SNB-EP PCU register */
 108#define SNBEP_PCU_MSR_PMON_CTR0                 0xc36
 109#define SNBEP_PCU_MSR_PMON_CTL0                 0xc30
 110#define SNBEP_PCU_MSR_PMON_BOX_CTL              0xc24
 111#define SNBEP_PCU_MSR_PMON_BOX_FILTER           0xc34
 112#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK      0xffffffff
 113#define SNBEP_PCU_MSR_CORE_C3_CTR               0x3fc
 114#define SNBEP_PCU_MSR_CORE_C6_CTR               0x3fd
 115
 116/* IVBEP event control */
 117#define IVBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
 118                                         SNBEP_PMON_BOX_CTL_RST_CTRS)
 119#define IVBEP_PMON_RAW_EVENT_MASK               (SNBEP_PMON_CTL_EV_SEL_MASK | \
 120                                         SNBEP_PMON_CTL_UMASK_MASK | \
 121                                         SNBEP_PMON_CTL_EDGE_DET | \
 122                                         SNBEP_PMON_CTL_TRESH_MASK)
 123/* IVBEP Ubox */
 124#define IVBEP_U_MSR_PMON_GLOBAL_CTL             0xc00
 125#define IVBEP_U_PMON_GLOBAL_FRZ_ALL             (1 << 31)
 126#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL           (1 << 29)
 127
 128#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
 129                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
 130                                 SNBEP_PMON_CTL_UMASK_MASK | \
 131                                 SNBEP_PMON_CTL_EDGE_DET | \
 132                                 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
 133/* IVBEP Cbo */
 134#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK               (IVBEP_PMON_RAW_EVENT_MASK | \
 135                                                 SNBEP_CBO_PMON_CTL_TID_EN)
 136
 137#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x1fULL << 0)
 138#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 5)
 139#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x3fULL << 17)
 140#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
 141#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
 142#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
 143#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
 144#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)
 145
 146/* IVBEP home agent */
 147#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST         (1 << 16)
 148#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK                \
 149                                (IVBEP_PMON_RAW_EVENT_MASK | \
 150                                 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
 151/* IVBEP PCU */
 152#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
 153                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
 154                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
 155                                 SNBEP_PMON_CTL_EDGE_DET | \
 156                                 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
 157                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
 158                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
 159/* IVBEP QPI */
 160#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
 161                                (IVBEP_PMON_RAW_EVENT_MASK | \
 162                                 SNBEP_PMON_CTL_EV_SEL_EXT)
 163
 164#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
 165                                ((1ULL << (n)) - 1)))
 166
 167/* Haswell-EP Ubox */
 168#define HSWEP_U_MSR_PMON_CTR0                   0x709
 169#define HSWEP_U_MSR_PMON_CTL0                   0x705
 170#define HSWEP_U_MSR_PMON_FILTER                 0x707
 171
 172#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL         0x703
 173#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR         0x704
 174
 175#define HSWEP_U_MSR_PMON_BOX_FILTER_TID         (0x1 << 0)
 176#define HSWEP_U_MSR_PMON_BOX_FILTER_CID         (0x1fULL << 1)
 177#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
 178                                        (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
 179                                         HSWEP_U_MSR_PMON_BOX_FILTER_CID)
 180
 181/* Haswell-EP CBo */
 182#define HSWEP_C0_MSR_PMON_CTR0                  0xe08
 183#define HSWEP_C0_MSR_PMON_CTL0                  0xe01
 184#define HSWEP_C0_MSR_PMON_BOX_CTL                       0xe00
 185#define HSWEP_C0_MSR_PMON_BOX_FILTER0           0xe05
 186#define HSWEP_CBO_MSR_OFFSET                    0x10
 187
 188
 189#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x3fULL << 0)
 190#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 6)
 191#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x7fULL << 17)
 192#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
 193#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
 194#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
 195#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
 196#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)
 197
 198
 199/* Haswell-EP Sbox */
 200#define HSWEP_S0_MSR_PMON_CTR0                  0x726
 201#define HSWEP_S0_MSR_PMON_CTL0                  0x721
 202#define HSWEP_S0_MSR_PMON_BOX_CTL                       0x720
 203#define HSWEP_SBOX_MSR_OFFSET                   0xa
 204#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
 205                                                 SNBEP_CBO_PMON_CTL_TID_EN)
 206
 207/* Haswell-EP PCU */
 208#define HSWEP_PCU_MSR_PMON_CTR0                 0x717
 209#define HSWEP_PCU_MSR_PMON_CTL0                 0x711
 210#define HSWEP_PCU_MSR_PMON_BOX_CTL              0x710
 211#define HSWEP_PCU_MSR_PMON_BOX_FILTER           0x715
 212
 213/* KNL Ubox */
 214#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
 215                                        (SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
 216                                                SNBEP_CBO_PMON_CTL_TID_EN)
 217/* KNL CHA */
 218#define KNL_CHA_MSR_OFFSET                      0xc
 219#define KNL_CHA_MSR_PMON_CTL_QOR                (1 << 16)
 220#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
 221                                        (SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
 222                                         KNL_CHA_MSR_PMON_CTL_QOR)
 223#define KNL_CHA_MSR_PMON_BOX_FILTER_TID         0x1ff
 224#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE       (7 << 18)
 225#define KNL_CHA_MSR_PMON_BOX_FILTER_OP          (0xfffffe2aULL << 32)
 226#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE (0x1ULL << 32)
 227#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE  (0x1ULL << 33)
 228#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC         (0x1ULL << 37)
 229
 230/* KNL EDC/MC UCLK */
 231#define KNL_UCLK_MSR_PMON_CTR0_LOW              0x400
 232#define KNL_UCLK_MSR_PMON_CTL0                  0x420
 233#define KNL_UCLK_MSR_PMON_BOX_CTL               0x430
 234#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW        0x44c
 235#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL        0x454
 236#define KNL_PMON_FIXED_CTL_EN                   0x1
 237
 238/* KNL EDC */
 239#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW         0xa00
 240#define KNL_EDC0_ECLK_MSR_PMON_CTL0             0xa20
 241#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL          0xa30
 242#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW   0xa3c
 243#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL   0xa44
 244
 245/* KNL MC */
 246#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW           0xb00
 247#define KNL_MC0_CH0_MSR_PMON_CTL0               0xb20
 248#define KNL_MC0_CH0_MSR_PMON_BOX_CTL            0xb30
 249#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW          0xb3c
 250#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL          0xb44
 251
 252/* KNL IRP */
 253#define KNL_IRP_PCI_PMON_BOX_CTL                0xf0
 254#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
 255                                                 KNL_CHA_MSR_PMON_CTL_QOR)
 256/* KNL PCU */
 257#define KNL_PCU_PMON_CTL_EV_SEL_MASK            0x0000007f
 258#define KNL_PCU_PMON_CTL_USE_OCC_CTR            (1 << 7)
 259#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK         0x3f000000
 260#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
 261                                (KNL_PCU_PMON_CTL_EV_SEL_MASK | \
 262                                 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
 263                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
 264                                 SNBEP_PMON_CTL_EDGE_DET | \
 265                                 SNBEP_CBO_PMON_CTL_TID_EN | \
 266                                 SNBEP_PMON_CTL_INVERT | \
 267                                 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
 268                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
 269                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
 270
 271/* SKX pci bus to socket mapping */
 272#define SKX_CPUNODEID                   0xc0
 273#define SKX_GIDNIDMAP                   0xd4
 274
 275/* SKX CHA */
 276#define SKX_CHA_MSR_PMON_BOX_FILTER_TID         (0x1ffULL << 0)
 277#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK        (0xfULL << 9)
 278#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE       (0x3ffULL << 17)
 279#define SKX_CHA_MSR_PMON_BOX_FILTER_REM         (0x1ULL << 32)
 280#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC         (0x1ULL << 33)
 281#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC     (0x1ULL << 35)
 282#define SKX_CHA_MSR_PMON_BOX_FILTER_NM          (0x1ULL << 36)
 283#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM      (0x1ULL << 37)
 284#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0        (0x3ffULL << 41)
 285#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1        (0x3ffULL << 51)
 286#define SKX_CHA_MSR_PMON_BOX_FILTER_C6          (0x1ULL << 61)
 287#define SKX_CHA_MSR_PMON_BOX_FILTER_NC          (0x1ULL << 62)
 288#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC        (0x1ULL << 63)
 289
 290/* SKX IIO */
 291#define SKX_IIO0_MSR_PMON_CTL0          0xa48
 292#define SKX_IIO0_MSR_PMON_CTR0          0xa41
 293#define SKX_IIO0_MSR_PMON_BOX_CTL       0xa40
 294#define SKX_IIO_MSR_OFFSET              0x20
 295
 296#define SKX_PMON_CTL_TRESH_MASK         (0xff << 24)
 297#define SKX_PMON_CTL_TRESH_MASK_EXT     (0xf)
 298#define SKX_PMON_CTL_CH_MASK            (0xff << 4)
 299#define SKX_PMON_CTL_FC_MASK            (0x7 << 12)
 300#define SKX_IIO_PMON_RAW_EVENT_MASK     (SNBEP_PMON_CTL_EV_SEL_MASK | \
 301                                         SNBEP_PMON_CTL_UMASK_MASK | \
 302                                         SNBEP_PMON_CTL_EDGE_DET | \
 303                                         SNBEP_PMON_CTL_INVERT | \
 304                                         SKX_PMON_CTL_TRESH_MASK)
 305#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT (SKX_PMON_CTL_TRESH_MASK_EXT | \
 306                                         SKX_PMON_CTL_CH_MASK | \
 307                                         SKX_PMON_CTL_FC_MASK)
 308
 309/* SKX IRP */
 310#define SKX_IRP0_MSR_PMON_CTL0          0xa5b
 311#define SKX_IRP0_MSR_PMON_CTR0          0xa59
 312#define SKX_IRP0_MSR_PMON_BOX_CTL       0xa58
 313#define SKX_IRP_MSR_OFFSET              0x20
 314
 315/* SKX UPI */
 316#define SKX_UPI_PCI_PMON_CTL0           0x350
 317#define SKX_UPI_PCI_PMON_CTR0           0x318
 318#define SKX_UPI_PCI_PMON_BOX_CTL        0x378
 319#define SKX_PMON_CTL_UMASK_EXT          0xff
 320
 321/* SKX M2M */
 322#define SKX_M2M_PCI_PMON_CTL0           0x228
 323#define SKX_M2M_PCI_PMON_CTR0           0x200
 324#define SKX_M2M_PCI_PMON_BOX_CTL        0x258
 325
/*
 * sysfs "format" attribute definitions: each maps a named event field to
 * its bit range in perf_event_attr config/config1/config2.  Several names
 * exist in numbered variants because the field width/position differs
 * between uncore generations (SNB-EP, IVB-EP, HSX, KNL, SKX).
 */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-39");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
/* NOTE(review): occ_edge spanning config:14-51 overlaps other fields — looks odd but matches the existing layout; confirm against the uncore PMU guide */
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
/* box filter fields live in config1 */
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_link4, filter_link, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
/* QPI packet match (config1) and mask (config2) fields */
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
 399
 400static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
 401{
 402        struct pci_dev *pdev = box->pci_dev;
 403        int box_ctl = uncore_pci_box_ctl(box);
 404        u32 config = 0;
 405
 406        if (!pci_read_config_dword(pdev, box_ctl, &config)) {
 407                config |= SNBEP_PMON_BOX_CTL_FRZ;
 408                pci_write_config_dword(pdev, box_ctl, config);
 409        }
 410}
 411
 412static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
 413{
 414        struct pci_dev *pdev = box->pci_dev;
 415        int box_ctl = uncore_pci_box_ctl(box);
 416        u32 config = 0;
 417
 418        if (!pci_read_config_dword(pdev, box_ctl, &config)) {
 419                config &= ~SNBEP_PMON_BOX_CTL_FRZ;
 420                pci_write_config_dword(pdev, box_ctl, config);
 421        }
 422}
 423
 424static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
 425{
 426        struct pci_dev *pdev = box->pci_dev;
 427        struct hw_perf_event *hwc = &event->hw;
 428
 429        pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
 430}
 431
 432static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
 433{
 434        struct pci_dev *pdev = box->pci_dev;
 435        struct hw_perf_event *hwc = &event->hw;
 436
 437        pci_write_config_dword(pdev, hwc->config_base, hwc->config);
 438}
 439
 440static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
 441{
 442        struct pci_dev *pdev = box->pci_dev;
 443        struct hw_perf_event *hwc = &event->hw;
 444        u64 count = 0;
 445
 446        pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
 447        pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
 448
 449        return count;
 450}
 451
 452static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
 453{
 454        struct pci_dev *pdev = box->pci_dev;
 455        int box_ctl = uncore_pci_box_ctl(box);
 456
 457        pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
 458}
 459
 460static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
 461{
 462        u64 config;
 463        unsigned msr;
 464
 465        msr = uncore_msr_box_ctl(box);
 466        if (msr) {
 467                rdmsrl(msr, config);
 468                config |= SNBEP_PMON_BOX_CTL_FRZ;
 469                wrmsrl(msr, config);
 470        }
 471}
 472
 473static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
 474{
 475        u64 config;
 476        unsigned msr;
 477
 478        msr = uncore_msr_box_ctl(box);
 479        if (msr) {
 480                rdmsrl(msr, config);
 481                config &= ~SNBEP_PMON_BOX_CTL_FRZ;
 482                wrmsrl(msr, config);
 483        }
 484}
 485
 486static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
 487{
 488        struct hw_perf_event *hwc = &event->hw;
 489        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
 490
 491        if (reg1->idx != EXTRA_REG_NONE)
 492                wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
 493
 494        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
 495}
 496
 497static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
 498                                        struct perf_event *event)
 499{
 500        struct hw_perf_event *hwc = &event->hw;
 501
 502        wrmsrl(hwc->config_base, hwc->config);
 503}
 504
 505static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
 506{
 507        unsigned msr = uncore_msr_box_ctl(box);
 508
 509        if (msr)
 510                wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
 511}
 512
/* Generic SNB-EP event format: event/umask/edge/inv plus an 8-bit threshold. */
static struct attribute *snbep_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};
 521
/* Ubox event format: same as the generic one but with a 5-bit threshold. */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        NULL,
};
 530
/* Cbox event format: adds tid_en plus the box filter fields (config1). */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid.attr,
        &format_attr_filter_nid.attr,
        &format_attr_filter_state.attr,
        &format_attr_filter_opc.attr,
        NULL,
};
 544
/* PCU event format: occupancy sub-event select/invert/edge and four band filters. */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_occ_sel.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        &format_attr_occ_invert.attr,
        &format_attr_occ_edge.attr,
        &format_attr_filter_band0.attr,
        &format_attr_filter_band1.attr,
        &format_attr_filter_band2.attr,
        &format_attr_filter_band3.attr,
        NULL,
};
 559
/*
 * QPI event format: extended event select (event_ext) plus the packet
 * match (config1) and mask (config2) register fields.
 */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
        &format_attr_event_ext.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_match_rds.attr,
        &format_attr_match_rnid30.attr,
        &format_attr_match_rnid4.attr,
        &format_attr_match_dnid.attr,
        &format_attr_match_mc.attr,
        &format_attr_match_opc.attr,
        &format_attr_match_vnw.attr,
        &format_attr_match0.attr,
        &format_attr_match1.attr,
        &format_attr_mask_rds.attr,
        &format_attr_mask_rnid30.attr,
        &format_attr_mask_rnid4.attr,
        &format_attr_mask_dnid.attr,
        &format_attr_mask_mc.attr,
        &format_attr_mask_opc.attr,
        &format_attr_mask_vnw.attr,
        &format_attr_mask0.attr,
        &format_attr_mask1.attr,
        NULL,
};
 586
/*
 * Predefined IMC events.  The CAS count scale 6.103515625e-5 equals
 * 64 / 2^20: each CAS moves one 64-byte line, reported in MiB.
 */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
        { /* end: all zeroes */ },
};
 597
/*
 * Predefined QPI events.  Event codes above 0xff (drs_data, ncb_data)
 * rely on the extended event-select bit (config:21, see event_ext).
 */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
        INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
        INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
        INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
        { /* end: all zeroes */ },
};
 605
/* sysfs "format" attribute groups binding each box type to its attr list. */
static struct attribute_group snbep_uncore_format_group = {
        .name = "format",
        .attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
        .name = "format",
        .attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_cbox_format_group = {
        .name = "format",
        .attrs = snbep_uncore_cbox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
        .name = "format",
        .attrs = snbep_uncore_pcu_formats_attr,
};

static struct attribute_group snbep_uncore_qpi_format_group = {
        .name = "format",
        .attrs = snbep_uncore_qpi_formats_attr,
};
 630
 631#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                    \
 632        .disable_box    = snbep_uncore_msr_disable_box,         \
 633        .enable_box     = snbep_uncore_msr_enable_box,          \
 634        .disable_event  = snbep_uncore_msr_disable_event,       \
 635        .enable_event   = snbep_uncore_msr_enable_event,        \
 636        .read_counter   = uncore_msr_read_counter
 637
 638#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
 639        __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),                   \
 640        .init_box       = snbep_uncore_msr_init_box             \
 641
/* Default MSR-based box ops: init/freeze/unfreeze box, enable/disable/read events. */
static struct intel_uncore_ops snbep_uncore_msr_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
 645
 646#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()                      \
 647        .init_box       = snbep_uncore_pci_init_box,            \
 648        .disable_box    = snbep_uncore_pci_disable_box,         \
 649        .enable_box     = snbep_uncore_pci_enable_box,          \
 650        .disable_event  = snbep_uncore_pci_disable_event,       \
 651        .read_counter   = snbep_uncore_pci_read_counter
 652
 653static struct intel_uncore_ops snbep_uncore_pci_ops = {
 654        SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
 655        .enable_event   = snbep_uncore_pci_enable_event,        \
 656};
 657
/*
 * Cbox counter constraints: maps an event code to the bitmask of
 * counters allowed to count it.  0x1f needs the overlap variant because
 * its mask (0xe) shares bits with other constraints' masks.
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
        UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
        EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
        EVENT_CONSTRAINT_END
};
 687
/* SNB-EP R2PCIe event constraints (event code -> usable counter mask). */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};
 701
/* SNB-EP R3QPI event constraints (event code -> usable counter mask). */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
 733
/* SNB-EP U-box (system config controller): 2 counters + a fixed UCLK counter. */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
 748
/*
 * SNB-EP C-box extra-register table: maps event+umask encodings to the
 * filter-register fields they require (idx is a bitmask of filter fields,
 * consumed by snbep_cbox_hw_config()).
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
 777
 778static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
 779{
 780        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
 781        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
 782        int i;
 783
 784        if (uncore_box_is_fake(box))
 785                return;
 786
 787        for (i = 0; i < 5; i++) {
 788                if (reg1->alloc & (0x1 << i))
 789                        atomic_sub(1 << (i * 6), &er->ref);
 790        }
 791        reg1->alloc = 0;
 792}
 793
/*
 * Allocate the shared C-box filter-register fields requested by @event.
 *
 * Each of the five filter fields has a 6-bit reference count packed into
 * er->ref.  A field may be shared only when the requested filter value
 * matches the value already programmed.  On a conflict, roll back the
 * references taken so far and return the empty constraint.
 * @cbox_filter_mask maps a field-index bitmask to the MSR bits it owns.
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* a real (non-fake) box keeps its allocations across calls */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		/* field unused, or already programmed with an identical value */
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* undo the references taken before the conflicting field */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
 840
 841static u64 snbep_cbox_filter_mask(int fields)
 842{
 843        u64 mask = 0;
 844
 845        if (fields & 0x1)
 846                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
 847        if (fields & 0x2)
 848                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
 849        if (fields & 0x4)
 850                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
 851        if (fields & 0x8)
 852                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
 853
 854        return mask;
 855}
 856
/* C-box constraint hook: delegate with the SNB-EP filter-field mask helper. */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
 862
 863static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
 864{
 865        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
 866        struct extra_reg *er;
 867        int idx = 0;
 868
 869        for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
 870                if (er->event != (event->hw.config & er->config_mask))
 871                        continue;
 872                idx |= er->idx;
 873        }
 874
 875        if (idx) {
 876                reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
 877                        SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
 878                reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
 879                reg1->idx = idx;
 880        }
 881        return 0;
 882}
 883
/* C-box ops: common MSR ops plus filter-register constraint management. */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
 890
/* SNB-EP C-box (LLC coherence engine), one box per core (up to 8). */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,	/* the per-box filter register */
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
 906
 907static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
 908{
 909        struct hw_perf_event *hwc = &event->hw;
 910        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
 911        u64 config = reg1->config;
 912
 913        if (new_idx > reg1->idx)
 914                config <<= 8 * (new_idx - reg1->idx);
 915        else
 916                config >>= 8 * (reg1->idx - new_idx);
 917
 918        if (modify) {
 919                hwc->config += new_idx - reg1->idx;
 920                reg1->config = config;
 921                reg1->idx = new_idx;
 922        }
 923        return config;
 924}
 925
/*
 * Claim one byte of the shared PCU filter register for @event.
 *
 * Each of the four occupancy-band filters occupies one byte of the shared
 * register, refcounted in 8-bit fields of er->ref.  If the requested byte
 * is held with a different value, try the remaining bytes (rotating the
 * filter value with snbep_pcu_alter_er()) before giving up and returning
 * the empty constraint.
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* nothing to claim, or a real box that already holds its byte */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	/* byte free, or already programmed with an identical value */
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* commit the (possibly rotated) byte assignment */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
 967
 968static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
 969{
 970        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
 971        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
 972
 973        if (uncore_box_is_fake(box) || !reg1->alloc)
 974                return;
 975
 976        atomic_sub(1 << (reg1->idx * 8), &er->ref);
 977        reg1->alloc = 0;
 978}
 979
 980static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
 981{
 982        struct hw_perf_event *hwc = &event->hw;
 983        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
 984        int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
 985
 986        if (ev_sel >= 0xb && ev_sel <= 0xe) {
 987                reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
 988                reg1->idx = ev_sel - 0xb;
 989                reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
 990        }
 991        return 0;
 992}
 993
/* PCU ops: common MSR ops plus shared filter-byte constraint management. */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
1000
/* SNB-EP PCU (power control unit), one box with a shared filter register. */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,	/* the occupancy-band filter register */
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
1014
/* NULL-terminated list of all MSR-programmed SNB-EP uncore box types. */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
1021
1022void snbep_uncore_cpu_init(void)
1023{
1024        if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1025                snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1026        uncore_msr_uncores = snbep_msr_uncores;
1027}
1028
/* Indices into uncore_extra_pci_dev[pkg].dev[] for auxiliary PCI devices. */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	HSWEP_PCI_PCU_3,
};
1034
1035static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1036{
1037        struct hw_perf_event *hwc = &event->hw;
1038        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1039        struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1040
1041        if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1042                reg1->idx = 0;
1043                reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1044                reg1->config = event->attr.config1;
1045                reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1046                reg2->config = event->attr.config2;
1047        }
1048        return 0;
1049}
1050
/*
 * Enable a QPI event, first programming the packet match/mask filter
 * registers, which live on a separate "filter" PCI device (one per port).
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		/* look up this port's filter device within the package */
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int pkg = topology_phys_to_logical_pkg(box->pci_phys_id);
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx];

		if (filter_pdev) {
			/* 64-bit match/mask values, written as two dwords each */
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1077
/* QPI ops: common PCI ops with a filter-aware enable_event override. */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
1085
/* Common field initializers shared by the SNB-EP PCI uncore types below. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()                          \
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,                  \
	.event_ctl	= SNBEP_PCI_PMON_CTL0,                  \
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,            \
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,               \
	.ops		= &snbep_uncore_pci_ops,                \
	.format_group	= &snbep_uncore_format_group
1093
/* SNB-EP Home Agent. */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* SNB-EP integrated memory controller, one box per channel. */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* SNB-EP QPI link layer, one box per port; uses the filter-aware ops. */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};


/* SNB-EP R2PCIe ring-to-PCIe interface. */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* SNB-EP R3QPI ring-to-QPI interface, one box per link. */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1147
/* Indices into snbep_pci_uncores[], referenced by the PCI id table below. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

/* NULL-terminated list of all PCI-programmed SNB-EP uncore box types. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1164
/*
 * PCI ids of the SNB-EP uncore devices; driver_data encodes the uncore
 * type index plus the box (or extra-device) index.
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1218
/* PCI driver stub: ids only, probing is handled by the common uncore core. */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1223
/*
 * Build the PCI bus number -> physical socket id mapping.
 *
 * Walks every UBOX device (@devid), reads its node id (@nodeid_loc) and
 * the node-id mapping register (@idmap_loc), then records which socket
 * each UBOX bus belongs to.  Buses without a UBOX inherit the mapping of
 * a neighbouring UBOX bus, scanned in @reverse order.
 * Returns 0 on success or a negative errno.
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
		if (err)
			break;
		nodeid = config;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
		if (err)
			break;

		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			} else {
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	/* drop the reference held from the last successful pci_get_device() */
	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
1304
1305int snbep_uncore_pci_init(void)
1306{
1307        int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1308        if (ret)
1309                return ret;
1310        uncore_pci_uncores = snbep_pci_uncores;
1311        uncore_pci_driver = &snbep_uncore_pci_driver;
1312        return 0;
1313}
1314/* end of Sandy Bridge-EP uncore support */
1315
1316/* IvyTown uncore support */
1317static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1318{
1319        unsigned msr = uncore_msr_box_ctl(box);
1320        if (msr)
1321                wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1322}
1323
1324static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1325{
1326        struct pci_dev *pdev = box->pci_dev;
1327
1328        pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1329}
1330
/* IVT MSR ops: reuse the SNB-EP handlers with the IVT init_box. */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()                      \
	.init_box	= ivbep_uncore_msr_init_box,            \
	.disable_box	= snbep_uncore_msr_disable_box,         \
	.enable_box	= snbep_uncore_msr_enable_box,          \
	.disable_event	= snbep_uncore_msr_disable_event,       \
	.enable_event	= snbep_uncore_msr_enable_event,        \
	.read_counter	= uncore_msr_read_counter

/* Default IVT MSR ops for boxes with no extra constraint handling. */
static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
1342
/* Default IVT PCI ops: SNB-EP handlers with the IVT init_box. */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
1351
/* Common field initializers shared by the IVT PCI uncore types. */
#define IVBEP_UNCORE_PCI_COMMON_INIT()                          \
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,                  \
	.event_ctl	= SNBEP_PCI_PMON_CTL0,                  \
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,            \
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,               \
	.ops		= &ivbep_uncore_pci_ops,                        \
	.format_group	= &ivbep_uncore_format_group
1359
/* Generic IVT event-encoding fields (8-bit threshold). */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* IVT U-box fields (5-bit threshold). */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* IVT C-box fields, including the extended filter-register fields. */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

/* IVT PCU fields: occupancy selection/inversion plus band filters. */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* IVT QPI fields: extended event select plus packet match/mask filters. */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1434
/* sysfs "format" groups for the IVT uncore boxes. */
static struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1459
/* IVT U-box: 2 generic 44-bit counters plus one 48-bit fixed (UCLK) counter. */
static struct intel_uncore_type ivbep_uncore_ubox = {
        .name           = "ubox",
        .num_counters   = 2,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNBEP_U_MSR_PMON_CTR0,
        .event_ctl      = SNBEP_U_MSR_PMON_CTL0,
        .event_mask     = IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
        .fixed_ctr      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
        .fixed_ctl      = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
        .ops            = &ivbep_uncore_msr_ops,
        .format_group   = &ivbep_uncore_ubox_format_group,
};
1474
/*
 * IVT C-box extra-register table: each entry matches an (event, umask)
 * encoding against config & config_mask and contributes its idx bits,
 * which ivbep_cbox_hw_config() ORs together and feeds to
 * ivbep_cbox_filter_mask() to select the valid filter fields.
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
        SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
        EVENT_EXTRA_END
};
1515
1516static u64 ivbep_cbox_filter_mask(int fields)
1517{
1518        u64 mask = 0;
1519
1520        if (fields & 0x1)
1521                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1522        if (fields & 0x2)
1523                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1524        if (fields & 0x4)
1525                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1526        if (fields & 0x8)
1527                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1528        if (fields & 0x10) {
1529                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1530                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1531                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1532                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1533        }
1534
1535        return mask;
1536}
1537
/* Thin wrapper: shared C-box constraint logic with the IVT filter mask. */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1543
1544static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1545{
1546        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1547        struct extra_reg *er;
1548        int idx = 0;
1549
1550        for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1551                if (er->event != (event->hw.config & er->config_mask))
1552                        continue;
1553                idx |= er->idx;
1554        }
1555
1556        if (idx) {
1557                reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1558                        SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1559                reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1560                reg1->idx = idx;
1561        }
1562        return 0;
1563}
1564
/*
 * Program the shared C-box filter (if the event uses one), then enable
 * the event counter.  The 64-bit filter value is split across two MSRs:
 * low 32 bits at reg1->reg, high 32 bits at reg1->reg + 6.
 * NOTE(review): the "+ 6" MSR spacing looks odd but matches the IVT
 * C-box MSR layout as used upstream — verify against the SDM before
 * changing it.
 */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

        if (reg1->idx != EXTRA_REG_NONE) {
                u64 filter = uncore_shared_reg_config(box, 0);
                wrmsrl(reg1->reg, filter & 0xffffffff);
                wrmsrl(reg1->reg + 6, filter >> 32);
        }

        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1578
/* IVT C-box ops: SNB-EP routines plus IVT-specific enable/config/constraint. */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
        .init_box               = ivbep_uncore_msr_init_box,
        .disable_box            = snbep_uncore_msr_disable_box,
        .enable_box             = snbep_uncore_msr_enable_box,
        .disable_event          = snbep_uncore_msr_disable_event,
        .enable_event           = ivbep_cbox_enable_event,
        .read_counter           = uncore_msr_read_counter,
        .hw_config              = ivbep_cbox_hw_config,
        .get_constraint         = ivbep_cbox_get_constraint,
        .put_constraint         = snbep_cbox_put_constraint,
};

/* IVT C-box: up to 15 boxes (clamped to core count in ivbep_uncore_cpu_init). */
static struct intel_uncore_type ivbep_uncore_cbox = {
        .name                   = "cbox",
        .num_counters           = 4,
        .num_boxes              = 15,
        .perf_ctr_bits          = 44,
        .event_ctl              = SNBEP_C0_MSR_PMON_CTL0,
        .perf_ctr               = SNBEP_C0_MSR_PMON_CTR0,
        .event_mask             = IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_C0_MSR_PMON_BOX_CTL,
        .msr_offset             = SNBEP_CBO_MSR_OFFSET,
        .num_shared_regs        = 1,
        .constraints            = snbep_uncore_cbox_constraints,
        .ops                    = &ivbep_uncore_cbox_ops,
        .format_group           = &ivbep_uncore_cbox_format_group,
};
1606
/* IVT PCU ops: common IVT MSR ops plus the SNB-EP PCU config/constraints. */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
        IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = snbep_pcu_hw_config,
        .get_constraint         = snbep_pcu_get_constraint,
        .put_constraint         = snbep_pcu_put_constraint,
};

/* IVT power control unit PMU (single box, one shared filter register). */
static struct intel_uncore_type ivbep_uncore_pcu = {
        .name                   = "pcu",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCU_MSR_PMON_CTR0,
        .event_ctl              = SNBEP_PCU_MSR_PMON_CTL0,
        .event_mask             = IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCU_MSR_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &ivbep_uncore_pcu_ops,
        .format_group           = &ivbep_uncore_pcu_format_group,
};
1627
/* NULL-terminated list of IVT MSR-based uncore PMU types. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
        &ivbep_uncore_ubox,
        &ivbep_uncore_cbox,
        &ivbep_uncore_pcu,
        NULL,
};
1634
1635void ivbep_uncore_cpu_init(void)
1636{
1637        if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1638                ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1639        uncore_msr_uncores = ivbep_msr_uncores;
1640}
1641
/* IVT home agent PMU: two boxes, common IVT PCI register layout. */
static struct intel_uncore_type ivbep_uncore_ha = {
        .name           = "ha",
        .num_counters   = 4,
        .num_boxes      = 2,
        .perf_ctr_bits  = 48,
        IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IVT memory controller PMU: 8 channels, plus a fixed (DCLK) counter. */
static struct intel_uncore_type ivbep_uncore_imc = {
        .name           = "imc",
        .num_counters   = 4,
        .num_boxes      = 8,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
        .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
        .event_descs    = snbep_uncore_imc_events,
        IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1661
/*
 * Registers in IRP boxes are not properly aligned, so the per-counter
 * control and counter config-space offsets are looked up via these
 * tables (indexed by hwc->idx) instead of being computed from a base.
 */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};
1665
1666static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
1667{
1668        struct pci_dev *pdev = box->pci_dev;
1669        struct hw_perf_event *hwc = &event->hw;
1670
1671        pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
1672                               hwc->config | SNBEP_PMON_CTL_EN);
1673}
1674
1675static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
1676{
1677        struct pci_dev *pdev = box->pci_dev;
1678        struct hw_perf_event *hwc = &event->hw;
1679
1680        pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
1681}
1682
1683static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1684{
1685        struct pci_dev *pdev = box->pci_dev;
1686        struct hw_perf_event *hwc = &event->hw;
1687        u64 count = 0;
1688
1689        pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1690        pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1691
1692        return count;
1693}
1694
/* IRP ops: IVT PCI box handling with the misaligned-register accessors. */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
        .init_box       = ivbep_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = ivbep_uncore_irp_disable_event,
        .enable_event   = ivbep_uncore_irp_enable_event,
        .read_counter   = ivbep_uncore_irp_read_counter,
};

/* IVT IRP (IIO ring port) PMU; no perf_ctr/event_ctl base — see the tables. */
static struct intel_uncore_type ivbep_uncore_irp = {
        .name                   = "irp",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .event_mask             = IVBEP_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .ops                    = &ivbep_uncore_irp_ops,
        .format_group           = &ivbep_uncore_format_group,
};
1714
/* QPI ops: SNB-EP match/mask handling on top of IVT PCI box init. */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
        .init_box       = ivbep_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = snbep_uncore_pci_disable_event,
        .enable_event   = snbep_qpi_enable_event,
        .read_counter   = snbep_uncore_pci_read_counter,
        .hw_config      = snbep_qpi_hw_config,
        .get_constraint = uncore_get_constraint,
        .put_constraint = uncore_put_constraint,
};

/* IVT QPI link layer PMU: three ports, one shared match/mask register set. */
static struct intel_uncore_type ivbep_uncore_qpi = {
        .name                   = "qpi",
        .num_counters           = 4,
        .num_boxes              = 3,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCI_PMON_CTR0,
        .event_ctl              = SNBEP_PCI_PMON_CTL0,
        .event_mask             = IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &ivbep_uncore_qpi_ops,
        .format_group           = &ivbep_uncore_qpi_format_group,
};
1740
/* IVT R2PCIe ring-to-PCIe PMU (reuses SNB-EP counter constraints). */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
        .name           = "r2pcie",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .constraints    = snbep_uncore_r2pcie_constraints,
        IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IVT R3QPI ring-to-QPI PMU (reuses SNB-EP counter constraints). */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
        .name           = "r3qpi",
        .num_counters   = 3,
        .num_boxes      = 2,
        .perf_ctr_bits  = 44,
        .constraints    = snbep_uncore_r3qpi_constraints,
        IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1758
/* Indices into ivbep_pci_uncores[], referenced by the PCI ID table below. */
enum {
        IVBEP_PCI_UNCORE_HA,
        IVBEP_PCI_UNCORE_IMC,
        IVBEP_PCI_UNCORE_IRP,
        IVBEP_PCI_UNCORE_QPI,
        IVBEP_PCI_UNCORE_R2PCIE,
        IVBEP_PCI_UNCORE_R3QPI,
};

static struct intel_uncore_type *ivbep_pci_uncores[] = {
        [IVBEP_PCI_UNCORE_HA]   = &ivbep_uncore_ha,
        [IVBEP_PCI_UNCORE_IMC]  = &ivbep_uncore_imc,
        [IVBEP_PCI_UNCORE_IRP]  = &ivbep_uncore_irp,
        [IVBEP_PCI_UNCORE_QPI]  = &ivbep_uncore_qpi,
        [IVBEP_PCI_UNCORE_R2PCIE]       = &ivbep_uncore_r2pcie,
        [IVBEP_PCI_UNCORE_R3QPI]        = &ivbep_uncore_r3qpi,
        NULL,
};
1777
/*
 * PCI device IDs for IVT uncore PMU devices.  driver_data encodes the
 * (uncore type index, box index) pair via UNCORE_PCI_DEV_DATA().
 */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
        { /* Home Agent 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
        },
        { /* Home Agent 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
        },
        { /* MC0 Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
        },
        { /* MC0 Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
        },
        { /* MC0 Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
        },
        { /* MC0 Channel 4 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
        },
        { /* MC1 Channel 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
        },
        { /* MC1 Channel 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
        },
        { /* MC1 Channel 3 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
        },
        { /* MC1 Channel 4 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
        },
        { /* IRP */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
        },
        { /* QPI0 Port 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
        },
        { /* QPI0 Port 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
        },
        { /* QPI1 Port 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
        },
        { /* R2PCIe */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
        },
        { /* R3QPI0 Link 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
        },
        { /* R3QPI0 Link 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
        },
        { /* R3QPI1 Link 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
                .driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
        },
        { /* QPI Port 0 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT0_FILTER),
        },
        { /* QPI Port 1 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
                                                   SNBEP_PCI_QPI_PORT1_FILTER),
        },
        { /* end: all zeroes */ }
};
1863
/* PCI driver shell; the uncore core does the actual probing. */
static struct pci_driver ivbep_uncore_pci_driver = {
        .name           = "ivbep_uncore",
        .id_table       = ivbep_uncore_pci_ids,
};
1868
1869int ivbep_uncore_pci_init(void)
1870{
1871        int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1872        if (ret)
1873                return ret;
1874        uncore_pci_uncores = ivbep_pci_uncores;
1875        uncore_pci_driver = &ivbep_uncore_pci_driver;
1876        return 0;
1877}
1878/* end of IvyTown uncore support */
1879
1880/* KNL uncore support */
/* sysfs "format" entries and group for the KNL U-box PMU. */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        NULL,
};

static struct attribute_group knl_uncore_ubox_format_group = {
        .name = "format",
        .attrs = knl_uncore_ubox_formats_attr,
};
1895
/* KNL U-box: reuses HSW-EP register addresses with a KNL event mask. */
static struct intel_uncore_type knl_uncore_ubox = {
        .name                   = "ubox",
        .num_counters           = 2,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .fixed_ctr_bits         = 48,
        .perf_ctr               = HSWEP_U_MSR_PMON_CTR0,
        .event_ctl              = HSWEP_U_MSR_PMON_CTL0,
        .event_mask             = KNL_U_MSR_PMON_RAW_EVENT_MASK,
        .fixed_ctr              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
        .fixed_ctl              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
        .ops                    = &snbep_uncore_msr_ops,
        .format_group           = &knl_uncore_ubox_format_group,
};
1910
/* sysfs "format" entries and group for the KNL CHA PMU (incl. filters). */
static struct attribute *knl_uncore_cha_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_qor.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid4.attr,
        &format_attr_filter_link3.attr,
        &format_attr_filter_state4.attr,
        &format_attr_filter_local.attr,
        &format_attr_filter_all_op.attr,
        &format_attr_filter_nnm.attr,
        &format_attr_filter_opc3.attr,
        &format_attr_filter_nc.attr,
        &format_attr_filter_isoc.attr,
        NULL,
};

static struct attribute_group knl_uncore_cha_format_group = {
        .name = "format",
        .attrs = knl_uncore_cha_formats_attr,
};
1935
/* KNL CHA events restricted to counter 0. */
static struct event_constraint knl_uncore_cha_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
        EVENT_CONSTRAINT_END
};

/* KNL CHA extra-reg table; idx bits feed knl_cha_filter_mask(). */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
        SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
        EVENT_EXTRA_END
};
1951
1952static u64 knl_cha_filter_mask(int fields)
1953{
1954        u64 mask = 0;
1955
1956        if (fields & 0x1)
1957                mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
1958        if (fields & 0x2)
1959                mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
1960        if (fields & 0x4)
1961                mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
1962        return mask;
1963}
1964
/* Thin wrapper: shared C-box constraint logic with the KNL CHA filter mask. */
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
1970
1971static int knl_cha_hw_config(struct intel_uncore_box *box,
1972                             struct perf_event *event)
1973{
1974        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1975        struct extra_reg *er;
1976        int idx = 0;
1977
1978        for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
1979                if (er->event != (event->hw.config & er->config_mask))
1980                        continue;
1981                idx |= er->idx;
1982        }
1983
1984        if (idx) {
1985                reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
1986                            KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
1987                reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
1988
1989                reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
1990                reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
1991                reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
1992                reg1->idx = idx;
1993        }
1994        return 0;
1995}
1996
1997static void hswep_cbox_enable_event(struct intel_uncore_box *box,
1998                                    struct perf_event *event);
1999
/* KNL CHA ops: SNB-EP MSR routines + HSW-EP enable + KNL filter config. */
static struct intel_uncore_ops knl_uncore_cha_ops = {
        .init_box               = snbep_uncore_msr_init_box,
        .disable_box            = snbep_uncore_msr_disable_box,
        .enable_box             = snbep_uncore_msr_enable_box,
        .disable_event          = snbep_uncore_msr_disable_event,
        .enable_event           = hswep_cbox_enable_event,
        .read_counter           = uncore_msr_read_counter,
        .hw_config              = knl_cha_hw_config,
        .get_constraint         = knl_cha_get_constraint,
        .put_constraint         = snbep_cbox_put_constraint,
};

/* KNL caching/home agent PMU: 38 boxes at HSW-EP register addresses. */
static struct intel_uncore_type knl_uncore_cha = {
        .name                   = "cha",
        .num_counters           = 4,
        .num_boxes              = 38,
        .perf_ctr_bits          = 48,
        .event_ctl              = HSWEP_C0_MSR_PMON_CTL0,
        .perf_ctr               = HSWEP_C0_MSR_PMON_CTR0,
        .event_mask             = KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = HSWEP_C0_MSR_PMON_BOX_CTL,
        .msr_offset             = KNL_CHA_MSR_OFFSET,
        .num_shared_regs        = 1,
        .constraints            = knl_uncore_cha_constraints,
        .ops                    = &knl_uncore_cha_ops,
        .format_group           = &knl_uncore_cha_format_group,
};
2027
/* sysfs "format" entries, group and type for the KNL PCU PMU. */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
        &format_attr_event2.attr,
        &format_attr_use_occ_ctr.attr,
        &format_attr_occ_sel.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_inv.attr,
        &format_attr_thresh6.attr,
        &format_attr_occ_invert.attr,
        &format_attr_occ_edge_det.attr,
        NULL,
};

static struct attribute_group knl_uncore_pcu_format_group = {
        .name = "format",
        .attrs = knl_uncore_pcu_formats_attr,
};

static struct intel_uncore_type knl_uncore_pcu = {
        .name                   = "pcu",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .perf_ctr               = HSWEP_PCU_MSR_PMON_CTR0,
        .event_ctl              = HSWEP_PCU_MSR_PMON_CTL0,
        .event_mask             = KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = HSWEP_PCU_MSR_PMON_BOX_CTL,
        .ops                    = &snbep_uncore_msr_ops,
        .format_group           = &knl_uncore_pcu_format_group,
};
2058
/* NULL-terminated list of KNL MSR-based uncore PMU types. */
static struct intel_uncore_type *knl_msr_uncores[] = {
        &knl_uncore_ubox,
        &knl_uncore_cha,
        &knl_uncore_pcu,
        NULL,
};
2065
/* Register the KNL MSR uncores with the core uncore driver. */
void knl_uncore_cpu_init(void)
{
        uncore_msr_uncores = knl_msr_uncores;
}
2070
/*
 * Enable a KNL IMC/EDC box by writing 0 to its box control register
 * (clears all control bits; presumably this drops the freeze bit —
 * NOTE(review): confirm against the KNL uncore documentation).
 */
static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);

        pci_write_config_dword(pdev, box_ctl, 0);
}
2078
2079static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2080                                        struct perf_event *event)
2081{
2082        struct pci_dev *pdev = box->pci_dev;
2083        struct hw_perf_event *hwc = &event->hw;
2084
2085        if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2086                                                        == UNCORE_FIXED_EVENT)
2087                pci_write_config_dword(pdev, hwc->config_base,
2088                                       hwc->config | KNL_PMON_FIXED_CTL_EN);
2089        else
2090                pci_write_config_dword(pdev, hwc->config_base,
2091                                       hwc->config | SNBEP_PMON_CTL_EN);
2092}
2093
/* Shared ops for all KNL IMC/EDC PCI boxes (uclk, dclk, eclk). */
static struct intel_uncore_ops knl_uncore_imc_ops = {
        .init_box       = snbep_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = knl_uncore_imc_enable_box,
        .read_counter   = snbep_uncore_pci_read_counter,
        .enable_event   = knl_uncore_imc_enable_event,
        .disable_event  = snbep_uncore_pci_disable_event,
};
2102
/* KNL memory controller uclk-domain PMU (2 boxes). */
static struct intel_uncore_type knl_uncore_imc_uclk = {
        .name                   = "imc_uclk",
        .num_counters           = 4,
        .num_boxes              = 2,
        .perf_ctr_bits          = 48,
        .fixed_ctr_bits         = 48,
        .perf_ctr               = KNL_UCLK_MSR_PMON_CTR0_LOW,
        .event_ctl              = KNL_UCLK_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .fixed_ctr              = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
        .fixed_ctl              = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
        .box_ctl                = KNL_UCLK_MSR_PMON_BOX_CTL,
        .ops                    = &knl_uncore_imc_ops,
        .format_group           = &snbep_uncore_format_group,
};

/* KNL memory controller dclk-domain (channel) PMU (6 boxes). */
static struct intel_uncore_type knl_uncore_imc_dclk = {
        .name                   = "imc",
        .num_counters           = 4,
        .num_boxes              = 6,
        .perf_ctr_bits          = 48,
        .fixed_ctr_bits         = 48,
        .perf_ctr               = KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
        .event_ctl              = KNL_MC0_CH0_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .fixed_ctr              = KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
        .fixed_ctl              = KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
        .box_ctl                = KNL_MC0_CH0_MSR_PMON_BOX_CTL,
        .ops                    = &knl_uncore_imc_ops,
        .format_group           = &snbep_uncore_format_group,
};
2134
/* KNL EDC (MCDRAM) uclk-domain PMU (8 boxes). */
static struct intel_uncore_type knl_uncore_edc_uclk = {
        .name                   = "edc_uclk",
        .num_counters           = 4,
        .num_boxes              = 8,
        .perf_ctr_bits          = 48,
        .fixed_ctr_bits         = 48,
        .perf_ctr               = KNL_UCLK_MSR_PMON_CTR0_LOW,
        .event_ctl              = KNL_UCLK_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .fixed_ctr              = KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
        .fixed_ctl              = KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
        .box_ctl                = KNL_UCLK_MSR_PMON_BOX_CTL,
        .ops                    = &knl_uncore_imc_ops,
        .format_group           = &snbep_uncore_format_group,
};

/* KNL EDC (MCDRAM) eclk-domain PMU (8 boxes). */
static struct intel_uncore_type knl_uncore_edc_eclk = {
        .name                   = "edc_eclk",
        .num_counters           = 4,
        .num_boxes              = 8,
        .perf_ctr_bits          = 48,
        .fixed_ctr_bits         = 48,
        .perf_ctr               = KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
        .event_ctl              = KNL_EDC0_ECLK_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .fixed_ctr              = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
        .fixed_ctl              = KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
        .box_ctl                = KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
        .ops                    = &knl_uncore_imc_ops,
        .format_group           = &snbep_uncore_format_group,
};
2166
/* KNL M2PCIe: event 0x23 may only use counters 0-1. */
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        EVENT_CONSTRAINT_END
};

/* KNL mesh-to-PCIe PMU, standard SNB-EP PCI register layout. */
static struct intel_uncore_type knl_uncore_m2pcie = {
        .name           = "m2pcie",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 48,
        .constraints    = knl_uncore_m2pcie_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2180
/* sysfs format attributes exposed for the KNL IRP PMU (includes "qor"). */
static struct attribute *knl_uncore_irp_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
2190
/* sysfs "format" directory for the KNL IRP PMU. */
static struct attribute_group knl_uncore_irp_format_group = {
	.name = "format",
	.attrs = knl_uncore_irp_formats_attr,
};
2195
/* KNL IRP PMON: 2 counters in one PCI box, KNL-specific event mask/box ctl. */
static struct intel_uncore_type knl_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= KNL_IRP_PCI_PMON_BOX_CTL,
	.ops			= &snbep_uncore_pci_ops,
	.format_group		= &knl_uncore_irp_format_group,
};
2208
/* Indices into knl_pci_uncores[]; also encoded in knl_uncore_pci_ids[]. */
enum {
	KNL_PCI_UNCORE_MC_UCLK,
	KNL_PCI_UNCORE_MC_DCLK,
	KNL_PCI_UNCORE_EDC_UCLK,
	KNL_PCI_UNCORE_EDC_ECLK,
	KNL_PCI_UNCORE_M2PCIE,
	KNL_PCI_UNCORE_IRP,
};
2217
/* PCI-based KNL uncore types, ordered by the KNL_PCI_UNCORE_* enum. */
static struct intel_uncore_type *knl_pci_uncores[] = {
	[KNL_PCI_UNCORE_MC_UCLK]	= &knl_uncore_imc_uclk,
	[KNL_PCI_UNCORE_MC_DCLK]	= &knl_uncore_imc_dclk,
	[KNL_PCI_UNCORE_EDC_UCLK]	= &knl_uncore_edc_uclk,
	[KNL_PCI_UNCORE_EDC_ECLK]	= &knl_uncore_edc_eclk,
	[KNL_PCI_UNCORE_M2PCIE]		= &knl_uncore_m2pcie,
	[KNL_PCI_UNCORE_IRP]		= &knl_uncore_irp,
	NULL,
};
2227
2228/*
2229 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
2230 * device type. prior to KNL, each instance of a PMU device type had a unique
2231 * device ID.
2232 *
2233 *      PCI Device ID   Uncore PMU Devices
2234 *      ----------------------------------
2235 *      0x7841          MC0 UClk, MC1 UClk
2236 *      0x7843          MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2237 *                      MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2238 *      0x7833          EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2239 *                      EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2240 *      0x7835          EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2241 *                      EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2242 *      0x7817          M2PCIe
2243 *      0x7814          IRP
2244*/
2245
/*
 * KNL PCI ID table.  Because device IDs are shared between PMU instances,
 * each entry pins the exact (device, function) location via
 * UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx).
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
	},
	{ /* MC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
	},
	{ /* MC0 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
	},
	{ /* MC0 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
	},
	{ /* MC1 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
	},
	{ /* MC1 DClk CH 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
	},
	{ /* MC1 DClk CH 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
	},
	{ /* EDC1 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
	},
	{ /* EDC2 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
	},
	{ /* EDC3 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
	},
	{ /* EDC4 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
	},
	{ /* EDC5 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
	},
	{ /* EDC6 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
	},
	{ /* EDC7 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
	},
	{ /* EDC1 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
	},
	{ /* EDC2 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
	},
	{ /* EDC3 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
	},
	{ /* EDC4 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
	},
	{ /* EDC5 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
	},
	{ /* EDC6 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
	},
	{ /* EDC7 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
	},
	{ /* end: all zeroes */ }
};
2353
/* PCI driver shell for KNL uncore; probe/remove are filled in by uncore core. */
static struct pci_driver knl_uncore_pci_driver = {
	.name		= "knl_uncore",
	.id_table	= knl_uncore_pci_ids,
};
2358
2359int knl_uncore_pci_init(void)
2360{
2361        int ret;
2362
2363        /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2364        ret = snb_pci2phy_map_init(0x7814); /* IRP */
2365        if (ret)
2366                return ret;
2367        ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2368        if (ret)
2369                return ret;
2370        uncore_pci_uncores = knl_pci_uncores;
2371        uncore_pci_driver = &knl_uncore_pci_driver;
2372        return 0;
2373}
2374
2375/* end of KNL uncore support */
2376
2377/* Haswell-EP uncore support */
/* sysfs format attributes for the HSW-EP Ubox (5-bit threshold, tid/cid filters). */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_filter_tid2.attr,
	&format_attr_filter_cid.attr,
	NULL,
};
2388
/* sysfs "format" directory for the HSW-EP Ubox PMU. */
static struct attribute_group hswep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_ubox_formats_attr,
};
2393
2394static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2395{
2396        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2397        reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2398        reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2399        reg1->idx = 0;
2400        return 0;
2401}
2402
/* Ubox ops: common SNB-EP MSR ops plus filter config and shared-reg constraints. */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_ubox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};
2409
/*
 * HSW-EP Ubox PMON: one box with two 44-bit general counters, a 48-bit
 * fixed UCLK counter, and one shared filter register.
 */
static struct intel_uncore_type hswep_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 44,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_ubox_ops,
	.format_group		= &hswep_uncore_ubox_format_group,
};
2425
/* sysfs format attributes for the HSW-EP Cbox, including all filter fields. */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid3.attr,
	&format_attr_filter_link2.attr,
	&format_attr_filter_state3.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
2442
/* sysfs "format" directory for the HSW-EP Cbox PMU. */
static struct attribute_group hswep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_cbox_formats_attr,
};
2447
/* Cbox events restricted to specific counters (bitmask of allowed counters). */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
2458
2459static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
2460        SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
2461                                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
2462        SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
2463        SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
2464        SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
2465        SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
2466        SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
2467        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
2468        SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
2469        SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
2470        SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
2471        SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
2472        SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
2473        SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
2474        SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
2475        SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
2476        SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
2477        SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
2478        SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
2479        SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
2480        SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
2481        SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
2482        SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
2483        SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
2484        SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
2485        SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
2486        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
2487        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
2488        SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
2489        SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
2490        SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
2491        SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
2492        SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
2493        SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
2494        SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
2495        SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
2496        SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
2497        SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
2498        EVENT_EXTRA_END
2499};
2500
2501static u64 hswep_cbox_filter_mask(int fields)
2502{
2503        u64 mask = 0;
2504        if (fields & 0x1)
2505                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2506        if (fields & 0x2)
2507                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2508        if (fields & 0x4)
2509                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2510        if (fields & 0x8)
2511                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2512        if (fields & 0x10) {
2513                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2514                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2515                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2516                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2517        }
2518        return mask;
2519}
2520
/* Delegate to the common SNB-EP Cbox constraint logic with the HSW-EP filter mask. */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2526
2527static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2528{
2529        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2530        struct extra_reg *er;
2531        int idx = 0;
2532
2533        for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2534                if (er->event != (event->hw.config & er->config_mask))
2535                        continue;
2536                idx |= er->idx;
2537        }
2538
2539        if (idx) {
2540                reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2541                            HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2542                reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2543                reg1->idx = idx;
2544        }
2545        return 0;
2546}
2547
/*
 * Enable a Cbox event: first program the box filter (split across two
 * consecutive 32-bit MSRs), then set the event control with the enable bit.
 */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				  struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		/* Merged filter value from the box's shared register 0. */
		u64 filter = uncore_shared_reg_config(box, 0);
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 1, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
2562
/* Cbox ops: SNB-EP MSR ops with HSW-EP filter-aware enable/config/constraints. */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= hswep_cbox_hw_config,
	.get_constraint		= hswep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2574
/*
 * HSW-EP Cbox PMON: up to 18 boxes (clamped to the core count in
 * hswep_uncore_cpu_init()), 4 counters each, one shared filter register.
 */
static struct intel_uncore_type hswep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 18,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= hswep_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
2590
2591/*
2592 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2593 */
2594static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
2595{
2596        unsigned msr = uncore_msr_box_ctl(box);
2597
2598        if (msr) {
2599                u64 init = SNBEP_PMON_BOX_CTL_INT;
2600                u64 flags = 0;
2601                int i;
2602
2603                for_each_set_bit(i, (unsigned long *)&init, 64) {
2604                        flags |= (1ULL << i);
2605                        wrmsrl(msr, flags);
2606                }
2607        }
2608}
2609
/* SBOX ops: common MSR ops minus init_box, which is the bit-by-bit variant. */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.init_box		= hswep_uncore_sbox_msr_init_box
};
2614
/* sysfs format attributes for the HSW-EP SBOX PMU. */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
2624
/* sysfs "format" directory for the HSW-EP SBOX PMU. */
static struct attribute_group hswep_uncore_sbox_format_group = {
	.name = "format",
	.attrs = hswep_uncore_sbox_formats_attr,
};
2629
/*
 * HSW-EP SBOX PMON: 4 boxes by default (reduced to 2 on small-core parts by
 * hswep_uncore_cpu_init()), 4 counters of 44 bits each.
 */
static struct intel_uncore_type hswep_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 4,
	.perf_ctr_bits		= 44,
	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
	.ops			= &hswep_uncore_sbox_msr_ops,
	.format_group		= &hswep_uncore_sbox_format_group,
};
2643
/*
 * PCU events with select codes 0xb-0xe take an extra filter value from
 * config1; route it into the event's extra register.  Always succeeds.
 */
static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

	if (ev_sel >= 0xb && ev_sel <= 0xe) {
		reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
		reg1->idx = ev_sel - 0xb;
		/*
		 * NOTE(review): the SNB-EP equivalent selects a whole filter
		 * byte by shifting (idx * 8); here the mask is shifted by a
		 * single bit per idx — confirm against the HSX uncore PMON
		 * reference that this is intentional.
		 */
		reg1->config = event->attr.config1 & (0xff << reg1->idx);
	}
	return 0;
}
2657
/* PCU ops: common SNB-EP MSR ops plus PCU filter config and constraints. */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
2664
/* HSW-EP PCU PMON: one box, 4 counters, one shared filter register. */
static struct intel_uncore_type hswep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &hswep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
2678
/* MSR-based HSW-EP uncore types, registered by hswep_uncore_cpu_init(). */
static struct intel_uncore_type *hswep_msr_uncores[] = {
	&hswep_uncore_ubox,
	&hswep_uncore_cbox,
	&hswep_uncore_sbox,
	&hswep_uncore_pcu,
	NULL,
};
2686
/*
 * Finalize and register the HSW-EP MSR uncore types: clamp the Cbox count
 * to the actual core count and probe for the reduced two-SBOX variant.
 */
void hswep_uncore_cpu_init(void)
{
	/* Logical package id of physical package 0. */
	int pkg = topology_phys_to_logical_pkg(0);

	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

	/* Detect 6-8 core systems with only two SBOXes */
	if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
		u32 capid4;

		/*
		 * Read CAPID4 (config offset 0x94) from the PCU device;
		 * bits [7:6] == 0 selects the two-SBOX configuration.
		 * NOTE(review): the pci_read_config_dword() return value is
		 * not checked, so capid4 is used as read.
		 */
		pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
				      0x94, &capid4);
		if (((capid4 >> 6) & 0x3) == 0)
			hswep_uncore_sbox.num_boxes = 2;
	}

	uncore_msr_uncores = hswep_msr_uncores;
}
2706
/* HSW-EP Home Agent PMON: two PCI boxes with the common SNB-EP PCI layout. */
static struct intel_uncore_type hswep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2714
/*
 * Named IMC events.  The CAS count scale 6.103515625e-5 equals 64 / 2^20,
 * presumably converting 64-byte transactions to MiB — see the "MiB" unit.
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
2725
/* HSW-EP IMC PMON: 8 channel boxes, 4 counters plus a fixed DCLK counter each. */
static struct intel_uncore_type hswep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2737
/* PCI config offsets of the four IRP counters (each 8 bytes, read as 2 dwords). */
static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2739
2740static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2741{
2742        struct pci_dev *pdev = box->pci_dev;
2743        struct hw_perf_event *hwc = &event->hw;
2744        u64 count = 0;
2745
2746        pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2747        pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2748
2749        return count;
2750}
2751
/* IRP ops: SNB-EP PCI box control with IVB-EP event enable/disable and a custom read. */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= hswep_uncore_irp_read_counter,
};
2760
/* HSW-EP IRP PMON: one PCI box; counters are accessed via hswep_uncore_irp_ops. */
static struct intel_uncore_type hswep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
2771
/* HSW-EP QPI PMON: 3 port boxes with match/mask shared registers. */
static struct intel_uncore_type hswep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
2785
/* R2PCIe events restricted to specific counters (bitmask of allowed counters). */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	EVENT_CONSTRAINT_END
};
2807
/* HSW-EP R2PCIe PMON: single PCI box using the common SNB-EP PCI layout. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= hswep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2816
/* R3QPI events restricted to specific counters (bitmask of allowed counters). */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
2853
/* HSW-EP R3QPI PMON: 3 boxes, 3 counters of 44 bits each, SNB-EP PCI layout. */
static struct intel_uncore_type hswep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 44,
	.constraints	= hswep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2862
/* Indices into hswep_pci_uncores[]; also encoded in hswep_uncore_pci_ids[]. */
enum {
	HSWEP_PCI_UNCORE_HA,
	HSWEP_PCI_UNCORE_IMC,
	HSWEP_PCI_UNCORE_IRP,
	HSWEP_PCI_UNCORE_QPI,
	HSWEP_PCI_UNCORE_R2PCIE,
	HSWEP_PCI_UNCORE_R3QPI,
};
2871
/* PCI-based HSW-EP uncore types, ordered by the HSWEP_PCI_UNCORE_* enum. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
	[HSWEP_PCI_UNCORE_HA]	= &hswep_uncore_ha,
	[HSWEP_PCI_UNCORE_IMC]	= &hswep_uncore_imc,
	[HSWEP_PCI_UNCORE_IRP]	= &hswep_uncore_irp,
	[HSWEP_PCI_UNCORE_QPI]	= &hswep_uncore_qpi,
	[HSWEP_PCI_UNCORE_R2PCIE]	= &hswep_uncore_r2pcie,
	[HSWEP_PCI_UNCORE_R3QPI]	= &hswep_uncore_r3qpi,
	NULL,
};
2881
/*
 * PCI device IDs for the Haswell-EP uncore PMU units.  Each entry maps a
 * device ID to (uncore type index, box index) via UNCORE_PCI_DEV_DATA.
 * UNCORE_EXTRA_PCI_DEV entries are auxiliary devices (QPI port filters,
 * PCU.3) that other boxes look up, not PMUs in their own right.
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};
2972
/*
 * Stub PCI driver for HSW-EP: only the id_table matters.
 * NOTE(review): no .probe/.remove here -- presumably the common uncore
 * PCI code walks id_table itself; confirm against uncore.c.
 */
static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
2977
2978int hswep_uncore_pci_init(void)
2979{
2980        int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2981        if (ret)
2982                return ret;
2983        uncore_pci_uncores = hswep_pci_uncores;
2984        uncore_pci_driver = &hswep_uncore_pci_driver;
2985        return 0;
2986}
2987/* end of Haswell-EP uncore support */
2988
2989/* BDX uncore support */
2990
/* BDX Ubox: one box, two 48-bit counters plus a fixed UCLK counter. */
static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3006
/* BDX Cbox events restricted to a subset of the four counters. */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
3014
/*
 * BDX Cbox (LLC slice) PMU.  num_boxes is the maximum (24); it is
 * clamped to the actual core count in bdx_uncore_cpu_init().
 */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
3030
/* NULL-terminated list of BDX MSR-based uncore types. */
static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	NULL,
};
3037
3038void bdx_uncore_cpu_init(void)
3039{
3040        if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3041                bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3042        uncore_msr_uncores = bdx_msr_uncores;
3043}
3044
/* BDX Home Agent PMU; register layout comes from the SNB-EP common init. */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3052
/* BDX IMC channel PMU: 8 channels, each with a fixed DCLK counter. */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3064
/*
 * BDX IRP PMU.  No .perf_ctr/.event_ctl here -- presumably
 * hswep_uncore_irp_ops addresses the IRP registers itself; confirm
 * against the ops definition earlier in this file.
 */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
3075
/* BDX QPI link-layer PMU; the shared reg holds the match/mask filters. */
static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
3089
/* BDX R2PCIe events restricted to specific counters. */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};
3102
/* BDX R2PCIe (ring-to-PCIe) PMU. */
static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3111
/* BDX R3QPI events restricted to specific counters. */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
3145
/* BDX R3QPI (ring-to-QPI) PMU: only 3 counters per box. */
static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3154
/* Indices into bdx_pci_uncores[], encoded into pci_device_id driver_data. */
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};
3163
/* NULL-terminated list of BDX PCI-based uncore types, indexed as above. */
static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};
3173
/*
 * PCI device IDs for the Broadwell-EP/EX uncore PMU units.
 * NOTE(review): the QPI port filter entries use bare indices 0/1/2 while
 * the HSW-EP table spells them SNBEP_PCI_QPI_PORT0/1_FILTER.  These must
 * match the indices the QPI hw_config code looks up -- confirm, and
 * consider using the named constants here for consistency.
 */
static const struct pci_device_id bdx_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1),
	},
	{ /* QPI Port 2 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
	},
	{ /* end: all zeroes */ }
};
3261
/* Stub PCI driver for BDX: only the id_table is consumed. */
static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};
3266
3267int bdx_uncore_pci_init(void)
3268{
3269        int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3270
3271        if (ret)
3272                return ret;
3273        uncore_pci_uncores = bdx_pci_uncores;
3274        uncore_pci_driver = &bdx_uncore_pci_driver;
3275        return 0;
3276}
3277
3278/* end of BDX uncore support */
3279
3280/* SKX uncore support */
3281
/* SKX Ubox: same register layout as HSW-EP, but no shared filter reg. */
static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3296
/* sysfs format attributes exposed for SKX CHA events and filters. */
static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
3319
/* "format" sysfs group for the SKX CHA PMU. */
static struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};
3324
/* SKX CHA events restricted to counter 0. */
static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};
3330
3331static struct extra_reg skx_uncore_cha_extra_regs[] = {
3332        SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
3333        SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
3334        SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
3335        SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
3336        SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
3337        SNBEP_CBO_EVENT_EXTRA_REG(0x8134, 0xffff, 0x4),
3338};
3339
3340static u64 skx_cha_filter_mask(int fields)
3341{
3342        u64 mask = 0;
3343
3344        if (fields & 0x1)
3345                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3346        if (fields & 0x2)
3347                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3348        if (fields & 0x4)
3349                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3350        return mask;
3351}
3352
/* Thin wrapper: reuse the common Cbox constraint logic with SKX's filter mask. */
static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}
3358
/*
 * Set up the CHA filter register for events that require one.
 *
 * ORs together the filter-field indices of every extra_regs entry whose
 * event/umask matches this event; if any matched, points the event's
 * extra_reg at this box's FILTER0 MSR and restricts attr.config1 to the
 * fields those entries allow.  Always returns 0.
 *
 * NOTE(review): the scan terminates on er->msr == 0, so it relies on
 * skx_uncore_cha_extra_regs ending with EVENT_EXTRA_END -- verify the
 * table carries that sentinel, otherwise this reads out of bounds.
 */
static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
3379
/* SKX CHA ops: IVB-EP style init (no FRZ_EN), HSW-EP enable with filters. */
static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
3392
/*
 * SKX CHA PMU.  num_boxes is intentionally absent here: it is filled in
 * at runtime by skx_uncore_cpu_init() from skx_count_chabox().
 * NOTE(review): event_mask reuses HSWEP_S_MSR_PMON_RAW_EVENT_MASK --
 * confirm it covers SKX CHA's tid_en/umask layout.
 */
static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};
3407
/* sysfs format attributes for SKX IIO events (9-bit threshold, ch/fc masks). */
static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};
3418
/* "format" sysfs group for the SKX IIO PMU. */
static struct attribute_group skx_uncore_iio_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_formats_attr,
};
3423
/* SKX IIO events restricted to specific counters. */
static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
	EVENT_CONSTRAINT_END
};
3433
3434static void skx_iio_enable_event(struct intel_uncore_box *box,
3435                                 struct perf_event *event)
3436{
3437        struct hw_perf_event *hwc = &event->hw;
3438
3439        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3440}
3441
/* SKX IIO ops: IVB-EP style box init, plain enable without filters. */
static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};
3450
3451static struct intel_uncore_type skx_uncore_iio = {
3452        .name                   = "iio",
3453        .num_counters           = 4,
3454        .num_boxes              = 5,
3455        .perf_ctr_bits          = 48,
3456        .event_ctl              = SKX_IIO0_MSR_PMON_CTL0,
3457        .perf_ctr               = SKX_IIO0_MSR_PMON_CTR0,
3458        .event_mask             = SKX_IIO_PMON_RAW_EVENT_MASK,
3459        .event_mask_ext         = SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
3460        .box_ctl                = SKX_IIO0_MSR_PMON_BOX_CTL,
3461        .msr_offset             = SKX_IIO_MSR_OFFSET,
3462        .constraints            = skx_uncore_iio_constraints,
3463        .ops                    = &skx_uncore_iio_ops,
3464        .format_group           = &skx_uncore_iio_format_group,
3465};
3466
/* Generic SKX sysfs format attributes (event/umask/edge/inv/thresh8). */
static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
3475
/* Generic "format" sysfs group shared by several SKX PMUs. */
static struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};
3480
3481static struct intel_uncore_type skx_uncore_irp = {
3482        .name                   = "irp",
3483        .num_counters           = 2,
3484        .num_boxes              = 5,
3485        .perf_ctr_bits          = 48,
3486        .event_ctl              = SKX_IRP0_MSR_PMON_CTL0,
3487        .perf_ctr               = SKX_IRP0_MSR_PMON_CTR0,
3488        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
3489        .box_ctl                = SKX_IRP0_MSR_PMON_BOX_CTL,
3490        .msr_offset             = SKX_IRP_MSR_OFFSET,
3491        .ops                    = &skx_uncore_iio_ops,
3492        .format_group           = &skx_uncore_format_group,
3493};
3494
/* SKX PCU ops: common IVB-EP MSR ops plus HSW-EP filter handling. */
static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
3501
/*
 * SKX PCU (power control unit) PMU.
 * NOTE(review): this reuses the SNB-EP PCU event mask and format group;
 * confirm the SKX PCU filter/occupancy bit layout actually matches.
 */
static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};
3515
/* NULL-terminated list of SKX MSR-based uncore types. */
static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};
3524
/*
 * Count the CHA boxes in one package by enumerating CHA PCI devices
 * (device id 0x208d) and stopping at the first one on a different bus,
 * i.e. the first device belonging to another package.
 *
 * pci_get_device() drops the reference on the device passed in and
 * takes one on the device returned, so only the final (mismatched or
 * NULL) device needs the explicit pci_dev_put(); put(NULL) is a no-op.
 * 'bus' is assigned on the first iteration (count == 0) before it is
 * ever compared, so it is never read uninitialized.
 */
static int skx_count_chabox(void)
{
	struct pci_dev *chabox_dev = NULL;
	int bus, count = 0;

	while (1) {
		chabox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x208d, chabox_dev);
		if (!chabox_dev)
			break;
		if (count == 0)
			bus = chabox_dev->bus->number;
		if (bus != chabox_dev->bus->number)
			break;
		count++;
	}

	pci_dev_put(chabox_dev);
	return count;
}
3544
/* Hook up the SKX MSR uncores; CHA box count is probed from PCI space. */
void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}
3550
/* SKX IMC channel PMU: 6 channels, fixed DCLK counter, HSW-EP event descs. */
static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3567
/* sysfs format attributes for SKX UPI events (extended event/umask fields). */
static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
3576
/* "format" sysfs group for the SKX UPI PMU. */
static struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};
3581
3582static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
3583{
3584        struct pci_dev *pdev = box->pci_dev;
3585
3586        __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
3587        pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
3588}
3589
/* SKX UPI PCI ops: custom init_box, otherwise standard SNB-EP PCI ops. */
static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
3598
/* SKX UPI link PMU (UPI replaces QPI on Skylake-SP): 3 links. */
static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters   = 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SKX_PMON_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
};
3612
3613static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
3614{
3615        struct pci_dev *pdev = box->pci_dev;
3616
3617        __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
3618        pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
3619}
3620
/* SKX M2M PCI ops: custom init_box, otherwise standard SNB-EP PCI ops. */
static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
3629
/* SKX M2M (mesh-to-memory) PMU: one per memory controller. */
static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3642
/* SKX M2PCIe events restricted to counters 0-1. */
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
3647
/* SKX M2PCIe (mesh-to-PCIe) PMU. */
static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3661
/* SKX M3UPI events restricted to specific counters. */
static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};
3673
/* SKX M3UPI (mesh-to-UPI) PMU: 3 counters per box, one box per link. */
static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3687
/* Indices into skx_pci_uncores[], encoded into pci_device_id driver_data. */
enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};
3695
/* NULL-terminated list of SKX PCI-based uncore types, indexed as above. */
static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE]	= &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};
3704
3705static const struct pci_device_id skx_uncore_pci_ids[] = {
3706        { /* MC0 Channel 0 */
3707                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
3708                .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
3709        },
3710        { /* MC0 Channel 1 */
3711                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
3712                .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
3713        },
3714        { /* MC0 Channel 2 */
3715                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
3716                .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
3717        },
3718        { /* MC1 Channel 0 */
3719                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
3720                .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
3721        },
3722        { /* MC1 Channel 1 */
3723                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
3724                .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
3725        },
3726        { /* MC1 Channel 2 */
3727                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
3728                .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
3729        },
3730        { /* M2M0 */
3731                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
3732                .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
3733        },
3734        { /* M2M1 */
3735                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
3736                .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
3737        },
3738        { /* UPI0 Link 0 */
3739                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
3740                .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
3741        },
3742        { /* UPI0 Link 1 */
3743                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
3744                .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
3745        },
3746        { /* UPI1 Link 2 */
3747                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
3748                .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
3749        },
3750        { /* M2PCIe 0 */
3751                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3752                .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
3753        },
3754        { /* M2PCIe 1 */
3755                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3756                .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
3757        },
3758        { /* M2PCIe 2 */
3759                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3760                .driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
3761        },
3762        { /* M2PCIe 3 */
3763                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3764                .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
3765        },
3766        { /* M3UPI0 Link 0 */
3767                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
3768                .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
3769        },
3770        { /* M3UPI0 Link 1 */
3771                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
3772                .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
3773        },
3774        { /* M3UPI1 Link 2 */
3775                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
3776                .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
3777        },
3778        { /* end: all zeroes */ }
3779};
3780
3781
/*
 * PCI driver stub for the SKX uncore devices.  Only the id_table is set
 * here; the remaining callbacks are presumably filled in by the common
 * uncore PCI code after skx_uncore_pci_init() registers this driver —
 * NOTE(review): confirm against uncore.c.
 */
static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};
3786
3787int skx_uncore_pci_init(void)
3788{
3789        /* need to double check pci address */
3790        int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
3791
3792        if (ret)
3793                return ret;
3794
3795        uncore_pci_uncores = skx_pci_uncores;
3796        uncore_pci_driver = &skx_uncore_pci_driver;
3797        return 0;
3798}
3799
3800/* end of SKX uncore support */
3801