/* linux/arch/x86/events/intel/uncore_snbep.c */
   1/* SandyBridge-EP/IvyTown uncore support */
   2#include "uncore.h"
   3
/* SNB-EP pci bus to socket mapping */
#define SNBEP_CPUNODEID                 0x40
#define SNBEP_GIDNIDMAP                 0x54

/* SNB-EP Box level control */
#define SNBEP_PMON_BOX_CTL_RST_CTRL     (1 << 0)
#define SNBEP_PMON_BOX_CTL_RST_CTRS     (1 << 1)
#define SNBEP_PMON_BOX_CTL_FRZ          (1 << 8)
#define SNBEP_PMON_BOX_CTL_FRZ_EN       (1 << 16)
/* Box init value: reset control logic and counters, enable freeze support */
#define SNBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
                                         SNBEP_PMON_BOX_CTL_RST_CTRS | \
                                         SNBEP_PMON_BOX_CTL_FRZ_EN)
/* SNB-EP event control */
#define SNBEP_PMON_CTL_EV_SEL_MASK      0x000000ff
#define SNBEP_PMON_CTL_UMASK_MASK       0x0000ff00
#define SNBEP_PMON_CTL_RST              (1 << 17)
#define SNBEP_PMON_CTL_EDGE_DET         (1 << 18)
#define SNBEP_PMON_CTL_EV_SEL_EXT       (1 << 21)
#define SNBEP_PMON_CTL_EN               (1 << 22)
#define SNBEP_PMON_CTL_INVERT           (1 << 23)
/* NOTE(review): "TRESH" (threshold) is the historical spelling; kept as-is */
#define SNBEP_PMON_CTL_TRESH_MASK       0xff000000
/* Valid raw-config bits for a generic SNB-EP uncore event */
#define SNBEP_PMON_RAW_EVENT_MASK       (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                         SNBEP_PMON_CTL_UMASK_MASK | \
                                         SNBEP_PMON_CTL_EDGE_DET | \
                                         SNBEP_PMON_CTL_INVERT | \
                                         SNBEP_PMON_CTL_TRESH_MASK)

/* SNB-EP Ubox event control (narrower 5-bit threshold field) */
#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK         0x1f000000
#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK         \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PMON_CTL_UMASK_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)

#define SNBEP_CBO_PMON_CTL_TID_EN               (1 << 19)
#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK       (SNBEP_PMON_RAW_EVENT_MASK | \
                                                 SNBEP_CBO_PMON_CTL_TID_EN)

/* SNB-EP PCU event control */
#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK     0x0000c000
#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK       0x1f000000
#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT       (1 << 30)
#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET     (1 << 31)
#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)

/* QPI events additionally allow the extended event-select bit (bit 21) */
#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
                                (SNBEP_PMON_RAW_EVENT_MASK | \
                                 SNBEP_PMON_CTL_EV_SEL_EXT)

/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL                  0xf4
#define SNBEP_PCI_PMON_CTL0                     0xd8
/* SNB-EP pci counter register */
#define SNBEP_PCI_PMON_CTR0                     0xa0

/* SNB-EP home agent register */
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0        0x40
#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1        0x44
#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH       0x48
/* SNB-EP memory controller register */
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL         0xf0
#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR         0xd0
/* SNB-EP QPI register */
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0          0x228
#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1          0x22c
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0           0x238
#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1           0x23c

/* SNB-EP Ubox register */
#define SNBEP_U_MSR_PMON_CTR0                   0xc16
#define SNBEP_U_MSR_PMON_CTL0                   0xc10

#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL         0xc08
#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR         0xc09

/* SNB-EP Cbo register (per-box MSRs stride by SNBEP_CBO_MSR_OFFSET) */
#define SNBEP_C0_MSR_PMON_CTR0                  0xd16
#define SNBEP_C0_MSR_PMON_CTL0                  0xd10
#define SNBEP_C0_MSR_PMON_BOX_CTL               0xd04
#define SNBEP_C0_MSR_PMON_BOX_FILTER            0xd14
#define SNBEP_CBO_MSR_OFFSET                    0x20

/* Cbox filter MSR field masks: thread-id, node-id, LLC state, opcode */
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_TID       0x1f
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_NID       0x3fc00
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE     0x7c0000
#define SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC       0xff800000

/* Initializer for an extra_reg entry that targets the Cbox filter MSR */
#define SNBEP_CBO_EVENT_EXTRA_REG(e, m, i) {    \
        .event = (e),                           \
        .msr = SNBEP_C0_MSR_PMON_BOX_FILTER,    \
        .config_mask = (m),                     \
        .idx = (i)                              \
}

/* SNB-EP PCU register */
#define SNBEP_PCU_MSR_PMON_CTR0                 0xc36
#define SNBEP_PCU_MSR_PMON_CTL0                 0xc30
#define SNBEP_PCU_MSR_PMON_BOX_CTL              0xc24
#define SNBEP_PCU_MSR_PMON_BOX_FILTER           0xc34
#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK      0xffffffff
#define SNBEP_PCU_MSR_CORE_C3_CTR               0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR               0x3fd

/* IVBEP event control (no FRZ_EN in the init value, no INVERT in the mask) */
#define IVBEP_PMON_BOX_CTL_INT          (SNBEP_PMON_BOX_CTL_RST_CTRL | \
                                         SNBEP_PMON_BOX_CTL_RST_CTRS)
#define IVBEP_PMON_RAW_EVENT_MASK               (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                         SNBEP_PMON_CTL_UMASK_MASK | \
                                         SNBEP_PMON_CTL_EDGE_DET | \
                                         SNBEP_PMON_CTL_TRESH_MASK)
/* IVBEP Ubox */
#define IVBEP_U_MSR_PMON_GLOBAL_CTL             0xc00
#define IVBEP_U_PMON_GLOBAL_FRZ_ALL             (1 << 31)
#define IVBEP_U_PMON_GLOBAL_UNFRZ_ALL           (1 << 29)

#define IVBEP_U_MSR_PMON_RAW_EVENT_MASK \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PMON_CTL_UMASK_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
/* IVBEP Cbo */
#define IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK               (IVBEP_PMON_RAW_EVENT_MASK | \
                                                 SNBEP_CBO_PMON_CTL_TID_EN)

/* IVBEP Cbox filter fields (64-bit filter register layout) */
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x1fULL << 0)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 5)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x3fULL << 17)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
#define IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)

/* IVBEP home agent */
#define IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST         (1 << 16)
#define IVBEP_HA_PCI_PMON_RAW_EVENT_MASK                \
                                (IVBEP_PMON_RAW_EVENT_MASK | \
                                 IVBEP_HA_PCI_PMON_CTL_Q_OCC_RST)
/* IVBEP PCU */
#define IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK       \
                                (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
/* IVBEP QPI */
#define IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK       \
                                (IVBEP_PMON_RAW_EVENT_MASK | \
                                 SNBEP_PMON_CTL_EV_SEL_EXT)

/* Extract field number 'i' of width 'n' bits from 'x' */
#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
                                ((1ULL << (n)) - 1)))
 166
/* Haswell-EP Ubox */
#define HSWEP_U_MSR_PMON_CTR0                   0x709
#define HSWEP_U_MSR_PMON_CTL0                   0x705
#define HSWEP_U_MSR_PMON_FILTER                 0x707

#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL         0x703
#define HSWEP_U_MSR_PMON_UCLK_FIXED_CTR         0x704

/* Ubox filter fields: thread id (1 bit) + core id (5 bits) */
#define HSWEP_U_MSR_PMON_BOX_FILTER_TID         (0x1 << 0)
#define HSWEP_U_MSR_PMON_BOX_FILTER_CID         (0x1fULL << 1)
#define HSWEP_U_MSR_PMON_BOX_FILTER_MASK \
                                        (HSWEP_U_MSR_PMON_BOX_FILTER_TID | \
                                         HSWEP_U_MSR_PMON_BOX_FILTER_CID)

/* Haswell-EP CBo (per-box MSRs stride by HSWEP_CBO_MSR_OFFSET) */
#define HSWEP_C0_MSR_PMON_CTR0                  0xe08
#define HSWEP_C0_MSR_PMON_CTL0                  0xe01
#define HSWEP_C0_MSR_PMON_BOX_CTL                       0xe00
#define HSWEP_C0_MSR_PMON_BOX_FILTER0           0xe05
#define HSWEP_CBO_MSR_OFFSET                    0x10


/* Haswell-EP Cbox filter fields (64-bit layout) */
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_TID               (0x3fULL << 0)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK      (0xfULL << 6)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE     (0x7fULL << 17)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NID               (0xffffULL << 32)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC               (0x1ffULL << 52)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_C6                (0x1ULL << 61)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_NC                (0x1ULL << 62)
#define HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC      (0x1ULL << 63)


/* Haswell-EP Sbox */
#define HSWEP_S0_MSR_PMON_CTR0                  0x726
#define HSWEP_S0_MSR_PMON_CTL0                  0x721
#define HSWEP_S0_MSR_PMON_BOX_CTL                       0x720
#define HSWEP_SBOX_MSR_OFFSET                   0xa
#define HSWEP_S_MSR_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
                                                 SNBEP_CBO_PMON_CTL_TID_EN)

/* Haswell-EP PCU */
#define HSWEP_PCU_MSR_PMON_CTR0                 0x717
#define HSWEP_PCU_MSR_PMON_CTL0                 0x711
#define HSWEP_PCU_MSR_PMON_BOX_CTL              0x710
#define HSWEP_PCU_MSR_PMON_BOX_FILTER           0x715

/* KNL Ubox */
#define KNL_U_MSR_PMON_RAW_EVENT_MASK \
                                        (SNBEP_U_MSR_PMON_RAW_EVENT_MASK | \
                                                SNBEP_CBO_PMON_CTL_TID_EN)
/* KNL CHA */
#define KNL_CHA_MSR_OFFSET                      0xc
#define KNL_CHA_MSR_PMON_CTL_QOR                (1 << 16)
#define KNL_CHA_MSR_PMON_RAW_EVENT_MASK \
                                        (SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK | \
                                         KNL_CHA_MSR_PMON_CTL_QOR)
#define KNL_CHA_MSR_PMON_BOX_FILTER_TID         0x1ff
#define KNL_CHA_MSR_PMON_BOX_FILTER_STATE       (7 << 18)
#define KNL_CHA_MSR_PMON_BOX_FILTER_OP          (0xfffffe2aULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE (0x1ULL << 32)
#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE  (0x1ULL << 33)
#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC         (0x1ULL << 37)

/* KNL EDC/MC UCLK */
#define KNL_UCLK_MSR_PMON_CTR0_LOW              0x400
#define KNL_UCLK_MSR_PMON_CTL0                  0x420
#define KNL_UCLK_MSR_PMON_BOX_CTL               0x430
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW        0x44c
#define KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL        0x454
#define KNL_PMON_FIXED_CTL_EN                   0x1

/* KNL EDC */
#define KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW         0xa00
#define KNL_EDC0_ECLK_MSR_PMON_CTL0             0xa20
#define KNL_EDC0_ECLK_MSR_PMON_BOX_CTL          0xa30
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW   0xa3c
#define KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL   0xa44

/* KNL MC */
#define KNL_MC0_CH0_MSR_PMON_CTR0_LOW           0xb00
#define KNL_MC0_CH0_MSR_PMON_CTL0               0xb20
#define KNL_MC0_CH0_MSR_PMON_BOX_CTL            0xb30
#define KNL_MC0_CH0_MSR_PMON_FIXED_LOW          0xb3c
#define KNL_MC0_CH0_MSR_PMON_FIXED_CTL          0xb44

/* KNL IRP */
#define KNL_IRP_PCI_PMON_BOX_CTL                0xf0
#define KNL_IRP_PCI_PMON_RAW_EVENT_MASK         (SNBEP_PMON_RAW_EVENT_MASK | \
                                                 KNL_CHA_MSR_PMON_CTL_QOR)
/* KNL PCU (7-bit event select, 6-bit threshold, occupancy-counter select) */
#define KNL_PCU_PMON_CTL_EV_SEL_MASK            0x0000007f
#define KNL_PCU_PMON_CTL_USE_OCC_CTR            (1 << 7)
#define KNL_PCU_MSR_PMON_CTL_TRESH_MASK         0x3f000000
#define KNL_PCU_MSR_PMON_RAW_EVENT_MASK \
                                (KNL_PCU_PMON_CTL_EV_SEL_MASK | \
                                 KNL_PCU_PMON_CTL_USE_OCC_CTR | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
                                 SNBEP_PMON_CTL_EDGE_DET | \
                                 SNBEP_CBO_PMON_CTL_TID_EN | \
                                 SNBEP_PMON_CTL_INVERT | \
                                 KNL_PCU_MSR_PMON_CTL_TRESH_MASK | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
                                 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
 270
/* SKX pci bus to socket mapping */
#define SKX_CPUNODEID                   0xc0
#define SKX_GIDNIDMAP                   0xd4

/* SKX CHA filter fields (64-bit layout) */
#define SKX_CHA_MSR_PMON_BOX_FILTER_TID         (0x1ffULL << 0)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LINK        (0xfULL << 9)
#define SKX_CHA_MSR_PMON_BOX_FILTER_STATE       (0x3ffULL << 17)
#define SKX_CHA_MSR_PMON_BOX_FILTER_REM         (0x1ULL << 32)
#define SKX_CHA_MSR_PMON_BOX_FILTER_LOC         (0x1ULL << 33)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC     (0x1ULL << 35)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NM          (0x1ULL << 36)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM      (0x1ULL << 37)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0        (0x3ffULL << 41)
#define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1        (0x3ffULL << 51)
#define SKX_CHA_MSR_PMON_BOX_FILTER_C6          (0x1ULL << 61)
#define SKX_CHA_MSR_PMON_BOX_FILTER_NC          (0x1ULL << 62)
#define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC        (0x1ULL << 63)

/* SKX IIO */
#define SKX_IIO0_MSR_PMON_CTL0          0xa48
#define SKX_IIO0_MSR_PMON_CTR0          0xa41
#define SKX_IIO0_MSR_PMON_BOX_CTL       0xa40
#define SKX_IIO_MSR_OFFSET              0x20

/* IIO uses a 12-bit threshold split across config (8 bits) + ext (4 bits) */
#define SKX_PMON_CTL_TRESH_MASK         (0xff << 24)
#define SKX_PMON_CTL_TRESH_MASK_EXT     (0xf)
#define SKX_PMON_CTL_CH_MASK            (0xff << 4)
#define SKX_PMON_CTL_FC_MASK            (0x7 << 12)
#define SKX_IIO_PMON_RAW_EVENT_MASK     (SNBEP_PMON_CTL_EV_SEL_MASK | \
                                         SNBEP_PMON_CTL_UMASK_MASK | \
                                         SNBEP_PMON_CTL_EDGE_DET | \
                                         SNBEP_PMON_CTL_INVERT | \
                                         SKX_PMON_CTL_TRESH_MASK)
#define SKX_IIO_PMON_RAW_EVENT_MASK_EXT (SKX_PMON_CTL_TRESH_MASK_EXT | \
                                         SKX_PMON_CTL_CH_MASK | \
                                         SKX_PMON_CTL_FC_MASK)

/* SKX IRP */
#define SKX_IRP0_MSR_PMON_CTL0          0xa5b
#define SKX_IRP0_MSR_PMON_CTR0          0xa59
#define SKX_IRP0_MSR_PMON_BOX_CTL       0xa58
#define SKX_IRP_MSR_OFFSET              0x20

/* SKX UPI */
#define SKX_UPI_PCI_PMON_CTL0           0x350
#define SKX_UPI_PCI_PMON_CTR0           0x318
#define SKX_UPI_PCI_PMON_BOX_CTL        0x378
#define SKX_UPI_CTL_UMASK_EXT           0xffefff

/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0           0x228
#define SKX_M2M_PCI_PMON_CTR0           0x200
#define SKX_M2M_PCI_PMON_BOX_CTL        0x258
 325
/*
 * sysfs "format" attributes: each maps a user-visible format name to the
 * config/config1/config2 bit range(s) userspace (perf) writes into.
 * These strings are ABI — do not change existing bit ranges.
 */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
/* thresholds of differing widths, all starting at config bit 24 */
DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
/* NOTE(review): occ_edge spanning bits 14-51 looks unusual next to
 * occ_edge_det at bit 31 — long-standing range, verify before changing. */
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31");
DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43");
DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46");
/* box filter fields live in config1/config2 */
DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5");
DEFINE_UNCORE_FORMAT_ATTR(filter_tid4, filter_tid, "config1:0-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20");
DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26");
DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32");
DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36");
DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33");
DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35");
DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60");
DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62");
DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61");
DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
/* QPI packet match (config1) and mask (config2) fields */
DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
 398
 399static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
 400{
 401        struct pci_dev *pdev = box->pci_dev;
 402        int box_ctl = uncore_pci_box_ctl(box);
 403        u32 config = 0;
 404
 405        if (!pci_read_config_dword(pdev, box_ctl, &config)) {
 406                config |= SNBEP_PMON_BOX_CTL_FRZ;
 407                pci_write_config_dword(pdev, box_ctl, config);
 408        }
 409}
 410
 411static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
 412{
 413        struct pci_dev *pdev = box->pci_dev;
 414        int box_ctl = uncore_pci_box_ctl(box);
 415        u32 config = 0;
 416
 417        if (!pci_read_config_dword(pdev, box_ctl, &config)) {
 418                config &= ~SNBEP_PMON_BOX_CTL_FRZ;
 419                pci_write_config_dword(pdev, box_ctl, config);
 420        }
 421}
 422
/*
 * Start counting one event on a PCI-based box: write the event's config
 * with the enable bit (SNBEP_PMON_CTL_EN) set into its control register.
 */
static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
 430
/*
 * Stop counting one event on a PCI-based box: rewrite the config without
 * the enable bit (hwc->config never carries SNBEP_PMON_CTL_EN itself).
 */
static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}
 438
/*
 * Read a 64-bit counter that is exposed as two consecutive 32-bit PCI
 * config registers (low dword at event_base, high dword at event_base+4).
 *
 * The casts store each dword directly into the corresponding half of
 * 'count'; x86 is little-endian, so half 0 is the low dword.  Read
 * failures leave the corresponding half zero (count is pre-initialized).
 */
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count = 0;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}
 450
/*
 * One-time box init: write the INT value (reset control + counters,
 * enable freeze support) to the box control register.
 */
static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}
 458
 459static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
 460{
 461        u64 config;
 462        unsigned msr;
 463
 464        msr = uncore_msr_box_ctl(box);
 465        if (msr) {
 466                rdmsrl(msr, config);
 467                config |= SNBEP_PMON_BOX_CTL_FRZ;
 468                wrmsrl(msr, config);
 469        }
 470}
 471
 472static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
 473{
 474        u64 config;
 475        unsigned msr;
 476
 477        msr = uncore_msr_box_ctl(box);
 478        if (msr) {
 479                rdmsrl(msr, config);
 480                config &= ~SNBEP_PMON_BOX_CTL_FRZ;
 481                wrmsrl(msr, config);
 482        }
 483}
 484
/*
 * Start counting one event on an MSR-based box.  If the event uses an
 * extra (filter) register, program that first so the filter is in place
 * before the counter is enabled, then write the event config with the
 * enable bit set.
 */
static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE)
		wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
 495
/*
 * Stop counting one event on an MSR-based box: rewrite the event config
 * without the enable bit (hwc->config never carries SNBEP_PMON_CTL_EN).
 */
static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}
 503
/*
 * One-time box init for MSR-based boxes: write the INT value (reset
 * control + counters, enable freeze support) to the box control MSR,
 * if the box has one.
 */
static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}
 511
/* Generic SNB-EP box format attributes (8-bit threshold) */
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
 520
/* Ubox format attributes: same as generic but with a 5-bit threshold */
static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};
 529
/* Cbox format attributes: adds tid_en plus the filter-MSR fields */
static struct attribute *snbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_nid.attr,
	&format_attr_filter_state.attr,
	&format_attr_filter_opc.attr,
	NULL,
};
 543
/* PCU format attributes: occupancy-counter controls and frequency bands */
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
 558
/* QPI format attributes: extended event select plus packet match/mask */
static struct attribute *snbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
 585
/*
 * Pre-defined IMC events.  CAS counts are scaled by 64/2^20
 * (6.103515625e-5) so one 64-byte cache-line transfer reads as MiB.
 */
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
	{ /* end: all zeroes */ },
};
 596
/*
 * Pre-defined QPI events.  Events 0x102/0x103 use the extended
 * event-select bit (values > 0xff), hence the event_ext format.
 */
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
	{ /* end: all zeroes */ },
};
 604
/* sysfs "format" groups, one per box type, wrapping the arrays above */
static const struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static const struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static const struct attribute_group snbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_cbox_formats_attr,
};

static const struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static const struct attribute_group snbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = snbep_uncore_qpi_formats_attr,
};
 629
/*
 * Shared MSR-ops initializers.  The two-level split lets later platforms
 * reuse the common handlers while overriding init_box.
 */
#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
	.init_box	= snbep_uncore_msr_init_box		\

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
 644
/*
 * Shared PCI-ops initializer; deliberately omits .enable_event so each
 * user can supply its own enable handler.
 */
#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT()			\
	.init_box	= snbep_uncore_pci_init_box,		\
	.disable_box	= snbep_uncore_pci_disable_box,		\
	.enable_box	= snbep_uncore_pci_enable_box,		\
	.disable_event	= snbep_uncore_pci_disable_event,	\
	.read_counter	= snbep_uncore_pci_read_counter
 651
 652static struct intel_uncore_ops snbep_uncore_pci_ops = {
 653        SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
 654        .enable_event   = snbep_uncore_pci_enable_event,        \
 655};
 656
/*
 * Cbox event constraints: each entry pins an event code to the subset of
 * counters (bitmask) on which it may be scheduled.
 */
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};
 686
/* Counter constraints for the SNB-EP R2PCIe box. */
static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};
 700
/* Counter constraints for the SNB-EP R3QPI box. */
static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2a, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
 732
/*
 * SNB-EP Ubox PMU: one box with two 44-bit general-purpose counters
 * plus a 48-bit fixed UCLK counter, all MSR-programmed.
 */
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};
 747
/*
 * Events that use the cbox filter register.  Each entry matches an
 * event-select/umask combination (under config_mask) and its idx bitmap
 * names the filter fields consumed; the field bits are decoded by
 * snbep_cbox_filter_mask() (0x1 TID, 0x2 NID, 0x4 STATE, 0x8 OPC).
 */
static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x2),
	EVENT_EXTRA_END
};
 776
 777static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
 778{
 779        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
 780        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
 781        int i;
 782
 783        if (uncore_box_is_fake(box))
 784                return;
 785
 786        for (i = 0; i < 5; i++) {
 787                if (reg1->alloc & (0x1 << i))
 788                        atomic_sub(1 << (i * 6), &er->ref);
 789        }
 790        reg1->alloc = 0;
 791}
 792
/*
 * Try to take references on the cbox filter-register fields this event
 * needs.  Each of the five fields has a 6-bit slice in the shared-reg
 * refcount; a field may be shared only when every user programs the
 * same value into it (checked via the masked config comparison).
 *
 * @cbox_filter_mask converts a field-selector bitmap into the matching
 * filter-register mask.  Returns NULL on success, or the empty
 * constraint if a field is in use with a conflicting value.
 */
static struct event_constraint *
__snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
			    u64 (*cbox_filter_mask)(int fields))
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	int i, alloc = 0;
	unsigned long flags;
	u64 mask;

	if (reg1->idx == EXTRA_REG_NONE)
		return NULL;

	raw_spin_lock_irqsave(&er->lock, flags);
	for (i = 0; i < 5; i++) {
		if (!(reg1->idx & (0x1 << i)))
			continue;
		/* a real (non-fake) box may already hold this field */
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			continue;

		mask = cbox_filter_mask(0x1 << i);
		if (!__BITS_VALUE(atomic_read(&er->ref), i, 6) ||
		    !((reg1->config ^ er->config) & mask)) {
			/* field is free or holds an identical value: share it */
			atomic_add(1 << (i * 6), &er->ref);
			er->config &= ~mask;
			er->config |= reg1->config & mask;
			alloc |= (0x1 << i);
		} else {
			break;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	if (i < 5)
		goto fail;

	/* fake boxes are only used for validation; don't record the grab */
	if (!uncore_box_is_fake(box))
		reg1->alloc |= alloc;

	return NULL;
fail:
	/* roll back the references taken before the conflicting field */
	for (; i >= 0; i--) {
		if (alloc & (0x1 << i))
			atomic_sub(1 << (i * 6), &er->ref);
	}
	return &uncore_constraint_empty;
}
 839
 840static u64 snbep_cbox_filter_mask(int fields)
 841{
 842        u64 mask = 0;
 843
 844        if (fields & 0x1)
 845                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_TID;
 846        if (fields & 0x2)
 847                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_NID;
 848        if (fields & 0x4)
 849                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
 850        if (fields & 0x8)
 851                mask |= SNBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
 852
 853        return mask;
 854}
 855
/* SNB-EP cbox constraint hook: shared logic with the SNB-EP field masks. */
static struct event_constraint *
snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
}
 861
/*
 * Gather the filter fields the event's select/umask combination needs
 * (per snbep_uncore_cbox_extra_regs) and record this box's filter MSR
 * together with the masked config1 value.  Always returns 0.
 */
static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	/* table terminated by an entry with msr == 0 (EVENT_EXTRA_END) */
	for (er = snbep_uncore_cbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		/* each cbox has its own filter register, one MSR stride apart */
		reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & snbep_cbox_filter_mask(idx);
		reg1->idx = idx;
	}
	return 0;
}
 882
/* cbox ops: common MSR callbacks plus filter-register management. */
static struct intel_uncore_ops snbep_uncore_cbox_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_cbox_hw_config,
	.get_constraint		= snbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};

/*
 * SNB-EP cbox PMU: up to 8 boxes (clamped to the core count in
 * snbep_uncore_cpu_init()), 4 x 44-bit counters each, plus one shared
 * filter register per box.
 */
static struct intel_uncore_type snbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &snbep_uncore_cbox_ops,
	.format_group		= &snbep_uncore_cbox_format_group,
};
 905
 906static u64 snbep_pcu_alter_er(struct perf_event *event, int new_idx, bool modify)
 907{
 908        struct hw_perf_event *hwc = &event->hw;
 909        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
 910        u64 config = reg1->config;
 911
 912        if (new_idx > reg1->idx)
 913                config <<= 8 * (new_idx - reg1->idx);
 914        else
 915                config >>= 8 * (reg1->idx - new_idx);
 916
 917        if (modify) {
 918                hwc->config += new_idx - reg1->idx;
 919                reg1->config = config;
 920                reg1->idx = new_idx;
 921        }
 922        return config;
 923}
 924
/*
 * Claim a byte lane of the shared PCU filter register for this event.
 * Each of the four lanes has an 8-bit slice of the shared-reg refcount;
 * a lane may be shared when the existing band value matches.  If the
 * preferred lane conflicts, try the remaining lanes, re-aligning the
 * config (and, on success, the event) via snbep_pcu_alter_er().
 */
static struct event_constraint *
snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct intel_uncore_extra_reg *er = &box->shared_regs[0];
	unsigned long flags;
	int idx = reg1->idx;
	u64 mask, config1 = reg1->config;
	bool ok = false;

	/* nothing to claim, or a real box already holds its lane */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;
again:
	mask = 0xffULL << (idx * 8);
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!__BITS_VALUE(atomic_read(&er->ref), idx, 8) ||
	    !((config1 ^ er->config) & mask)) {
		/* lane is free, or already carries the same band value */
		atomic_add(1 << (idx * 8), &er->ref);
		er->config &= ~mask;
		er->config |= config1 & mask;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/* try the next of the four lanes; give up after a full loop */
		idx = (idx + 1) % 4;
		if (idx != reg1->idx) {
			config1 = snbep_pcu_alter_er(event, idx, false);
			goto again;
		}
		return &uncore_constraint_empty;
	}

	if (!uncore_box_is_fake(box)) {
		/* commit the (possibly moved) lane to the event */
		if (idx != reg1->idx)
			snbep_pcu_alter_er(event, idx, true);
		reg1->alloc = 1;
	}
	return NULL;
}
 966
 967static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
 968{
 969        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
 970        struct intel_uncore_extra_reg *er = &box->shared_regs[0];
 971
 972        if (uncore_box_is_fake(box) || !reg1->alloc)
 973                return;
 974
 975        atomic_sub(1 << (reg1->idx * 8), &er->ref);
 976        reg1->alloc = 0;
 977}
 978
 979static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
 980{
 981        struct hw_perf_event *hwc = &event->hw;
 982        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
 983        int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;
 984
 985        if (ev_sel >= 0xb && ev_sel <= 0xe) {
 986                reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
 987                reg1->idx = ev_sel - 0xb;
 988                reg1->config = event->attr.config1 & (0xff << (reg1->idx * 8));
 989        }
 990        return 0;
 991}
 992
/* PCU ops: common MSR callbacks plus filter-band lane management. */
static struct intel_uncore_ops snbep_uncore_pcu_ops = {
	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/*
 * SNB-EP PCU PMU: one box, 4 x 48-bit counters, one shared filter
 * register (the four band thresholds).
 */
static struct intel_uncore_type snbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_pcu_ops,
	.format_group		= &snbep_uncore_pcu_format_group,
};

/* MSR-programmed SNB-EP uncore box types; NULL-terminated. */
static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
1020
1021void snbep_uncore_cpu_init(void)
1022{
1023        if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1024                snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1025        uncore_msr_uncores = snbep_msr_uncores;
1026}
1027
/*
 * Indices into uncore_extra_pci_dev[pkg].dev[] for auxiliary PCI
 * devices: the two QPI port filter devices here, plus an extra slot
 * (HSWEP_PCI_PCU_3) presumably used by the Haswell-EP code elsewhere
 * in this file — not referenced in this section.
 */
enum {
	SNBEP_PCI_QPI_PORT0_FILTER,
	SNBEP_PCI_QPI_PORT1_FILTER,
	HSWEP_PCI_PCU_3,
};
1033
1034static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1035{
1036        struct hw_perf_event *hwc = &event->hw;
1037        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
1038        struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
1039
1040        if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
1041                reg1->idx = 0;
1042                reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
1043                reg1->config = event->attr.config1;
1044                reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
1045                reg2->config = event->attr.config2;
1046        }
1047        return 0;
1048}
1049
/*
 * Enable a QPI event.  If the event carries match/mask filter state
 * (set up by snbep_qpi_hw_config()), first program it into this port's
 * companion filter PCI device; each 64-bit value is written as two
 * 32-bit config-space accesses.  Then enable the counter itself.
 */
static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
		int pkg = box->pkgid;
		struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx];

		/* the filter device may be absent; skip filtering then */
		if (filter_pdev) {
			pci_write_config_dword(filter_pdev, reg1->reg,
						(u32)reg1->config);
			pci_write_config_dword(filter_pdev, reg1->reg + 4,
						(u32)(reg1->config >> 32));
			pci_write_config_dword(filter_pdev, reg2->reg,
						(u32)reg2->config);
			pci_write_config_dword(filter_pdev, reg2->reg + 4,
						(u32)(reg2->config >> 32));
		}
	}

	pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1076
/*
 * QPI ops: PCI common callbacks, with ->enable_event overridden to
 * program the port's match/mask filter device first.
 */
static struct intel_uncore_ops snbep_uncore_qpi_ops = {
	SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
	.enable_event		= snbep_qpi_enable_event,
	.hw_config		= snbep_qpi_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

/* Register layout shared by the SNB-EP PCI-programmed uncore boxes. */
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
1092
/* SNB-EP Home Agent PMU: one box, 4 x 48-bit counters. */
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/*
 * SNB-EP IMC PMU: one box per memory channel (4), 4 x 48-bit counters
 * plus a 48-bit fixed counter.
 */
static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/*
 * SNB-EP QPI PMU: one box per port (2).  Spelled out rather than using
 * SNBEP_UNCORE_PCI_COMMON_INIT() because it needs its own event mask,
 * ops (filter programming), format group, and a shared reg.
 */
static struct intel_uncore_type snbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.event_descs		= snbep_uncore_qpi_events,
	.format_group		= &snbep_uncore_qpi_format_group,
};
1127
1128
/* SNB-EP R2PCIe ring-to-PCIe PMU: one box, 4 x 44-bit counters. */
static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

/* SNB-EP R3QPI ring-to-QPI PMU: one box per link (2), 3 x 44-bit counters. */
static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
1146
/* Indices into snbep_pci_uncores[], also encoded in pci_device_id data. */
enum {
	SNBEP_PCI_UNCORE_HA,
	SNBEP_PCI_UNCORE_IMC,
	SNBEP_PCI_UNCORE_QPI,
	SNBEP_PCI_UNCORE_R2PCIE,
	SNBEP_PCI_UNCORE_R3QPI,
};

/* PCI-programmed SNB-EP uncore box types; NULL-terminated. */
static struct intel_uncore_type *snbep_pci_uncores[] = {
	[SNBEP_PCI_UNCORE_HA]		= &snbep_uncore_ha,
	[SNBEP_PCI_UNCORE_IMC]		= &snbep_uncore_imc,
	[SNBEP_PCI_UNCORE_QPI]		= &snbep_uncore_qpi,
	[SNBEP_PCI_UNCORE_R2PCIE]	= &snbep_uncore_r2pcie,
	[SNBEP_PCI_UNCORE_R3QPI]	= &snbep_uncore_r3qpi,
	NULL,
};
1163
/*
 * PCI IDs of the SNB-EP uncore devices.  driver_data encodes the box
 * type and instance index; devices 0x3c86/0x3c96 are the QPI port
 * filter config spaces stashed as extra devices.
 */
static const struct pci_device_id snbep_uncore_pci_ids[] = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1217
/*
 * Driver descriptor handed to the uncore PCI core via
 * snbep_uncore_pci_init(); note there is no .probe here — presumably
 * device discovery is driven by the shared uncore code (verify there).
 */
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
1222
/*
 * Build the PCI bus number -> physical socket id mapping.
 *
 * Walk every UBOX PCI device matching @devid: read its local Node ID
 * from config offset @nodeid_loc and the node-id mapping register from
 * @idmap_loc (3 bits per node), and record which socket owns the bus.
 * Buses without a UBOX then inherit a neighbouring bus's mapping,
 * scanning from high bus numbers down when @reverse is set, upwards
 * otherwise.
 *
 * Returns 0 on success or a negative errno.
 */
static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool reverse)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid, segment;
	struct pci2phy_map *map;
	int err = 0;
	u32 config = 0;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
		if (err)
			break;
		nodeid = config;
		/* get the Node ID mapping */
		err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
		if (err)
			break;

		/* one map per PCI segment (domain) */
		segment = pci_domain_nr(ubox_dev->bus);
		raw_spin_lock(&pci2phy_map_lock);
		map = __find_pci2phy_map(segment);
		if (!map) {
			raw_spin_unlock(&pci2phy_map_lock);
			err = -ENOMEM;
			break;
		}

		/*
		 * every three bits in the Node ID mapping register maps
		 * to a particular node.
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				map->pbus_to_physid[bus] = i;
				break;
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	if (!err) {
		/*
		 * For PCI bus with no UBOX device, find the next bus
		 * that has UBOX device and use its mapping.
		 */
		raw_spin_lock(&pci2phy_map_lock);
		list_for_each_entry(map, &pci2phy_map_head, list) {
			i = -1;
			if (reverse) {
				/* unmapped entries inherit from higher bus numbers */
				for (bus = 255; bus >= 0; bus--) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			} else {
				/* unmapped entries inherit from lower bus numbers */
				for (bus = 0; bus <= 255; bus++) {
					if (map->pbus_to_physid[bus] >= 0)
						i = map->pbus_to_physid[bus];
					else
						map->pbus_to_physid[bus] = i;
				}
			}
		}
		raw_spin_unlock(&pci2phy_map_lock);
	}

	pci_dev_put(ubox_dev);

	return err ? pcibios_err_to_errno(err) : 0;
}
1303
1304int snbep_uncore_pci_init(void)
1305{
1306        int ret = snbep_pci2phy_map_init(0x3ce0, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1307        if (ret)
1308                return ret;
1309        uncore_pci_uncores = snbep_pci_uncores;
1310        uncore_pci_driver = &snbep_uncore_pci_driver;
1311        return 0;
1312}
1313/* end of Sandy Bridge-EP uncore support */
1314
1315/* IvyTown uncore support */
1316static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
1317{
1318        unsigned msr = uncore_msr_box_ctl(box);
1319        if (msr)
1320                wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
1321}
1322
1323static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
1324{
1325        struct pci_dev *pdev = box->pci_dev;
1326
1327        pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
1328}
1329
/*
 * IvyTown MSR ops: reuse the SNB-EP callbacks, replacing only
 * ->init_box (different box-control reset value).
 */
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
	.init_box	= ivbep_uncore_msr_init_box,		\
	.disable_box	= snbep_uncore_msr_disable_box,		\
	.enable_box	= snbep_uncore_msr_enable_box,		\
	.disable_event	= snbep_uncore_msr_disable_event,	\
	.enable_event	= snbep_uncore_msr_enable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops ivbep_uncore_msr_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};

/* IvyTown PCI ops: SNB-EP callbacks with the IvyTown ->init_box. */
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
1350
/* Register layout shared by the IvyTown PCI-programmed uncore boxes. */
#define IVBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= IVBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &ivbep_uncore_pci_ops,		\
	.format_group	= &ivbep_uncore_format_group
1358
/* Generic IvyTown box format: event/umask/edge/inv + 8-bit threshold. */
static struct attribute *ivbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

/* Ubox format: as above but with a 5-bit threshold. */
static struct attribute *ivbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

/* Cbox format: adds tid_en and the cbox filter-register fields. */
static struct attribute *ivbep_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid.attr,
	&format_attr_filter_link.attr,
	&format_attr_filter_state2.attr,
	&format_attr_filter_nid2.attr,
	&format_attr_filter_opc2.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_c6.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

/* PCU format: occupancy select/edge/invert plus the four band filters. */
static struct attribute *ivbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};

/* QPI format: extended event select plus the packet match/mask fields. */
static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_thresh8.attr,
	&format_attr_match_rds.attr,
	&format_attr_match_rnid30.attr,
	&format_attr_match_rnid4.attr,
	&format_attr_match_dnid.attr,
	&format_attr_match_mc.attr,
	&format_attr_match_opc.attr,
	&format_attr_match_vnw.attr,
	&format_attr_match0.attr,
	&format_attr_match1.attr,
	&format_attr_mask_rds.attr,
	&format_attr_mask_rnid30.attr,
	&format_attr_mask_rnid4.attr,
	&format_attr_mask_dnid.attr,
	&format_attr_mask_mc.attr,
	&format_attr_mask_opc.attr,
	&format_attr_mask_vnw.attr,
	&format_attr_mask0.attr,
	&format_attr_mask1.attr,
	NULL,
};
1433
/* sysfs "format" directories exposing the attribute sets above. */
static const struct attribute_group ivbep_uncore_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_formats_attr,
};

static const struct attribute_group ivbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_ubox_formats_attr,
};

static const struct attribute_group ivbep_uncore_cbox_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_cbox_formats_attr,
};

static const struct attribute_group ivbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_pcu_formats_attr,
};

static const struct attribute_group ivbep_uncore_qpi_format_group = {
	.name = "format",
	.attrs = ivbep_uncore_qpi_formats_attr,
};
1458
/* IvyTown Ubox: two 44-bit GP counters plus a 48-bit fixed UCLK counter. */
static struct intel_uncore_type ivbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= IVBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &ivbep_uncore_msr_ops,
	.format_group	= &ivbep_uncore_ubox_format_group,
};
1473
/*
 * Cbox events (event select | umask, matched under config_mask) that need
 * extra filter MSR fields.  The last column is a bit set of required filter
 * fields, decoded by ivbep_cbox_filter_mask().
 */
static struct extra_reg ivbep_uncore_cbox_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
	SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
	EVENT_EXTRA_END
};
1514
1515static u64 ivbep_cbox_filter_mask(int fields)
1516{
1517        u64 mask = 0;
1518
1519        if (fields & 0x1)
1520                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_TID;
1521        if (fields & 0x2)
1522                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_LINK;
1523        if (fields & 0x4)
1524                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_STATE;
1525        if (fields & 0x8)
1526                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NID;
1527        if (fields & 0x10) {
1528                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_OPC;
1529                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_NC;
1530                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_C6;
1531                mask |= IVBEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
1532        }
1533
1534        return mask;
1535}
1536
/* Thin wrapper: shared SNB-EP constraint logic with the IVT filter mask. */
static struct event_constraint *
ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
}
1542
1543static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
1544{
1545        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
1546        struct extra_reg *er;
1547        int idx = 0;
1548
1549        for (er = ivbep_uncore_cbox_extra_regs; er->msr; er++) {
1550                if (er->event != (event->hw.config & er->config_mask))
1551                        continue;
1552                idx |= er->idx;
1553        }
1554
1555        if (idx) {
1556                reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
1557                        SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
1558                reg1->config = event->attr.config1 & ivbep_cbox_filter_mask(idx);
1559                reg1->idx = idx;
1560        }
1561        return 0;
1562}
1563
/* Program the shared filter MSRs (if any) and enable the counter. */
static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		u64 filter = uncore_shared_reg_config(box, 0);
		/*
		 * The 64-bit filter value is split across two MSRs; the
		 * high half is written 6 MSRs above the low half (the two
		 * halves are not at adjacent addresses on IvyTown).
		 */
		wrmsrl(reg1->reg, filter & 0xffffffff);
		wrmsrl(reg1->reg + 6, filter >> 32);
	}

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
1577
/* Cbox ops: SNB-EP box management with IVT-specific enable/config/constraints. */
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= ivbep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= ivbep_cbox_hw_config,
	.get_constraint		= ivbep_cbox_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
1589
/* IvyTown Cbox: up to 15 boxes (clamped to core count in ivbep_uncore_cpu_init()). */
static struct intel_uncore_type ivbep_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 15,
	.perf_ctr_bits		= 44,
	.event_ctl		= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask		= IVBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= SNBEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,	/* the shared filter MSR */
	.constraints		= snbep_uncore_cbox_constraints,
	.ops			= &ivbep_uncore_cbox_ops,
	.format_group		= &ivbep_uncore_cbox_format_group,
};
1605
/* PCU ops: common IVT MSR ops plus SNB-EP PCU config/constraint handlers. */
static struct intel_uncore_ops ivbep_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= snbep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};

/* IvyTown power control unit PMU. */
static struct intel_uncore_type ivbep_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask		= IVBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_pcu_ops,
	.format_group		= &ivbep_uncore_pcu_format_group,
};
1626
/* NULL-terminated list of all MSR-based IvyTown uncore PMUs. */
static struct intel_uncore_type *ivbep_msr_uncores[] = {
	&ivbep_uncore_ubox,
	&ivbep_uncore_cbox,
	&ivbep_uncore_pcu,
	NULL,
};
1633
1634void ivbep_uncore_cpu_init(void)
1635{
1636        if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
1637                ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
1638        uncore_msr_uncores = ivbep_msr_uncores;
1639}
1640
/* IvyTown home agent PMU (PCI-based). */
static struct intel_uncore_type ivbep_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IvyTown integrated memory controller PMU: 8 channels, with a fixed (DCLK) counter. */
static struct intel_uncore_type ivbep_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1660
/* registers in IRP boxes are not properly aligned */
static unsigned ivbep_uncore_irp_ctls[] = {0xd8, 0xdc, 0xe0, 0xe4};	/* per-counter control offsets */
static unsigned ivbep_uncore_irp_ctrs[] = {0xa0, 0xb0, 0xb8, 0xc0};	/* per-counter value offsets (low dword) */
1664
/* Enable an IRP counter via its (irregularly spaced) PCI config control register. */
static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx],
			       hwc->config | SNBEP_PMON_CTL_EN);
}
1673
/* Disable an IRP counter by writing its config back without the enable bit. */
static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, ivbep_uncore_irp_ctls[hwc->idx], hwc->config);
}
1681
1682static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
1683{
1684        struct pci_dev *pdev = box->pci_dev;
1685        struct hw_perf_event *hwc = &event->hw;
1686        u64 count = 0;
1687
1688        pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
1689        pci_read_config_dword(pdev, ivbep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
1690
1691        return count;
1692}
1693
/* IRP ops: custom event/counter accessors because the registers are not evenly spaced. */
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= ivbep_uncore_irp_disable_event,
	.enable_event	= ivbep_uncore_irp_enable_event,
	.read_counter	= ivbep_uncore_irp_read_counter,
};

/* IvyTown IRP (IIO ring port) PMU. */
static struct intel_uncore_type ivbep_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= IVBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &ivbep_uncore_irp_ops,
	.format_group		= &ivbep_uncore_format_group,
};
1713
/* QPI ops: SNB-EP PCI box management plus QPI match/mask extra-reg handling. */
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
	.init_box	= ivbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_qpi_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
	.hw_config	= snbep_qpi_hw_config,
	.get_constraint	= uncore_get_constraint,
	.put_constraint	= uncore_put_constraint,
};

/* IvyTown QPI link layer PMU: three ports. */
static struct intel_uncore_type ivbep_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= IVBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_qpi_ops,
	.format_group		= &ivbep_uncore_qpi_format_group,
};
1739
/* IvyTown R2PCIe (ring-to-PCIe) PMU. */
static struct intel_uncore_type ivbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};

/* IvyTown R3QPI (ring-to-QPI) PMU. */
static struct intel_uncore_type ivbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	IVBEP_UNCORE_PCI_COMMON_INIT(),
};
1757
/* Indices into ivbep_pci_uncores[], used as the type half of driver_data. */
enum {
	IVBEP_PCI_UNCORE_HA,
	IVBEP_PCI_UNCORE_IMC,
	IVBEP_PCI_UNCORE_IRP,
	IVBEP_PCI_UNCORE_QPI,
	IVBEP_PCI_UNCORE_R2PCIE,
	IVBEP_PCI_UNCORE_R3QPI,
};

/* NULL-terminated list of all PCI-based IvyTown uncore PMUs. */
static struct intel_uncore_type *ivbep_pci_uncores[] = {
	[IVBEP_PCI_UNCORE_HA]	= &ivbep_uncore_ha,
	[IVBEP_PCI_UNCORE_IMC]	= &ivbep_uncore_imc,
	[IVBEP_PCI_UNCORE_IRP]	= &ivbep_uncore_irp,
	[IVBEP_PCI_UNCORE_QPI]	= &ivbep_uncore_qpi,
	[IVBEP_PCI_UNCORE_R2PCIE]	= &ivbep_uncore_r2pcie,
	[IVBEP_PCI_UNCORE_R3QPI]	= &ivbep_uncore_r3qpi,
	NULL,
};
1776
/* PCI device IDs of all IvyTown uncore PMU devices, mapped to (type, box index). */
static const struct pci_device_id ivbep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 4 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe39),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
		.driver_data = UNCORE_PCI_DEV_DATA(IVBEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* end: all zeroes */ }
};
1862
/* No probe/remove callbacks: the uncore core walks the ID table itself. */
static struct pci_driver ivbep_uncore_pci_driver = {
	.name		= "ivbep_uncore",
	.id_table	= ivbep_uncore_pci_ids,
};
1867
1868int ivbep_uncore_pci_init(void)
1869{
1870        int ret = snbep_pci2phy_map_init(0x0e1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
1871        if (ret)
1872                return ret;
1873        uncore_pci_uncores = ivbep_pci_uncores;
1874        uncore_pci_driver = &ivbep_uncore_pci_driver;
1875        return 0;
1876}
1877/* end of IvyTown uncore support */
1878
1879/* KNL uncore support */
/* KNL Ubox event format. */
static struct attribute *knl_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static const struct attribute_group knl_uncore_ubox_format_group = {
	.name = "format",
	.attrs = knl_uncore_ubox_formats_attr,
};
1894
/* KNL Ubox: reuses the Haswell-EP Ubox MSR layout with a KNL event mask. */
static struct intel_uncore_type knl_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= KNL_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_ubox_format_group,
};
1909
/* KNL CHA event format: core fields plus the CHA filter register fields. */
static struct attribute *knl_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_qor.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_link3.attr,
	&format_attr_filter_state4.attr,
	&format_attr_filter_local.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_nnm.attr,
	&format_attr_filter_opc3.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};

static const struct attribute_group knl_uncore_cha_format_group = {
	.name = "format",
	.attrs = knl_uncore_cha_formats_attr,
};
1934
/* CHA events restricted to counter 0. */
static struct event_constraint knl_uncore_cha_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};

/* CHA events needing filter fields; last column decoded by knl_cha_filter_mask(). */
static struct extra_reg knl_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
				  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3d, 0xff, 0x2),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x4),
	EVENT_EXTRA_END
};
1950
1951static u64 knl_cha_filter_mask(int fields)
1952{
1953        u64 mask = 0;
1954
1955        if (fields & 0x1)
1956                mask |= KNL_CHA_MSR_PMON_BOX_FILTER_TID;
1957        if (fields & 0x2)
1958                mask |= KNL_CHA_MSR_PMON_BOX_FILTER_STATE;
1959        if (fields & 0x4)
1960                mask |= KNL_CHA_MSR_PMON_BOX_FILTER_OP;
1961        return mask;
1962}
1963
/* Thin wrapper: shared SNB-EP constraint logic with the KNL CHA filter mask. */
static struct event_constraint *
knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
}
1969
/*
 * Work out which CHA filter fields the event needs and record the filter
 * register address/value in the event's extra_reg.  Always returns 0.
 */
static int knl_cha_hw_config(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct extra_reg *er;
	int idx = 0;

	/* Accumulate the filter fields required by every matching entry. */
	for (er = knl_uncore_cha_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		idx |= er->idx;
	}

	if (idx) {
		reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
		reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);

		/*
		 * NOTE(review): the remote/local-node and NNC bits are forced
		 * on whenever any filter is in use — presumably to preserve
		 * default node-matching behavior; confirm against the KNL
		 * uncore documentation.
		 */
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
		reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
		reg1->idx = idx;
	}
	return 0;
}
1995
/* KNL CHA reuses the Haswell-EP Cbox enable path, defined later in this file. */
static void hswep_cbox_enable_event(struct intel_uncore_box *box,
				    struct perf_event *event);

static struct intel_uncore_ops knl_uncore_cha_ops = {
	.init_box		= snbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= knl_cha_hw_config,
	.get_constraint		= knl_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
2010
/* KNL caching/home agent PMU: 38 tiles. */
static struct intel_uncore_type knl_uncore_cha = {
	.name			= "cha",
	.num_counters		= 4,
	.num_boxes		= 38,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= KNL_CHA_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= KNL_CHA_MSR_OFFSET,
	.num_shared_regs	= 1,	/* the shared filter MSR */
	.constraints		= knl_uncore_cha_constraints,
	.ops			= &knl_uncore_cha_ops,
	.format_group		= &knl_uncore_cha_format_group,
};
2026
/* KNL PCU event format. */
static struct attribute *knl_uncore_pcu_formats_attr[] = {
	&format_attr_event2.attr,
	&format_attr_use_occ_ctr.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh6.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	NULL,
};

static const struct attribute_group knl_uncore_pcu_format_group = {
	.name = "format",
	.attrs = knl_uncore_pcu_formats_attr,
};

/* KNL power control unit PMU: Haswell-EP MSR layout with a KNL event mask. */
static struct intel_uncore_type knl_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= KNL_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.ops			= &snbep_uncore_msr_ops,
	.format_group		= &knl_uncore_pcu_format_group,
};
2057
/* NULL-terminated list of all MSR-based KNL uncore PMUs. */
static struct intel_uncore_type *knl_msr_uncores[] = {
	&knl_uncore_ubox,
	&knl_uncore_cha,
	&knl_uncore_pcu,
	NULL,
};
2064
/* Register the MSR-based KNL uncore PMUs with the uncore core. */
void knl_uncore_cpu_init(void)
{
	uncore_msr_uncores = knl_msr_uncores;
}
2069
/*
 * Enable an IMC box by writing 0 to its box control register, clearing
 * all control bits (including the freeze bit) so the counters can run.
 */
static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);

	pci_write_config_dword(pdev, box_ctl, 0);
}
2077
2078static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
2079                                        struct perf_event *event)
2080{
2081        struct pci_dev *pdev = box->pci_dev;
2082        struct hw_perf_event *hwc = &event->hw;
2083
2084        if ((event->attr.config & SNBEP_PMON_CTL_EV_SEL_MASK)
2085                                                        == UNCORE_FIXED_EVENT)
2086                pci_write_config_dword(pdev, hwc->config_base,
2087                                       hwc->config | KNL_PMON_FIXED_CTL_EN);
2088        else
2089                pci_write_config_dword(pdev, hwc->config_base,
2090                                       hwc->config | SNBEP_PMON_CTL_EN);
2091}
2092
/* Shared ops for all KNL IMC/EDC clock-domain PMUs. */
static struct intel_uncore_ops knl_uncore_imc_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= knl_uncore_imc_enable_box,
	.read_counter	= snbep_uncore_pci_read_counter,
	.enable_event	= knl_uncore_imc_enable_event,
	.disable_event	= snbep_uncore_pci_disable_event,
};
2101
/* KNL memory controller, UCLK domain. */
static struct intel_uncore_type knl_uncore_imc_uclk = {
	.name			= "imc_uclk",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL memory controller channels, DCLK domain. */
static struct intel_uncore_type knl_uncore_imc_dclk = {
	.name			= "imc",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_MC0_CH0_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_MC0_CH0_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_MC0_CH0_MSR_PMON_FIXED_LOW,
	.fixed_ctl		= KNL_MC0_CH0_MSR_PMON_FIXED_CTL,
	.box_ctl		= KNL_MC0_CH0_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL embedded DRAM controller, UCLK domain. */
static struct intel_uncore_type knl_uncore_edc_uclk = {
	.name			= "edc_uclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_UCLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_UCLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_UCLK_MSR_PMON_UCLK_FIXED_LOW,
	.fixed_ctl		= KNL_UCLK_MSR_PMON_UCLK_FIXED_CTL,
	.box_ctl		= KNL_UCLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};

/* KNL embedded DRAM controller, ECLK domain. */
static struct intel_uncore_type knl_uncore_edc_eclk = {
	.name			= "edc_eclk",
	.num_counters		= 4,
	.num_boxes		= 8,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= KNL_EDC0_ECLK_MSR_PMON_CTR0_LOW,
	.event_ctl		= KNL_EDC0_ECLK_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_LOW,
	.fixed_ctl		= KNL_EDC0_ECLK_MSR_PMON_ECLK_FIXED_CTL,
	.box_ctl		= KNL_EDC0_ECLK_MSR_PMON_BOX_CTL,
	.ops			= &knl_uncore_imc_ops,
	.format_group		= &snbep_uncore_format_group,
};
2165
/* Event 0x23 is restricted to counters 0-1. */
static struct event_constraint knl_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};

/* KNL M2PCIe (mesh-to-PCIe) PMU. */
static struct intel_uncore_type knl_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= knl_uncore_m2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2179
/* sysfs "format" attributes exposed for the KNL IRP PMU. */
static struct attribute *knl_uncore_irp_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_qor.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};
2189
/* Attribute group backing the KNL IRP "format" sysfs directory. */
static const struct attribute_group knl_uncore_irp_format_group = {
        .name = "format",
        .attrs = knl_uncore_irp_formats_attr,
};
2194
/* KNL IRP PMON unit: one box, two 48-bit counters, common SNB-EP PCI ops. */
static struct intel_uncore_type knl_uncore_irp = {
        .name                   = "irp",
        .num_counters           = 2,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCI_PMON_CTR0,
        .event_ctl              = SNBEP_PCI_PMON_CTL0,
        .event_mask             = KNL_IRP_PCI_PMON_RAW_EVENT_MASK,
        .box_ctl                = KNL_IRP_PCI_PMON_BOX_CTL,
        .ops                    = &snbep_uncore_pci_ops,
        .format_group           = &knl_uncore_irp_format_group,
};
2207
/* Indices into knl_pci_uncores[], also encoded in the PCI ID driver_data. */
enum {
        KNL_PCI_UNCORE_MC_UCLK,
        KNL_PCI_UNCORE_MC_DCLK,
        KNL_PCI_UNCORE_EDC_UCLK,
        KNL_PCI_UNCORE_EDC_ECLK,
        KNL_PCI_UNCORE_M2PCIE,
        KNL_PCI_UNCORE_IRP,
};
2216
/* All KNL PCI-based uncore PMU types, indexed by the enum above. */
static struct intel_uncore_type *knl_pci_uncores[] = {
        [KNL_PCI_UNCORE_MC_UCLK]        = &knl_uncore_imc_uclk,
        [KNL_PCI_UNCORE_MC_DCLK]        = &knl_uncore_imc_dclk,
        [KNL_PCI_UNCORE_EDC_UCLK]       = &knl_uncore_edc_uclk,
        [KNL_PCI_UNCORE_EDC_ECLK]       = &knl_uncore_edc_eclk,
        [KNL_PCI_UNCORE_M2PCIE]         = &knl_uncore_m2pcie,
        [KNL_PCI_UNCORE_IRP]            = &knl_uncore_irp,
        NULL,
};
2226
2227/*
2228 * KNL uses a common PCI device ID for multiple instances of an Uncore PMU
 * device type. Prior to KNL, each instance of a PMU device type had a unique
2230 * device ID.
2231 *
2232 *      PCI Device ID   Uncore PMU Devices
2233 *      ----------------------------------
2234 *      0x7841          MC0 UClk, MC1 UClk
2235 *      0x7843          MC0 DClk CH 0, MC0 DClk CH 1, MC0 DClk CH 2,
2236 *                      MC1 DClk CH 0, MC1 DClk CH 1, MC1 DClk CH 2
2237 *      0x7833          EDC0 UClk, EDC1 UClk, EDC2 UClk, EDC3 UClk,
2238 *                      EDC4 UClk, EDC5 UClk, EDC6 UClk, EDC7 UClk
2239 *      0x7835          EDC0 EClk, EDC1 EClk, EDC2 EClk, EDC3 EClk,
2240 *                      EDC4 EClk, EDC5 EClk, EDC6 EClk, EDC7 EClk
2241 *      0x7817          M2PCIe
2242 *      0x7814          IRP
2243*/
2244
/*
 * KNL uncore PCI ID table.  Because one device ID covers several PMU
 * instances (see the comment above), each entry pins the instance by its
 * fixed device/function number via UNCORE_PCI_DEV_FULL_DATA; M2PCIe and
 * IRP are unique per socket and use plain UNCORE_PCI_DEV_DATA.
 */
static const struct pci_device_id knl_uncore_pci_ids[] = {
        { /* MC0 UClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
        },
        { /* MC1 UClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
        },
        { /* MC0 DClk CH 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
        },
        { /* MC0 DClk CH 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
        },
        { /* MC0 DClk CH 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
        },
        { /* MC1 DClk CH 0 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
        },
        { /* MC1 DClk CH 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
        },
        { /* MC1 DClk CH 2 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
        },
        { /* EDC0 UClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
        },
        { /* EDC1 UClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
        },
        { /* EDC2 UClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
        },
        { /* EDC3 UClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
        },
        { /* EDC4 UClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
        },
        { /* EDC5 UClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
        },
        { /* EDC6 UClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
        },
        { /* EDC7 UClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
        },
        { /* EDC0 EClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
        },
        { /* EDC1 EClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
        },
        { /* EDC2 EClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
        },
        { /* EDC3 EClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
        },
        { /* EDC4 EClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
        },
        { /* EDC5 EClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
        },
        { /* EDC6 EClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
        },
        { /* EDC7 EClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
        },
        { /* M2PCIe */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
                .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_M2PCIE, 0),
        },
        { /* IRP */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7814),
                .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_IRP, 0),
        },
        { /* end: all zeroes */ }
};
2352
/* KNL uncore PCI driver; device matching is driven by knl_uncore_pci_ids. */
static struct pci_driver knl_uncore_pci_driver = {
        .name           = "knl_uncore",
        .id_table       = knl_uncore_pci_ids,
};
2357
2358int knl_uncore_pci_init(void)
2359{
2360        int ret;
2361
2362        /* All KNL PCI based PMON units are on the same PCI bus except IRP */
2363        ret = snb_pci2phy_map_init(0x7814); /* IRP */
2364        if (ret)
2365                return ret;
2366        ret = snb_pci2phy_map_init(0x7817); /* M2PCIe */
2367        if (ret)
2368                return ret;
2369        uncore_pci_uncores = knl_pci_uncores;
2370        uncore_pci_driver = &knl_uncore_pci_driver;
2371        return 0;
2372}
2373
2374/* end of KNL uncore support */
2375
2376/* Haswell-EP uncore support */
/* sysfs "format" attributes exposed for the Haswell-EP Ubox PMU. */
static struct attribute *hswep_uncore_ubox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_thresh5.attr,
        &format_attr_filter_tid2.attr,
        &format_attr_filter_cid.attr,
        NULL,
};
2387
/* Attribute group backing the HSW-EP Ubox "format" sysfs directory. */
static const struct attribute_group hswep_uncore_ubox_format_group = {
        .name = "format",
        .attrs = hswep_uncore_ubox_formats_attr,
};
2392
2393static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2394{
2395        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2396        reg1->reg = HSWEP_U_MSR_PMON_FILTER;
2397        reg1->config = event->attr.config1 & HSWEP_U_MSR_PMON_BOX_FILTER_MASK;
2398        reg1->idx = 0;
2399        return 0;
2400}
2401
/* Ubox ops: common MSR accessors plus filter config and shared-reg constraints. */
static struct intel_uncore_ops hswep_uncore_ubox_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = hswep_ubox_hw_config,
        .get_constraint         = uncore_get_constraint,
        .put_constraint         = uncore_put_constraint,
};
2408
/*
 * HSW-EP Ubox PMON unit: one box with two 44-bit general counters plus a
 * 48-bit fixed (UCLK) counter; one shared reg backs the filter MSR.
 */
static struct intel_uncore_type hswep_uncore_ubox = {
        .name                   = "ubox",
        .num_counters           = 2,
        .num_boxes              = 1,
        .perf_ctr_bits          = 44,
        .fixed_ctr_bits         = 48,
        .perf_ctr               = HSWEP_U_MSR_PMON_CTR0,
        .event_ctl              = HSWEP_U_MSR_PMON_CTL0,
        .event_mask             = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
        .fixed_ctr              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
        .fixed_ctl              = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
        .num_shared_regs        = 1,
        .ops                    = &hswep_uncore_ubox_ops,
        .format_group           = &hswep_uncore_ubox_format_group,
};
2424
/* sysfs "format" attributes for the HSW-EP Cbox PMU, incl. filter fields. */
static struct attribute *hswep_uncore_cbox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_thresh8.attr,
        &format_attr_filter_tid3.attr,
        &format_attr_filter_link2.attr,
        &format_attr_filter_state3.attr,
        &format_attr_filter_nid2.attr,
        &format_attr_filter_opc2.attr,
        &format_attr_filter_nc.attr,
        &format_attr_filter_c6.attr,
        &format_attr_filter_isoc.attr,
        NULL,
};
2441
/* Attribute group backing the HSW-EP Cbox "format" sysfs directory. */
static const struct attribute_group hswep_uncore_cbox_format_group = {
        .name = "format",
        .attrs = hswep_uncore_cbox_formats_attr,
};
2446
/* Per-event counter constraints for the HSW-EP Cbox (event, counter mask). */
static struct event_constraint hswep_uncore_cbox_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x09, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
        EVENT_CONSTRAINT_END
};
2457
/*
 * Cbox events that require filter-register programming.  The third
 * argument is a set of filter-field flags consumed by
 * hswep_cbox_hw_config()/hswep_cbox_filter_mask(), not a counter mask.
 */
static struct extra_reg hswep_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4037, 0x40ff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4028, 0x40ff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4032, 0x40ff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4029, 0x40ff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4033, 0x40ff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x402A, 0x40ff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x12),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8135, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x8136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5036, 0xffff, 0x8),
        EVENT_EXTRA_END
};
2499
2500static u64 hswep_cbox_filter_mask(int fields)
2501{
2502        u64 mask = 0;
2503        if (fields & 0x1)
2504                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_TID;
2505        if (fields & 0x2)
2506                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_LINK;
2507        if (fields & 0x4)
2508                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_STATE;
2509        if (fields & 0x8)
2510                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NID;
2511        if (fields & 0x10) {
2512                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_OPC;
2513                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_NC;
2514                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_C6;
2515                mask |= HSWEP_CB0_MSR_PMON_BOX_FILTER_ISOC;
2516        }
2517        return mask;
2518}
2519
/* Thin wrapper binding the common Cbox constraint logic to the HSW-EP filter mask. */
static struct event_constraint *
hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
}
2525
2526static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
2527{
2528        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
2529        struct extra_reg *er;
2530        int idx = 0;
2531
2532        for (er = hswep_uncore_cbox_extra_regs; er->msr; er++) {
2533                if (er->event != (event->hw.config & er->config_mask))
2534                        continue;
2535                idx |= er->idx;
2536        }
2537
2538        if (idx) {
2539                reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
2540                            HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
2541                reg1->config = event->attr.config1 & hswep_cbox_filter_mask(idx);
2542                reg1->idx = idx;
2543        }
2544        return 0;
2545}
2546
2547static void hswep_cbox_enable_event(struct intel_uncore_box *box,
2548                                  struct perf_event *event)
2549{
2550        struct hw_perf_event *hwc = &event->hw;
2551        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
2552
2553        if (reg1->idx != EXTRA_REG_NONE) {
2554                u64 filter = uncore_shared_reg_config(box, 0);
2555                wrmsrl(reg1->reg, filter & 0xffffffff);
2556                wrmsrl(reg1->reg + 1, filter >> 32);
2557        }
2558
2559        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
2560}
2561
/* Cbox ops: common SNB-EP MSR handlers with HSW-EP filter-aware enable/config. */
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
        .init_box               = snbep_uncore_msr_init_box,
        .disable_box            = snbep_uncore_msr_disable_box,
        .enable_box             = snbep_uncore_msr_enable_box,
        .disable_event          = snbep_uncore_msr_disable_event,
        .enable_event           = hswep_cbox_enable_event,
        .read_counter           = uncore_msr_read_counter,
        .hw_config              = hswep_cbox_hw_config,
        .get_constraint         = hswep_cbox_get_constraint,
        .put_constraint         = snbep_cbox_put_constraint,
};
2573
/*
 * HSW-EP Cbox PMON unit.  num_boxes is the maximum (18); it is trimmed
 * to the actual core count in hswep_uncore_cpu_init().
 */
static struct intel_uncore_type hswep_uncore_cbox = {
        .name                   = "cbox",
        .num_counters           = 4,
        .num_boxes              = 18,
        .perf_ctr_bits          = 48,
        .event_ctl              = HSWEP_C0_MSR_PMON_CTL0,
        .perf_ctr               = HSWEP_C0_MSR_PMON_CTR0,
        .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = HSWEP_C0_MSR_PMON_BOX_CTL,
        .msr_offset             = HSWEP_CBO_MSR_OFFSET,
        .num_shared_regs        = 1,
        .constraints            = hswep_uncore_cbox_constraints,
        .ops                    = &hswep_uncore_cbox_ops,
        .format_group           = &hswep_uncore_cbox_format_group,
};
2589
2590/*
2591 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
2592 */
static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
{
        unsigned msr = uncore_msr_box_ctl(box);

        if (msr) {
                u64 init = SNBEP_PMON_BOX_CTL_INT;
                u64 flags = 0;
                int i;

                /*
                 * Accumulate the init bits one at a time, re-writing the
                 * box-control MSR after each, rather than writing
                 * SNBEP_PMON_BOX_CTL_INT in one shot (see comment above).
                 * The cast is safe on x86-64: unsigned long is 64-bit.
                 */
                for_each_set_bit(i, (unsigned long *)&init, 64) {
                        flags |= (1ULL << i);
                        wrmsrl(msr, flags);
                }
        }
}
2608
/* SBOX ops: common MSR handlers minus init_box, overridden with the bit-by-bit variant. */
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
        __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .init_box               = hswep_uncore_sbox_msr_init_box
};
2613
/* sysfs "format" attributes exposed for the HSW-EP SBOX PMU. */
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_tid_en.attr,
        &format_attr_inv.attr,
        &format_attr_thresh8.attr,
        NULL,
};
2623
/* Attribute group backing the HSW-EP SBOX "format" sysfs directory. */
static const struct attribute_group hswep_uncore_sbox_format_group = {
        .name = "format",
        .attrs = hswep_uncore_sbox_formats_attr,
};
2628
/*
 * HSW-EP SBOX PMON unit.  num_boxes defaults to 4; it is lowered to 2
 * for the parts detected in hswep_uncore_cpu_init() via CAPID4.
 */
static struct intel_uncore_type hswep_uncore_sbox = {
        .name                   = "sbox",
        .num_counters           = 4,
        .num_boxes              = 4,
        .perf_ctr_bits          = 44,
        .event_ctl              = HSWEP_S0_MSR_PMON_CTL0,
        .perf_ctr               = HSWEP_S0_MSR_PMON_CTR0,
        .event_mask             = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = HSWEP_S0_MSR_PMON_BOX_CTL,
        .msr_offset             = HSWEP_SBOX_MSR_OFFSET,
        .ops                    = &hswep_uncore_sbox_msr_ops,
        .format_group           = &hswep_uncore_sbox_format_group,
};
2642
/*
 * PCU event setup: events 0xb-0xe take a filter value from the event's
 * config1 (presumably the PCU band events -- confirm against the uncore
 * PMON reference).  Always succeeds.
 */
static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
        int ev_sel = hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK;

        if (ev_sel >= 0xb && ev_sel <= 0xe) {
                reg1->reg = HSWEP_PCU_MSR_PMON_BOX_FILTER;
                reg1->idx = ev_sel - 0xb;
                /*
                 * NOTE(review): shifting 0xff by idx (0..3 bits) rather than
                 * idx * 8 looks odd if the filter holds one byte per band,
                 * but it matches the SNB-EP sibling code -- verify against
                 * the PCU filter register layout before changing.
                 */
                reg1->config = event->attr.config1 & (0xff << reg1->idx);
        }
        return 0;
}
2656
/* PCU ops: common MSR accessors plus band-filter config and SNB-EP constraints. */
static struct intel_uncore_ops hswep_uncore_pcu_ops = {
        SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
        .hw_config              = hswep_pcu_hw_config,
        .get_constraint         = snbep_pcu_get_constraint,
        .put_constraint         = snbep_pcu_put_constraint,
};
2663
/* HSW-EP PCU PMON unit: one box, four 48-bit counters, one shared filter reg. */
static struct intel_uncore_type hswep_uncore_pcu = {
        .name                   = "pcu",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .perf_ctr               = HSWEP_PCU_MSR_PMON_CTR0,
        .event_ctl              = HSWEP_PCU_MSR_PMON_CTL0,
        .event_mask             = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
        .box_ctl                = HSWEP_PCU_MSR_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &hswep_uncore_pcu_ops,
        .format_group           = &snbep_uncore_pcu_format_group,
};
2677
/* All HSW-EP MSR-based uncore PMU types, installed by hswep_uncore_cpu_init(). */
static struct intel_uncore_type *hswep_msr_uncores[] = {
        &hswep_uncore_ubox,
        &hswep_uncore_cbox,
        &hswep_uncore_sbox,
        &hswep_uncore_pcu,
        NULL,
};
2685
/*
 * Finalize the HSW-EP MSR uncore tables for the boot package: clamp the
 * Cbox count to the real core count, detect the two-SBOX parts, and
 * install the type table.
 */
void hswep_uncore_cpu_init(void)
{
        /*
         * NOTE(review): rh_boot_cpu_data looks like a distro-specific
         * mirror of boot_cpu_data (upstream reads
         * boot_cpu_data.logical_proc_id here) -- confirm they stay in sync.
         */
        int pkg = rh_boot_cpu_data.logical_proc_id;

        /* Never expose more Cboxes than the package has cores. */
        if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

        /* Detect 6-8 core systems with only two SBOXes */
        if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
                u32 capid4;

                pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
                                      0x94, &capid4);
                /* CAPID4 bits [7:6] == 0 identify the two-SBOX parts. */
                if (((capid4 >> 6) & 0x3) == 0)
                        hswep_uncore_sbox.num_boxes = 2;
        }

        uncore_msr_uncores = hswep_msr_uncores;
}
2705
/* HSW-EP Home Agent PMON unit; uses the common SNB-EP PCI register layout. */
static struct intel_uncore_type hswep_uncore_ha = {
        .name           = "ha",
        .num_counters   = 4,
        .num_boxes      = 2,
        .perf_ctr_bits  = 48,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2713
/*
 * Named IMC events.  The CAS count scale (6.103515625e-5 = 64/2^20)
 * converts 64-byte cache-line transfers to MiB.
 */
static struct uncore_event_desc hswep_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,      "event=0x00,umask=0x00"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read,  "event=0x04,umask=0x03"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(cas_count_read.unit, "MiB"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(cas_count_write.unit, "MiB"),
        { /* end: all zeroes */ },
};
2724
/* HSW-EP IMC PMON unit: eight channels, each with a fixed DCLK counter. */
static struct intel_uncore_type hswep_uncore_imc = {
        .name           = "imc",
        .num_counters   = 4,
        .num_boxes      = 8,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .fixed_ctr      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
        .fixed_ctl      = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
        .event_descs    = hswep_uncore_imc_events,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2736
/* PCI config-space offsets of the four IRP counters (low dword; high at +4). */
static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2738
2739static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2740{
2741        struct pci_dev *pdev = box->pci_dev;
2742        struct hw_perf_event *hwc = &event->hw;
2743        u64 count = 0;
2744
2745        pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2746        pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2747
2748        return count;
2749}
2750
/* IRP ops: SNB-EP box control, IVB-EP event enable/disable, custom counter read. */
static struct intel_uncore_ops hswep_uncore_irp_ops = {
        .init_box       = snbep_uncore_pci_init_box,
        .disable_box    = snbep_uncore_pci_disable_box,
        .enable_box     = snbep_uncore_pci_enable_box,
        .disable_event  = ivbep_uncore_irp_disable_event,
        .enable_event   = ivbep_uncore_irp_enable_event,
        .read_counter   = hswep_uncore_irp_read_counter,
};
2759
/*
 * HSW-EP IRP PMON unit.  No .perf_ctr/.event_ctl: counter access goes
 * through the custom read_counter and the IVB-EP enable/disable ops.
 */
static struct intel_uncore_type hswep_uncore_irp = {
        .name                   = "irp",
        .num_counters           = 4,
        .num_boxes              = 1,
        .perf_ctr_bits          = 48,
        .event_mask             = SNBEP_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .ops                    = &hswep_uncore_irp_ops,
        .format_group           = &snbep_uncore_format_group,
};
2770
/* HSW-EP QPI PMON unit: three ports, shared-reg backed match/mask filters. */
static struct intel_uncore_type hswep_uncore_qpi = {
        .name                   = "qpi",
        .num_counters           = 4,
        .num_boxes              = 3,
        .perf_ctr_bits          = 48,
        .perf_ctr               = SNBEP_PCI_PMON_CTR0,
        .event_ctl              = SNBEP_PCI_PMON_CTL0,
        .event_mask             = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
        .box_ctl                = SNBEP_PCI_PMON_BOX_CTL,
        .num_shared_regs        = 1,
        .ops                    = &snbep_uncore_qpi_ops,
        .format_group           = &snbep_uncore_qpi_format_group,
};
2784
/* Per-event counter constraints for the HSW-EP R2PCIe (event, counter mask). */
static struct event_constraint hswep_uncore_r2pcie_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x24, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x27, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2a, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x2b, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
        EVENT_CONSTRAINT_END
};
2806
/* HSW-EP R2PCIe PMON unit; uses the common SNB-EP PCI register layout. */
static struct intel_uncore_type hswep_uncore_r2pcie = {
        .name           = "r2pcie",
        .num_counters   = 4,
        .num_boxes      = 1,
        .perf_ctr_bits  = 48,
        .constraints    = hswep_uncore_r2pcie_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2815
/* Per-event counter constraints for the HSW-EP R3QPI (event, counter mask). */
static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x01, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
        UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
        UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
        EVENT_CONSTRAINT_END
};
2852
/* HSW-EP R3QPI PMON unit: three links, 44-bit counters, common SNB-EP PCI layout. */
static struct intel_uncore_type hswep_uncore_r3qpi = {
        .name           = "r3qpi",
        .num_counters   = 3,
        .num_boxes      = 3,
        .perf_ctr_bits  = 44,
        .constraints    = hswep_uncore_r3qpi_constraints,
        SNBEP_UNCORE_PCI_COMMON_INIT(),
};
2861
/* Indices into hswep_pci_uncores[], also encoded in the PCI ID driver_data. */
enum {
        HSWEP_PCI_UNCORE_HA,
        HSWEP_PCI_UNCORE_IMC,
        HSWEP_PCI_UNCORE_IRP,
        HSWEP_PCI_UNCORE_QPI,
        HSWEP_PCI_UNCORE_R2PCIE,
        HSWEP_PCI_UNCORE_R3QPI,
};
2870
/* All HSW-EP PCI-based uncore PMU types, indexed by the enum above. */
static struct intel_uncore_type *hswep_pci_uncores[] = {
        [HSWEP_PCI_UNCORE_HA]   = &hswep_uncore_ha,
        [HSWEP_PCI_UNCORE_IMC]  = &hswep_uncore_imc,
        [HSWEP_PCI_UNCORE_IRP]  = &hswep_uncore_irp,
        [HSWEP_PCI_UNCORE_QPI]  = &hswep_uncore_qpi,
        [HSWEP_PCI_UNCORE_R2PCIE]       = &hswep_uncore_r2pcie,
        [HSWEP_PCI_UNCORE_R3QPI]        = &hswep_uncore_r3qpi,
        NULL,
};
2880
/*
 * PCI device IDs of the Haswell-EP uncore PMON units.  driver_data packs
 * the index into hswep_pci_uncores[] plus the box number for that type,
 * via UNCORE_PCI_DEV_DATA().  Filter/capability devices are tagged
 * UNCORE_EXTRA_PCI_DEV: they carry no counters themselves but are looked
 * up by other boxes (e.g. the QPI port filters).
 */
static const struct pci_device_id hswep_uncore_pci_ids[] = {
	{ /* Home Agent 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
	},
	{ /* Home Agent 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f38),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 1),
	},
	{ /* MC0 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 0),
	},
	{ /* MC0 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 1),
	},
	{ /* MC0 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 2),
	},
	{ /* MC0 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fb5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 3),
	},
	{ /* MC1 Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd0),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 4),
	},
	{ /* MC1 Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd1),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 5),
	},
	{ /* MC1 Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd4),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 6),
	},
	{ /* MC1 Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fd5),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IMC, 7),
	},
	{ /* IRP */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f39),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_IRP, 0),
	},
	{ /* QPI0 Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f32),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 0),
	},
	{ /* QPI0 Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f33),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 1),
	},
	{ /* QPI1 Port 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3a),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_QPI, 2),
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f34),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R2PCIE, 0),
	},
	{ /* R3QPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f36),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 0),
	},
	{ /* R3QPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f37),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 1),
	},
	{ /* R3QPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f3e),
		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_R3QPI, 2),
	},
	{ /* QPI Port 0 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f86),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT0_FILTER),
	},
	{ /* QPI Port 1 filter  */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f96),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   SNBEP_PCI_QPI_PORT1_FILTER),
	},
	{ /* PCU.3 (for Capability registers) */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
						   HSWEP_PCI_PCU_3),
	},
	{ /* end: all zeroes */ }
};
2971
/* Driver shell matching the Haswell-EP uncore PMON PCI devices above. */
static struct pci_driver hswep_uncore_pci_driver = {
	.name		= "hswep_uncore",
	.id_table	= hswep_uncore_pci_ids,
};
2976
2977int hswep_uncore_pci_init(void)
2978{
2979        int ret = snbep_pci2phy_map_init(0x2f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
2980        if (ret)
2981                return ret;
2982        uncore_pci_uncores = hswep_pci_uncores;
2983        uncore_pci_driver = &hswep_uncore_pci_driver;
2984        return 0;
2985}
2986/* end of Haswell-EP uncore support */
2987
2988/* BDX uncore support */
2989
/* BDX UBox PMON: reuses the Haswell-EP MSR layout (HSWEP_U_* registers). */
static struct intel_uncore_type bdx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.num_shared_regs	= 1,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3005
/* BDX C-Box events restricted to a subset of the four counters
 * (second argument is the permitted-counter mask, per UNCORE_EVENT_CONSTRAINT). */
static struct event_constraint bdx_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x3e, 0x1),
	EVENT_CONSTRAINT_END
};
3013
/*
 * BDX C-Box PMON (one box per core; num_boxes is clamped to the actual
 * core count in bdx_uncore_cpu_init()).  Register layout is shared with
 * Haswell-EP (HSWEP_C0_* base + HSWEP_CBO_MSR_OFFSET per box).
 */
static struct intel_uncore_type bdx_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 4,
	.num_boxes		= 24,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= bdx_uncore_cbox_constraints,
	.ops			= &hswep_uncore_cbox_ops,
	.format_group		= &hswep_uncore_cbox_format_group,
};
3029
/* NULL-terminated list of the MSR-based BDX uncore types.
 * The PCU type is shared with Haswell-EP (constraints patched at init). */
static struct intel_uncore_type *bdx_msr_uncores[] = {
	&bdx_uncore_ubox,
	&bdx_uncore_cbox,
	&hswep_uncore_pcu,
	NULL,
};
3036
/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
static struct event_constraint bdx_uncore_pcu_constraints[] = {
	EVENT_CONSTRAINT(0x80, 0xe, 0x80),
	EVENT_CONSTRAINT_END
};
3042
3043void bdx_uncore_cpu_init(void)
3044{
3045        if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
3046                bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
3047        uncore_msr_uncores = bdx_msr_uncores;
3048
3049        hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
3050}
3051
/* BDX Home Agent PMON; common SNB-EP PCI register layout. */
static struct intel_uncore_type bdx_uncore_ha = {
	.name		= "ha",
	.num_counters   = 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3059
/* BDX memory controller channel PMON: four general counters plus the
 * per-channel fixed (DCLK) counter; event list shared with Haswell-EP. */
static struct intel_uncore_type bdx_uncore_imc = {
	.name		= "imc",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3071
/* BDX IRP PMON: uses the Haswell-EP IRP ops (non-standard counter
 * offsets handled there), so no perf_ctr/event_ctl fields here. */
static struct intel_uncore_type bdx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.ops			= &hswep_uncore_irp_ops,
	.format_group		= &snbep_uncore_format_group,
};
3082
/* BDX QPI link-layer PMON; one shared (match/mask filter) register set,
 * handled by the SNB-EP QPI ops. */
static struct intel_uncore_type bdx_uncore_qpi = {
	.name			= "qpi",
	.num_counters		= 4,
	.num_boxes		= 3,
	.perf_ctr_bits		= 48,
	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
	.event_ctl		= SNBEP_PCI_PMON_CTL0,
	.event_mask		= SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &snbep_uncore_qpi_ops,
	.format_group		= &snbep_uncore_qpi_format_group,
};
3096
/* BDX R2PCIe events restricted to specific counters
 * (per UNCORE_EVENT_CONSTRAINT: event code, allowed-counter mask). */
static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	EVENT_CONSTRAINT_END
};
3109
/* BDX R2PCIe (ring-to-PCIe) PMON; common SNB-EP PCI layout. */
static struct intel_uncore_type bdx_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters   = 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3118
/* BDX R3QPI counter restrictions: most events are limited to counters
 * 0-1 (mask 0x3); a few can use all three (0x7), one only counter 0 (0x1). */
static struct event_constraint bdx_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x08, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x09, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0a, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x0e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x14, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x15, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x28, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x29, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2c, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2e, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x2f, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	EVENT_CONSTRAINT_END
};
3152
/* BDX R3QPI (ring-to-QPI) PMON; only 3 general counters per box. */
static struct intel_uncore_type bdx_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters   = 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= bdx_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};
3161
/* Indices into bdx_pci_uncores[]; encoded into pci_device_id.driver_data. */
enum {
	BDX_PCI_UNCORE_HA,
	BDX_PCI_UNCORE_IMC,
	BDX_PCI_UNCORE_IRP,
	BDX_PCI_UNCORE_QPI,
	BDX_PCI_UNCORE_R2PCIE,
	BDX_PCI_UNCORE_R3QPI,
};
3170
/* NULL-terminated list of PCI-based BDX uncore types, indexed by the
 * BDX_PCI_UNCORE_* enum above. */
static struct intel_uncore_type *bdx_pci_uncores[] = {
	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
	[BDX_PCI_UNCORE_QPI]	= &bdx_uncore_qpi,
	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
	[BDX_PCI_UNCORE_R3QPI]	= &bdx_uncore_r3qpi,
	NULL,
};
3180
3181static DEFINE_PCI_DEVICE_TABLE(bdx_uncore_pci_ids) = {
3182        { /* Home Agent 0 */
3183                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
3184                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
3185        },
3186        { /* Home Agent 1 */
3187                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f38),
3188                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 1),
3189        },
3190        { /* MC0 Channel 0 */
3191                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
3192                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
3193        },
3194        { /* MC0 Channel 1 */
3195                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
3196                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
3197        },
3198        { /* MC0 Channel 2 */
3199                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb4),
3200                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 2),
3201        },
3202        { /* MC0 Channel 3 */
3203                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb5),
3204                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 3),
3205        },
3206        { /* MC1 Channel 0 */
3207                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd0),
3208                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 4),
3209        },
3210        { /* MC1 Channel 1 */
3211                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd1),
3212                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 5),
3213        },
3214        { /* MC1 Channel 2 */
3215                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd4),
3216                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 6),
3217        },
3218        { /* MC1 Channel 3 */
3219                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fd5),
3220                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 7),
3221        },
3222        { /* IRP */
3223                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
3224                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
3225        },
3226        { /* QPI0 Port 0 */
3227                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f32),
3228                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 0),
3229        },
3230        { /* QPI0 Port 1 */
3231                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f33),
3232                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 1),
3233        },
3234        { /* QPI1 Port 2 */
3235                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3a),
3236                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_QPI, 2),
3237        },
3238        { /* R2PCIe */
3239                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
3240                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
3241        },
3242        { /* R3QPI0 Link 0 */
3243                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f36),
3244                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 0),
3245        },
3246        { /* R3QPI0 Link 1 */
3247                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f37),
3248                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 1),
3249        },
3250        { /* R3QPI1 Link 2 */
3251                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f3e),
3252                .driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R3QPI, 2),
3253        },
3254        { /* QPI Port 0 filter  */
3255                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3256                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0),
3257        },
3258        { /* QPI Port 1 filter  */
3259                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3260                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1),
3261        },
3262        { /* QPI Port 2 filter  */
3263                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3264                .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
3265        },
3266        { /* end: all zeroes */ }
3267};
3268
/* Driver shell matching the Broadwell-EP uncore PMON PCI devices above. */
static struct pci_driver bdx_uncore_pci_driver = {
	.name		= "bdx_uncore",
	.id_table	= bdx_uncore_pci_ids,
};
3273
3274int bdx_uncore_pci_init(void)
3275{
3276        int ret = snbep_pci2phy_map_init(0x6f1e, SNBEP_CPUNODEID, SNBEP_GIDNIDMAP, true);
3277
3278        if (ret)
3279                return ret;
3280        uncore_pci_uncores = bdx_pci_uncores;
3281        uncore_pci_driver = &bdx_uncore_pci_driver;
3282        return 0;
3283}
3284
3285/* end of BDX uncore support */
3286
3287/* SKX uncore support */
3288
/* SKX UBox PMON: same MSR layout as Haswell-EP, but no shared regs. */
static struct intel_uncore_type skx_uncore_ubox = {
	.name			= "ubox",
	.num_counters		= 2,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.fixed_ctr_bits		= 48,
	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops			= &ivbep_uncore_msr_ops,
	.format_group		= &ivbep_uncore_ubox_format_group,
};
3303
/* sysfs "format" attributes exposed for SKX CHA events: base event
 * fields plus the CHA filter-register fields. */
static struct attribute *skx_uncore_cha_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_tid_en.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_filter_tid4.attr,
	&format_attr_filter_state5.attr,
	&format_attr_filter_rem.attr,
	&format_attr_filter_loc.attr,
	&format_attr_filter_nm.attr,
	&format_attr_filter_all_op.attr,
	&format_attr_filter_not_nm.attr,
	&format_attr_filter_opc_0.attr,
	&format_attr_filter_opc_1.attr,
	&format_attr_filter_nc.attr,
	&format_attr_filter_isoc.attr,
	NULL,
};
3324
/* sysfs "format" group for the SKX CHA PMU. */
static const struct attribute_group skx_uncore_chabox_format_group = {
	.name = "format",
	.attrs = skx_uncore_cha_formats_attr,
};
3329
/* SKX CHA events 0x11 and 0x36 may only run on counter 0. */
static struct event_constraint skx_uncore_chabox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	EVENT_CONSTRAINT_END
};
3335
/*
 * Map (event|umask) values to the CHA filter fields they require;
 * the ->idx bits are OR'ed together in skx_cha_hw_config() and then
 * expanded to a filter-register mask by skx_cha_filter_mask().
 */
static struct extra_reg skx_uncore_cha_extra_regs[] = {
	SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
	SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
	SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
	EVENT_EXTRA_END
};
3347
3348static u64 skx_cha_filter_mask(int fields)
3349{
3350        u64 mask = 0;
3351
3352        if (fields & 0x1)
3353                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID;
3354        if (fields & 0x2)
3355                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
3356        if (fields & 0x4)
3357                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
3358        if (fields & 0x8) {
3359                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
3360                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
3361                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
3362                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
3363                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
3364                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
3365                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
3366                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
3367                mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
3368        }
3369        return mask;
3370}
3371
/* Constraint lookup for SKX CHA events: defer to the shared C-Box
 * helper, passing the CHA-specific filter-mask builder. */
static struct event_constraint *
skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
}
3377
3378static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
3379{
3380        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
3381        struct extra_reg *er;
3382        int idx = 0;
3383
3384        for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
3385                if (er->event != (event->hw.config & er->config_mask))
3386                        continue;
3387                idx |= er->idx;
3388        }
3389
3390        if (idx) {
3391                reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
3392                            HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
3393                reg1->config = event->attr.config1 & skx_cha_filter_mask(idx);
3394                reg1->idx = idx;
3395        }
3396        return 0;
3397}
3398
/* SKX CHA box ops: Haswell-EP C-Box style enable with SKX filter handling. */
static struct intel_uncore_ops skx_uncore_chabox_ops = {
	/* There is no frz_en for chabox ctl */
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= hswep_cbox_enable_event,
	.read_counter		= uncore_msr_read_counter,
	.hw_config		= skx_cha_hw_config,
	.get_constraint		= skx_cha_get_constraint,
	.put_constraint		= snbep_cbox_put_constraint,
};
3411
/*
 * SKX CHA PMON.  num_boxes is left unset here; it varies per SKU and is
 * filled in by skx_uncore_cpu_init() via skx_count_chabox().
 */
static struct intel_uncore_type skx_uncore_chabox = {
	.name			= "cha",
	.num_counters		= 4,
	.perf_ctr_bits		= 48,
	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
	.num_shared_regs	= 1,
	.constraints		= skx_uncore_chabox_constraints,
	.ops			= &skx_uncore_chabox_ops,
	.format_group		= &skx_uncore_chabox_format_group,
};
3426
/* sysfs "format" attributes for SKX IIO events (incl. channel/FC masks). */
static struct attribute *skx_uncore_iio_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh9.attr,
	&format_attr_ch_mask.attr,
	&format_attr_fc_mask.attr,
	NULL,
};
3437
/* sysfs "format" group for the SKX IIO PMU. */
static const struct attribute_group skx_uncore_iio_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_formats_attr,
};
3442
/* SKX IIO events limited to counters 0-1 (0x3) or 2-3 (0xc). */
static struct event_constraint skx_uncore_iio_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x88, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x95, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
	UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
	EVENT_CONSTRAINT_END
};
3452
3453static void skx_iio_enable_event(struct intel_uncore_box *box,
3454                                 struct perf_event *event)
3455{
3456        struct hw_perf_event *hwc = &event->hw;
3457
3458        wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
3459}
3460
/* SKX IIO box ops: common IVB-EP/SNB-EP MSR handling, SKX enable path. */
static struct intel_uncore_ops skx_uncore_iio_ops = {
	.init_box		= ivbep_uncore_msr_init_box,
	.disable_box		= snbep_uncore_msr_disable_box,
	.enable_box		= snbep_uncore_msr_enable_box,
	.disable_event		= snbep_uncore_msr_disable_event,
	.enable_event		= skx_iio_enable_event,
	.read_counter		= uncore_msr_read_counter,
};
3469
/* SKX IIO PMON: six stacks, MSR based, with extended event-mask bits. */
static struct intel_uncore_type skx_uncore_iio = {
	.name			= "iio",
	.num_counters		= 4,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IIO0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IIO0_MSR_PMON_CTR0,
	.event_mask		= SKX_IIO_PMON_RAW_EVENT_MASK,
	.event_mask_ext		= SKX_IIO_PMON_RAW_EVENT_MASK_EXT,
	.box_ctl		= SKX_IIO0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IIO_MSR_OFFSET,
	.constraints		= skx_uncore_iio_constraints,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_iio_format_group,
};
3485
/* Free-running counter groups in the SKX IIO; index skx_iio_freerunning[]. */
enum perf_uncore_iio_freerunning_type_id {
	SKX_IIO_MSR_IOCLK			= 0,
	SKX_IIO_MSR_BW				= 1,
	SKX_IIO_MSR_UTIL			= 2,

	SKX_IIO_FREERUNNING_TYPE_MAX,
};
3493
3494
/*
 * Free-running counter layout per group.  Initializer order follows
 * struct freerunning_counters — presumably { base MSR, counter offset,
 * box offset, number of counters, counter width } (verify in uncore.h).
 */
static struct freerunning_counters skx_iio_freerunning[] = {
	[SKX_IIO_MSR_IOCLK]	= { 0xa45, 0x1, 0x20, 1, 36 },
	[SKX_IIO_MSR_BW]	= { 0xb00, 0x1, 0x10, 8, 36 },
	[SKX_IIO_MSR_UTIL]	= { 0xb08, 0x1, 0x10, 8, 36 },
};
3500
/*
 * Event descriptions for the SKX IIO free-running counters.  All use the
 * pseudo event 0xff; the umask selects the group and counter.  Bandwidth
 * counters are scaled by 4/2^20 (3.814697266e-6) to report MiB.
 */
static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
	/* Free-Running IO CLOCKS Counter */
	INTEL_UNCORE_EVENT_DESC(ioclk,			"event=0xff,umask=0x10"),
	/* Free-Running IIO BANDWIDTH Counters */
	INTEL_UNCORE_EVENT_DESC(bw_in_port0,		"event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1,		"event=0xff,umask=0x21"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2,		"event=0xff,umask=0x22"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3,		"event=0xff,umask=0x23"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0,		"event=0xff,umask=0x24"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1,		"event=0xff,umask=0x25"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2,		"event=0xff,umask=0x26"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit,	"MiB"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3,		"event=0xff,umask=0x27"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale,	"3.814697266e-6"),
	INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit,	"MiB"),
	/* Free-running IIO UTILIZATION Counters */
	INTEL_UNCORE_EVENT_DESC(util_in_port0,		"event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(util_out_port0,		"event=0xff,umask=0x31"),
	INTEL_UNCORE_EVENT_DESC(util_in_port1,		"event=0xff,umask=0x32"),
	INTEL_UNCORE_EVENT_DESC(util_out_port1,		"event=0xff,umask=0x33"),
	INTEL_UNCORE_EVENT_DESC(util_in_port2,		"event=0xff,umask=0x34"),
	INTEL_UNCORE_EVENT_DESC(util_out_port2,		"event=0xff,umask=0x35"),
	INTEL_UNCORE_EVENT_DESC(util_in_port3,		"event=0xff,umask=0x36"),
	INTEL_UNCORE_EVENT_DESC(util_out_port3,		"event=0xff,umask=0x37"),
	{ /* end: all zeroes */ },
};
3540
/* Free-running counters cannot be controlled — read-only ops. */
static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
	.read_counter		= uncore_msr_read_counter,
};
3544
/* Only event and umask apply to free-running counters. */
static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};
3550
/* sysfs "format" group for the SKX IIO free-running PMU. */
static const struct attribute_group skx_uncore_iio_freerunning_format_group = {
	.name = "format",
	.attrs = skx_uncore_iio_freerunning_formats_attr,
};
3555
/* SKX IIO free-running counter PMU: 1 ioclk + 8 bw + 8 util = 17 counters. */
static struct intel_uncore_type skx_uncore_iio_free_running = {
	.name			= "iio_free_running",
	.num_counters		= 17,
	.num_boxes		= 6,
	.num_freerunning_types	= SKX_IIO_FREERUNNING_TYPE_MAX,
	.freerunning		= skx_iio_freerunning,
	.ops			= &skx_uncore_iio_freerunning_ops,
	.event_descs		= skx_uncore_iio_freerunning_events,
	.format_group		= &skx_uncore_iio_freerunning_format_group,
};
3566
/* Generic SKX "format" attributes (boxes without extra filter fields). */
static struct attribute *skx_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
3575
/* Generic SKX sysfs "format" group (IRP, IMC, ...). */
static const struct attribute_group skx_uncore_format_group = {
	.name = "format",
	.attrs = skx_uncore_formats_attr,
};
3580
/* SKX IRP PMON: MSR based on SKX (one per IIO stack), reuses the IIO ops. */
static struct intel_uncore_type skx_uncore_irp = {
	.name			= "irp",
	.num_counters		= 2,
	.num_boxes		= 6,
	.perf_ctr_bits		= 48,
	.event_ctl		= SKX_IRP0_MSR_PMON_CTL0,
	.perf_ctr		= SKX_IRP0_MSR_PMON_CTR0,
	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl		= SKX_IRP0_MSR_PMON_BOX_CTL,
	.msr_offset		= SKX_IRP_MSR_OFFSET,
	.ops			= &skx_uncore_iio_ops,
	.format_group		= &skx_uncore_format_group,
};
3594
/* sysfs "format" attributes for the SKX PCU, including occupancy
 * controls and the four frequency-band filters. */
static struct attribute *skx_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge_det.attr,
	&format_attr_filter_band0.attr,
	&format_attr_filter_band1.attr,
	&format_attr_filter_band2.attr,
	&format_attr_filter_band3.attr,
	NULL,
};
3609
3610static struct attribute_group skx_uncore_pcu_format_group = {
3611        .name = "format",
3612        .attrs = skx_uncore_pcu_formats_attr,
3613};
3614
/* SKX PCU box ops: IVB-EP common MSR ops plus HSW-EP config/constraints. */
static struct intel_uncore_ops skx_uncore_pcu_ops = {
	IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
	.hw_config		= hswep_pcu_hw_config,
	.get_constraint		= snbep_pcu_get_constraint,
	.put_constraint		= snbep_pcu_put_constraint,
};
3621
/* SKX PCU (power control unit) PMON; Haswell-EP register layout. */
static struct intel_uncore_type skx_uncore_pcu = {
	.name			= "pcu",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	.perf_ctr		= HSWEP_PCU_MSR_PMON_CTR0,
	.event_ctl		= HSWEP_PCU_MSR_PMON_CTL0,
	.event_mask		= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl		= HSWEP_PCU_MSR_PMON_BOX_CTL,
	.num_shared_regs	= 1,
	.ops			= &skx_uncore_pcu_ops,
	.format_group		= &skx_uncore_pcu_format_group,
};
3635
/* NULL-terminated list of the MSR-based SKX uncore types. */
static struct intel_uncore_type *skx_msr_uncores[] = {
	&skx_uncore_ubox,
	&skx_uncore_chabox,
	&skx_uncore_iio,
	&skx_uncore_iio_free_running,
	&skx_uncore_irp,
	&skx_uncore_pcu,
	NULL,
};
3645
3646static int skx_count_chabox(void)
3647{
3648        struct pci_dev *chabox_dev = NULL;
3649        int bus, count = 0;
3650
3651        while (1) {
3652                chabox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x208d, chabox_dev);
3653                if (!chabox_dev)
3654                        break;
3655                if (count == 0)
3656                        bus = chabox_dev->bus->number;
3657                if (bus != chabox_dev->bus->number)
3658                        break;
3659                count++;
3660        }
3661
3662        pci_dev_put(chabox_dev);
3663        return count;
3664}
3665
/*
 * Register the MSR-based SKX uncore PMUs.  The CHA box count varies
 * across parts, so it is discovered at runtime from the PCI bus.
 */
void skx_uncore_cpu_init(void)
{
	skx_uncore_chabox.num_boxes = skx_count_chabox();
	uncore_msr_uncores = skx_msr_uncores;
}
3671
/*
 * SKX IMC PMON: six channel boxes, each with four 48-bit general
 * counters plus one fixed counter, accessed via PCI config space
 * using the SNB-EP register layout and the HSW-EP event descriptions.
 */
static struct intel_uncore_type skx_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 6,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= hswep_uncore_imc_events,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3688
/*
 * UPI event format attributes: extended event and umask fields
 * (the UPI umask spills past the common 8-bit field), plus the
 * standard edge/invert/threshold controls.
 */
static struct attribute *skx_upi_uncore_formats_attr[] = {
	&format_attr_event_ext.attr,
	&format_attr_umask_ext.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};
3697
/* sysfs "format" directory for the UPI PMU. */
static const struct attribute_group skx_upi_uncore_format_group = {
	.name = "format",
	.attrs = skx_upi_uncore_formats_attr,
};
3702
3703static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
3704{
3705        struct pci_dev *pdev = box->pci_dev;
3706
3707        __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
3708        pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
3709}
3710
/*
 * UPI box ops: SKX-specific init (8-byte ctl stride), everything
 * else shared with SNB-EP PCI uncore handling.
 */
static struct intel_uncore_ops skx_upi_uncore_pci_ops = {
	.init_box	= skx_upi_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
3719
/*
 * SKX UPI PMON: up to three link boxes with four 48-bit counters
 * each.  The event umask extends beyond the common mask, hence
 * event_mask_ext and the dedicated format group above.
 */
static struct intel_uncore_type skx_uncore_upi = {
	.name		= "upi",
	.num_counters	= 4,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_UPI_PCI_PMON_CTR0,
	.event_ctl	= SKX_UPI_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
	.box_ctl	= SKX_UPI_PCI_PMON_BOX_CTL,
	.ops		= &skx_upi_uncore_pci_ops,
	.format_group	= &skx_upi_uncore_format_group,
};
3733
3734static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
3735{
3736        struct pci_dev *pdev = box->pci_dev;
3737
3738        __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
3739        pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
3740}
3741
/*
 * M2M box ops: SKX-specific init (8-byte ctl stride), remaining
 * callbacks shared with SNB-EP PCI uncore handling.
 */
static struct intel_uncore_ops skx_m2m_uncore_pci_ops = {
	.init_box	= skx_m2m_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
3750
/* SKX M2M PMON: two boxes, four 48-bit counters each, over PCI. */
static struct intel_uncore_type skx_uncore_m2m = {
	.name		= "m2m",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SKX_M2M_PCI_PMON_CTR0,
	.event_ctl	= SKX_M2M_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SKX_M2M_PCI_PMON_BOX_CTL,
	.ops		= &skx_m2m_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3763
/* M2PCIe: event 0x23 is limited to the first two counters (mask 0x3). */
static struct event_constraint skx_uncore_m2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	EVENT_CONSTRAINT_END
};
3768
/*
 * SKX M2PCIe PMON: four boxes, four 48-bit counters each, using the
 * SNB-EP PCI register layout plus the counter constraint above.
 */
static struct intel_uncore_type skx_uncore_m2pcie = {
	.name		= "m2pcie",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m2pcie_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3782
/*
 * M3UPI counter constraints: events 0x1d/0x1e are tied to counter 0
 * (mask 0x1); events 0x40 and 0x4e-0x52 may use counters 0-2 (0x7).
 */
static struct event_constraint skx_uncore_m3upi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x1d, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x40, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4e, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x4f, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x50, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x51, 0x7),
	UNCORE_EVENT_CONSTRAINT(0x52, 0x7),
	EVENT_CONSTRAINT_END
};
3794
/*
 * SKX M3UPI PMON: three boxes with three 48-bit counters each,
 * SNB-EP PCI register layout, constrained per the table above.
 */
static struct intel_uncore_type skx_uncore_m3upi = {
	.name		= "m3upi",
	.num_counters	= 3,
	.num_boxes	= 3,
	.perf_ctr_bits	= 48,
	.constraints	= skx_uncore_m3upi_constraints,
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,
	.event_ctl	= SNBEP_PCI_PMON_CTL0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,
	.ops		= &ivbep_uncore_pci_ops,
	.format_group	= &skx_uncore_format_group,
};
3808
/* Indices into skx_pci_uncores[]; also encoded in driver_data below. */
enum {
	SKX_PCI_UNCORE_IMC,
	SKX_PCI_UNCORE_M2M,
	SKX_PCI_UNCORE_UPI,
	SKX_PCI_UNCORE_M2PCIE,
	SKX_PCI_UNCORE_M3UPI,
};
3816
/*
 * All PCI-based SKX uncore types, indexed by the enum above and
 * NULL-terminated.  Installed as uncore_pci_uncores at init.
 */
static struct intel_uncore_type *skx_pci_uncores[] = {
	[SKX_PCI_UNCORE_IMC]	= &skx_uncore_imc,
	[SKX_PCI_UNCORE_M2M]	= &skx_uncore_m2m,
	[SKX_PCI_UNCORE_UPI]	= &skx_uncore_upi,
	[SKX_PCI_UNCORE_M2PCIE] = &skx_uncore_m2pcie,
	[SKX_PCI_UNCORE_M3UPI]	= &skx_uncore_m3upi,
	NULL,
};
3825
3826static const struct pci_device_id skx_uncore_pci_ids[] = {
3827        { /* MC0 Channel 0 */
3828                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
3829                .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0),
3830        },
3831        { /* MC0 Channel 1 */
3832                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
3833                .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1),
3834        },
3835        { /* MC0 Channel 2 */
3836                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
3837                .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2),
3838        },
3839        { /* MC1 Channel 0 */
3840                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042),
3841                .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3),
3842        },
3843        { /* MC1 Channel 1 */
3844                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046),
3845                .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4),
3846        },
3847        { /* MC1 Channel 2 */
3848                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a),
3849                .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5),
3850        },
3851        { /* M2M0 */
3852                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
3853                .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0),
3854        },
3855        { /* M2M1 */
3856                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066),
3857                .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1),
3858        },
3859        { /* UPI0 Link 0 */
3860                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
3861                .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, SKX_PCI_UNCORE_UPI, 0),
3862        },
3863        { /* UPI0 Link 1 */
3864                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
3865                .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1),
3866        },
3867        { /* UPI1 Link 2 */
3868                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058),
3869                .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2),
3870        },
3871        { /* M2PCIe 0 */
3872                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3873                .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0),
3874        },
3875        { /* M2PCIe 1 */
3876                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3877                .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1),
3878        },
3879        { /* M2PCIe 2 */
3880                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3881                .driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2),
3882        },
3883        { /* M2PCIe 3 */
3884                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088),
3885                .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
3886        },
3887        { /* M3UPI0 Link 0 */
3888                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
3889                .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
3890        },
3891        { /* M3UPI0 Link 1 */
3892                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
3893                .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
3894        },
3895        { /* M3UPI1 Link 2 */
3896                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
3897                .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
3898        },
3899        { /* end: all zeroes */ }
3900};
3901
3902
/*
 * id_table-only driver; it is handed to the common uncore PCI code
 * via uncore_pci_driver in skx_uncore_pci_init() below.
 */
static struct pci_driver skx_uncore_pci_driver = {
	.name		= "skx_uncore",
	.id_table	= skx_uncore_pci_ids,
};
3907
3908int skx_uncore_pci_init(void)
3909{
3910        /* need to double check pci address */
3911        int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false);
3912
3913        if (ret)
3914                return ret;
3915
3916        uncore_pci_uncores = skx_pci_uncores;
3917        uncore_pci_driver = &skx_uncore_pci_driver;
3918        return 0;
3919}
3920
3921/* end of SKX uncore support */
3922