linux/arch/x86/events/intel/uncore_snb.c
// SPDX-License-Identifier: GPL-2.0
/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC     0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC     0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC  0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC     0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC   0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC     0x1604
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC   0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC   0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC  0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC  0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC  0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC  0x191f

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK                 0x000000ff
#define SNB_UNC_CTL_UMASK_MASK                  0x0000ff00
#define SNB_UNC_CTL_EDGE_DET                    (1 << 18)
#define SNB_UNC_CTL_EN                          (1 << 22)
#define SNB_UNC_CTL_INVERT                      (1 << 23)
#define SNB_UNC_CTL_CMASK_MASK                  0x1f000000
#define NHM_UNC_CTL_CMASK_MASK                  0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN                (1 << 0)

#define SNB_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
                                                 SNB_UNC_CTL_UMASK_MASK | \
                                                 SNB_UNC_CTL_EDGE_DET | \
                                                 SNB_UNC_CTL_INVERT | \
                                                 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
                                                 SNB_UNC_CTL_UMASK_MASK | \
                                                 SNB_UNC_CTL_EDGE_DET | \
                                                 SNB_UNC_CTL_INVERT | \
                                                 NHM_UNC_CTL_CMASK_MASK)

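/*
 * Layout of the per-counter event select registers, as encoded by the
 * masks above: event select in bits 0-7, unit mask in bits 8-15, edge
 * detect in bit 18, enable in bit 22, invert in bit 23. Nehalem widens
 * the counter mask to bits 24-31 where SNB and later use bits 24-28;
 * the "format" attribute groups below export the same layout to sysfs.
 */
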
/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL                 0x391
#define SNB_UNC_FIXED_CTR_CTRL                  0x394
#define SNB_UNC_FIXED_CTR                       0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN                   (1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0               0x700
#define SNB_UNC_CBO_0_PER_CTR0                  0x706
#define SNB_UNC_CBO_MSR_OFFSET                  0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0                    0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0                 0x3b2
#define SNB_UNC_ARB_MSR_OFFSET                  0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL                 0x391
#define NHM_UNC_FIXED_CTR                       0x394
#define NHM_UNC_FIXED_CTR_CTRL                  0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL            ((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC                (1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0                     0x3c0
#define NHM_UNC_UNCORE_PMC0                     0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL                 0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 5) - 1)

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");

/* Sandy Bridge uncore support */
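/*
 * General-purpose counters are enabled by writing the event config plus
 * the enable bit to their control MSR. The fixed counter (clockticks)
 * has no event select, so the enable bit alone suffices.
 */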
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->idx < UNCORE_PMC_IDX_FIXED)
                wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
        else
                wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        wrmsrl(event->hw.config_base, 0);
}

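/*
 * All C-box PMUs share one global control MSR, so only the first PMU
 * instance (pmu_idx == 0) touches it on init and exit.
 */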
static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0) {
                wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
                        SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
        }
}

static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
                SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0)
                wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}

static struct uncore_event_desc snb_uncore_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
        { /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask5.attr,
        NULL,
};

static const struct attribute_group snb_uncore_format_group = {
        .name           = "format",
        .attrs          = snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
        .init_box       = snb_uncore_msr_init_box,
        .enable_box     = snb_uncore_msr_enable_box,
        .exit_box       = snb_uncore_msr_exit_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = snb_uncore_msr_enable_event,
        .read_counter   = uncore_msr_read_counter,
};

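/*
 * ARB events 0x80 and 0x83 may only be scheduled on counter 0 (counter
 * mask 0x1).
 */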
static struct event_constraint snb_uncore_arb_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
        EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
        .name           = "cbox",
        .num_counters   = 2,
        .num_boxes      = 4,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
        .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
        .fixed_ctr      = SNB_UNC_FIXED_CTR,
        .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
        .single_fixed   = 1,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
        .ops            = &snb_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
        .event_descs    = snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
        .name           = "arb",
        .num_counters   = 2,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .perf_ctr       = SNB_UNC_ARB_PER_CTR0,
        .event_ctl      = SNB_UNC_ARB_PERFEVTSEL0,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset     = SNB_UNC_ARB_MSR_OFFSET,
        .constraints    = snb_uncore_arb_constraints,
        .ops            = &snb_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
        &snb_uncore_cbox,
        &snb_uncore_arb,
        NULL,
};

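/*
 * There is one C-box per core, so clamp the number of reported boxes to
 * the actual core count of the part.
 */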
void snb_uncore_cpu_init(void)
{
        uncore_msr_uncores = snb_msr_uncores;
        if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0) {
                wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
                        SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
        }
}

static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
                SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0)
                wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops skl_uncore_msr_ops = {
        .init_box       = skl_uncore_msr_init_box,
        .enable_box     = skl_uncore_msr_enable_box,
        .exit_box       = skl_uncore_msr_exit_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = snb_uncore_msr_enable_event,
        .read_counter   = uncore_msr_read_counter,
};

static struct intel_uncore_type skl_uncore_cbox = {
        .name           = "cbox",
        .num_counters   = 4,
        .num_boxes      = 5,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
        .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
        .fixed_ctr      = SNB_UNC_FIXED_CTR,
        .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
        .single_fixed   = 1,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
        .ops            = &skl_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
        .event_descs    = snb_uncore_events,
};

static struct intel_uncore_type *skl_msr_uncores[] = {
        &skl_uncore_cbox,
        &snb_uncore_arb,
        NULL,
};

void skl_uncore_cpu_init(void)
{
        uncore_msr_uncores = skl_msr_uncores;
        if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
        snb_uncore_arb.ops = &skl_uncore_msr_ops;
}

enum {
        SNB_PCI_UNCORE_IMC,
};

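/*
 * The IMC counters count 64-byte cache lines. The .scale value below is
 * 64 / 2^20 = 6.103515625e-5, which converts raw counts to the MiB that
 * the .unit strings advertise.
 */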
static struct uncore_event_desc snb_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
        INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

        INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
        INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

        { /* end: all zeroes */ },
};

#define SNB_UNCORE_PCI_IMC_EVENT_MASK           0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET           0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE             0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS           0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE      0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES          0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE     0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE             SNB_UNCORE_PCI_IMC_DATA_READS_BASE

static struct attribute *snb_uncore_imc_formats_attr[] = {
        &format_attr_event.attr,
        NULL,
};

static const struct attribute_group snb_uncore_imc_format_group = {
        .name = "format",
        .attrs = snb_uncore_imc_formats_attr,
};

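/*
 * The IMC counters live in MMIO space. Read the (possibly 64-bit) BAR
 * from PCI config offset 0x48, mask it down to a page boundary and map
 * enough of it to cover all the counter registers.
 */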
static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
        resource_size_t addr;
        u32 pci_dword;

        pci_read_config_dword(pdev, where, &pci_dword);
        addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
        pci_read_config_dword(pdev, where + 4, &pci_dword);
        addr |= ((resource_size_t)pci_dword << 32);
#endif

        addr &= ~(PAGE_SIZE - 1);

        box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
        box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
{
        iounmap(box->io_addr);
}

static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

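/*
 * The counters are free running and 32 bits wide; reading one is a
 * plain 32-bit load from the mapped MMIO region.
 */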
static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}

/*
 * Custom event_init() function: we define our own fixed, free-running
 * counters, so we do not want to conflict with the generic uncore
 * logic. This also simplifies the processing.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        struct hw_perf_event *hwc = &event->hw;
        u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
        int idx, base;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        pmu = uncore_event_to_pmu(event);
        /* no device found for this pmu */
        if (pmu->func_id < 0)
                return -ENOENT;

        /* Sampling not supported yet */
        if (hwc->sample_period)
                return -EINVAL;

        /* unsupported modes and filters */
        if (event->attr.exclude_user   ||
            event->attr.exclude_kernel ||
            event->attr.exclude_hv     ||
            event->attr.exclude_idle   ||
            event->attr.exclude_host   ||
            event->attr.exclude_guest  ||
            event->attr.sample_period) /* no sampling */
                return -EINVAL;

        /*
         * Place all uncore events for a particular physical package
         * onto a single cpu
         */
        if (event->cpu < 0)
                return -EINVAL;

        /* check only supported bits are set */
        if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
                return -EINVAL;

        box = uncore_pmu_to_box(pmu, event->cpu);
        if (!box || box->cpu < 0)
                return -EINVAL;

        event->cpu = box->cpu;
        event->pmu_private = box;

        event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

        event->hw.idx = -1;
        event->hw.last_tag = ~0ULL;
        event->hw.extra_reg.idx = EXTRA_REG_NONE;
        event->hw.branch_reg.idx = EXTRA_REG_NONE;
        /*
         * Check that the event is on the whitelist; this also selects
         * the counter to use.
         */
        switch (cfg) {
        case SNB_UNCORE_PCI_IMC_DATA_READS:
                base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
                idx = UNCORE_PMC_IDX_FIXED;
                break;
        case SNB_UNCORE_PCI_IMC_DATA_WRITES:
                base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
                idx = UNCORE_PMC_IDX_FIXED + 1;
                break;
        default:
                return -EINVAL;
        }

        /* must be done before validate_group */
        event->hw.event_base = base;
        event->hw.config = cfg;
        event->hw.idx = idx;

        /* no group validation needed, we have free running counters */

        return 0;
}

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        return 0;
}

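/*
 * With free-running 32-bit counters and no overflow interrupt, the
 * first active event arms an hrtimer that periodically folds the
 * hardware count into the 64-bit perf count before it can wrap.
 */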
static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        u64 count;

        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        event->hw.state = 0;
        box->n_active++;

        list_add_tail(&event->active_entry, &box->active_list);

        count = snb_uncore_imc_read_counter(box, event);
        local64_set(&event->hw.prev_count, count);

        if (box->n_active == 1)
                uncore_pmu_start_hrtimer(box);
}

static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;

        if (!(hwc->state & PERF_HES_STOPPED)) {
                box->n_active--;

                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;

                list_del(&event->active_entry);

                if (box->n_active == 0)
                        uncore_pmu_cancel_hrtimer(box);
        }

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                /*
                 * Drain the remaining delta count out of an event
                 * that we are disabling:
                 */
                uncore_perf_event_update(box, event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;

        if (!box)
                return -ENODEV;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (!(flags & PERF_EF_START))
                hwc->state |= PERF_HES_ARCH;

        snb_uncore_imc_event_start(event, 0);

        return 0;
}

static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
        snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
}

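/*
 * Client parts have a single package, so the bus holding the IMC device
 * simply maps to physical package id 0.
 */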
int snb_pci2phy_map_init(int devid)
{
        struct pci_dev *dev = NULL;
        struct pci2phy_map *map;
        int bus, segment;

        dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
        if (!dev)
                return -ENOTTY;

        bus = dev->bus->number;
        segment = pci_domain_nr(dev->bus);

        raw_spin_lock(&pci2phy_map_lock);
        map = __find_pci2phy_map(segment);
        if (!map) {
                raw_spin_unlock(&pci2phy_map_lock);
                pci_dev_put(dev);
                return -ENOMEM;
        }
        map->pbus_to_physid[bus] = 0;
        raw_spin_unlock(&pci2phy_map_lock);

        pci_dev_put(dev);

        return 0;
}

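/*
 * The IMC supplies its own struct pmu instead of using the generic
 * uncore one: its fixed free-running counters need none of the generic
 * counter scheduling.
 */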
static struct pmu snb_uncore_imc_pmu = {
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = snb_uncore_imc_event_init,
        .add            = snb_uncore_imc_event_add,
        .del            = snb_uncore_imc_event_del,
        .start          = snb_uncore_imc_event_start,
        .stop           = snb_uncore_imc_event_stop,
        .read           = uncore_pmu_event_read,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
        .init_box       = snb_uncore_imc_init_box,
        .exit_box       = snb_uncore_imc_exit_box,
        .enable_box     = snb_uncore_imc_enable_box,
        .disable_box    = snb_uncore_imc_disable_box,
        .disable_event  = snb_uncore_imc_disable_event,
        .enable_event   = snb_uncore_imc_enable_event,
        .hw_config      = snb_uncore_imc_hw_config,
        .read_counter   = snb_uncore_imc_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
        .name           = "imc",
        .num_counters   = 2,
        .num_boxes      = 1,
        .fixed_ctr_bits = 32,
        .fixed_ctr      = SNB_UNCORE_PCI_IMC_CTR_BASE,
        .event_descs    = snb_uncore_imc_events,
        .format_group   = &snb_uncore_imc_format_group,
        .perf_ctr       = SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
        .event_mask     = SNB_UNCORE_PCI_IMC_EVENT_MASK,
        .ops            = &snb_uncore_imc_ops,
        .pmu            = &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
        [SNB_PCI_UNCORE_IMC]    = &snb_uncore_imc,
        NULL,
};

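/*
 * These id tables are not bound here; the generic uncore PCI code fills
 * in the probe/remove callbacks and registers whichever driver matches
 * the platform (see imc_uncore_pci_init() below).
 */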
static const struct pci_device_id snb_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
        .name           = "snb_uncore",
        .id_table       = snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
        .name           = "ivb_uncore",
        .id_table       = ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
        .name           = "hsw_uncore",
        .id_table       = hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
        .name           = "bdw_uncore",
        .id_table       = bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
        .name           = "skl_uncore",
        .id_table       = skl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
        __u32 pci_id;
        struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
        { .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
        IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
        IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
        IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
        IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
        IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
        IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
        IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
        IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
        IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Dual Core */
        IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
        IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
        IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
        {  /* end marker */ }
};

#define for_each_imc_pci_id(x, t) \
        for (x = (t); (x)->pci_id; x++)

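/*
 * Probe for a supported IMC by trying each known device id in turn; the
 * first id that resolves to a present PCI device selects the driver.
 */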
static struct pci_driver *imc_uncore_find_dev(void)
{
        const struct imc_uncore_pci_dev *p;
        int ret;

        for_each_imc_pci_id(p, desktop_imc_pci_ids) {
                ret = snb_pci2phy_map_init(p->pci_id);
                if (ret == 0)
                        return p->driver;
        }
        return NULL;
}

static int imc_uncore_pci_init(void)
{
        struct pci_driver *imc_drv = imc_uncore_find_dev();

        if (!imc_drv)
                return -ENODEV;

        uncore_pci_uncores = snb_pci_uncores;
        uncore_pci_driver = imc_drv;

        return 0;
}

int snb_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
        wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

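/*
 * Bits 0-7 of the global control MSR enable the eight general-purpose
 * uncore counters; bit 32 enables the fixed counter.
 */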
static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->idx < UNCORE_PMC_IDX_FIXED)
                wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
        else
                wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask8.attr,
        NULL,
};

static const struct attribute_group nhm_uncore_format_group = {
        .name = "format",
        .attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
        INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
        INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
        { /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
        .disable_box    = nhm_uncore_msr_disable_box,
        .enable_box     = nhm_uncore_msr_enable_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = nhm_uncore_msr_enable_event,
        .read_counter   = uncore_msr_read_counter,
};

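/*
 * The empty name makes the generic code register this PMU as plain
 * "uncore", keeping the historical name for the Nehalem uncore.
 */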
static struct intel_uncore_type nhm_uncore = {
        .name           = "",
        .num_counters   = 8,
        .num_boxes      = 1,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .event_ctl      = NHM_UNC_PERFEVTSEL0,
        .perf_ctr       = NHM_UNC_UNCORE_PMC0,
        .fixed_ctr      = NHM_UNC_FIXED_CTR,
        .fixed_ctl      = NHM_UNC_FIXED_CTR_CTRL,
        .event_mask     = NHM_UNC_RAW_EVENT_MASK,
        .event_descs    = nhm_uncore_events,
        .ops            = &nhm_uncore_msr_ops,
        .format_group   = &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
        &nhm_uncore,
        NULL,
};

void nhm_uncore_cpu_init(void)
{
        uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */