linux/arch/x86/events/intel/uncore_snb.c
/* Nehalem/Sandy Bridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC     0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC     0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC  0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC     0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC   0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC     0x1604
#define PCI_DEVICE_ID_INTEL_SKL_IMC     0x191f

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK                 0x000000ff
#define SNB_UNC_CTL_UMASK_MASK                  0x0000ff00
#define SNB_UNC_CTL_EDGE_DET                    (1 << 18)
#define SNB_UNC_CTL_EN                          (1 << 22)
#define SNB_UNC_CTL_INVERT                      (1 << 23)
#define SNB_UNC_CTL_CMASK_MASK                  0x1f000000
#define NHM_UNC_CTL_CMASK_MASK                  0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN                (1 << 0)

#define SNB_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
                                                 SNB_UNC_CTL_UMASK_MASK | \
                                                 SNB_UNC_CTL_EDGE_DET | \
                                                 SNB_UNC_CTL_INVERT | \
                                                 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
                                                 SNB_UNC_CTL_UMASK_MASK | \
                                                 SNB_UNC_CTL_EDGE_DET | \
                                                 SNB_UNC_CTL_INVERT | \
                                                 NHM_UNC_CTL_CMASK_MASK)
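
/*
 * Encoding example (values chosen only to illustrate the layout): with the
 * format attributes defined further below, a raw C-Box event such as
 *
 *   perf stat -a -e 'uncore_cbox_0/event=0x80,umask=0x01,cmask=1/'
 *
 * is programmed as (0x80 | (0x01 << 8) | (1 << 24)) in the event control
 * MSR, i.e. entirely within SNB_UNC_RAW_EVENT_MASK.  On Nehalem the wider
 * NHM_UNC_CTL_CMASK_MASK allows cmask values up to 0xff.
 */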

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL                 0x391
#define SNB_UNC_FIXED_CTR_CTRL                  0x394
#define SNB_UNC_FIXED_CTR                       0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN                   (1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0               0x700
#define SNB_UNC_CBO_0_PER_CTR0                  0x706
#define SNB_UNC_CBO_MSR_OFFSET                  0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0                    0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0                 0x3b2
#define SNB_UNC_ARB_MSR_OFFSET                  0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL                 0x391
#define NHM_UNC_FIXED_CTR                       0x394
#define NHM_UNC_FIXED_CTR_CTRL                  0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL            ((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC                (1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0                     0x3c0
#define NHM_UNC_UNCORE_PMC0                     0x3b0

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");

/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->idx < UNCORE_PMC_IDX_FIXED)
                wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
        else
                wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0) {
                wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
                        SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
        }
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
        if (box->pmu->pmu_idx == 0)
                wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}

static struct uncore_event_desc snb_uncore_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
        { /* end: all zeroes */ },
};
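
/*
 * Note: event=0xff matches the generic uncore fixed-event encoding
 * (UNCORE_FIXED_EVENT), so clockticks is counted on the fixed counter
 * programmed through SNB_UNC_FIXED_CTR_CTRL rather than on one of the
 * general purpose C-Box counters.
 */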

static struct attribute *snb_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask5.attr,
        NULL,
};

static struct attribute_group snb_uncore_format_group = {
        .name           = "format",
        .attrs          = snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
        .init_box       = snb_uncore_msr_init_box,
        .exit_box       = snb_uncore_msr_exit_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = snb_uncore_msr_enable_event,
        .read_counter   = uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_arb_constraints[] = {
        UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
        UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
        EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
        .name           = "cbox",
        .num_counters   = 2,
        .num_boxes      = 4,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
        .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
        .fixed_ctr      = SNB_UNC_FIXED_CTR,
        .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
        .single_fixed   = 1,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
        .ops            = &snb_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
        .event_descs    = snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
        .name           = "arb",
        .num_counters   = 2,
        .num_boxes      = 1,
        .perf_ctr_bits  = 44,
        .perf_ctr       = SNB_UNC_ARB_PER_CTR0,
        .event_ctl      = SNB_UNC_ARB_PERFEVTSEL0,
        .event_mask     = SNB_UNC_RAW_EVENT_MASK,
        .msr_offset     = SNB_UNC_ARB_MSR_OFFSET,
        .constraints    = snb_uncore_arb_constraints,
        .ops            = &snb_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
        &snb_uncore_cbox,
        &snb_uncore_arb,
        NULL,
};

void snb_uncore_cpu_init(void)
{
        uncore_msr_uncores = snb_msr_uncores;
        if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

enum {
        SNB_PCI_UNCORE_IMC,
};

static struct uncore_event_desc snb_uncore_imc_events[] = {
        INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
        INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

        INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
        INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
        INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

        { /* end: all zeroes */ },
};
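
/*
 * The scale above converts raw counts to MiB: each count corresponds to a
 * 64-byte cache line transferred by the IMC, and 64 / 2^20 = 6.103515625e-5.
 */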

#define SNB_UNCORE_PCI_IMC_EVENT_MASK           0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET           0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE             0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS           0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE      0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES          0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE     0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE             SNB_UNCORE_PCI_IMC_DATA_READS_BASE
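
/*
 * The two free-running counters are 32-bit MMIO registers at the offsets
 * above, relative to the page-aligned BAR mapped in
 * snb_uncore_imc_init_box(); they only ever need to be read, never
 * programmed.
 */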

static struct attribute *snb_uncore_imc_formats_attr[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group snb_uncore_imc_format_group = {
        .name = "format",
        .attrs = snb_uncore_imc_formats_attr,
};

static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
        struct pci_dev *pdev = box->pci_dev;
        int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
        resource_size_t addr;
        u32 pci_dword;

        pci_read_config_dword(pdev, where, &pci_dword);
        addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
        pci_read_config_dword(pdev, where + 4, &pci_dword);
        addr |= ((resource_size_t)pci_dword << 32);
#endif

        addr &= ~(PAGE_SIZE - 1);

        box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
        box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
{
        iounmap(box->io_addr);
}

static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

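/*
 * The IMC counters are free running and only 32 bits wide (fixed_ctr_bits
 * is 32 below), so wrap-around is handled by the generic
 * uncore_perf_event_update() delta logic together with the hrtimer armed
 * in snb_uncore_imc_event_start().
 */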
static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}

/*
 * Custom event_init() function because we define our own fixed, free
 * running counters, so we do not want to conflict with the generic uncore
 * logic. This also simplifies processing.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        struct hw_perf_event *hwc = &event->hw;
        u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
        int idx, base;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        pmu = uncore_event_to_pmu(event);
        /* no device found for this pmu */
        if (pmu->func_id < 0)
                return -ENOENT;

        /* Sampling not supported yet */
        if (hwc->sample_period)
                return -EINVAL;

        /* unsupported modes and filters */
        if (event->attr.exclude_user   ||
            event->attr.exclude_kernel ||
            event->attr.exclude_hv     ||
            event->attr.exclude_idle   ||
            event->attr.exclude_host   ||
            event->attr.exclude_guest  ||
            event->attr.sample_period) /* no sampling */
                return -EINVAL;

        /*
         * Place all uncore events for a particular physical package
         * onto a single cpu
         */
        if (event->cpu < 0)
                return -EINVAL;

        /* check only supported bits are set */
        if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
                return -EINVAL;

        box = uncore_pmu_to_box(pmu, event->cpu);
        if (!box || box->cpu < 0)
                return -EINVAL;

        event->cpu = box->cpu;
        event->pmu_private = box;

        event->hw.idx = -1;
        event->hw.last_tag = ~0ULL;
        event->hw.extra_reg.idx = EXTRA_REG_NONE;
        event->hw.branch_reg.idx = EXTRA_REG_NONE;
        /*
         * check event is known (whitelist, determines counter)
         */
        switch (cfg) {
        case SNB_UNCORE_PCI_IMC_DATA_READS:
                base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
                idx = UNCORE_PMC_IDX_FIXED;
                break;
        case SNB_UNCORE_PCI_IMC_DATA_WRITES:
                base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
                idx = UNCORE_PMC_IDX_FIXED + 1;
                break;
        default:
                return -EINVAL;
        }

        /* must be done before validate_group */
        event->hw.event_base = base;
        event->hw.config = cfg;
        event->hw.idx = idx;

        /* no group validation needed, we have free running counters */

        return 0;
}
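
/*
 * Illustrative usage (counting mode, system wide, since sampling and
 * per-task events are rejected above):
 *
 *   perf stat -a -e uncore_imc/data_reads/,uncore_imc/data_writes/ -- sleep 1
 */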

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
        return 0;
}

static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        u64 count;

        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        event->hw.state = 0;
        box->n_active++;

        list_add_tail(&event->active_entry, &box->active_list);

        count = snb_uncore_imc_read_counter(box, event);
        local64_set(&event->hw.prev_count, count);

        if (box->n_active == 1)
                uncore_pmu_start_hrtimer(box);
}

static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;

        if (!(hwc->state & PERF_HES_STOPPED)) {
                box->n_active--;

                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;

                list_del(&event->active_entry);

                if (box->n_active == 0)
                        uncore_pmu_cancel_hrtimer(box);
        }

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                /*
                 * Drain the remaining delta count out of an event
                 * that we are disabling:
                 */
                uncore_perf_event_update(box, event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;

        if (!box)
                return -ENODEV;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (!(flags & PERF_EF_START))
                hwc->state |= PERF_HES_ARCH;

        snb_uncore_imc_event_start(event, 0);

        box->n_events++;

        return 0;
}

static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        int i;

        snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);

        for (i = 0; i < box->n_events; i++) {
                if (event == box->event_list[i]) {
                        --box->n_events;
                        break;
                }
        }
}

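/*
 * These client IMCs only exist on single-package parts, so the host
 * bridge's PCI bus is simply mapped to physical package id 0.
 */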
int snb_pci2phy_map_init(int devid)
{
        struct pci_dev *dev = NULL;
        struct pci2phy_map *map;
        int bus, segment;

        dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
        if (!dev)
                return -ENOTTY;

        bus = dev->bus->number;
        segment = pci_domain_nr(dev->bus);

        raw_spin_lock(&pci2phy_map_lock);
        map = __find_pci2phy_map(segment);
        if (!map) {
                raw_spin_unlock(&pci2phy_map_lock);
                pci_dev_put(dev);
                return -ENOMEM;
        }
        map->pbus_to_physid[bus] = 0;
        raw_spin_unlock(&pci2phy_map_lock);

        pci_dev_put(dev);

        return 0;
}

static struct pmu snb_uncore_imc_pmu = {
        .task_ctx_nr    = perf_invalid_context,
        .event_init     = snb_uncore_imc_event_init,
        .add            = snb_uncore_imc_event_add,
        .del            = snb_uncore_imc_event_del,
        .start          = snb_uncore_imc_event_start,
        .stop           = snb_uncore_imc_event_stop,
        .read           = uncore_pmu_event_read,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
        .init_box       = snb_uncore_imc_init_box,
        .exit_box       = snb_uncore_imc_exit_box,
        .enable_box     = snb_uncore_imc_enable_box,
        .disable_box    = snb_uncore_imc_disable_box,
        .disable_event  = snb_uncore_imc_disable_event,
        .enable_event   = snb_uncore_imc_enable_event,
        .hw_config      = snb_uncore_imc_hw_config,
        .read_counter   = snb_uncore_imc_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
        .name           = "imc",
        .num_counters   = 2,
        .num_boxes      = 1,
        .fixed_ctr_bits = 32,
        .fixed_ctr      = SNB_UNCORE_PCI_IMC_CTR_BASE,
        .event_descs    = snb_uncore_imc_events,
        .format_group   = &snb_uncore_imc_format_group,
        .perf_ctr       = SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
        .event_mask     = SNB_UNCORE_PCI_IMC_EVENT_MASK,
        .ops            = &snb_uncore_imc_ops,
        .pmu            = &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
        [SNB_PCI_UNCORE_IMC]    = &snb_uncore_imc,
        NULL,
};

static const struct pci_device_id snb_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
        { /* IMC */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
        { /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
        .name           = "snb_uncore",
        .id_table       = snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
        .name           = "ivb_uncore",
        .id_table       = ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
        .name           = "hsw_uncore",
        .id_table       = hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
        .name           = "bdw_uncore",
        .id_table       = bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
        .name           = "skl_uncore",
        .id_table       = skl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
        __u32 pci_id;
        struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
        { .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
        IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
        IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
        IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
        IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
        IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
        IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
        IMC_DEV(SKL_IMC, &skl_uncore_pci_driver),    /* 6th Gen Core */
        {  /* end marker */ }
};

#define for_each_imc_pci_id(x, t) \
        for (x = (t); (x)->pci_id; x++)

static struct pci_driver *imc_uncore_find_dev(void)
{
        const struct imc_uncore_pci_dev *p;
        int ret;

        for_each_imc_pci_id(p, desktop_imc_pci_ids) {
                ret = snb_pci2phy_map_init(p->pci_id);
                if (ret == 0)
                        return p->driver;
        }
        return NULL;
}

static int imc_uncore_pci_init(void)
{
        struct pci_driver *imc_drv = imc_uncore_find_dev();

        if (!imc_drv)
                return -ENODEV;

        uncore_pci_uncores = snb_pci_uncores;
        uncore_pci_driver = imc_drv;

        return 0;
}

int snb_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
        return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
        wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
        wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (hwc->idx < UNCORE_PMC_IDX_FIXED)
                wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
        else
                wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask8.attr,
        NULL,
};

static struct attribute_group nhm_uncore_format_group = {
        .name = "format",
        .attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
        INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
        INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
        INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
        INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
        { /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
        .disable_box    = nhm_uncore_msr_disable_box,
        .enable_box     = nhm_uncore_msr_enable_box,
        .disable_event  = snb_uncore_msr_disable_event,
        .enable_event   = nhm_uncore_msr_enable_event,
        .read_counter   = uncore_msr_read_counter,
};

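/*
 * Note: the empty .name below makes this PMU show up as plain "uncore" in
 * sysfs rather than "uncore_<name>", presumably to keep the name the
 * original Nehalem uncore support exposed.
 */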
static struct intel_uncore_type nhm_uncore = {
        .name           = "",
        .num_counters   = 8,
        .num_boxes      = 1,
        .perf_ctr_bits  = 48,
        .fixed_ctr_bits = 48,
        .event_ctl      = NHM_UNC_PERFEVTSEL0,
        .perf_ctr       = NHM_UNC_UNCORE_PMC0,
        .fixed_ctr      = NHM_UNC_FIXED_CTR,
        .fixed_ctl      = NHM_UNC_FIXED_CTR_CTRL,
        .event_mask     = NHM_UNC_RAW_EVENT_MASK,
        .event_descs    = nhm_uncore_events,
        .ops            = &nhm_uncore_msr_ops,
        .format_group   = &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
        &nhm_uncore,
        NULL,
};

void nhm_uncore_cpu_init(void)
{
        uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */