linux/arch/x86/events/intel/uncore.c
   1#include <linux/module.h>
   2
   3#include <asm/cpu_device_id.h>
   4#include <asm/intel-family.h>
   5#include "uncore.h"
   6
   7static struct intel_uncore_type *empty_uncore[] = { NULL, };
   8struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
   9struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
  10
  11static bool pcidrv_registered;
  12struct pci_driver *uncore_pci_driver;
  13/* pci bus to socket mapping */
  14DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
  15struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
  16struct pci_extra_dev *uncore_extra_pci_dev;
  17static int max_packages;
  18
  19/* mask of cpus that collect uncore events */
  20static cpumask_t uncore_cpu_mask;
  21
  22/* constraint for the fixed counter */
  23static struct event_constraint uncore_constraint_fixed =
  24        EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
  25struct event_constraint uncore_constraint_empty =
  26        EVENT_CONSTRAINT(0, 0, 0);
  27
  28MODULE_LICENSE("GPL");
  29
  30static int uncore_pcibus_to_physid(struct pci_bus *bus)
  31{
  32        struct pci2phy_map *map;
  33        int phys_id = -1;
  34
  35        raw_spin_lock(&pci2phy_map_lock);
  36        list_for_each_entry(map, &pci2phy_map_head, list) {
  37                if (map->segment == pci_domain_nr(bus)) {
  38                        phys_id = map->pbus_to_physid[bus->number];
  39                        break;
  40                }
  41        }
  42        raw_spin_unlock(&pci2phy_map_lock);
  43
  44        return phys_id;
  45}
  46
  47static void uncore_free_pcibus_map(void)
  48{
  49        struct pci2phy_map *map, *tmp;
  50
  51        list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
  52                list_del(&map->list);
  53                kfree(map);
  54        }
  55}
  56
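/*
 * Look up (or create) the bus-to-physid map for a PCI segment.  Called with
 * pci2phy_map_lock held; the lock may be dropped and re-taken around the
 * allocation, hence the re-scan of the list afterwards.
 */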
  57struct pci2phy_map *__find_pci2phy_map(int segment)
  58{
  59        struct pci2phy_map *map, *alloc = NULL;
  60        int i;
  61
  62        lockdep_assert_held(&pci2phy_map_lock);
  63
  64lookup:
  65        list_for_each_entry(map, &pci2phy_map_head, list) {
  66                if (map->segment == segment)
  67                        goto end;
  68        }
  69
  70        if (!alloc) {
  71                raw_spin_unlock(&pci2phy_map_lock);
  72                alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
  73                raw_spin_lock(&pci2phy_map_lock);
  74
  75                if (!alloc)
  76                        return NULL;
  77
  78                goto lookup;
  79        }
  80
  81        map = alloc;
  82        alloc = NULL;
  83        map->segment = segment;
  84        for (i = 0; i < 256; i++)
  85                map->pbus_to_physid[i] = -1;
  86        list_add_tail(&map->list, &pci2phy_map_head);
  87
  88end:
  89        kfree(alloc);
  90        return map;
  91}
  92
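/*
 * Show one event description string.  These descriptions end up under
 * /sys/bus/event_source/devices/uncore_*/events/ and can be used as, for
 * example (event and PMU names vary by platform):
 *   perf stat -a -e uncore_imc/data_reads/ sleep 1
 */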
  93ssize_t uncore_event_show(struct kobject *kobj,
  94                          struct kobj_attribute *attr, char *buf)
  95{
  96        struct uncore_event_desc *event =
  97                container_of(attr, struct uncore_event_desc, attr);
  98        return sprintf(buf, "%s", event->config);
  99}
 100
 101struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
 102{
 103        unsigned int pkgid = topology_logical_package_id(cpu);
 104
 105        /*
 106         * The unsigned check also catches the '-1' return value for
 107         * non-existent mappings in the topology map.
 108         */
 109        return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
 110}
 111
 112u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
 113{
 114        u64 count;
 115
 116        rdmsrl(event->hw.event_base, count);
 117
 118        return count;
 119}
 120
 121/*
 122 * generic get constraint function for shared match/mask registers.
 123 */
 124struct event_constraint *
 125uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
 126{
 127        struct intel_uncore_extra_reg *er;
 128        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
 129        struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
 130        unsigned long flags;
 131        bool ok = false;
 132
 133        /*
 134         * reg->alloc can be set due to existing state, so for fake box we
 135         * need to ignore this, otherwise we might fail to allocate proper
 136         * fake state for this extra reg constraint.
 137         */
 138        if (reg1->idx == EXTRA_REG_NONE ||
 139            (!uncore_box_is_fake(box) && reg1->alloc))
 140                return NULL;
 141
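        /*
         * The shared reg can be taken if it is currently unused or already
         * programmed with the same match/mask configuration.
         */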
 142        er = &box->shared_regs[reg1->idx];
 143        raw_spin_lock_irqsave(&er->lock, flags);
 144        if (!atomic_read(&er->ref) ||
 145            (er->config1 == reg1->config && er->config2 == reg2->config)) {
 146                atomic_inc(&er->ref);
 147                er->config1 = reg1->config;
 148                er->config2 = reg2->config;
 149                ok = true;
 150        }
 151        raw_spin_unlock_irqrestore(&er->lock, flags);
 152
 153        if (ok) {
 154                if (!uncore_box_is_fake(box))
 155                        reg1->alloc = 1;
 156                return NULL;
 157        }
 158
 159        return &uncore_constraint_empty;
 160}
 161
 162void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
 163{
 164        struct intel_uncore_extra_reg *er;
 165        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
 166
 167        /*
 168         * Only put the constraint if the extra reg was actually allocated.
 169         * This also takes care of events which do not use an extra shared reg.
 170         *
 171         * Also, if this is a fake box we shouldn't touch any event state
 172         * (reg->alloc) and we don't care about leaving inconsistent box
 173         * state either since it will be thrown out.
 174         */
 175        if (uncore_box_is_fake(box) || !reg1->alloc)
 176                return;
 177
 178        er = &box->shared_regs[reg1->idx];
 179        atomic_dec(&er->ref);
 180        reg1->alloc = 0;
 181}
 182
 183u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
 184{
 185        struct intel_uncore_extra_reg *er;
 186        unsigned long flags;
 187        u64 config;
 188
 189        er = &box->shared_regs[idx];
 190
 191        raw_spin_lock_irqsave(&er->lock, flags);
 192        config = er->config;
 193        raw_spin_unlock_irqrestore(&er->lock, flags);
 194
 195        return config;
 196}
 197
 198static void uncore_assign_hw_event(struct intel_uncore_box *box,
 199                                   struct perf_event *event, int idx)
 200{
 201        struct hw_perf_event *hwc = &event->hw;
 202
 203        hwc->idx = idx;
 204        hwc->last_tag = ++box->tags[idx];
 205
 206        if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
 207                hwc->event_base = uncore_fixed_ctr(box);
 208                hwc->config_base = uncore_fixed_ctl(box);
 209                return;
 210        }
 211
 212        hwc->config_base = uncore_event_ctl(box, hwc->idx);
 213        hwc->event_base  = uncore_perf_ctr(box, hwc->idx);
 214}
 215
 216void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
 217{
 218        u64 prev_count, new_count, delta;
 219        int shift;
 220
 221        if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
 222                shift = 64 - uncore_fixed_ctr_bits(box);
 223        else
 224                shift = 64 - uncore_perf_ctr_bits(box);
 225
 226        /* the hrtimer might modify the previous event value */
 227again:
 228        prev_count = local64_read(&event->hw.prev_count);
 229        new_count = uncore_read_counter(box, event);
 230        if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
 231                goto again;
 232
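        /*
         * The counters are narrower than 64 bits, so shift both values up
         * into the top bits and back down again; the subtraction then yields
         * a correct delta even when the hardware counter wrapped.
         */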
 233        delta = (new_count << shift) - (prev_count << shift);
 234        delta >>= shift;
 235
 236        local64_add(delta, &event->count);
 237}
 238
 239/*
 240 * The overflow interrupt is unavailable for SandyBridge-EP and broken
 241 * for SandyBridge, so we use an hrtimer to poll the counters periodically
 242 * and avoid losing counts to overflow.
 243 */
 244static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
 245{
 246        struct intel_uncore_box *box;
 247        struct perf_event *event;
 248        unsigned long flags;
 249        int bit;
 250
 251        box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
 252        if (!box->n_active || box->cpu != smp_processor_id())
 253                return HRTIMER_NORESTART;
 254        /*
 255         * Disable local interrupts to prevent uncore_pmu_event_start/stop
 256         * from interrupting the update process.
 257         */
 258        local_irq_save(flags);
 259
 260        /*
 261         * handle boxes with an active event list as opposed to active
 262         * counters
 263         */
 264        list_for_each_entry(event, &box->active_list, active_entry) {
 265                uncore_perf_event_update(box, event);
 266        }
 267
 268        for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
 269                uncore_perf_event_update(box, box->events[bit]);
 270
 271        local_irq_restore(flags);
 272
 273        hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
 274        return HRTIMER_RESTART;
 275}
 276
 277void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
 278{
 279        hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
 280                      HRTIMER_MODE_REL_PINNED);
 281}
 282
 283void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
 284{
 285        hrtimer_cancel(&box->hrtimer);
 286}
 287
 288static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
 289{
 290        hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 291        box->hrtimer.function = uncore_pmu_hrtimer;
 292}
 293
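/*
 * Allocate a box on @node, with trailing storage for the type's shared
 * extra registers, and set up its polling hrtimer with the default period.
 */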
 294static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
 295                                                 int node)
 296{
 297        int i, size, numshared = type->num_shared_regs;
 298        struct intel_uncore_box *box;
 299
 300        size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);
 301
 302        box = kzalloc_node(size, GFP_KERNEL, node);
 303        if (!box)
 304                return NULL;
 305
 306        for (i = 0; i < numshared; i++)
 307                raw_spin_lock_init(&box->shared_regs[i].lock);
 308
 309        uncore_pmu_init_hrtimer(box);
 310        box->cpu = -1;
 311        box->pci_phys_id = -1;
 312        box->pkgid = -1;
 313
 314        /* set default hrtimer timeout */
 315        box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
 316
 317        INIT_LIST_HEAD(&box->active_list);
 318
 319        return box;
 320}
 321
 322/*
 323 * The uncore_pmu_event_init() pmu event_init callback is used
 324 * as a detection point for uncore events.
 325 */
 326static int uncore_pmu_event_init(struct perf_event *event);
 327
 328static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
 329{
 330        return &box->pmu->pmu == event->pmu;
 331}
 332
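/*
 * Collect events for @box into box->event_list: the @leader if it belongs
 * to this box, plus (when @dogrp is set) its active siblings.  Returns the
 * new number of collected events, or -EINVAL if the box would overflow.
 */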
 333static int
 334uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
 335                      bool dogrp)
 336{
 337        struct perf_event *event;
 338        int n, max_count;
 339
 340        max_count = box->pmu->type->num_counters;
 341        if (box->pmu->type->fixed_ctl)
 342                max_count++;
 343
 344        if (box->n_events >= max_count)
 345                return -EINVAL;
 346
 347        n = box->n_events;
 348
 349        if (is_box_event(box, leader)) {
 350                box->event_list[n] = leader;
 351                n++;
 352        }
 353
 354        if (!dogrp)
 355                return n;
 356
 357        for_each_sibling_event(event, leader) {
 358                if (!is_box_event(box, event) ||
 359                    event->state <= PERF_EVENT_STATE_OFF)
 360                        continue;
 361
 362                if (n >= max_count)
 363                        return -EINVAL;
 364
 365                box->event_list[n] = event;
 366                n++;
 367        }
 368        return n;
 369}
 370
 371static struct event_constraint *
 372uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
 373{
 374        struct intel_uncore_type *type = box->pmu->type;
 375        struct event_constraint *c;
 376
 377        if (type->ops->get_constraint) {
 378                c = type->ops->get_constraint(box, event);
 379                if (c)
 380                        return c;
 381        }
 382
 383        if (event->attr.config == UNCORE_FIXED_EVENT)
 384                return &uncore_constraint_fixed;
 385
 386        if (type->constraints) {
 387                for_each_event_constraint(c, type->constraints) {
 388                        if ((event->hw.config & c->cmask) == c->code)
 389                                return c;
 390                }
 391        }
 392
 393        return &type->unconstrainted;
 394}
 395
 396static void uncore_put_event_constraint(struct intel_uncore_box *box,
 397                                        struct perf_event *event)
 398{
 399        if (box->pmu->type->ops->put_constraint)
 400                box->pmu->type->ops->put_constraint(box, event);
 401}
 402
 403static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
 404{
 405        unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
 406        struct event_constraint *c;
 407        int i, wmin, wmax, ret = 0;
 408        struct hw_perf_event *hwc;
 409
 410        bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
 411
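        /*
         * First pass: resolve each event's constraint and track the
         * minimum/maximum constraint weight for perf_assign_events().
         */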
 412        for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
 413                c = uncore_get_event_constraint(box, box->event_list[i]);
 414                box->event_constraint[i] = c;
 415                wmin = min(wmin, c->weight);
 416                wmax = max(wmax, c->weight);
 417        }
 418
 419        /* fastpath, try to reuse previous register */
 420        for (i = 0; i < n; i++) {
 421                hwc = &box->event_list[i]->hw;
 422                c = box->event_constraint[i];
 423
 424                /* never assigned */
 425                if (hwc->idx == -1)
 426                        break;
 427
 428                /* constraint still honored */
 429                if (!test_bit(hwc->idx, c->idxmsk))
 430                        break;
 431
 432                /* not already used */
 433                if (test_bit(hwc->idx, used_mask))
 434                        break;
 435
 436                __set_bit(hwc->idx, used_mask);
 437                if (assign)
 438                        assign[i] = hwc->idx;
 439        }
 440        /* slow path */
 441        if (i != n)
 442                ret = perf_assign_events(box->event_constraint, n,
 443                                         wmin, wmax, n, assign);
 444
 445        if (!assign || ret) {
 446                for (i = 0; i < n; i++)
 447                        uncore_put_event_constraint(box, box->event_list[i]);
 448        }
 449        return ret ? -EINVAL : 0;
 450}
 451
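/*
 * Program the event on its assigned counter and enable it.  The first
 * active event also enables the box and starts the polling hrtimer.
 */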
 452static void uncore_pmu_event_start(struct perf_event *event, int flags)
 453{
 454        struct intel_uncore_box *box = uncore_event_to_box(event);
 455        int idx = event->hw.idx;
 456
 457        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
 458                return;
 459
 460        if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
 461                return;
 462
 463        event->hw.state = 0;
 464        box->events[idx] = event;
 465        box->n_active++;
 466        __set_bit(idx, box->active_mask);
 467
 468        local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
 469        uncore_enable_event(box, event);
 470
 471        if (box->n_active == 1) {
 472                uncore_enable_box(box);
 473                uncore_pmu_start_hrtimer(box);
 474        }
 475}
 476
 477static void uncore_pmu_event_stop(struct perf_event *event, int flags)
 478{
 479        struct intel_uncore_box *box = uncore_event_to_box(event);
 480        struct hw_perf_event *hwc = &event->hw;
 481
 482        if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
 483                uncore_disable_event(box, event);
 484                box->n_active--;
 485                box->events[hwc->idx] = NULL;
 486                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
 487                hwc->state |= PERF_HES_STOPPED;
 488
 489                if (box->n_active == 0) {
 490                        uncore_disable_box(box);
 491                        uncore_pmu_cancel_hrtimer(box);
 492                }
 493        }
 494
 495        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
 496                /*
 497                 * Drain the remaining delta count out of an event
 498                 * that we are disabling:
 499                 */
 500                uncore_perf_event_update(box, event);
 501                hwc->state |= PERF_HES_UPTODATE;
 502        }
 503}
 504
 505static int uncore_pmu_event_add(struct perf_event *event, int flags)
 506{
 507        struct intel_uncore_box *box = uncore_event_to_box(event);
 508        struct hw_perf_event *hwc = &event->hw;
 509        int assign[UNCORE_PMC_IDX_MAX];
 510        int i, n, ret;
 511
 512        if (!box)
 513                return -ENODEV;
 514
 515        ret = n = uncore_collect_events(box, event, false);
 516        if (ret < 0)
 517                return ret;
 518
 519        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
 520        if (!(flags & PERF_EF_START))
 521                hwc->state |= PERF_HES_ARCH;
 522
 523        ret = uncore_assign_events(box, assign, n);
 524        if (ret)
 525                return ret;
 526
 527        /* save events moving to new counters */
 528        for (i = 0; i < box->n_events; i++) {
 529                event = box->event_list[i];
 530                hwc = &event->hw;
 531
 532                if (hwc->idx == assign[i] &&
 533                        hwc->last_tag == box->tags[assign[i]])
 534                        continue;
 535                /*
 536                 * Ensure we don't accidentally enable a stopped
 537                 * counter simply because we rescheduled.
 538                 */
 539                if (hwc->state & PERF_HES_STOPPED)
 540                        hwc->state |= PERF_HES_ARCH;
 541
 542                uncore_pmu_event_stop(event, PERF_EF_UPDATE);
 543        }
 544
 545        /* reprogram moved events into new counters */
 546        for (i = 0; i < n; i++) {
 547                event = box->event_list[i];
 548                hwc = &event->hw;
 549
 550                if (hwc->idx != assign[i] ||
 551                        hwc->last_tag != box->tags[assign[i]])
 552                        uncore_assign_hw_event(box, event, assign[i]);
 553                else if (i < box->n_events)
 554                        continue;
 555
 556                if (hwc->state & PERF_HES_ARCH)
 557                        continue;
 558
 559                uncore_pmu_event_start(event, 0);
 560        }
 561        box->n_events = n;
 562
 563        return 0;
 564}
 565
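/*
 * Stop the event, release its constraint and remove it from the box's
 * event list, compacting the remaining entries.
 */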
 566static void uncore_pmu_event_del(struct perf_event *event, int flags)
 567{
 568        struct intel_uncore_box *box = uncore_event_to_box(event);
 569        int i;
 570
 571        uncore_pmu_event_stop(event, PERF_EF_UPDATE);
 572
 573        for (i = 0; i < box->n_events; i++) {
 574                if (event == box->event_list[i]) {
 575                        uncore_put_event_constraint(box, event);
 576
 577                        for (++i; i < box->n_events; i++)
 578                                box->event_list[i - 1] = box->event_list[i];
 579
 580                        --box->n_events;
 581                        break;
 582                }
 583        }
 584
 585        event->hw.idx = -1;
 586        event->hw.last_tag = ~0ULL;
 587}
 588
 589void uncore_pmu_event_read(struct perf_event *event)
 590{
 591        struct intel_uncore_box *box = uncore_event_to_box(event);
 592        uncore_perf_event_update(box, event);
 593}
 594
 595/*
 596 * Validation ensures the group can be loaded onto the
 597 * PMU if it were the only group available.
 598 */
 599static int uncore_validate_group(struct intel_uncore_pmu *pmu,
 600                                struct perf_event *event)
 601{
 602        struct perf_event *leader = event->group_leader;
 603        struct intel_uncore_box *fake_box;
 604        int ret = -EINVAL, n;
 605
 606        fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
 607        if (!fake_box)
 608                return -ENOMEM;
 609
 610        fake_box->pmu = pmu;
 611        /*
 612         * The event is not yet connected with its
 613         * siblings, therefore we must first collect
 614         * the existing siblings, then add the new event
 615         * before we can simulate the scheduling.
 616         */
 617        n = uncore_collect_events(fake_box, leader, true);
 618        if (n < 0)
 619                goto out;
 620
 621        fake_box->n_events = n;
 622        n = uncore_collect_events(fake_box, event, false);
 623        if (n < 0)
 624                goto out;
 625
 626        fake_box->n_events = n;
 627
 628        ret = uncore_assign_events(fake_box, NULL, n);
 629out:
 630        kfree(fake_box);
 631        return ret;
 632}
 633
 634static int uncore_pmu_event_init(struct perf_event *event)
 635{
 636        struct intel_uncore_pmu *pmu;
 637        struct intel_uncore_box *box;
 638        struct hw_perf_event *hwc = &event->hw;
 639        int ret;
 640
 641        if (event->attr.type != event->pmu->type)
 642                return -ENOENT;
 643
 644        pmu = uncore_event_to_pmu(event);
 645        /* no device found for this pmu */
 646        if (pmu->func_id < 0)
 647                return -ENOENT;
 648
 649        /*
 650         * The uncore PMU always measures at all privilege levels,
 651         * so it doesn't make sense to specify any exclude bits.
 652         */
 653        if (event->attr.exclude_user || event->attr.exclude_kernel ||
 654                        event->attr.exclude_hv || event->attr.exclude_idle)
 655                return -EINVAL;
 656
 657        /* Sampling not supported yet */
 658        if (hwc->sample_period)
 659                return -EINVAL;
 660
 661        /*
 662         * Place all uncore events for a particular physical package
 663         * onto a single cpu
 664         */
 665        if (event->cpu < 0)
 666                return -EINVAL;
 667        box = uncore_pmu_to_box(pmu, event->cpu);
 668        if (!box || box->cpu < 0)
 669                return -EINVAL;
 670        event->cpu = box->cpu;
 671        event->pmu_private = box;
 672
 673        event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
 674
 675        event->hw.idx = -1;
 676        event->hw.last_tag = ~0ULL;
 677        event->hw.extra_reg.idx = EXTRA_REG_NONE;
 678        event->hw.branch_reg.idx = EXTRA_REG_NONE;
 679
 680        if (event->attr.config == UNCORE_FIXED_EVENT) {
 681                /* no fixed counter */
 682                if (!pmu->type->fixed_ctl)
 683                        return -EINVAL;
 684                /*
 685                 * if there is only one fixed counter, only the first pmu
 686                 * can access the fixed counter
 687                 */
 688                if (pmu->type->single_fixed && pmu->pmu_idx > 0)
 689                        return -EINVAL;
 690
 691                /* fixed counters have event field hardcoded to zero */
 692                hwc->config = 0ULL;
 693        } else {
 694                hwc->config = event->attr.config &
 695                              (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
 696                if (pmu->type->ops->hw_config) {
 697                        ret = pmu->type->ops->hw_config(box, event);
 698                        if (ret)
 699                                return ret;
 700                }
 701        }
 702
 703        if (event->group_leader != event)
 704                ret = uncore_validate_group(pmu, event);
 705        else
 706                ret = 0;
 707
 708        return ret;
 709}
 710
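/*
 * The cpumask attribute exposes, per package, the CPU that carries the
 * uncore events; tools such as perf read it from
 * /sys/bus/event_source/devices/uncore_*/cpumask.
 */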
 711static ssize_t uncore_get_attr_cpumask(struct device *dev,
 712                                struct device_attribute *attr, char *buf)
 713{
 714        return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
 715}
 716
 717static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);
 718
 719static struct attribute *uncore_pmu_attrs[] = {
 720        &dev_attr_cpumask.attr,
 721        NULL,
 722};
 723
 724static const struct attribute_group uncore_pmu_attr_group = {
 725        .attrs = uncore_pmu_attrs,
 726};
 727
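/*
 * Register the perf PMU for this uncore pmu instance.  Types with a single
 * box are named "uncore_<type>" (or plain "uncore"), otherwise the box
 * index is appended.
 */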
 728static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
 729{
 730        int ret;
 731
 732        if (!pmu->type->pmu) {
 733                pmu->pmu = (struct pmu) {
 734                        .attr_groups    = pmu->type->attr_groups,
 735                        .task_ctx_nr    = perf_invalid_context,
 736                        .event_init     = uncore_pmu_event_init,
 737                        .add            = uncore_pmu_event_add,
 738                        .del            = uncore_pmu_event_del,
 739                        .start          = uncore_pmu_event_start,
 740                        .stop           = uncore_pmu_event_stop,
 741                        .read           = uncore_pmu_event_read,
 742                        .module         = THIS_MODULE,
 743                };
 744        } else {
 745                pmu->pmu = *pmu->type->pmu;
 746                pmu->pmu.attr_groups = pmu->type->attr_groups;
 747        }
 748
 749        if (pmu->type->num_boxes == 1) {
 750                if (strlen(pmu->type->name) > 0)
 751                        sprintf(pmu->name, "uncore_%s", pmu->type->name);
 752                else
 753                        sprintf(pmu->name, "uncore");
 754        } else {
 755                sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
 756                        pmu->pmu_idx);
 757        }
 758
 759        ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
 760        if (!ret)
 761                pmu->registered = true;
 762        return ret;
 763}
 764
 765static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
 766{
 767        if (!pmu->registered)
 768                return;
 769        perf_pmu_unregister(&pmu->pmu);
 770        pmu->registered = false;
 771}
 772
 773static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
 774{
 775        int pkg;
 776
 777        for (pkg = 0; pkg < max_packages; pkg++)
 778                kfree(pmu->boxes[pkg]);
 779        kfree(pmu->boxes);
 780}
 781
 782static void uncore_type_exit(struct intel_uncore_type *type)
 783{
 784        struct intel_uncore_pmu *pmu = type->pmus;
 785        int i;
 786
 787        if (pmu) {
 788                for (i = 0; i < type->num_boxes; i++, pmu++) {
 789                        uncore_pmu_unregister(pmu);
 790                        uncore_free_boxes(pmu);
 791                }
 792                kfree(type->pmus);
 793                type->pmus = NULL;
 794        }
 795        kfree(type->events_group);
 796        type->events_group = NULL;
 797}
 798
 799static void uncore_types_exit(struct intel_uncore_type **types)
 800{
 801        for (; *types; types++)
 802                uncore_type_exit(*types);
 803}
 804
 805static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
 806{
 807        struct intel_uncore_pmu *pmus;
 808        struct attribute_group *attr_group;
 809        struct attribute **attrs;
 810        size_t size;
 811        int i, j;
 812
 813        pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
 814        if (!pmus)
 815                return -ENOMEM;
 816
 817        size = max_packages * sizeof(struct intel_uncore_box *);
 818
 819        for (i = 0; i < type->num_boxes; i++) {
 820                pmus[i].func_id = setid ? i : -1;
 821                pmus[i].pmu_idx = i;
 822                pmus[i].type    = type;
 823                pmus[i].boxes   = kzalloc(size, GFP_KERNEL);
 824                if (!pmus[i].boxes)
 825                        goto err;
 826        }
 827
 828        type->pmus = pmus;
 829        type->unconstrainted = (struct event_constraint)
 830                __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
 831                                0, type->num_counters, 0, 0);
 832
 833        if (type->event_descs) {
 834                for (i = 0; type->event_descs[i].attr.attr.name; i++);
 835
 836                attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
 837                                        sizeof(*attr_group), GFP_KERNEL);
 838                if (!attr_group)
 839                        goto err;
 840
 841                attrs = (struct attribute **)(attr_group + 1);
 842                attr_group->name = "events";
 843                attr_group->attrs = attrs;
 844
 845                for (j = 0; j < i; j++)
 846                        attrs[j] = &type->event_descs[j].attr.attr;
 847
 848                type->events_group = attr_group;
 849        }
 850
 851        type->pmu_group = &uncore_pmu_attr_group;
 852
 853        return 0;
 854
 855err:
 856        for (i = 0; i < type->num_boxes; i++)
 857                kfree(pmus[i].boxes);
 858        kfree(pmus);
 859
 860        return -ENOMEM;
 861}
 862
 863static int __init
 864uncore_types_init(struct intel_uncore_type **types, bool setid)
 865{
 866        int ret;
 867
 868        for (; *types; types++) {
 869                ret = uncore_type_init(*types, setid);
 870                if (ret)
 871                        return ret;
 872        }
 873        return 0;
 874}
 875
 876/*
 877 * add a pci uncore device
 878 */
 879static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 880{
 881        struct intel_uncore_type *type;
 882        struct intel_uncore_pmu *pmu = NULL;
 883        struct intel_uncore_box *box;
 884        int phys_id, pkg, ret;
 885
 886        phys_id = uncore_pcibus_to_physid(pdev->bus);
 887        if (phys_id < 0)
 888                return -ENODEV;
 889
 890        pkg = topology_phys_to_logical_pkg(phys_id);
 891        if (pkg < 0)
 892                return -EINVAL;
 893
 894        if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
 895                int idx = UNCORE_PCI_DEV_IDX(id->driver_data);
 896
 897                uncore_extra_pci_dev[pkg].dev[idx] = pdev;
 898                pci_set_drvdata(pdev, NULL);
 899                return 0;
 900        }
 901
 902        type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
 903
 904        /*
 905         * Some platforms, e.g. Knights Landing, use a common PCI device ID
 906         * for multiple instances of an uncore PMU device type. We should check
 907         * the PCI slot and function to identify the uncore box.
 908         */
 909        if (id->driver_data & ~0xffff) {
 910                struct pci_driver *pci_drv = pdev->driver;
 911                const struct pci_device_id *ids = pci_drv->id_table;
 912                unsigned int devfn;
 913
 914                while (ids && ids->vendor) {
 915                        if ((ids->vendor == pdev->vendor) &&
 916                            (ids->device == pdev->device)) {
 917                                devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
 918                                                  UNCORE_PCI_DEV_FUNC(ids->driver_data));
 919                                if (devfn == pdev->devfn) {
 920                                        pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
 921                                        break;
 922                                }
 923                        }
 924                        ids++;
 925                }
 926                if (pmu == NULL)
 927                        return -ENODEV;
 928        } else {
 929                /*
 930                 * For a performance monitoring unit with multiple boxes,
 931                 * each box has a different function ID.
 932                 */
 933                pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
 934        }
 935
 936        if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
 937                return -EINVAL;
 938
 939        box = uncore_alloc_box(type, NUMA_NO_NODE);
 940        if (!box)
 941                return -ENOMEM;
 942
 943        if (pmu->func_id < 0)
 944                pmu->func_id = pdev->devfn;
 945        else
 946                WARN_ON_ONCE(pmu->func_id != pdev->devfn);
 947
 948        atomic_inc(&box->refcnt);
 949        box->pci_phys_id = phys_id;
 950        box->pkgid = pkg;
 951        box->pci_dev = pdev;
 952        box->pmu = pmu;
 953        uncore_box_init(box);
 954        pci_set_drvdata(pdev, box);
 955
 956        pmu->boxes[pkg] = box;
 957        if (atomic_inc_return(&pmu->activeboxes) > 1)
 958                return 0;
 959
 960        /* First active box registers the pmu */
 961        ret = uncore_pmu_register(pmu);
 962        if (ret) {
 963                pci_set_drvdata(pdev, NULL);
 964                pmu->boxes[pkg] = NULL;
 965                uncore_box_exit(box);
 966                kfree(box);
 967        }
 968        return ret;
 969}
 970
 971static void uncore_pci_remove(struct pci_dev *pdev)
 972{
 973        struct intel_uncore_box *box;
 974        struct intel_uncore_pmu *pmu;
 975        int i, phys_id, pkg;
 976
 977        phys_id = uncore_pcibus_to_physid(pdev->bus);
 978
 979        box = pci_get_drvdata(pdev);
 980        if (!box) {
 981                pkg = topology_phys_to_logical_pkg(phys_id);
 982                for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
 983                        if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
 984                                uncore_extra_pci_dev[pkg].dev[i] = NULL;
 985                                break;
 986                        }
 987                }
 988                WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
 989                return;
 990        }
 991
 992        pmu = box->pmu;
 993        if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
 994                return;
 995
 996        pci_set_drvdata(pdev, NULL);
 997        pmu->boxes[box->pkgid] = NULL;
 998        if (atomic_dec_return(&pmu->activeboxes) == 0)
 999                uncore_pmu_unregister(pmu);
1000        uncore_box_exit(box);
1001        kfree(box);
1002}
1003
1004static int __init uncore_pci_init(void)
1005{
1006        size_t size;
1007        int ret;
1008
1009        size = max_packages * sizeof(struct pci_extra_dev);
1010        uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
1011        if (!uncore_extra_pci_dev) {
1012                ret = -ENOMEM;
1013                goto err;
1014        }
1015
1016        ret = uncore_types_init(uncore_pci_uncores, false);
1017        if (ret)
1018                goto errtype;
1019
1020        uncore_pci_driver->probe = uncore_pci_probe;
1021        uncore_pci_driver->remove = uncore_pci_remove;
1022
1023        ret = pci_register_driver(uncore_pci_driver);
1024        if (ret)
1025                goto errtype;
1026
1027        pcidrv_registered = true;
1028        return 0;
1029
1030errtype:
1031        uncore_types_exit(uncore_pci_uncores);
1032        kfree(uncore_extra_pci_dev);
1033        uncore_extra_pci_dev = NULL;
1034        uncore_free_pcibus_map();
1035err:
1036        uncore_pci_uncores = empty_uncore;
1037        return ret;
1038}
1039
1040static void uncore_pci_exit(void)
1041{
1042        if (pcidrv_registered) {
1043                pcidrv_registered = false;
1044                pci_unregister_driver(uncore_pci_driver);
1045                uncore_types_exit(uncore_pci_uncores);
1046                kfree(uncore_extra_pci_dev);
1047                uncore_free_pcibus_map();
1048        }
1049}
1050
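/*
 * Move all boxes of @type in the package of @old_cpu (or @new_cpu when the
 * package comes online) over to @new_cpu, migrating any active perf context.
 */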
1051static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
1052                                   int new_cpu)
1053{
1054        struct intel_uncore_pmu *pmu = type->pmus;
1055        struct intel_uncore_box *box;
1056        int i, pkg;
1057
1058        pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
1059        for (i = 0; i < type->num_boxes; i++, pmu++) {
1060                box = pmu->boxes[pkg];
1061                if (!box)
1062                        continue;
1063
1064                if (old_cpu < 0) {
1065                        WARN_ON_ONCE(box->cpu != -1);
1066                        box->cpu = new_cpu;
1067                        continue;
1068                }
1069
1070                WARN_ON_ONCE(box->cpu != old_cpu);
1071                box->cpu = -1;
1072                if (new_cpu < 0)
1073                        continue;
1074
1075                uncore_pmu_cancel_hrtimer(box);
1076                perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
1077                box->cpu = new_cpu;
1078        }
1079}
1080
1081static void uncore_change_context(struct intel_uncore_type **uncores,
1082                                  int old_cpu, int new_cpu)
1083{
1084        for (; *uncores; uncores++)
1085                uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
1086}
1087
1088static int uncore_event_cpu_offline(unsigned int cpu)
1089{
1090        struct intel_uncore_type *type, **types = uncore_msr_uncores;
1091        struct intel_uncore_pmu *pmu;
1092        struct intel_uncore_box *box;
1093        int i, pkg, target;
1094
1095        /* Check if exiting cpu is used for collecting uncore events */
1096        if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
1097                goto unref;
1098        /* Find a new cpu to collect uncore events */
1099        target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
1100
1101        /* Migrate uncore events to the new target */
1102        if (target < nr_cpu_ids)
1103                cpumask_set_cpu(target, &uncore_cpu_mask);
1104        else
1105                target = -1;
1106
1107        uncore_change_context(uncore_msr_uncores, cpu, target);
1108        uncore_change_context(uncore_pci_uncores, cpu, target);
1109
1110unref:
1111        /* Clear the references */
1112        pkg = topology_logical_package_id(cpu);
1113        for (; *types; types++) {
1114                type = *types;
1115                pmu = type->pmus;
1116                for (i = 0; i < type->num_boxes; i++, pmu++) {
1117                        box = pmu->boxes[pkg];
1118                        if (box && atomic_dec_return(&box->refcnt) == 0)
1119                                uncore_box_exit(box);
1120                }
1121        }
1122        return 0;
1123}
1124
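/*
 * Allocate any boxes still missing for @pkg on behalf of the incoming @cpu.
 * Allocation is all-or-nothing: boxes are only installed in their pmus once
 * every allocation has succeeded.
 */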
1125static int allocate_boxes(struct intel_uncore_type **types,
1126                         unsigned int pkg, unsigned int cpu)
1127{
1128        struct intel_uncore_box *box, *tmp;
1129        struct intel_uncore_type *type;
1130        struct intel_uncore_pmu *pmu;
1131        LIST_HEAD(allocated);
1132        int i;
1133
1134        /* Try to allocate all required boxes */
1135        for (; *types; types++) {
1136                type = *types;
1137                pmu = type->pmus;
1138                for (i = 0; i < type->num_boxes; i++, pmu++) {
1139                        if (pmu->boxes[pkg])
1140                                continue;
1141                        box = uncore_alloc_box(type, cpu_to_node(cpu));
1142                        if (!box)
1143                                goto cleanup;
1144                        box->pmu = pmu;
1145                        box->pkgid = pkg;
1146                        list_add(&box->active_list, &allocated);
1147                }
1148        }
1149        /* Install them in the pmus */
1150        list_for_each_entry_safe(box, tmp, &allocated, active_list) {
1151                list_del_init(&box->active_list);
1152                box->pmu->boxes[pkg] = box;
1153        }
1154        return 0;
1155
1156cleanup:
1157        list_for_each_entry_safe(box, tmp, &allocated, active_list) {
1158                list_del_init(&box->active_list);
1159                kfree(box);
1160        }
1161        return -ENOMEM;
1162}
1163
1164static int uncore_event_cpu_online(unsigned int cpu)
1165{
1166        struct intel_uncore_type *type, **types = uncore_msr_uncores;
1167        struct intel_uncore_pmu *pmu;
1168        struct intel_uncore_box *box;
1169        int i, ret, pkg, target;
1170
1171        pkg = topology_logical_package_id(cpu);
1172        ret = allocate_boxes(types, pkg, cpu);
1173        if (ret)
1174                return ret;
1175
1176        for (; *types; types++) {
1177                type = *types;
1178                pmu = type->pmus;
1179                for (i = 0; i < type->num_boxes; i++, pmu++) {
1180                        box = pmu->boxes[pkg];
1181                        if (box && atomic_inc_return(&box->refcnt) == 1)
1182                                uncore_box_init(box);
1183                }
1184        }
1185
1186        /*
1187         * Check if there is an online cpu in the package
1188         * which collects uncore events already.
1189         */
1190        target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
1191        if (target < nr_cpu_ids)
1192                return 0;
1193
1194        cpumask_set_cpu(cpu, &uncore_cpu_mask);
1195
1196        uncore_change_context(uncore_msr_uncores, -1, cpu);
1197        uncore_change_context(uncore_pci_uncores, -1, cpu);
1198        return 0;
1199}
1200
1201static int __init type_pmu_register(struct intel_uncore_type *type)
1202{
1203        int i, ret;
1204
1205        for (i = 0; i < type->num_boxes; i++) {
1206                ret = uncore_pmu_register(&type->pmus[i]);
1207                if (ret)
1208                        return ret;
1209        }
1210        return 0;
1211}
1212
1213static int __init uncore_msr_pmus_register(void)
1214{
1215        struct intel_uncore_type **types = uncore_msr_uncores;
1216        int ret;
1217
1218        for (; *types; types++) {
1219                ret = type_pmu_register(*types);
1220                if (ret)
1221                        return ret;
1222        }
1223        return 0;
1224}
1225
1226static int __init uncore_cpu_init(void)
1227{
1228        int ret;
1229
1230        ret = uncore_types_init(uncore_msr_uncores, true);
1231        if (ret)
1232                goto err;
1233
1234        ret = uncore_msr_pmus_register();
1235        if (ret)
1236                goto err;
1237        return 0;
1238err:
1239        uncore_types_exit(uncore_msr_uncores);
1240        uncore_msr_uncores = empty_uncore;
1241        return ret;
1242}
1243
1244#define X86_UNCORE_MODEL_MATCH(model, init)     \
1245        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
1246
1247struct intel_uncore_init_fun {
1248        void    (*cpu_init)(void);
1249        int     (*pci_init)(void);
1250};
1251
1252static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
1253        .cpu_init = nhm_uncore_cpu_init,
1254};
1255
1256static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
1257        .cpu_init = snb_uncore_cpu_init,
1258        .pci_init = snb_uncore_pci_init,
1259};
1260
1261static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
1262        .cpu_init = snb_uncore_cpu_init,
1263        .pci_init = ivb_uncore_pci_init,
1264};
1265
1266static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
1267        .cpu_init = snb_uncore_cpu_init,
1268        .pci_init = hsw_uncore_pci_init,
1269};
1270
1271static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
1272        .cpu_init = snb_uncore_cpu_init,
1273        .pci_init = bdw_uncore_pci_init,
1274};
1275
1276static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
1277        .cpu_init = snbep_uncore_cpu_init,
1278        .pci_init = snbep_uncore_pci_init,
1279};
1280
1281static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
1282        .cpu_init = nhmex_uncore_cpu_init,
1283};
1284
1285static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
1286        .cpu_init = ivbep_uncore_cpu_init,
1287        .pci_init = ivbep_uncore_pci_init,
1288};
1289
1290static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
1291        .cpu_init = hswep_uncore_cpu_init,
1292        .pci_init = hswep_uncore_pci_init,
1293};
1294
1295static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
1296        .cpu_init = bdx_uncore_cpu_init,
1297        .pci_init = bdx_uncore_pci_init,
1298};
1299
1300static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
1301        .cpu_init = knl_uncore_cpu_init,
1302        .pci_init = knl_uncore_pci_init,
1303};
1304
1305static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
1306        .cpu_init = skl_uncore_cpu_init,
1307        .pci_init = skl_uncore_pci_init,
1308};
1309
1310static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
1311        .cpu_init = skx_uncore_cpu_init,
1312        .pci_init = skx_uncore_pci_init,
1313};
1314
1315static const struct x86_cpu_id intel_uncore_match[] __initconst = {
1316        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP,     nhm_uncore_init),
1317        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM,        nhm_uncore_init),
1318        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE,       nhm_uncore_init),
1319        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP,    nhm_uncore_init),
1320        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,    snb_uncore_init),
1321        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,      ivb_uncore_init),
1322        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE,   hsw_uncore_init),
1323        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,    hsw_uncore_init),
1324        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E,   hsw_uncore_init),
1325        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, bdw_uncore_init),
1326        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, bdw_uncore_init),
1327        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X,  snbep_uncore_init),
1328        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX,     nhmex_uncore_init),
1329        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX,    nhmex_uncore_init),
1330        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X,    ivbep_uncore_init),
1331        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X,      hswep_uncore_init),
1332        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,    bdx_uncore_init),
1333        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
1334        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,   knl_uncore_init),
1335        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM,   knl_uncore_init),
1336        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_uncore_init),
1337        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
1338        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,      skx_uncore_init),
1339        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init),
1340        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
1341        {},
1342};
1343
1344MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
1345
1346static int __init intel_uncore_init(void)
1347{
1348        const struct x86_cpu_id *id;
1349        struct intel_uncore_init_fun *uncore_init;
1350        int pret = 0, cret = 0, ret;
1351
1352        id = x86_match_cpu(intel_uncore_match);
1353        if (!id)
1354                return -ENODEV;
1355
1356        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
1357                return -ENODEV;
1358
1359        max_packages = topology_max_packages();
1360
1361        uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
1362        if (uncore_init->pci_init) {
1363                pret = uncore_init->pci_init();
1364                if (!pret)
1365                        pret = uncore_pci_init();
1366        }
1367
1368        if (uncore_init->cpu_init) {
1369                uncore_init->cpu_init();
1370                cret = uncore_cpu_init();
1371        }
1372
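        /*
         * Fail only if both the MSR and the PCI side failed to set up;
         * a single working side is sufficient.
         */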
1373        if (cret && pret)
1374                return -ENODEV;
1375
1376        /* Install hotplug callbacks to set up the targets for each package */
1377        ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
1378                                "perf/x86/intel/uncore:online",
1379                                uncore_event_cpu_online,
1380                                uncore_event_cpu_offline);
1381        if (ret)
1382                goto err;
1383        return 0;
1384
1385err:
1386        uncore_types_exit(uncore_msr_uncores);
1387        uncore_pci_exit();
1388        return ret;
1389}
1390module_init(intel_uncore_init);
1391
1392static void __exit intel_uncore_exit(void)
1393{
1394        cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
1395        uncore_types_exit(uncore_msr_uncores);
1396        uncore_pci_exit();
1397}
1398module_exit(intel_uncore_exit);
1399