linux/arch/x86/events/intel/uncore.c
#include <linux/module.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
static int max_packages;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
        EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
        EVENT_CONSTRAINT(0, 0, 0);

MODULE_LICENSE("GPL");

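/*
 * Look up the physical package (socket) id for a PCI bus in the
 * pci2phy map. Returns -1 if the bus has no known mapping.
 */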
static int uncore_pcibus_to_physid(struct pci_bus *bus)
{
        struct pci2phy_map *map;
        int phys_id = -1;

        raw_spin_lock(&pci2phy_map_lock);
        list_for_each_entry(map, &pci2phy_map_head, list) {
                if (map->segment == pci_domain_nr(bus)) {
                        phys_id = map->pbus_to_physid[bus->number];
                        break;
                }
        }
        raw_spin_unlock(&pci2phy_map_lock);

        return phys_id;
}

static void uncore_free_pcibus_map(void)
{
        struct pci2phy_map *map, *tmp;

        list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
                list_del(&map->list);
                kfree(map);
        }
}

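/*
 * Find the pci2phy map entry for a PCI segment, allocating and
 * initializing a new entry if none exists yet. Called with
 * pci2phy_map_lock held; the lock is dropped temporarily for the
 * allocation.
 */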
struct pci2phy_map *__find_pci2phy_map(int segment)
{
        struct pci2phy_map *map, *alloc = NULL;
        int i;

        lockdep_assert_held(&pci2phy_map_lock);

lookup:
        list_for_each_entry(map, &pci2phy_map_head, list) {
                if (map->segment == segment)
                        goto end;
        }

        if (!alloc) {
                raw_spin_unlock(&pci2phy_map_lock);
                alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
                raw_spin_lock(&pci2phy_map_lock);

                if (!alloc)
                        return NULL;

                goto lookup;
        }

        map = alloc;
        alloc = NULL;
        map->segment = segment;
        for (i = 0; i < 256; i++)
                map->pbus_to_physid[i] = -1;
        list_add_tail(&map->list, &pci2phy_map_head);

end:
        kfree(alloc);
        return map;
}

ssize_t uncore_event_show(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf)
{
        struct uncore_event_desc *event =
                container_of(attr, struct uncore_event_desc, attr);
        return sprintf(buf, "%s", event->config);
}

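/*
 * Return the box of this pmu that serves the package of the given cpu,
 * or NULL if the cpu's package has no box (or no valid mapping).
 */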
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
        unsigned int pkgid = topology_logical_package_id(cpu);

        /*
         * The unsigned check also catches the '-1' return value for
         * non-existent mappings in the topology map.
         */
        return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        u64 count;

        rdmsrl(event->hw.event_base, count);

        return count;
}

/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct intel_uncore_extra_reg *er;
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
        unsigned long flags;
        bool ok = false;

        /*
         * reg->alloc can be set due to existing state, so for fake box we
         * need to ignore this, otherwise we might fail to allocate proper
         * fake state for this extra reg constraint.
         */
        if (reg1->idx == EXTRA_REG_NONE ||
            (!uncore_box_is_fake(box) && reg1->alloc))
                return NULL;

        er = &box->shared_regs[reg1->idx];
        raw_spin_lock_irqsave(&er->lock, flags);
        if (!atomic_read(&er->ref) ||
            (er->config1 == reg1->config && er->config2 == reg2->config)) {
                atomic_inc(&er->ref);
                er->config1 = reg1->config;
                er->config2 = reg2->config;
                ok = true;
        }
        raw_spin_unlock_irqrestore(&er->lock, flags);

        if (ok) {
                if (!uncore_box_is_fake(box))
                        reg1->alloc = 1;
                return NULL;
        }

        return &uncore_constraint_empty;
}

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct intel_uncore_extra_reg *er;
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

        /*
         * Only put the constraint if the extra reg was actually allocated.
         * Also takes care of events which do not use an extra shared reg.
         *
         * Also, if this is a fake box we shouldn't touch any event state
         * (reg->alloc) and we don't care about leaving inconsistent box
         * state either since it will be thrown out.
         */
        if (uncore_box_is_fake(box) || !reg1->alloc)
                return;

        er = &box->shared_regs[reg1->idx];
        atomic_dec(&er->ref);
        reg1->alloc = 0;
}

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
        struct intel_uncore_extra_reg *er;
        unsigned long flags;
        u64 config;

        er = &box->shared_regs[idx];

        raw_spin_lock_irqsave(&er->lock, flags);
        config = er->config;
        raw_spin_unlock_irqrestore(&er->lock, flags);

        return config;
}

static void uncore_assign_hw_event(struct intel_uncore_box *box,
                                   struct perf_event *event, int idx)
{
        struct hw_perf_event *hwc = &event->hw;

        hwc->idx = idx;
        hwc->last_tag = ++box->tags[idx];

        if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
                hwc->event_base = uncore_fixed_ctr(box);
                hwc->config_base = uncore_fixed_ctl(box);
                return;
        }

        hwc->config_base = uncore_event_ctl(box, hwc->idx);
        hwc->event_base  = uncore_perf_ctr(box, hwc->idx);
}

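/*
 * Read the current counter value and add the delta since the last read
 * to the perf event count. Shifting both values up to bit 63 before
 * subtracting makes the difference wrap at the hardware counter width,
 * so the delta is correct even when the counter rolls over.
 */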
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
        u64 prev_count, new_count, delta;
        int shift;

        if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
                shift = 64 - uncore_fixed_ctr_bits(box);
        else
                shift = 64 - uncore_perf_ctr_bits(box);

        /* the hrtimer might modify the previous event value */
again:
        prev_count = local64_read(&event->hw.prev_count);
        new_count = uncore_read_counter(box, event);
        if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
                goto again;

        delta = (new_count << shift) - (prev_count << shift);
        delta >>= shift;

        local64_add(delta, &event->count);
}

/*
 * The overflow interrupt is unavailable for SandyBridge-EP and is broken
 * for SandyBridge, so we use an hrtimer to periodically poll the counters
 * and avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
        struct intel_uncore_box *box;
        struct perf_event *event;
        unsigned long flags;
        int bit;

        box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
        if (!box->n_active || box->cpu != smp_processor_id())
                return HRTIMER_NORESTART;
        /*
         * Disable local interrupts to prevent uncore_pmu_event_start/stop
         * from interrupting the update process.
         */
        local_irq_save(flags);

        /*
         * handle boxes with an active event list as opposed to active
         * counters
         */
        list_for_each_entry(event, &box->active_list, active_entry) {
                uncore_perf_event_update(box, event);
        }

        for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
                uncore_perf_event_update(box, box->events[bit]);

        local_irq_restore(flags);

        hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
        return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
        hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
                      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
        hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
        hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        box->hrtimer.function = uncore_pmu_hrtimer;
}

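/*
 * Allocate and initialize a box for the given uncore type on the given
 * NUMA node, including its shared-reg locks and polling hrtimer.
 */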
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
                                                 int node)
{
        int i, size, numshared = type->num_shared_regs;
        struct intel_uncore_box *box;

        size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

        box = kzalloc_node(size, GFP_KERNEL, node);
        if (!box)
                return NULL;

        for (i = 0; i < numshared; i++)
                raw_spin_lock_init(&box->shared_regs[i].lock);

        uncore_pmu_init_hrtimer(box);
        box->cpu = -1;
        box->pci_phys_id = -1;
        box->pkgid = -1;

        /* set default hrtimer timeout */
        box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

        INIT_LIST_HEAD(&box->active_list);

        return box;
}

/*
 * Use the uncore_pmu_event_init() pmu event_init callback as a
 * detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
        return &box->pmu->pmu == event->pmu;
}

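/*
 * Collect the events that belong to this box, starting from the group
 * leader and, if dogrp is set, its siblings. Returns the resulting
 * number of events in the box's event list, or -EINVAL if there is not
 * enough room.
 */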
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
                      bool dogrp)
{
        struct perf_event *event;
        int n, max_count;

        max_count = box->pmu->type->num_counters;
        if (box->pmu->type->fixed_ctl)
                max_count++;

        if (box->n_events >= max_count)
                return -EINVAL;

        n = box->n_events;

        if (is_box_event(box, leader)) {
                box->event_list[n] = leader;
                n++;
        }

        if (!dogrp)
                return n;

        list_for_each_entry(event, &leader->sibling_list, group_entry) {
                if (!is_box_event(box, event) ||
                    event->state <= PERF_EVENT_STATE_OFF)
                        continue;

                if (n >= max_count)
                        return -EINVAL;

                box->event_list[n] = event;
                n++;
        }
        return n;
}

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct intel_uncore_type *type = box->pmu->type;
        struct event_constraint *c;

        if (type->ops->get_constraint) {
                c = type->ops->get_constraint(box, event);
                if (c)
                        return c;
        }

        if (event->attr.config == UNCORE_FIXED_EVENT)
                return &uncore_constraint_fixed;

        if (type->constraints) {
                for_each_event_constraint(c, type->constraints) {
                        if ((event->hw.config & c->cmask) == c->code)
                                return c;
                }
        }

        return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box,
                                        struct perf_event *event)
{
        if (box->pmu->type->ops->put_constraint)
                box->pmu->type->ops->put_constraint(box, event);
}

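/*
 * Assign counters to the first n events in the box's event list. The
 * fast path reuses previously assigned counters when the constraints
 * still allow it; otherwise the generic constraint solver in
 * perf_assign_events() is used. Returns 0 on success or -EINVAL if the
 * events cannot be scheduled.
 */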
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
        unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
        struct event_constraint *c;
        int i, wmin, wmax, ret = 0;
        struct hw_perf_event *hwc;

        bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

        for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
                c = uncore_get_event_constraint(box, box->event_list[i]);
                box->event_constraint[i] = c;
                wmin = min(wmin, c->weight);
                wmax = max(wmax, c->weight);
        }

        /* fastpath, try to reuse previous register */
        for (i = 0; i < n; i++) {
                hwc = &box->event_list[i]->hw;
                c = box->event_constraint[i];

                /* never assigned */
                if (hwc->idx == -1)
                        break;

                /* constraint still honored */
                if (!test_bit(hwc->idx, c->idxmsk))
                        break;

                /* not already used */
                if (test_bit(hwc->idx, used_mask))
                        break;

                __set_bit(hwc->idx, used_mask);
                if (assign)
                        assign[i] = hwc->idx;
        }
        /* slow path */
        if (i != n)
                ret = perf_assign_events(box->event_constraint, n,
                                         wmin, wmax, n, assign);

        if (!assign || ret) {
                for (i = 0; i < n; i++)
                        uncore_put_event_constraint(box, box->event_list[i]);
        }
        return ret ? -EINVAL : 0;
}

static void uncore_pmu_event_start(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        int idx = event->hw.idx;

        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
                return;

        event->hw.state = 0;
        box->events[idx] = event;
        box->n_active++;
        __set_bit(idx, box->active_mask);

        local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
        uncore_enable_event(box, event);

        if (box->n_active == 1) {
                uncore_enable_box(box);
                uncore_pmu_start_hrtimer(box);
        }
}

static void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;

        if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
                uncore_disable_event(box, event);
                box->n_active--;
                box->events[hwc->idx] = NULL;
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;

                if (box->n_active == 0) {
                        uncore_disable_box(box);
                        uncore_pmu_cancel_hrtimer(box);
                }
        }

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                /*
                 * Drain the remaining delta count out of an event
                 * that we are disabling:
                 */
                uncore_perf_event_update(box, event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

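/*
 * pmu::add callback: collect the new event into the box, reassign
 * counters for all of the box's events, and restart the events that
 * moved. The new event is only started if PERF_EF_START is set.
 */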
static int uncore_pmu_event_add(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;
        int assign[UNCORE_PMC_IDX_MAX];
        int i, n, ret;

        if (!box)
                return -ENODEV;

        ret = n = uncore_collect_events(box, event, false);
        if (ret < 0)
                return ret;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (!(flags & PERF_EF_START))
                hwc->state |= PERF_HES_ARCH;

        ret = uncore_assign_events(box, assign, n);
        if (ret)
                return ret;

        /* save events moving to new counters */
        for (i = 0; i < box->n_events; i++) {
                event = box->event_list[i];
                hwc = &event->hw;

                if (hwc->idx == assign[i] &&
                        hwc->last_tag == box->tags[assign[i]])
                        continue;
                /*
                 * Ensure we don't accidentally enable a stopped
                 * counter simply because we rescheduled.
                 */
                if (hwc->state & PERF_HES_STOPPED)
                        hwc->state |= PERF_HES_ARCH;

                uncore_pmu_event_stop(event, PERF_EF_UPDATE);
        }

        /* reprogram moved events into new counters */
        for (i = 0; i < n; i++) {
                event = box->event_list[i];
                hwc = &event->hw;

                if (hwc->idx != assign[i] ||
                        hwc->last_tag != box->tags[assign[i]])
                        uncore_assign_hw_event(box, event, assign[i]);
                else if (i < box->n_events)
                        continue;

                if (hwc->state & PERF_HES_ARCH)
                        continue;

                uncore_pmu_event_start(event, 0);
        }
        box->n_events = n;

        return 0;
}

static void uncore_pmu_event_del(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        int i;

        uncore_pmu_event_stop(event, PERF_EF_UPDATE);

        for (i = 0; i < box->n_events; i++) {
                if (event == box->event_list[i]) {
                        uncore_put_event_constraint(box, event);

                        for (++i; i < box->n_events; i++)
                                box->event_list[i - 1] = box->event_list[i];

                        --box->n_events;
                        break;
                }
        }

        event->hw.idx = -1;
        event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        uncore_perf_event_update(box, event);
}

/*
 * Validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
                                struct perf_event *event)
{
        struct perf_event *leader = event->group_leader;
        struct intel_uncore_box *fake_box;
        int ret = -EINVAL, n;

        fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
        if (!fake_box)
                return -ENOMEM;

        fake_box->pmu = pmu;
        /*
         * The event is not yet connected with its siblings, therefore
         * we must first collect the existing siblings and then add the
         * new event before we can simulate the scheduling.
         */
        n = uncore_collect_events(fake_box, leader, true);
        if (n < 0)
                goto out;

        fake_box->n_events = n;
        n = uncore_collect_events(fake_box, event, false);
        if (n < 0)
                goto out;

        fake_box->n_events = n;

        ret = uncore_assign_events(fake_box, NULL, n);
out:
        kfree(fake_box);
        return ret;
}

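/*
 * pmu::event_init callback: sanity-check the event attributes, bind the
 * event to the cpu that collects uncore events for its package, and set
 * up the hardware config.
 */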
static int uncore_pmu_event_init(struct perf_event *event)
{
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        struct hw_perf_event *hwc = &event->hw;
        int ret;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        pmu = uncore_event_to_pmu(event);
        /* no device found for this pmu */
        if (pmu->func_id < 0)
                return -ENOENT;

        /*
         * The uncore PMU counts at all privilege levels all the time,
         * so it doesn't make sense to specify any exclude bits.
         */
        if (event->attr.exclude_user || event->attr.exclude_kernel ||
                        event->attr.exclude_hv || event->attr.exclude_idle)
                return -EINVAL;

        /* Sampling not supported yet */
        if (hwc->sample_period)
                return -EINVAL;

        /*
         * Place all uncore events for a particular physical package
         * onto a single cpu
         */
        if (event->cpu < 0)
                return -EINVAL;
        box = uncore_pmu_to_box(pmu, event->cpu);
        if (!box || box->cpu < 0)
                return -EINVAL;
        event->cpu = box->cpu;
        event->pmu_private = box;

        event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

        event->hw.idx = -1;
        event->hw.last_tag = ~0ULL;
        event->hw.extra_reg.idx = EXTRA_REG_NONE;
        event->hw.branch_reg.idx = EXTRA_REG_NONE;

        if (event->attr.config == UNCORE_FIXED_EVENT) {
                /* no fixed counter */
                if (!pmu->type->fixed_ctl)
                        return -EINVAL;
                /*
                 * if there is only one fixed counter, only the first pmu
                 * can access the fixed counter
                 */
                if (pmu->type->single_fixed && pmu->pmu_idx > 0)
                        return -EINVAL;

                /* fixed counters have event field hardcoded to zero */
                hwc->config = 0ULL;
        } else {
                hwc->config = event->attr.config &
                              (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
                if (pmu->type->ops->hw_config) {
                        ret = pmu->type->ops->hw_config(box, event);
                        if (ret)
                                return ret;
                }
        }

        if (event->group_leader != event)
                ret = uncore_validate_group(pmu, event);
        else
                ret = 0;

        return ret;
}

static ssize_t uncore_get_attr_cpumask(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static const struct attribute_group uncore_pmu_attr_group = {
        .attrs = uncore_pmu_attrs,
};

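/*
 * Register a struct pmu for this uncore pmu with perf, either built
 * from the generic callbacks or copied from a type-specific template,
 * and derive the pmu name from the type name and index.
 */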
static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
        int ret;

        if (!pmu->type->pmu) {
                pmu->pmu = (struct pmu) {
                        .attr_groups    = pmu->type->attr_groups,
                        .task_ctx_nr    = perf_invalid_context,
                        .event_init     = uncore_pmu_event_init,
                        .add            = uncore_pmu_event_add,
                        .del            = uncore_pmu_event_del,
                        .start          = uncore_pmu_event_start,
                        .stop           = uncore_pmu_event_stop,
                        .read           = uncore_pmu_event_read,
                        .module         = THIS_MODULE,
                };
        } else {
                pmu->pmu = *pmu->type->pmu;
                pmu->pmu.attr_groups = pmu->type->attr_groups;
        }

        if (pmu->type->num_boxes == 1) {
                if (strlen(pmu->type->name) > 0)
                        sprintf(pmu->name, "uncore_%s", pmu->type->name);
                else
                        sprintf(pmu->name, "uncore");
        } else {
                sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
                        pmu->pmu_idx);
        }

        ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
        if (!ret)
                pmu->registered = true;
        return ret;
}

static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
        if (!pmu->registered)
                return;
        perf_pmu_unregister(&pmu->pmu);
        pmu->registered = false;
}

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
        int pkg;

        for (pkg = 0; pkg < max_packages; pkg++)
                kfree(pmu->boxes[pkg]);
        kfree(pmu->boxes);
}

static void uncore_type_exit(struct intel_uncore_type *type)
{
        struct intel_uncore_pmu *pmu = type->pmus;
        int i;

        if (pmu) {
                for (i = 0; i < type->num_boxes; i++, pmu++) {
                        uncore_pmu_unregister(pmu);
                        uncore_free_boxes(pmu);
                }
                kfree(type->pmus);
                type->pmus = NULL;
        }
        kfree(type->events_group);
        type->events_group = NULL;
}

static void uncore_types_exit(struct intel_uncore_type **types)
{
        for (; *types; types++)
                uncore_type_exit(*types);
}

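/*
 * Allocate the per-type pmu array and per-package box pointers, set up
 * the unconstrained event constraint, and build the "events" attribute
 * group from the type's event descriptions.
 */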
static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
        struct intel_uncore_pmu *pmus;
        struct attribute_group *attr_group;
        struct attribute **attrs;
        size_t size;
        int i, j;

        pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
        if (!pmus)
                return -ENOMEM;

        size = max_packages * sizeof(struct intel_uncore_box *);

        for (i = 0; i < type->num_boxes; i++) {
                pmus[i].func_id = setid ? i : -1;
                pmus[i].pmu_idx = i;
                pmus[i].type    = type;
                pmus[i].boxes   = kzalloc(size, GFP_KERNEL);
                if (!pmus[i].boxes)
                        return -ENOMEM;
        }

        type->pmus = pmus;
        type->unconstrainted = (struct event_constraint)
                __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
                                0, type->num_counters, 0, 0);

        if (type->event_descs) {
                for (i = 0; type->event_descs[i].attr.attr.name; i++);

                attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
                                        sizeof(*attr_group), GFP_KERNEL);
                if (!attr_group)
                        return -ENOMEM;

                attrs = (struct attribute **)(attr_group + 1);
                attr_group->name = "events";
                attr_group->attrs = attrs;

                for (j = 0; j < i; j++)
                        attrs[j] = &type->event_descs[j].attr.attr;

                type->events_group = attr_group;
        }

        type->pmu_group = &uncore_pmu_attr_group;
        return 0;
}

static int __init
uncore_types_init(struct intel_uncore_type **types, bool setid)
{
        int ret;

        for (; *types; types++) {
                ret = uncore_type_init(*types, setid);
                if (ret)
                        return ret;
        }
        return 0;
}

/*
 * Add a PCI uncore device.
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct intel_uncore_type *type;
        struct intel_uncore_pmu *pmu = NULL;
        struct intel_uncore_box *box;
        int phys_id, pkg, ret;

        phys_id = uncore_pcibus_to_physid(pdev->bus);
        if (phys_id < 0)
                return -ENODEV;

        pkg = topology_phys_to_logical_pkg(phys_id);
        if (pkg < 0)
                return -EINVAL;

        if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
                int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

                uncore_extra_pci_dev[pkg].dev[idx] = pdev;
                pci_set_drvdata(pdev, NULL);
                return 0;
        }

        type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];

        /*
         * Some platforms, e.g. Knights Landing, use a common PCI device ID
         * for multiple instances of an uncore PMU device type. Check the
         * PCI slot and function to identify the uncore box.
         */
        if (id->driver_data & ~0xffff) {
                struct pci_driver *pci_drv = pdev->driver;
                const struct pci_device_id *ids = pci_drv->id_table;
                unsigned int devfn;

                while (ids && ids->vendor) {
                        if ((ids->vendor == pdev->vendor) &&
                            (ids->device == pdev->device)) {
                                devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
                                                  UNCORE_PCI_DEV_FUNC(ids->driver_data));
                                if (devfn == pdev->devfn) {
                                        pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
                                        break;
                                }
                        }
                        ids++;
                }
                if (pmu == NULL)
                        return -ENODEV;
        } else {
                /*
                 * For performance monitoring units with multiple boxes,
                 * each box has a different function id.
                 */
                pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
        }

        if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
                return -EINVAL;

        box = uncore_alloc_box(type, NUMA_NO_NODE);
        if (!box)
                return -ENOMEM;

        if (pmu->func_id < 0)
                pmu->func_id = pdev->devfn;
        else
                WARN_ON_ONCE(pmu->func_id != pdev->devfn);

        atomic_inc(&box->refcnt);
        box->pci_phys_id = phys_id;
        box->pkgid = pkg;
        box->pci_dev = pdev;
        box->pmu = pmu;
        uncore_box_init(box);
        pci_set_drvdata(pdev, box);

        pmu->boxes[pkg] = box;
        if (atomic_inc_return(&pmu->activeboxes) > 1)
                return 0;

        /* First active box registers the pmu */
        ret = uncore_pmu_register(pmu);
        if (ret) {
                pci_set_drvdata(pdev, NULL);
                pmu->boxes[pkg] = NULL;
                uncore_box_exit(box);
                kfree(box);
        }
        return ret;
}

static void uncore_pci_remove(struct pci_dev *pdev)
{
        struct intel_uncore_box *box;
        struct intel_uncore_pmu *pmu;
        int i, phys_id, pkg;

        phys_id = uncore_pcibus_to_physid(pdev->bus);
        pkg = topology_phys_to_logical_pkg(phys_id);

        box = pci_get_drvdata(pdev);
        if (!box) {
                for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
                        if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
                                uncore_extra_pci_dev[pkg].dev[i] = NULL;
                                break;
                        }
                }
                WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
                return;
        }

        pmu = box->pmu;
        if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
                return;

        pci_set_drvdata(pdev, NULL);
        pmu->boxes[pkg] = NULL;
        if (atomic_dec_return(&pmu->activeboxes) == 0)
                uncore_pmu_unregister(pmu);
        uncore_box_exit(box);
        kfree(box);
}

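/*
 * Initialize the PCI side of the uncore support: allocate the
 * per-package extra-device table, set up the PCI uncore types and
 * register the PCI driver.
 */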
static int __init uncore_pci_init(void)
{
        size_t size;
        int ret;

        size = max_packages * sizeof(struct pci_extra_dev);
        uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
        if (!uncore_extra_pci_dev) {
                ret = -ENOMEM;
                goto err;
        }

        ret = uncore_types_init(uncore_pci_uncores, false);
        if (ret)
                goto errtype;

        uncore_pci_driver->probe = uncore_pci_probe;
        uncore_pci_driver->remove = uncore_pci_remove;

        ret = pci_register_driver(uncore_pci_driver);
        if (ret)
                goto errtype;

        pcidrv_registered = true;
        return 0;

errtype:
        uncore_types_exit(uncore_pci_uncores);
        kfree(uncore_extra_pci_dev);
        uncore_extra_pci_dev = NULL;
        uncore_free_pcibus_map();
err:
        uncore_pci_uncores = empty_uncore;
        return ret;
}

static void uncore_pci_exit(void)
{
        if (pcidrv_registered) {
                pcidrv_registered = false;
                pci_unregister_driver(uncore_pci_driver);
                uncore_types_exit(uncore_pci_uncores);
                kfree(uncore_extra_pci_dev);
                uncore_free_pcibus_map();
        }
}

static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
                                   int new_cpu)
{
        struct intel_uncore_pmu *pmu = type->pmus;
        struct intel_uncore_box *box;
        int i, pkg;

        pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
        for (i = 0; i < type->num_boxes; i++, pmu++) {
                box = pmu->boxes[pkg];
                if (!box)
                        continue;

                if (old_cpu < 0) {
                        WARN_ON_ONCE(box->cpu != -1);
                        box->cpu = new_cpu;
                        continue;
                }

                WARN_ON_ONCE(box->cpu != old_cpu);
                box->cpu = -1;
                if (new_cpu < 0)
                        continue;

                uncore_pmu_cancel_hrtimer(box);
                perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
                box->cpu = new_cpu;
        }
}

static void uncore_change_context(struct intel_uncore_type **uncores,
                                  int old_cpu, int new_cpu)
{
        for (; *uncores; uncores++)
                uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}

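/*
 * CPU hotplug offline callback: if the outgoing cpu was collecting
 * uncore events for its package, migrate the events to another cpu in
 * the package, then drop the box references held for this cpu.
 */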
static int uncore_event_cpu_offline(unsigned int cpu)
{
        struct intel_uncore_type *type, **types = uncore_msr_uncores;
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        int i, pkg, target;

        /* Check if exiting cpu is used for collecting uncore events */
        if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
                goto unref;
        /* Find a new cpu to collect uncore events */
        target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

        /* Migrate uncore events to the new target */
        if (target < nr_cpu_ids)
                cpumask_set_cpu(target, &uncore_cpu_mask);
        else
                target = -1;

        uncore_change_context(uncore_msr_uncores, cpu, target);
        uncore_change_context(uncore_pci_uncores, cpu, target);

unref:
        /* Clear the references */
        pkg = topology_logical_package_id(cpu);
        for (; *types; types++) {
                type = *types;
                pmu = type->pmus;
                for (i = 0; i < type->num_boxes; i++, pmu++) {
                        box = pmu->boxes[pkg];
                        if (box && atomic_dec_return(&box->refcnt) == 0)
                                uncore_box_exit(box);
                }
        }
        return 0;
}

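/*
 * Allocate boxes for all MSR uncore types that do not yet have one for
 * this package. Boxes are collected on a local list first so that a
 * failed allocation can be unwound without leaving partial state behind.
 */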
static int allocate_boxes(struct intel_uncore_type **types,
                         unsigned int pkg, unsigned int cpu)
{
        struct intel_uncore_box *box, *tmp;
        struct intel_uncore_type *type;
        struct intel_uncore_pmu *pmu;
        LIST_HEAD(allocated);
        int i;

        /* Try to allocate all required boxes */
        for (; *types; types++) {
                type = *types;
                pmu = type->pmus;
                for (i = 0; i < type->num_boxes; i++, pmu++) {
                        if (pmu->boxes[pkg])
                                continue;
                        box = uncore_alloc_box(type, cpu_to_node(cpu));
                        if (!box)
                                goto cleanup;
                        box->pmu = pmu;
                        box->pkgid = pkg;
                        list_add(&box->active_list, &allocated);
                }
        }
        /* Install them in the pmus */
        list_for_each_entry_safe(box, tmp, &allocated, active_list) {
                list_del_init(&box->active_list);
                box->pmu->boxes[pkg] = box;
        }
        return 0;

cleanup:
        list_for_each_entry_safe(box, tmp, &allocated, active_list) {
                list_del_init(&box->active_list);
                kfree(box);
        }
        return -ENOMEM;
}

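/*
 * CPU hotplug online callback: make sure all boxes for this cpu's
 * package exist and are initialized, and elect the cpu as the uncore
 * event collector for the package if it doesn't have one yet.
 */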
static int uncore_event_cpu_online(unsigned int cpu)
{
        struct intel_uncore_type *type, **types = uncore_msr_uncores;
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        int i, ret, pkg, target;

        pkg = topology_logical_package_id(cpu);
        ret = allocate_boxes(types, pkg, cpu);
        if (ret)
                return ret;

        for (; *types; types++) {
                type = *types;
                pmu = type->pmus;
                for (i = 0; i < type->num_boxes; i++, pmu++) {
                        box = pmu->boxes[pkg];
                        if (box && atomic_inc_return(&box->refcnt) == 1)
                                uncore_box_init(box);
                }
        }

        /*
         * Check if there is an online cpu in the package
         * which collects uncore events already.
         */
        target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
        if (target < nr_cpu_ids)
                return 0;

        cpumask_set_cpu(cpu, &uncore_cpu_mask);

        uncore_change_context(uncore_msr_uncores, -1, cpu);
        uncore_change_context(uncore_pci_uncores, -1, cpu);
        return 0;
}

static int __init type_pmu_register(struct intel_uncore_type *type)
{
        int i, ret;

        for (i = 0; i < type->num_boxes; i++) {
                ret = uncore_pmu_register(&type->pmus[i]);
                if (ret)
                        return ret;
        }
        return 0;
}

static int __init uncore_msr_pmus_register(void)
{
        struct intel_uncore_type **types = uncore_msr_uncores;
        int ret;

        for (; *types; types++) {
                ret = type_pmu_register(*types);
                if (ret)
                        return ret;
        }
        return 0;
}

static int __init uncore_cpu_init(void)
{
        int ret;

        ret = uncore_types_init(uncore_msr_uncores, true);
        if (ret)
                goto err;

        ret = uncore_msr_pmus_register();
        if (ret)
                goto err;
        return 0;
err:
        uncore_types_exit(uncore_msr_uncores);
        uncore_msr_uncores = empty_uncore;
        return ret;
}

#define X86_UNCORE_MODEL_MATCH(model, init)     \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_uncore_init_fun {
        void    (*cpu_init)(void);
        int     (*pci_init)(void);
};

static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
        .cpu_init = nhm_uncore_cpu_init,
};

static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
        .cpu_init = snb_uncore_cpu_init,
        .pci_init = snb_uncore_pci_init,
};

static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
        .cpu_init = snb_uncore_cpu_init,
        .pci_init = ivb_uncore_pci_init,
};

static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
        .cpu_init = snb_uncore_cpu_init,
        .pci_init = hsw_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
        .cpu_init = snb_uncore_cpu_init,
        .pci_init = bdw_uncore_pci_init,
};

static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
        .cpu_init = snbep_uncore_cpu_init,
        .pci_init = snbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
        .cpu_init = nhmex_uncore_cpu_init,
};

static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
        .cpu_init = ivbep_uncore_cpu_init,
        .pci_init = ivbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
        .cpu_init = hswep_uncore_cpu_init,
        .pci_init = hswep_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
        .cpu_init = bdx_uncore_cpu_init,
        .pci_init = bdx_uncore_pci_init,
};

static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
        .cpu_init = knl_uncore_cpu_init,
        .pci_init = knl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
        .cpu_init = skl_uncore_cpu_init,
        .pci_init = skl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
        .cpu_init = skx_uncore_cpu_init,
        .pci_init = skx_uncore_pci_init,
};

static const struct x86_cpu_id intel_uncore_match[] __initconst = {
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP,     nhm_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM,        nhm_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE,       nhm_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP,    nhm_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,    snb_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,      ivb_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE,   hsw_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,    hsw_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E,   hsw_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, bdw_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, bdw_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X,  snbep_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX,     nhmex_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX,    nhmex_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X,    ivbep_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X,      hswep_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,    bdx_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,   knl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM,   knl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,      skx_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
        {},
};

MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);

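/*
 * Module init: look up the per-model init functions, initialize the PCI
 * and MSR uncore PMUs, and register the CPU hotplug callbacks that set
 * up the event-collecting cpu for each package.
 */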
static int __init intel_uncore_init(void)
{
        const struct x86_cpu_id *id;
        struct intel_uncore_init_fun *uncore_init;
        int pret = 0, cret = 0, ret;

        id = x86_match_cpu(intel_uncore_match);
        if (!id)
                return -ENODEV;

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return -ENODEV;

        max_packages = topology_max_packages();

        uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
        if (uncore_init->pci_init) {
                pret = uncore_init->pci_init();
                if (!pret)
                        pret = uncore_pci_init();
        }

        if (uncore_init->cpu_init) {
                uncore_init->cpu_init();
                cret = uncore_cpu_init();
        }

        if (cret && pret)
                return -ENODEV;

        /* Install hotplug callbacks to set up the targets for each package */
        ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
                                "perf/x86/intel/uncore:online",
                                uncore_event_cpu_online,
                                uncore_event_cpu_offline);
        if (ret)
                goto err;
        return 0;

err:
        uncore_types_exit(uncore_msr_uncores);
        uncore_pci_exit();
        return ret;
}
module_init(intel_uncore_init);

static void __exit intel_uncore_exit(void)
{
        cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
        uncore_types_exit(uncore_msr_uncores);
        uncore_pci_exit();
}
module_exit(intel_uncore_exit);