#include <linux/module.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
struct intel_uncore_type **uncore_mmio_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to physical package id mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
static int max_dies;

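/* mask of cpus that collect uncore events */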
static cpumask_t uncore_cpu_mask;

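/* constraint for the fixed counter */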
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

MODULE_LICENSE("GPL");

int uncore_pcibus_to_physid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int phys_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			phys_id = map->pbus_to_physid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return phys_id;
}

static void uncore_free_pcibus_map(void)
{
	struct pci2phy_map *map, *tmp;

	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
		list_del(&map->list);
		kfree(map);
	}
}

struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		/*
		 * Drop the lock for the sleeping allocation, then retry the
		 * lookup in case somebody else added the segment meanwhile.
		 */
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_physid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	kfree(alloc);
	return map;
}

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	unsigned int dieid = topology_logical_die_id(cpu);

	/*
	 * The unsigned comparison also rejects the '-1' that the topology
	 * map reports for non-existent mappings.
	 */
	return dieid < max_dies ? pmu->boxes[dieid] : NULL;
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}

void uncore_mmio_exit_box(struct intel_uncore_box *box)
{
	if (box->io_addr)
		iounmap(box->io_addr);
}

u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	if (!box->io_addr)
		return 0;

	return readq(box->io_addr + event->hw.event_base);
}

/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for a fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &uncore_constraint_empty;
}

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * This also takes care of events which do not use an extra shared
	 * reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state (the destroy phase is only called for real boxes anyway).
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}

static void uncore_assign_hw_event(struct intel_uncore_box *box,
				   struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (uncore_pmc_fixed(hwc->idx)) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}

void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (uncore_pmc_freerunning(event->hw.idx))
		shift = 64 - uncore_freerunning_bits(box, event);
	else if (uncore_pmc_fixed(event->hw.idx))
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

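	/* the hrtimer might modify prev_count concurrently; retry until the exchange is clean */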
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

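/*
 * The overflow interrupt is unavailable for SandyBridge-EP and broken for
 * SandyBridge, so an hrtimer is used to poll the counters periodically and
 * avoid losing overflows.
 */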
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;

	/*
	 * Disable local interrupts to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process.
	 */
	local_irq_save(flags);

	/*
	 * Handle the events on the active list (free running counters)
	 * as well as the events occupying actual counters.
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}

static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
						 int node)
{
	int i, size, numshared = type->num_shared_regs;
	struct intel_uncore_box *box;

	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;
	box->pci_phys_id = -1;
	box->dieid = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}

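/*
 * Using the uncore_pmu_event_init() callback below also serves as the
 * detection point for events that belong to an uncore PMU.
 */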
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
	return &box->pmu->pmu == event->pmu;
}

static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
		      bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_box_event(box, leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	for_each_sibling_event(event, leader) {
		if (!is_box_event(box, event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box,
					struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}

static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}

	/* slow path: let the generic scheduler assign the counters */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ? -EINVAL : 0;
}

void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	/*
	 * Free running counters are read-only and always active.
	 * Use the current counter value as the start point.
	 * There is no overflow interrupt for free running counters,
	 * so the hrtimer is used to poll them periodically.
	 */
	if (uncore_pmc_freerunning(event->hw.idx)) {
		list_add_tail(&event->active_entry, &box->active_list);
		local64_set(&event->hw.prev_count,
			    uncore_read_counter(box, event));
		if (box->n_active++ == 0)
			uncore_pmu_start_hrtimer(box);
		return;
	}

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1) {
		uncore_enable_box(box);
		uncore_pmu_start_hrtimer(box);
	}
}

void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	/* Cannot disable a free running counter which is read-only */
	if (uncore_pmc_freerunning(hwc->idx)) {
		list_del(&event->active_entry);
		if (--box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
		uncore_perf_event_update(box, event);
		return;
	}

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0) {
			uncore_disable_box(box);
			uncore_pmu_cancel_hrtimer(box);
		}
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	/*
	 * A free running counter is assigned in event_init().
	 * The free running counter event and the counter are 1:1 mapped,
	 * so the event does not need to be tracked in the event_list.
	 */
	if (uncore_pmc_freerunning(hwc->idx)) {
		if (flags & PERF_EF_START)
			uncore_pmu_event_start(event, 0);
		return 0;
	}

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;

		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}

void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	/*
	 * The event for a free running counter is not tracked by the
	 * event_list. There is no need to force event->hw.idx = -1 to
	 * reassign the counter, because the event and the counter are
	 * 1:1 mapped.
	 */
	if (uncore_pmc_freerunning(event->hw.idx))
		return;

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			for (++i; i < box->n_events; i++)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	uncore_perf_event_update(box, event);
}

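/*
 * Validation ensures the group can be loaded onto the PMU if it was the
 * only group available.
 */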
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				 struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	/* The free running counter is always active. */
	if (uncore_pmc_freerunning(event->hw.idx))
		return 0;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * The event is not yet connected with its siblings, therefore we
	 * must first collect the existing siblings and then add the new
	 * event before we can simulate the scheduling.
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}

static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling is not supported for uncore events */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu.
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;

		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else if (is_freerunning_event(event)) {
		hwc->config = event->attr.config;
		if (!check_valid_freerunning_event(box, event))
			return -EINVAL;
		event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
		/*
		 * The free running counter event and the free running
		 * counter are always 1:1 mapped, and the counter is
		 * always active, so assign it here.
		 */
		event->hw.event_base = uncore_freerunning_counter(box, event);
	} else {
		hwc->config = event->attr.config &
			      (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}

static ssize_t uncore_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};

static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups = pmu->type->attr_groups,
			.task_ctx_nr = perf_invalid_context,
			.event_init = uncore_pmu_event_init,
			.add = uncore_pmu_event_add,
			.del = uncore_pmu_event_del,
			.start = uncore_pmu_event_start,
			.stop = uncore_pmu_event_stop,
			.read = uncore_pmu_event_read,
			.module = THIS_MODULE,
			.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
	}

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (!ret)
		pmu->registered = true;
	return ret;
}

static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
	if (!pmu->registered)
		return;
	perf_pmu_unregister(&pmu->pmu);
	pmu->registered = false;
}

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
	int die;

	for (die = 0; die < max_dies; die++)
		kfree(pmu->boxes[die]);
	kfree(pmu->boxes);
}

static void uncore_type_exit(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	int i;

	if (pmu) {
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			uncore_pmu_unregister(pmu);
			uncore_free_boxes(pmu);
		}
		kfree(type->pmus);
		type->pmus = NULL;
	}
	kfree(type->events_group);
	type->events_group = NULL;
}

static void uncore_types_exit(struct intel_uncore_type **types)
{
	for (; *types; types++)
		uncore_type_exit(*types);
}

static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
	struct intel_uncore_pmu *pmus;
	size_t size;
	int i, j;

	pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	size = max_dies * sizeof(struct intel_uncore_box *);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id = setid ? i : -1;
		pmus[i].pmu_idx = i;
		pmus[i].type = type;
		pmus[i].boxes = kzalloc(size, GFP_KERNEL);
		if (!pmus[i].boxes)
			goto err;
	}

	type->pmus = pmus;
	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				   0, type->num_counters, 0, 0);

	if (type->event_descs) {
		struct {
			struct attribute_group group;
			struct attribute *attrs[];
		} *attr_group;

		/* count the event descriptors */
		for (i = 0; type->event_descs[i].attr.attr.name; i++);

		attr_group = kzalloc(struct_size(attr_group, attrs, i + 1),
				     GFP_KERNEL);
		if (!attr_group)
			goto err;

		attr_group->group.name = "events";
		attr_group->group.attrs = attr_group->attrs;

		for (j = 0; j < i; j++)
			attr_group->attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = &attr_group->group;
	}

	type->pmu_group = &uncore_pmu_attr_group;

	return 0;

err:
	for (i = 0; i < type->num_boxes; i++)
		kfree(pmus[i].boxes);
	kfree(pmus);

	return -ENOMEM;
}

static int __init
uncore_types_init(struct intel_uncore_type **types, bool setid)
{
	int ret;

	for (; *types; types++) {
		ret = uncore_type_init(*types, setid);
		if (ret)
			return ret;
	}
	return 0;
}

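/*
 * Add a PCI uncore device.
 */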
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu = NULL;
	struct intel_uncore_box *box;
	int phys_id, die, ret;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	if (phys_id < 0)
		return -ENODEV;

	die = (topology_max_die_per_package() > 1) ? phys_id :
					topology_phys_to_logical_pkg(phys_id);
	if (die < 0)
		return -EINVAL;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

		uncore_extra_pci_dev[die].dev[idx] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];

	/*
	 * Some platforms, e.g. Knights Landing, use a common PCI device ID
	 * for multiple instances of an uncore PMU device type. Check the
	 * PCI slot and function to identify the uncore box in that case.
	 */
	if (id->driver_data & ~0xffff) {
		struct pci_driver *pci_drv = pdev->driver;
		const struct pci_device_id *ids = pci_drv->id_table;
		unsigned int devfn;

		while (ids && ids->vendor) {
			if ((ids->vendor == pdev->vendor) &&
			    (ids->device == pdev->device)) {
				devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
						  UNCORE_PCI_DEV_FUNC(ids->driver_data));
				if (devfn == pdev->devfn) {
					pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
					break;
				}
			}
			ids++;
		}
		if (pmu == NULL)
			return -ENODEV;
	} else {
		/*
		 * For performance monitoring units with multiple boxes,
		 * each box has a different function id.
		 */
		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	}

	if (WARN_ON_ONCE(pmu->boxes[die] != NULL))
		return -EINVAL;

	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	atomic_inc(&box->refcnt);
	box->pci_phys_id = phys_id;
	box->dieid = die;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	pmu->boxes[die] = box;
	if (atomic_inc_return(&pmu->activeboxes) > 1)
		return 0;

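	/* First active box registers the pmu */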
	ret = uncore_pmu_register(pmu);
	if (ret) {
		pci_set_drvdata(pdev, NULL);
		pmu->boxes[die] = NULL;
		uncore_box_exit(box);
		kfree(box);
	}
	return ret;
}

static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box;
	struct intel_uncore_pmu *pmu;
	int i, phys_id, die;

	phys_id = uncore_pcibus_to_physid(pdev->bus);

	box = pci_get_drvdata(pdev);
	if (!box) {
		die = (topology_max_die_per_package() > 1) ? phys_id :
					topology_phys_to_logical_pkg(phys_id);
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[die].dev[i] == pdev) {
				uncore_extra_pci_dev[die].dev[i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
		return;

	pci_set_drvdata(pdev, NULL);
	pmu->boxes[box->dieid] = NULL;
	if (atomic_dec_return(&pmu->activeboxes) == 0)
		uncore_pmu_unregister(pmu);
	uncore_box_exit(box);
	kfree(box);
}

static int __init uncore_pci_init(void)
{
	size_t size;
	int ret;

	size = max_dies * sizeof(struct pci_extra_dev);
	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
	if (!uncore_extra_pci_dev) {
		ret = -ENOMEM;
		goto err;
	}

	ret = uncore_types_init(uncore_pci_uncores, false);
	if (ret)
		goto errtype;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret)
		goto errtype;

	pcidrv_registered = true;
	return 0;

errtype:
	uncore_types_exit(uncore_pci_uncores);
	kfree(uncore_extra_pci_dev);
	uncore_extra_pci_dev = NULL;
	uncore_free_pcibus_map();
err:
	uncore_pci_uncores = empty_uncore;
	return ret;
}

static void uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(uncore_pci_uncores);
		kfree(uncore_extra_pci_dev);
		uncore_free_pcibus_map();
	}
}

static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
				   int new_cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, die;

	die = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu);
	for (i = 0; i < type->num_boxes; i++, pmu++) {
		box = pmu->boxes[die];
		if (!box)
			continue;

		if (old_cpu < 0) {
			WARN_ON_ONCE(box->cpu != -1);
			box->cpu = new_cpu;
			continue;
		}

		WARN_ON_ONCE(box->cpu != old_cpu);
		box->cpu = -1;
		if (new_cpu < 0)
			continue;

		uncore_pmu_cancel_hrtimer(box);
		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
		box->cpu = new_cpu;
	}
}

static void uncore_change_context(struct intel_uncore_type **uncores,
				  int old_cpu, int new_cpu)
{
	for (; *uncores; uncores++)
		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}

static void uncore_box_unref(struct intel_uncore_type **types, int id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i;

	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[id];
			if (box && atomic_dec_return(&box->refcnt) == 0)
				uncore_box_exit(box);
		}
	}
}

static int uncore_event_cpu_offline(unsigned int cpu)
{
	int die, target;

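	/* Check if the exiting CPU was collecting uncore events */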
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		goto unref;

	/* Find a new CPU on the same die to collect uncore events */
	target = cpumask_any_but(topology_die_cpumask(cpu), cpu);

	/* Migrate uncore events to the new target */
	if (target < nr_cpu_ids)
		cpumask_set_cpu(target, &uncore_cpu_mask);
	else
		target = -1;

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_mmio_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);

unref:
	/* Clear the references */
	die = topology_logical_die_id(cpu);
	uncore_box_unref(uncore_msr_uncores, die);
	uncore_box_unref(uncore_mmio_uncores, die);
	return 0;
}

static int allocate_boxes(struct intel_uncore_type **types,
			  unsigned int die, unsigned int cpu)
{
	struct intel_uncore_box *box, *tmp;
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	LIST_HEAD(allocated);
	int i;

	/* Try to allocate all required boxes before installing any of them */
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			if (pmu->boxes[die])
				continue;
			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				goto cleanup;
			box->pmu = pmu;
			box->dieid = die;
			list_add(&box->active_list, &allocated);
		}
	}

	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
		list_del_init(&box->active_list);
		box->pmu->boxes[die] = box;
	}
	return 0;

cleanup:
	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
		list_del_init(&box->active_list);
		kfree(box);
	}
	return -ENOMEM;
}

static int uncore_box_ref(struct intel_uncore_type **types,
			  int id, unsigned int cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, ret;

	ret = allocate_boxes(types, id, cpu);
	if (ret)
		return ret;

	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[id];
			if (box && atomic_inc_return(&box->refcnt) == 1)
				uncore_box_init(box);
		}
	}
	return 0;
}

static int uncore_event_cpu_online(unsigned int cpu)
{
	int die, target, msr_ret, mmio_ret;

	die = topology_logical_die_id(cpu);
	msr_ret = uncore_box_ref(uncore_msr_uncores, die, cpu);
	mmio_ret = uncore_box_ref(uncore_mmio_uncores, die, cpu);
	if (msr_ret && mmio_ret)
		return -ENOMEM;

	/*
	 * Check if there is an online cpu in the package
	 * which collects uncore events already.
	 */
	target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
	if (target < nr_cpu_ids)
		return 0;

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	if (!msr_ret)
		uncore_change_context(uncore_msr_uncores, -1, cpu);
	if (!mmio_ret)
		uncore_change_context(uncore_mmio_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
	return 0;
}

static int __init type_pmu_register(struct intel_uncore_type *type)
{
	int i, ret;

	for (i = 0; i < type->num_boxes; i++) {
		ret = uncore_pmu_register(&type->pmus[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_msr_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_msr_uncores;
	int ret;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_cpu_init(void)
{
	int ret;

	ret = uncore_types_init(uncore_msr_uncores, true);
	if (ret)
		goto err;

	ret = uncore_msr_pmus_register();
	if (ret)
		goto err;
	return 0;
err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_msr_uncores = empty_uncore;
	return ret;
}

static int __init uncore_mmio_init(void)
{
	struct intel_uncore_type **types = uncore_mmio_uncores;
	int ret;

	ret = uncore_types_init(types, true);
	if (ret)
		goto err;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			goto err;
	}
	return 0;
err:
	uncore_types_exit(uncore_mmio_uncores);
	uncore_mmio_uncores = empty_uncore;
	return ret;
}
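/* CPU model to uncore init-routine mapping */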
#define X86_UNCORE_MODEL_MATCH(model, init)	\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_uncore_init_fun {
	void (*cpu_init)(void);
	int (*pci_init)(void);
	void (*mmio_init)(void);
};

static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
	.cpu_init = nhm_uncore_cpu_init,
};

static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = snb_uncore_pci_init,
};

static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = ivb_uncore_pci_init,
};

static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = hsw_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = bdw_uncore_pci_init,
};

static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
	.cpu_init = snbep_uncore_cpu_init,
	.pci_init = snbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
	.cpu_init = nhmex_uncore_cpu_init,
};

static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
	.cpu_init = ivbep_uncore_cpu_init,
	.pci_init = ivbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
	.cpu_init = hswep_uncore_cpu_init,
	.pci_init = hswep_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
	.cpu_init = bdx_uncore_cpu_init,
	.pci_init = bdx_uncore_pci_init,
};

static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
	.cpu_init = knl_uncore_cpu_init,
	.pci_init = knl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
	.cpu_init = skl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
	.cpu_init = skx_uncore_cpu_init,
	.pci_init = skx_uncore_pci_init,
};

static const struct intel_uncore_init_fun icl_uncore_init __initconst = {
	.cpu_init = icl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct intel_uncore_init_fun snr_uncore_init __initconst = {
	.cpu_init = snr_uncore_cpu_init,
	.pci_init = snr_uncore_pci_init,
	.mmio_init = snr_uncore_mmio_init,
};

static const struct x86_cpu_id intel_uncore_match[] __initconst = {
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP, nhm_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE, snb_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, ivb_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, bdw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, bdw_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX, nhmex_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX, nhmex_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, ivbep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hswep_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, bdx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, icl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_NNPI, icl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_DESKTOP, icl_uncore_init),
	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ATOM_TREMONT_X, snr_uncore_init),
	{},
};

MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);

static int __init intel_uncore_init(void)
{
	const struct x86_cpu_id *id;
	struct intel_uncore_init_fun *uncore_init;
	int pret = 0, cret = 0, mret = 0, ret;

	id = x86_match_cpu(intel_uncore_match);
	if (!id)
		return -ENODEV;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	max_dies = topology_max_packages() * topology_max_die_per_package();

	uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
	if (uncore_init->pci_init) {
		pret = uncore_init->pci_init();
		if (!pret)
			pret = uncore_pci_init();
	}

	if (uncore_init->cpu_init) {
		uncore_init->cpu_init();
		cret = uncore_cpu_init();
	}

	if (uncore_init->mmio_init) {
		uncore_init->mmio_init();
		mret = uncore_mmio_init();
	}

	if (cret && pret && mret)
		return -ENODEV;

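	/* Install hotplug callbacks to set up the event-collector CPU for each die */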
	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
				"perf/x86/intel/uncore:online",
				uncore_event_cpu_online,
				uncore_event_cpu_offline);
	if (ret)
		goto err;
	return 0;

err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_types_exit(uncore_mmio_uncores);
	uncore_pci_exit();
	return ret;
}
module_init(intel_uncore_init);

static void __exit intel_uncore_exit(void)
{
	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
	uncore_types_exit(uncore_msr_uncores);
	uncore_types_exit(uncore_mmio_uncores);
	uncore_pci_exit();
}
module_exit(intel_uncore_exit);