#include <linux/module.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
struct intel_uncore_type **uncore_mmio_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
static int max_dies;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
	EVENT_CONSTRAINT(0, 0, 0);

MODULE_LICENSE("GPL");

int uncore_pcibus_to_physid(struct pci_bus *bus)
{
	struct pci2phy_map *map;
	int phys_id = -1;

	raw_spin_lock(&pci2phy_map_lock);
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == pci_domain_nr(bus)) {
			phys_id = map->pbus_to_physid[bus->number];
			break;
		}
	}
	raw_spin_unlock(&pci2phy_map_lock);

	return phys_id;
}

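/*
 * Tear down the pci bus to physical package id mapping list; used on
 * PCI init failure and on module exit.
 */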
static void uncore_free_pcibus_map(void)
{
	struct pci2phy_map *map, *tmp;

	list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
		list_del(&map->list);
		kfree(map);
	}
}

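/*
 * Look up the pci2phy_map for @segment, allocating a new entry if none
 * exists.  Caller must hold pci2phy_map_lock; the lock is dropped and
 * re-acquired around the allocation, hence the re-lookup.
 */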
struct pci2phy_map *__find_pci2phy_map(int segment)
{
	struct pci2phy_map *map, *alloc = NULL;
	int i;

	lockdep_assert_held(&pci2phy_map_lock);

lookup:
	list_for_each_entry(map, &pci2phy_map_head, list) {
		if (map->segment == segment)
			goto end;
	}

	if (!alloc) {
		raw_spin_unlock(&pci2phy_map_lock);
		alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
		raw_spin_lock(&pci2phy_map_lock);

		if (!alloc)
			return NULL;

		goto lookup;
	}

	map = alloc;
	alloc = NULL;
	map->segment = segment;
	for (i = 0; i < 256; i++)
		map->pbus_to_physid[i] = -1;
	list_add_tail(&map->list, &pci2phy_map_head);

end:
	kfree(alloc);
	return map;
}

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}

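/*
 * Map a (pmu, cpu) pair to the box that serves the cpu's logical die.
 */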
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	unsigned int dieid = topology_logical_die_id(cpu);

	/*
	 * The unsigned check also catches the '-1' return value for non
	 * existent mappings in the topology map.
	 */
	return dieid < max_dies ? pmu->boxes[dieid] : NULL;
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);

	return count;
}

void uncore_mmio_exit_box(struct intel_uncore_box *box)
{
	if (box->io_addr)
		iounmap(box->io_addr);
}

u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event)
{
	if (!box->io_addr)
		return 0;

	return readq(box->io_addr + event->hw.event_base);
}

/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	unsigned long flags;
	bool ok = false;

	/*
	 * reg->alloc can be set due to existing state, so for fake box we
	 * need to ignore this, otherwise we might fail to allocate proper
	 * fake state for this extra reg constraint.
	 */
	if (reg1->idx == EXTRA_REG_NONE ||
	    (!uncore_box_is_fake(box) && reg1->alloc))
		return NULL;

	er = &box->shared_regs[reg1->idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) ||
	    (er->config1 == reg1->config && er->config2 == reg2->config)) {
		atomic_inc(&er->ref);
		er->config1 = reg1->config;
		er->config2 = reg2->config;
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (ok) {
		if (!uncore_box_is_fake(box))
			reg1->alloc = 1;
		return NULL;
	}

	return &uncore_constraint_empty;
}

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

	/*
	 * Only put constraint if extra reg was actually allocated. Also
	 * takes care of event which do not use an extra shared reg.
	 *
	 * Also, if this is a fake box we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent box
	 * state either since it will be thrown out.
	 */
	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	er = &box->shared_regs[reg1->idx];
	atomic_dec(&er->ref);
	reg1->alloc = 0;
}

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	er = &box->shared_regs[idx];

	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return config;
}

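/*
 * Bind @event to counter @idx of the box: program event_base/config_base
 * for either the fixed counter or the selected general-purpose counter.
 */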
static void uncore_assign_hw_event(struct intel_uncore_box *box,
				   struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (uncore_pmc_fixed(hwc->idx)) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}

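/*
 * Accumulate the delta since the last read into event->count.  Shifting
 * both raw values up by (64 - counter width) makes the subtraction wrap
 * at the real counter width, so shifting the difference back down yields
 * the correct positive delta even across a hardware counter overflow.
 */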
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (uncore_pmc_freerunning(event->hw.idx))
		shift = 64 - uncore_freerunning_bits(box, event);
	else if (uncore_pmc_fixed(event->hw.idx))
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}

/*
 * The overflow interrupt is unavailable for SandyBridge-EP, is broken
 * for SandyBridge. So we use hrtimer to periodically poll the counter
 * to avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	struct perf_event *event;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * disable local interrupt to prevent uncore_pmu_event_start/stop
	 * to interrupt the update process
	 */
	local_irq_save(flags);

	/*
	 * handle boxes with an active event list as opposed to active
	 * counters
	 */
	list_for_each_entry(event, &box->active_list, active_entry) {
		uncore_perf_event_update(box, event);
	}

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
	return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
		      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}

static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
						 int node)
{
	int i, size, numshared = type->num_shared_regs;
	struct intel_uncore_box *box;

	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

	box = kzalloc_node(size, GFP_KERNEL, node);
	if (!box)
		return NULL;

	for (i = 0; i < numshared; i++)
		raw_spin_lock_init(&box->shared_regs[i].lock);

	uncore_pmu_init_hrtimer(box);
	box->cpu = -1;
	box->pci_phys_id = -1;
	box->dieid = -1;

	/* set default hrtimer timeout */
	box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

	INIT_LIST_HEAD(&box->active_list);

	return box;
}

/*
 * Using uncore_pmu_event_init pmu event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
	return &box->pmu->pmu == event->pmu;
}

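/*
 * Add @leader (and, if @dogrp, its active siblings) to the box's
 * event_list.  Returns the new number of collected events, or -EINVAL
 * once the box would exceed its counter capacity.
 */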
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
		      bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;

	if (is_box_event(box, leader)) {
		box->event_list[n] = leader;
		n++;
	}

	if (!dogrp)
		return n;

	for_each_sibling_event(event, leader) {
		if (!is_box_event(box, event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}

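/*
 * Resolve the constraint for @event: the type-specific callback first,
 * then the fixed-counter constraint, then the static constraint table,
 * and finally the unconstrained default.
 */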
static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct event_constraint *c;

	if (type->ops->get_constraint) {
		c = type->ops->get_constraint(box, event);
		if (c)
			return c;
	}

	if (event->attr.config == UNCORE_FIXED_EVENT)
		return &uncore_constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box,
					struct perf_event *event)
{
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}

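/*
 * Schedule @n collected events onto counters: first try to keep every
 * event on the counter it used last time (fastpath), otherwise fall
 * back to the generic scheduler in perf_assign_events().
 */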
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}

	/* slow path */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
					 wmin, wmax, n, assign);

	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ? -EINVAL : 0;
}

void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	/*
	 * Free running counter is read-only and always active.
	 * Use the current counter value as start point.
	 * There is no overflow interrupt for free running counter.
	 * Use hrtimer to periodically poll the counter to avoid overflow.
	 */
	if (uncore_pmc_freerunning(event->hw.idx)) {
		list_add_tail(&event->active_entry, &box->active_list);
		local64_set(&event->hw.prev_count,
			    uncore_read_counter(box, event));
		if (box->n_active++ == 0)
			uncore_pmu_start_hrtimer(box);
		return;
	}

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}

void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	/* Cannot disable free running counter which is read-only */
	if (uncore_pmc_freerunning(hwc->idx)) {
		list_del(&event->active_entry);
		if (--box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
		uncore_perf_event_update(box, event);
		return;
	}

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	/*
	 * The free running counter is assigned in event_init().
	 * The free running counter event and free running counter
	 * are 1:1 mapped. It doesn't need to be tracked in event_list.
	 */
	if (uncore_pmc_freerunning(hwc->idx)) {
		if (flags & PERF_EF_START)
			uncore_pmu_event_start(event, 0);
		return 0;
	}

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
		    hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
		    hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}

void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	/*
	 * The event for free running counter is not tracked by event_list.
	 * It doesn't need to force event->hw.idx = -1 to reassign the counter.
	 * Because the event and the free running counter are 1:1 mapped.
	 */
	if (uncore_pmc_freerunning(event->hw.idx))
		return;

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			uncore_put_event_constraint(box, event);

			for (++i; i < box->n_events; i++)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	uncore_perf_event_update(box, event);
}

/*
 * validation ensures the group can be loaded onto the
 * hardware it was meant for
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				 struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int ret = -EINVAL, n;

	/* The free running counter is always active. */
	if (uncore_pmc_freerunning(event->hw.idx))
		return 0;

	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * the event is not yet connected with its
	 * siblings therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, NULL, n);
out:
	kfree(fake_box);
	return ret;
}

static int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;

		/* fixed counters have event field hardcoded to zero */
		hwc->config = 0ULL;
	} else if (is_freerunning_event(event)) {
		hwc->config = event->attr.config;
		if (!check_valid_freerunning_event(box, event))
			return -EINVAL;
		event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
		/*
		 * The free running counter event and free running counter
		 * are always 1:1 mapped.
		 * The free running counter is always active.
		 * Assign the free running counter here.
		 */
		event->hw.event_base = uncore_freerunning_counter(box, event);
	} else {
		hwc->config = event->attr.config &
			      (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
		if (pmu->type->ops->hw_config) {
			ret = pmu->type->ops->hw_config(box, event);
			if (ret)
				return ret;
		}
	}

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}

static void uncore_pmu_enable(struct pmu *pmu)
{
	struct intel_uncore_pmu *uncore_pmu;
	struct intel_uncore_box *box;

	uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
	if (!uncore_pmu)
		return;

	box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
	if (!box)
		return;

	if (uncore_pmu->type->ops->enable_box)
		uncore_pmu->type->ops->enable_box(box);
}

static void uncore_pmu_disable(struct pmu *pmu)
{
	struct intel_uncore_pmu *uncore_pmu;
	struct intel_uncore_box *box;

	uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
	if (!uncore_pmu)
		return;

	box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
	if (!box)
		return;

	if (uncore_pmu->type->ops->disable_box)
		uncore_pmu->type->ops->disable_box(box);
}

static ssize_t uncore_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group uncore_pmu_attr_group = {
	.attrs = uncore_pmu_attrs,
};

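/*
 * Register the perf pmu for one uncore pmu instance.  A type may supply
 * its own struct pmu template; otherwise the generic callbacks above
 * are filled in.
 */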
static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	if (!pmu->type->pmu) {
		pmu->pmu = (struct pmu) {
			.attr_groups = pmu->type->attr_groups,
			.task_ctx_nr = perf_invalid_context,
			.pmu_enable = uncore_pmu_enable,
			.pmu_disable = uncore_pmu_disable,
			.event_init = uncore_pmu_event_init,
			.add = uncore_pmu_event_add,
			.del = uncore_pmu_event_del,
			.start = uncore_pmu_event_start,
			.stop = uncore_pmu_event_stop,
			.read = uncore_pmu_event_read,
			.module = THIS_MODULE,
			.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
		};
	} else {
		pmu->pmu = *pmu->type->pmu;
		pmu->pmu.attr_groups = pmu->type->attr_groups;
	}

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (!ret)
		pmu->registered = true;
	return ret;
}

static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
	if (!pmu->registered)
		return;
	perf_pmu_unregister(&pmu->pmu);
	pmu->registered = false;
}

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
	int die;

	for (die = 0; die < max_dies; die++)
		kfree(pmu->boxes[die]);
	kfree(pmu->boxes);
}

static void uncore_type_exit(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	int i;

	if (pmu) {
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			uncore_pmu_unregister(pmu);
			uncore_free_boxes(pmu);
		}
		kfree(type->pmus);
		type->pmus = NULL;
	}
	kfree(type->events_group);
	type->events_group = NULL;
}

static void uncore_types_exit(struct intel_uncore_type **types)
{
	for (; *types; types++)
		uncore_type_exit(*types);
}

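/*
 * Allocate the per-type pmu array and the per-die box pointer arrays,
 * set up the default "unconstrained" event constraint and, when event
 * descriptors are present, build the "events" sysfs attribute group.
 */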
static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
	struct intel_uncore_pmu *pmus;
	size_t size;
	int i, j;

	pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;

	size = max_dies * sizeof(struct intel_uncore_box *);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id = setid ? i : -1;
		pmus[i].pmu_idx = i;
		pmus[i].type = type;
		pmus[i].boxes = kzalloc(size, GFP_KERNEL);
		if (!pmus[i].boxes)
			goto err;
	}

	type->pmus = pmus;
	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				   0, type->num_counters, 0, 0);

	if (type->event_descs) {
		struct {
			struct attribute_group group;
			struct attribute *attrs[];
		} *attr_group;
		for (i = 0; type->event_descs[i].attr.attr.name; i++);

		attr_group = kzalloc(struct_size(attr_group, attrs, i + 1),
				     GFP_KERNEL);
		if (!attr_group)
			goto err;

		attr_group->group.name = "events";
		attr_group->group.attrs = attr_group->attrs;

		for (j = 0; j < i; j++)
			attr_group->attrs[j] = &type->event_descs[j].attr.attr;

		type->events_group = &attr_group->group;
	}

	type->pmu_group = &uncore_pmu_attr_group;

	return 0;

err:
	for (i = 0; i < type->num_boxes; i++)
		kfree(pmus[i].boxes);
	kfree(pmus);

	return -ENOMEM;
}

static int __init
uncore_types_init(struct intel_uncore_type **types, bool setid)
{
	int ret;

	for (; *types; types++) {
		ret = uncore_type_init(*types, setid);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu = NULL;
	struct intel_uncore_box *box;
	int phys_id, die, ret;

	phys_id = uncore_pcibus_to_physid(pdev->bus);
	if (phys_id < 0)
		return -ENODEV;

	die = (topology_max_die_per_package() > 1) ? phys_id :
					topology_phys_to_logical_pkg(phys_id);
	if (die < 0)
		return -EINVAL;

	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
		int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

		uncore_extra_pci_dev[die].dev[idx] = pdev;
		pci_set_drvdata(pdev, NULL);
		return 0;
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];

	/*
	 * Some platforms, e.g. Knights Landing, use a common PCI device ID
	 * for multiple instances of an uncore PMU device type. We should check
	 * PCI slot and func to indicate the uncore box.
	 */
	if (id->driver_data & ~0xffff) {
		struct pci_driver *pci_drv = pdev->driver;
		const struct pci_device_id *ids = pci_drv->id_table;
		unsigned int devfn;

		while (ids && ids->vendor) {
			if ((ids->vendor == pdev->vendor) &&
			    (ids->device == pdev->device)) {
				devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
						  UNCORE_PCI_DEV_FUNC(ids->driver_data));
				if (devfn == pdev->devfn) {
					pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
					break;
				}
			}
			ids++;
		}
		if (pmu == NULL)
			return -ENODEV;
	} else {
		/*
		 * for performance monitoring unit with multiple boxes,
		 * each box has a different function id.
		 */
		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	}

	if (WARN_ON_ONCE(pmu->boxes[die] != NULL))
		return -EINVAL;

	box = uncore_alloc_box(type, NUMA_NO_NODE);
	if (!box)
		return -ENOMEM;

	if (pmu->func_id < 0)
		pmu->func_id = pdev->devfn;
	else
		WARN_ON_ONCE(pmu->func_id != pdev->devfn);

	atomic_inc(&box->refcnt);
	box->pci_phys_id = phys_id;
	box->dieid = die;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	pmu->boxes[die] = box;
	if (atomic_inc_return(&pmu->activeboxes) > 1)
		return 0;

	/* First active box registers the pmu */
	ret = uncore_pmu_register(pmu);
	if (ret) {
		pci_set_drvdata(pdev, NULL);
		pmu->boxes[die] = NULL;
		uncore_box_exit(box);
		kfree(box);
	}
	return ret;
}

static void uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box;
	struct intel_uncore_pmu *pmu;
	int i, phys_id, die;

	phys_id = uncore_pcibus_to_physid(pdev->bus);

	box = pci_get_drvdata(pdev);
	if (!box) {
		die = (topology_max_die_per_package() > 1) ? phys_id :
					topology_phys_to_logical_pkg(phys_id);
		for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
			if (uncore_extra_pci_dev[die].dev[i] == pdev) {
				uncore_extra_pci_dev[die].dev[i] = NULL;
				break;
			}
		}
		WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
		return;
	}

	pmu = box->pmu;
	if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
		return;

	pci_set_drvdata(pdev, NULL);
	pmu->boxes[box->dieid] = NULL;
	if (atomic_dec_return(&pmu->activeboxes) == 0)
		uncore_pmu_unregister(pmu);
	uncore_box_exit(box);
	kfree(box);
}

static int __init uncore_pci_init(void)
{
	size_t size;
	int ret;

	size = max_dies * sizeof(struct pci_extra_dev);
	uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
	if (!uncore_extra_pci_dev) {
		ret = -ENOMEM;
		goto err;
	}

	ret = uncore_types_init(uncore_pci_uncores, false);
	if (ret)
		goto errtype;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret)
		goto errtype;

	pcidrv_registered = true;
	return 0;

errtype:
	uncore_types_exit(uncore_pci_uncores);
	kfree(uncore_extra_pci_dev);
	uncore_extra_pci_dev = NULL;
	uncore_free_pcibus_map();
err:
	uncore_pci_uncores = empty_uncore;
	return ret;
}

static void uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(uncore_pci_uncores);
		kfree(uncore_extra_pci_dev);
		uncore_free_pcibus_map();
	}
}

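/*
 * Move the boxes of @type on the die of @old_cpu over to @new_cpu,
 * migrating any active perf contexts along with them.
 */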
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
				   int new_cpu)
{
	struct intel_uncore_pmu *pmu = type->pmus;
	struct intel_uncore_box *box;
	int i, die;

	die = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu);
	for (i = 0; i < type->num_boxes; i++, pmu++) {
		box = pmu->boxes[die];
		if (!box)
			continue;

		if (old_cpu < 0) {
			WARN_ON_ONCE(box->cpu != -1);
			box->cpu = new_cpu;
			continue;
		}

		WARN_ON_ONCE(box->cpu != old_cpu);
		box->cpu = -1;
		if (new_cpu < 0)
			continue;

		uncore_pmu_cancel_hrtimer(box);
		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
		box->cpu = new_cpu;
	}
}

static void uncore_change_context(struct intel_uncore_type **uncores,
				  int old_cpu, int new_cpu)
{
	for (; *uncores; uncores++)
		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}

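/*
 * Drop one reference on every box of every type on die @id and tear a
 * box down when its refcount reaches zero.
 */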
static void uncore_box_unref(struct intel_uncore_type **types, int id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i;

	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[id];
			if (box && atomic_dec_return(&box->refcnt) == 0)
				uncore_box_exit(box);
		}
	}
}

static int uncore_event_cpu_offline(unsigned int cpu)
{
	int die, target;

	/* Check if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		goto unref;
	/* Find a new cpu to collect uncore events */
	target = cpumask_any_but(topology_die_cpumask(cpu), cpu);

	/* Migrate uncore events to the new cpu */
	if (target < nr_cpu_ids)
		cpumask_set_cpu(target, &uncore_cpu_mask);
	else
		target = -1;

	uncore_change_context(uncore_msr_uncores, cpu, target);
	uncore_change_context(uncore_mmio_uncores, cpu, target);
	uncore_change_context(uncore_pci_uncores, cpu, target);

unref:
	/* Clear the references */
	die = topology_logical_die_id(cpu);
	uncore_box_unref(uncore_msr_uncores, die);
	uncore_box_unref(uncore_mmio_uncores, die);
	return 0;
}

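/*
 * Allocate the boxes still missing on @die, on the node of @cpu.
 * All or nothing: if any allocation fails, every box allocated here is
 * freed again.
 */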
static int allocate_boxes(struct intel_uncore_type **types,
			  unsigned int die, unsigned int cpu)
{
	struct intel_uncore_box *box, *tmp;
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	LIST_HEAD(allocated);
	int i;

	/* Try to allocate all required boxes */
	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			if (pmu->boxes[die])
				continue;
			box = uncore_alloc_box(type, cpu_to_node(cpu));
			if (!box)
				goto cleanup;
			box->pmu = pmu;
			box->dieid = die;
			list_add(&box->active_list, &allocated);
		}
	}

	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
		list_del_init(&box->active_list);
		box->pmu->boxes[die] = box;
	}
	return 0;

cleanup:
	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
		list_del_init(&box->active_list);
		kfree(box);
	}
	return -ENOMEM;
}

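/*
 * Take a reference on every box of every type on die @id, allocating
 * missing boxes first; a box is initialized on its first reference.
 */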
static int uncore_box_ref(struct intel_uncore_type **types,
			  int id, unsigned int cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, ret;

	ret = allocate_boxes(types, id, cpu);
	if (ret)
		return ret;

	for (; *types; types++) {
		type = *types;
		pmu = type->pmus;
		for (i = 0; i < type->num_boxes; i++, pmu++) {
			box = pmu->boxes[id];
			if (box && atomic_inc_return(&box->refcnt) == 1)
				uncore_box_init(box);
		}
	}
	return 0;
}

static int uncore_event_cpu_online(unsigned int cpu)
{
	int die, target, msr_ret, mmio_ret;

	die = topology_logical_die_id(cpu);
	msr_ret = uncore_box_ref(uncore_msr_uncores, die, cpu);
	mmio_ret = uncore_box_ref(uncore_mmio_uncores, die, cpu);
	if (msr_ret && mmio_ret)
		return -ENOMEM;

	/*
	 * Check if there is an online cpu in the package
	 * which collects uncore events already.
	 */
	target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
	if (target < nr_cpu_ids)
		return 0;

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	if (!msr_ret)
		uncore_change_context(uncore_msr_uncores, -1, cpu);
	if (!mmio_ret)
		uncore_change_context(uncore_mmio_uncores, -1, cpu);
	uncore_change_context(uncore_pci_uncores, -1, cpu);
	return 0;
}

static int __init type_pmu_register(struct intel_uncore_type *type)
{
	int i, ret;

	for (i = 0; i < type->num_boxes; i++) {
		ret = uncore_pmu_register(&type->pmus[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_msr_pmus_register(void)
{
	struct intel_uncore_type **types = uncore_msr_uncores;
	int ret;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			return ret;
	}
	return 0;
}

static int __init uncore_cpu_init(void)
{
	int ret;

	ret = uncore_types_init(uncore_msr_uncores, true);
	if (ret)
		goto err;

	ret = uncore_msr_pmus_register();
	if (ret)
		goto err;
	return 0;
err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_msr_uncores = empty_uncore;
	return ret;
}

static int __init uncore_mmio_init(void)
{
	struct intel_uncore_type **types = uncore_mmio_uncores;
	int ret;

	ret = uncore_types_init(types, true);
	if (ret)
		goto err;

	for (; *types; types++) {
		ret = type_pmu_register(*types);
		if (ret)
			goto err;
	}
	return 0;
err:
	uncore_types_exit(uncore_mmio_uncores);
	uncore_mmio_uncores = empty_uncore;
	return ret;
}

struct intel_uncore_init_fun {
	void (*cpu_init)(void);
	int (*pci_init)(void);
	void (*mmio_init)(void);
};

static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
	.cpu_init = nhm_uncore_cpu_init,
};

static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = snb_uncore_pci_init,
};

static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = ivb_uncore_pci_init,
};

static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = hsw_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
	.cpu_init = snb_uncore_cpu_init,
	.pci_init = bdw_uncore_pci_init,
};

static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
	.cpu_init = snbep_uncore_cpu_init,
	.pci_init = snbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
	.cpu_init = nhmex_uncore_cpu_init,
};

static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
	.cpu_init = ivbep_uncore_cpu_init,
	.pci_init = ivbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
	.cpu_init = hswep_uncore_cpu_init,
	.pci_init = hswep_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
	.cpu_init = bdx_uncore_cpu_init,
	.pci_init = bdx_uncore_pci_init,
};

static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
	.cpu_init = knl_uncore_cpu_init,
	.pci_init = knl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
	.cpu_init = skl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
	.cpu_init = skx_uncore_cpu_init,
	.pci_init = skx_uncore_pci_init,
};

static const struct intel_uncore_init_fun icl_uncore_init __initconst = {
	.cpu_init = icl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
};

static const struct intel_uncore_init_fun tgl_uncore_init __initconst = {
	.cpu_init = icl_uncore_cpu_init,
	.mmio_init = tgl_uncore_mmio_init,
};

static const struct intel_uncore_init_fun tgl_l_uncore_init __initconst = {
	.cpu_init = icl_uncore_cpu_init,
	.mmio_init = tgl_l_uncore_mmio_init,
};

static const struct intel_uncore_init_fun icx_uncore_init __initconst = {
	.cpu_init = icx_uncore_cpu_init,
	.pci_init = icx_uncore_pci_init,
	.mmio_init = icx_uncore_mmio_init,
};

static const struct intel_uncore_init_fun snr_uncore_init __initconst = {
	.cpu_init = snr_uncore_cpu_init,
	.pci_init = snr_uncore_pci_init,
	.mmio_init = snr_uncore_mmio_init,
};

static const struct x86_cpu_id intel_uncore_match[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &nhm_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &nhm_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE, &nhm_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP, &nhm_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &snb_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &ivb_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &hsw_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &hsw_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &hsw_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &bdw_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &bdw_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &snbep_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX, &nhmex_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX, &nhmex_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &ivbep_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &hswep_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &bdx_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &bdx_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &knl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &knl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &skl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &skl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &skx_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &skl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &skl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &icl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &icx_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &tgl_l_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_uncore_init),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init),
	{},
};
MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);

static int __init intel_uncore_init(void)
{
	const struct x86_cpu_id *id;
	struct intel_uncore_init_fun *uncore_init;
	int pret = 0, cret = 0, mret = 0, ret;

	id = x86_match_cpu(intel_uncore_match);
	if (!id)
		return -ENODEV;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	max_dies = topology_max_packages() * topology_max_die_per_package();

	uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
	if (uncore_init->pci_init) {
		pret = uncore_init->pci_init();
		if (!pret)
			pret = uncore_pci_init();
	}

	if (uncore_init->cpu_init) {
		uncore_init->cpu_init();
		cret = uncore_cpu_init();
	}

	if (uncore_init->mmio_init) {
		uncore_init->mmio_init();
		mret = uncore_mmio_init();
	}

	if (cret && pret && mret)
		return -ENODEV;

	/* Install hotplug callbacks to setup the targets for each package */
	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
				"perf/x86/intel/uncore:online",
				uncore_event_cpu_online,
				uncore_event_cpu_offline);
	if (ret)
		goto err;
	return 0;

err:
	uncore_types_exit(uncore_msr_uncores);
	uncore_types_exit(uncore_mmio_uncores);
	uncore_pci_exit();
	return ret;
}
module_init(intel_uncore_init);

static void __exit intel_uncore_exit(void)
{
	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
	uncore_types_exit(uncore_msr_uncores);
	uncore_types_exit(uncore_mmio_uncores);
	uncore_pci_exit();
}
module_exit(intel_uncore_exit);