/*
 * Performance events - AMD IBS
 *
 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
#include <linux/syscore_ops.h>
#include <linux/sched/clock.h>

#include <asm/apic.h>

#include "../perf_event.h"

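/* IBS hardware capabilities, read once at init time from CPUID Fn8000_001B: */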
static u32 ibs_caps;

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

#include <linux/kprobes.h>
#include <linux/hardirq.h>

#include <asm/nmi.h>

#define IBS_FETCH_CONFIG_MASK	(IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK	IBS_OP_MAX_CNT

/*
 * IBS state machine, one instance per cpu (see struct cpu_perf_ibs):
 *
 * ENABLED; tracks the pmu::add(), pmu::del() state. IBS has a single
 * counter per cpu, so while the bit is set any further add() must fail.
 *
 * STARTED/STOPPING/STOPPED; track the pmu::start(), pmu::stop() state.
 * These are separate bits because the hardware can raise a late NMI
 * after the EN bit has already been cleared: the NMI handler consumes
 * such a late NMI when it finds STOPPED set (and clears the bit), and
 * STOPPING guards perf_ibs_stop() against being run twice. Since none
 * of these bits can be set or cleared together with the EN bit, the
 * ordering of the state updates relative to the MSR writes matters;
 * see the comments in perf_ibs_start(), perf_ibs_stop() and
 * perf_ibs_handle_irq().
 */

enum ibs_states {
	IBS_ENABLED	= 0,
	IBS_STARTED	= 1,
	IBS_STOPPING	= 2,
	IBS_STOPPED	= 3,

	IBS_MAX_STATES,
};

struct cpu_perf_ibs {
	struct perf_event	*event;
	unsigned long		state[BITS_TO_LONGS(IBS_MAX_STATES)];
};

struct perf_ibs {
	struct pmu			pmu;
	unsigned int			msr;
	u64				config_mask;
	u64				cnt_mask;
	u64				enable_mask;
	u64				valid_mask;
	u64				max_period;
	unsigned long			offset_mask[1];
	int				offset_max;
	struct cpu_perf_ibs __percpu	*pcpu;

	struct attribute		**format_attrs;
	struct attribute_group		format_group;
	const struct attribute_group	*attr_groups[2];

	u64				(*get_count)(u64 config);
};

struct perf_ibs_data {
	u32		size;
	union {
		u32	data[0];	/* data buffer starts here */
		u32	caps;
	};
	u64		regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};

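/*
 * Compute the next hardware period from the event's software period
 * state, clamped to [min, max]. Returns nonzero if a software overflow
 * (period expiry) occurred while doing so.
 */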
static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left < (s64)min)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	/*
	 * If the hw period that triggers the sw overflow is too short
	 * we might hit the irq handler. This biases the results.
	 * Thus we shorten the next-to-last period and set the last
	 * period to the max period.
	 */
	if (left > max) {
		left -= max;
		if (left > max)
			left = max;
		else if (left < min)
			left = min;
	}

	*hw_period = (u64)left;

	return overflow;
}

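/*
 * Fold the delta between new_raw_count and the stored prev_count into
 * event->count and period_left. Returns 0 if prev_count changed under
 * us (e.g. an NMI updated it) and the caller must re-read the hardware
 * and retry.
 */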
static int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - width;
	u64 prev_raw_count;
	u64 delta;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
	prev_raw_count = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		return 0;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return 1;
}

static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;

static struct perf_ibs *get_ibs_pmu(int type)
{
	if (perf_ibs_fetch.pmu.type == type)
		return &perf_ibs_fetch;
	if (perf_ibs_op.pmu.type == type)
		return &perf_ibs_op;
	return NULL;
}

/*
 * Use IBS for precise event sampling:
 *
 *  perf record -a -e cpu-cycles:p ...    # use ibs op counting cycle count
 *  perf record -a -e r076:p ...          # same as -e cpu-cycles:p
 *  perf record -a -e r0C1:p ...          # use ibs op counting micro-ops
 *
 * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
 * MSRC001_1033) is used to select either cycle or micro-ops counting
 * mode.
 *
 * The rip of IBS samples has skid 0. Thus, IBS supports precise
 * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
 * rip is invalid when IBS was not able to record the rip correctly.
 * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
 */
static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
{
	switch (event->attr.precise_ip) {
	case 0:
		return -ENOENT;
	case 1:
	case 2:
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		switch (event->attr.config) {
		case PERF_COUNT_HW_CPU_CYCLES:
			*config = 0;
			return 0;
		}
		break;
	case PERF_TYPE_RAW:
		switch (event->attr.config) {
		case 0x0076:
			*config = 0;
			return 0;
		case 0x00C1:
			*config = IBS_OP_CNT_CTL;
			return 0;
		}
		break;
	default:
		return -ENOENT;
	}

	return -EOPNOTSUPP;
}

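/* Exclusion modes that IBS cannot honour; events setting any of these are rejected: */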
static const struct perf_event_attr ibs_notsupp = {
	.exclude_user	= 1,
	.exclude_kernel	= 1,
	.exclude_hv	= 1,
	.exclude_idle	= 1,
	.exclude_host	= 1,
	.exclude_guest	= 1,
};

static int perf_ibs_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs;
	u64 max_cnt, config;
	int ret;

	perf_ibs = get_ibs_pmu(event->attr.type);
	if (perf_ibs) {
		config = event->attr.config;
	} else {
		perf_ibs = &perf_ibs_op;
		ret = perf_ibs_precise_event(event, &config);
		if (ret)
			return ret;
	}

	if (event->pmu != &perf_ibs->pmu)
		return -ENOENT;

	if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp))
		return -EINVAL;

	if (config & ~perf_ibs->config_mask)
		return -EINVAL;

	if (hwc->sample_period) {
		if (config & perf_ibs->cnt_mask)
			/* raw max_cnt may not be set */
			return -EINVAL;
		if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
			/*
			 * lower 4 bits can not be set in ibs max cnt,
			 * but allowing it in case we adjust the
			 * sample period to set a frequency.
			 */
			return -EINVAL;
		hwc->sample_period &= ~0x0FULL;
		if (!hwc->sample_period)
			hwc->sample_period = 0x10;
	} else {
		max_cnt = config & perf_ibs->cnt_mask;
		config &= ~perf_ibs->cnt_mask;
		event->attr.sample_period = max_cnt << 4;
		hwc->sample_period = event->attr.sample_period;
	}

	if (!hwc->sample_period)
		return -EINVAL;

	/*
	 * If we modify hwc->sample_period, we also need to update
	 * hwc->last_period and hwc->period_left.
	 */
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	hwc->config_base = perf_ibs->msr;
	hwc->config = config;

	return 0;
}

static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
			       struct hw_perf_event *hwc, u64 *period)
{
	int overflow;

	/* ignore lower 4 bits in min count: */
	overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
	local64_set(&hwc->prev_count, 0);

	return overflow;
}

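/*
 * The current fetch count is in IbsFetchCtl bits 31:16; the low four
 * bits of the counter are not stored, so shifting the masked field
 * right by 12 instead of 16 scales the value back up by 16.
 */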
static u64 get_ibs_fetch_count(u64 config)
{
	return (config & IBS_FETCH_CNT) >> 12;
}

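/*
 * If IBS_OP_VAL is set, the internal counter rolled over at the
 * programmed max count, which the config stores divided by 16, hence
 * the shift left by 4. If the current count is readable
 * (IBS_CAPS_RDWROPCNT), the IbsOpCurCnt snapshot from the upper half
 * of IbsOpCtl is added on top.
 */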
static u64 get_ibs_op_count(u64 config)
{
	u64 count = 0;

	if (config & IBS_OP_VAL)
		count += (config & IBS_OP_MAX_CNT) << 4;

	if (ibs_caps & IBS_CAPS_RDWROPCNT)
		count += (config & IBS_OP_CUR_CNT) >> 32;

	return count;
}

static void
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
		      u64 *config)
{
	u64 count = perf_ibs->get_count(*config);

	/*
	 * Set width to 64 since we do not overflow on max width but
	 * instead on max count. In perf_ibs_set_period() we clear
	 * prev count manually on overflow.
	 */
	while (!perf_event_try_update(event, count, 64)) {
		rdmsrl(event->hw.config_base, *config);
		count = perf_ibs->get_count(*config);
	}
}

static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
					 struct hw_perf_event *hwc, u64 config)
{
	wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
}

/*
 * Erratum #420 Instruction-Based Sampling Engine May Generate
 * Interrupt that Cannot Be Cleared:
 *
 * Must clear counter mask first, then clear the enable bit. See
 * Revision Guide for AMD Family 10h Processors, Publication #41322.
 */
static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
					  struct hw_perf_event *hwc, u64 config)
{
	config &= ~perf_ibs->cnt_mask;
	wrmsrl(hwc->config_base, config);
	config &= ~perf_ibs->enable_mask;
	wrmsrl(hwc->config_base, config);
}

/*
 * We cannot restore the ibs pmu state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus, we ignore the PERF_EF_RELOAD and PERF_EF_UPDATE flags
 * in perf_ibs_start()/perf_ibs_stop() and instead always do it.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 period;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	perf_ibs_set_period(perf_ibs, hwc, &period);
	/*
	 * Set STARTED before enabling the hardware, such that a subsequent NMI
	 * must observe it.
	 */
	set_bit(IBS_STARTED,    pcpu->state);
	clear_bit(IBS_STOPPING, pcpu->state);
	perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

	perf_event_update_userpage(event);
}

static void perf_ibs_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 config;
	int stopping;

	if (test_and_set_bit(IBS_STOPPING, pcpu->state))
		return;

	stopping = test_bit(IBS_STARTED, pcpu->state);

	if (!stopping && (hwc->state & PERF_HES_UPTODATE))
		return;

	rdmsrl(hwc->config_base, config);

	if (stopping) {
		/*
		 * Set STOPPED before disabling the hardware, such that it
		 * must be visible to NMIs the moment we clear the EN bit,
		 * at which point we can generate a !valid sample which
		 * we need to consume.
		 */
		set_bit(IBS_STOPPED, pcpu->state);
		perf_ibs_disable_event(perf_ibs, hwc, config);
		/*
		 * Clear STARTED after disabling the hardware; if it were
		 * cleared before, an NMI hitting after the clear but before
		 * clearing the EN bit might think it a spurious NMI and not
		 * handle it.
		 *
		 * Clearing it after, however, creates the problem of the NMI
		 * handler seeing STARTED but not having a valid sample.
		 */
		clear_bit(IBS_STARTED, pcpu->state);
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	/*
	 * Clear valid bit to not count rollovers on update, rollovers
	 * are only updated in the irq handler.
	 */
	config &= ~perf_ibs->valid_mask;

	perf_ibs_event_update(perf_ibs, event, &config);
	hwc->state |= PERF_HES_UPTODATE;
}

static int perf_ibs_add(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (test_and_set_bit(IBS_ENABLED, pcpu->state))
		return -ENOSPC;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	pcpu->event = event;

	if (flags & PERF_EF_START)
		perf_ibs_start(event, PERF_EF_RELOAD);

	return 0;
}

static void perf_ibs_del(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
		return;

	perf_ibs_stop(event, PERF_EF_UPDATE);

	pcpu->event = NULL;

	perf_event_update_userpage(event);
}

static void perf_ibs_read(struct perf_event *event) { }

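/*
 * sysfs format attributes: rand_en maps to IbsFetchCtl bit 57
 * (IbsRandEn), cnt_ctl to IbsOpCtl bit 19 (IbsOpCntCtl).
 */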
PMU_FORMAT_ATTR(rand_en,	"config:57");
PMU_FORMAT_ATTR(cnt_ctl,	"config:19");

static struct attribute *ibs_fetch_format_attrs[] = {
	&format_attr_rand_en.attr,
	NULL,
};

static struct attribute *ibs_op_format_attrs[] = {
	NULL,	/* &format_attr_cnt_ctl.attr or NULL */
	NULL,
};

static struct perf_ibs perf_ibs_fetch = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSFETCHCTL,
	.config_mask		= IBS_FETCH_CONFIG_MASK,
	.cnt_mask		= IBS_FETCH_MAX_CNT,
	.enable_mask		= IBS_FETCH_ENABLE,
	.valid_mask		= IBS_FETCH_VAL,
	.max_period		= IBS_FETCH_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
	.offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,
	.format_attrs		= ibs_fetch_format_attrs,

	.get_count		= get_ibs_fetch_count,
};

static struct perf_ibs perf_ibs_op = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSOPCTL,
	.config_mask		= IBS_OP_CONFIG_MASK,
	.cnt_mask		= IBS_OP_MAX_CNT,
	.enable_mask		= IBS_OP_ENABLE,
	.valid_mask		= IBS_OP_VAL,
	.max_period		= IBS_OP_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSOP_REG_MASK },
	.offset_max		= MSR_AMD64_IBSOP_REG_COUNT,
	.format_attrs		= ibs_op_format_attrs,

	.get_count		= get_ibs_op_count,
};

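/*
 * Handle one IBS NMI: check that the sample is ours and valid, read
 * out the IBS register file, re-arm the period and feed the sample to
 * perf. Returns 1 if the NMI was handled, 0 otherwise.
 */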
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	struct perf_event *event = pcpu->event;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	struct perf_ibs_data ibs_data;
	int offset, size, check_rip, offset_max, throttle = 0;
	unsigned int msr;
	u64 *buf, *config, period;

	if (!test_bit(IBS_STARTED, pcpu->state)) {
fail:
		/*
		 * Catch spurious NMIs after stopping IBS: after disabling IBS
		 * there could still be incoming NMIs with samples that even
		 * have valid data. Mark all these NMIs as handled.
		 */
		if (test_and_clear_bit(IBS_STOPPED, pcpu->state))
			return 1;

		return 0;
	}

	msr = hwc->config_base;
	buf = ibs_data.regs;
	rdmsrl(msr, *buf);
	if (!(*buf++ & perf_ibs->valid_mask))
		goto fail;

	config = &ibs_data.regs[0];
	perf_ibs_event_update(perf_ibs, event, config);
	perf_sample_data_init(&data, 0, hwc->last_period);
	if (!perf_ibs_set_period(perf_ibs, hwc, &period))
		goto out;	/* no sw counter overflow */

	ibs_data.caps = ibs_caps;
	size = 1;
	offset = 1;
	check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
	if (event->attr.sample_type & PERF_SAMPLE_RAW)
		offset_max = perf_ibs->offset_max;
	else if (check_rip)
		offset_max = 2;
	else
		offset_max = 1;
	do {
		rdmsrl(msr + offset, *buf++);
		size++;
		offset = find_next_bit(perf_ibs->offset_mask,
				       perf_ibs->offset_max,
				       offset + 1);
	} while (offset < offset_max);
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		/*
		 * Read IbsBrTarget and IbsOpData4 separately
		 * depending on their availability.
		 * Can't add to offset_max as they are staggered
		 */
		if (ibs_caps & IBS_CAPS_BRNTRGT) {
			rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
			size++;
		}
		if (ibs_caps & IBS_CAPS_OPDATA4) {
			rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
			size++;
		}
	}
	ibs_data.size = sizeof(u64) * size;

	regs = *iregs;
	if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
		regs.flags &= ~PERF_EFLAGS_EXACT;
	} else {
		set_linear_ip(&regs, ibs_data.regs[1]);
		regs.flags |= PERF_EFLAGS_EXACT;
	}

	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw = (struct perf_raw_record){
			.frag = {
				.size = sizeof(u32) + ibs_data.size,
				.data = ibs_data.data,
			},
		};
		data.raw = &raw;
	}

	throttle = perf_event_overflow(event, &data, &regs);
out:
	if (throttle)
		perf_ibs_stop(event, 0);
	else
		perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

	perf_event_update_userpage(event);

	return 1;
}

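/*
 * Poll both IBS units from the NMI; the time spent in the handler is
 * reported to the perf core via perf_sample_event_took() so that it
 * can throttle the sampling rate if NMI handling becomes too expensive.
 */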
static int
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	u64 stamp = sched_clock();
	int handled = 0;

	handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
	handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	perf_sample_event_took(sched_clock() - stamp);

	return handled;
}
NOKPROBE_SYMBOL(perf_ibs_nmi_handler);

static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
	struct cpu_perf_ibs __percpu *pcpu;
	int ret;

	pcpu = alloc_percpu(struct cpu_perf_ibs);
	if (!pcpu)
		return -ENOMEM;

	perf_ibs->pcpu = pcpu;

	/* register attributes */
	if (perf_ibs->format_attrs[0]) {
		memset(&perf_ibs->format_group, 0, sizeof(perf_ibs->format_group));
		perf_ibs->format_group.name	= "format";
		perf_ibs->format_group.attrs	= perf_ibs->format_attrs;

		memset(&perf_ibs->attr_groups, 0, sizeof(perf_ibs->attr_groups));
		perf_ibs->attr_groups[0]	= &perf_ibs->format_group;
		perf_ibs->pmu.attr_groups	= perf_ibs->attr_groups;
	}

	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
	if (ret) {
		perf_ibs->pcpu = NULL;
		free_percpu(pcpu);
	}

	return ret;
}

static __init void perf_event_ibs_init(void)
{
	struct attribute **attr = ibs_op_format_attrs;

	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");

	if (ibs_caps & IBS_CAPS_OPCNT) {
		perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
		*attr++ = &format_attr_cnt_ctl.attr;
	}
	perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");

	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
	pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
}

#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init void perf_event_ibs_init(void) { }

#endif

/* IBS - apic initialization, for perf and oprofile */

static __init u32 __get_ibs_caps(void)
{
	u32 caps;
	unsigned int max_level;

	if (!boot_cpu_has(X86_FEATURE_IBS))
		return 0;

	/* check IBS cpuid feature flags */
	max_level = cpuid_eax(0x80000000);
	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_DEFAULT;

	caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(caps & IBS_CAPS_AVAIL))
		/* cpuid flags not valid */
		return IBS_CAPS_DEFAULT;

	return caps;
}

u32 get_ibs_caps(void)
{
	return ibs_caps;
}

EXPORT_SYMBOL(get_ibs_caps);

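/*
 * Reserve/release an extended interrupt LVT entry for the given offset;
 * setup_APIC_eilvt() returns 0 on success, hence the negation.
 */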
static inline int get_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

static inline int put_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, 0, 1);
}

/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
 */
static inline int ibs_eilvt_valid(void)
{
	int offset;
	u64 val;
	int valid = 0;

	preempt_disable();

	rdmsrl(MSR_AMD64_IBSCTL, val);
	offset = val & IBSCTL_LVT_OFFSET_MASK;

	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	if (!get_eilvt(offset)) {
		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	valid = 1;
out:
	preempt_enable();

	return valid;
}

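/*
 * Program the chosen LVT offset (plus the valid bit) into the IBSCTL
 * register in the PCI config space of every northbridge, verifying
 * each write by reading it back.
 */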
static int setup_ibs_ctl(int ibs_eilvt_off)
{
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVT_OFFSET_VALID);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
			pci_dev_put(cpu_cfg);
			pr_debug("Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n",
				 value);
			return -EINVAL;
		}
	} while (1);

	if (!nodes) {
		pr_debug("No CPU node configured for IBS\n");
		return -ENODEV;
	}

	return 0;
}

/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * setup the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset. This then updates
 * the offset in the IBS_CTL per-node msr. The per-core APIC setup of
 * the IBS interrupt vector is handled by the cpu hotplug startup
 * callback, which uses the new offset.
 */
static void force_ibs_eilvt_setup(void)
{
	int offset;
	int ret;

	preempt_disable();
	/* find the next free available EILVT entry, skip offset 0 */
	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
		if (get_eilvt(offset))
			break;
	}
	preempt_enable();

	if (offset == APIC_EILVT_NR_MAX) {
		pr_debug("No EILVT entry available\n");
		return;
	}

	ret = setup_ibs_ctl(offset);
	if (ret)
		goto out;

	if (!ibs_eilvt_valid())
		goto out;

	pr_info("IBS: LVT offset %d assigned\n", offset);

	return;
out:
	preempt_disable();
	put_eilvt(offset);
	preempt_enable();
	return;
}

static void ibs_eilvt_setup(void)
{
	/*
	 * Force LVT offset assignment for family 10h: The offsets are
	 * not assigned by the BIOS for this family, so the OS is
	 * responsible for doing it. If the OS assignment fails, fall
	 * back to BIOS settings and try to setup this offset.
	 */
	if (boot_cpu_data.x86 == 0x10)
		force_ibs_eilvt_setup();
}

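/* Read the IBS LVT offset back from MSR_AMD64_IBSCTL; -EINVAL if not valid. */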
static inline int get_ibs_lvt_offset(void)
{
	u64 val;

	rdmsrl(MSR_AMD64_IBSCTL, val);
	if (!(val & IBSCTL_LVT_OFFSET_VALID))
		return -EINVAL;

	return val & IBSCTL_LVT_OFFSET_MASK;
}

static void setup_APIC_ibs(void)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset < 0)
		goto failed;

	if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
		return;
failed:
	pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
		smp_processor_id());
}

static void clear_APIC_ibs(void)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset >= 0)
		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}

static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)
{
	setup_APIC_ibs();
	return 0;
}

#ifdef CONFIG_PM

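/*
 * The EILVT setup in the local APIC does not survive suspend; tear the
 * IBS LVT entry down on suspend and redo the LVT offset assignment and
 * APIC setup on resume.
 */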
static int perf_ibs_suspend(void)
{
	clear_APIC_ibs();
	return 0;
}

static void perf_ibs_resume(void)
{
	ibs_eilvt_setup();
	setup_APIC_ibs();
}

static struct syscore_ops perf_ibs_syscore_ops = {
	.resume		= perf_ibs_resume,
	.suspend	= perf_ibs_suspend,
};

static void perf_ibs_pm_init(void)
{
	register_syscore_ops(&perf_ibs_syscore_ops);
}

#else

static inline void perf_ibs_pm_init(void) { }

#endif

static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)
{
	clear_APIC_ibs();
	return 0;
}

static __init int amd_ibs_init(void)
{
	u32 caps;

	caps = __get_ibs_caps();
	if (!caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	ibs_eilvt_setup();

	if (!ibs_eilvt_valid())
		return -EINVAL;

	perf_ibs_pm_init();

	ibs_caps = caps;
	/* make ibs_caps visible to other cpus: */
	smp_mb();
	/*
	 * x86_pmu_amd_ibs_starting_cpu will be called from core on
	 * all online cpus.
	 */
	cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
			  "perf/x86/amd/ibs:starting",
			  x86_pmu_amd_ibs_starting_cpu,
			  x86_pmu_amd_ibs_dying_cpu);

	perf_event_ibs_init();

	return 0;
}

/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);