1
2
3
4
5
6
7
8
9#include <linux/perf_event.h>
10#include <linux/init.h>
11#include <linux/export.h>
12#include <linux/pci.h>
13#include <linux/ptrace.h>
14#include <linux/syscore_ops.h>
15#include <linux/sched/clock.h>
16
17#include <asm/apic.h>
18
19#include "../perf_event.h"
20
21static u32 ibs_caps;
22
23#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
24
25#include <linux/kprobes.h>
26#include <linux/hardirq.h>
27
28#include <asm/nmi.h>
29
30#define IBS_FETCH_CONFIG_MASK (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
31#define IBS_OP_CONFIG_MASK IBS_OP_MAX_CNT
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
/*
 * Software state of the IBS machinery, tracked as bits in
 * cpu_perf_ibs::state and shared between the PMU callbacks and the
 * NMI handler.
 */
enum ibs_states {
	IBS_ENABLED = 0,	/* an event is scheduled on this CPU (perf_ibs_add) */
	IBS_STARTED = 1,	/* hardware is enabled; NMIs are expected */
	IBS_STOPPING = 2,	/* perf_ibs_stop() has begun tearing down */
	IBS_STOPPED = 3,	/* hw disabled; one in-flight NMI may still arrive */

	IBS_MAX_STATES,
};
76
/* Per-CPU IBS context: the single active event plus its state bits. */
struct cpu_perf_ibs {
	struct perf_event	*event;		/* event currently scheduled here, or NULL */
	unsigned long		state[BITS_TO_LONGS(IBS_MAX_STATES)];	/* ibs_states bitmap */
};
81
/*
 * Description of one IBS sampling flavor; there are exactly two static
 * instances, perf_ibs_fetch and perf_ibs_op.
 */
struct perf_ibs {
	struct pmu			pmu;		/* perf core registration */
	unsigned int			msr;		/* control MSR (IBS*CTL) */
	u64				config_mask;	/* raw config bits accepted by event_init */
	u64				cnt_mask;	/* MaxCnt field within the config */
	u64				enable_mask;	/* enable bit in the control MSR */
	u64				valid_mask;	/* "sample valid" bit in the control MSR */
	u64				max_period;	/* largest supportable sample period */
	unsigned long			offset_mask[1];	/* bitmap of readable MSR offsets past msr */
	int				offset_max;	/* number of MSRs in the register block */
	struct cpu_perf_ibs __percpu	*pcpu;		/* per-CPU state, allocated at init */

	struct attribute		**format_attrs;	/* sysfs "format" attributes */
	struct attribute_group		format_group;
	const struct attribute_group	*attr_groups[2];

	u64				(*get_count)(u64 config);	/* extract count from config MSR */
};
100
/* Snapshot of the IBS register block, emitted as PERF_SAMPLE_RAW data. */
struct perf_ibs_data {
	u32		size;		/* bytes valid starting at data[] */
	union {
		u32	data[0];	/* raw sample payload starts here */
		u32	caps;		/* first word carries ibs_caps */
	};
	u64		regs[MSR_AMD64_IBS_REG_COUNT_MAX];	/* the MSR values */
};
109
/*
 * Compute the next hardware period from the software period bookkeeping
 * in @hwc, clamped to [min, max]. Returns nonzero when a software
 * counter overflow should be reported.
 */
static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left < (s64)min)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	/*
	 * If the remaining period exceeds the hw maximum, split it: program
	 * an intermediate period now and leave the rest for later, keeping
	 * the programmed value within [min, max].
	 */
	if (left > max) {
		left -= max;
		if (left > max)
			left = max;
		else if (left < min)
			left = min;
	}

	*hw_period = (u64)left;

	return overflow;
}
152
/*
 * Fold a new raw hardware count into the event. Returns 0 when an NMI
 * raced us and updated prev_count first (caller must re-read and retry),
 * nonzero on success.
 */
static int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - width;
	u64 prev_raw_count;
	u64 delta;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
	prev_raw_count = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		return 0;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count; shifting by (64 - width) strips any such bits.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return 1;
}
189
190static struct perf_ibs perf_ibs_fetch;
191static struct perf_ibs perf_ibs_op;
192
193static struct perf_ibs *get_ibs_pmu(int type)
194{
195 if (perf_ibs_fetch.pmu.type == type)
196 return &perf_ibs_fetch;
197 if (perf_ibs_op.pmu.type == type)
198 return &perf_ibs_op;
199 return NULL;
200}
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
220{
221 switch (event->attr.precise_ip) {
222 case 0:
223 return -ENOENT;
224 case 1:
225 case 2:
226 break;
227 default:
228 return -EOPNOTSUPP;
229 }
230
231 switch (event->attr.type) {
232 case PERF_TYPE_HARDWARE:
233 switch (event->attr.config) {
234 case PERF_COUNT_HW_CPU_CYCLES:
235 *config = 0;
236 return 0;
237 }
238 break;
239 case PERF_TYPE_RAW:
240 switch (event->attr.config) {
241 case 0x0076:
242 *config = 0;
243 return 0;
244 case 0x00C1:
245 *config = IBS_OP_CNT_CTL;
246 return 0;
247 }
248 break;
249 default:
250 return -ENOENT;
251 }
252
253 return -EOPNOTSUPP;
254}
255
/*
 * pmu::event_init callback: validate the event's raw config against this
 * IBS flavor and derive the initial sample period.
 */
static int perf_ibs_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs;
	u64 max_cnt, config;
	int ret;

	perf_ibs = get_ibs_pmu(event->attr.type);
	if (perf_ibs) {
		config = event->attr.config;
	} else {
		/* Not one of our types: try mapping a precise event onto IBS op. */
		perf_ibs = &perf_ibs_op;
		ret = perf_ibs_precise_event(event, &config);
		if (ret)
			return ret;
	}

	if (event->pmu != &perf_ibs->pmu)
		return -ENOENT;

	if (config & ~perf_ibs->config_mask)
		return -EINVAL;

	if (hwc->sample_period) {
		if (config & perf_ibs->cnt_mask)
			/* raw max_cnt may not be set */
			return -EINVAL;
		if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
			/*
			 * lower 4 bits can not be set in ibs max cnt,
			 * but allowing it in case we adjust the
			 * sample period to set a frequency.
			 */
			return -EINVAL;
		hwc->sample_period &= ~0x0FULL;
		if (!hwc->sample_period)
			hwc->sample_period = 0x10;
	} else {
		/* Period comes from the MaxCnt field of the raw config. */
		max_cnt = config & perf_ibs->cnt_mask;
		config &= ~perf_ibs->cnt_mask;
		event->attr.sample_period = max_cnt << 4;
		hwc->sample_period = event->attr.sample_period;
	}

	if (!hwc->sample_period)
		return -EINVAL;

	/*
	 * If we modify hwc->sample_period, we also need to update
	 * hwc->last_period and hwc->period_left.
	 */
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	hwc->config_base = perf_ibs->msr;
	hwc->config = config;

	return 0;
}
315
/*
 * Compute and latch the next hardware period. prev_count is reset so
 * the next update measures from zero. Returns nonzero on sw overflow.
 */
static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
			       struct hw_perf_event *hwc, u64 *period)
{
	int overflow;

	/* ignore lower 4 bits in min count: */
	overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
	local64_set(&hwc->prev_count, 0);

	return overflow;
}
327
328static u64 get_ibs_fetch_count(u64 config)
329{
330 return (config & IBS_FETCH_CNT) >> 12;
331}
332
/* Derive the elapsed op count from the op control MSR value. */
static u64 get_ibs_op_count(u64 config)
{
	u64 count = 0;

	/* The counter rolled over: a full MaxCnt period (<<4) elapsed. */
	if (config & IBS_OP_VAL)
		count += (config & IBS_OP_MAX_CNT) << 4;

	/* CurCnt is only meaningful when the RDWROPCNT capability is present. */
	if (ibs_caps & IBS_CAPS_RDWROPCNT)
		count += (config & IBS_OP_CUR_CNT) >> 32;

	return count;
}
345
/*
 * Fold the current hardware count into the event, retrying (with a
 * fresh MSR read) as long as an NMI keeps racing the update.
 */
static void
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
		      u64 *config)
{
	u64 count = perf_ibs->get_count(*config);

	/*
	 * Set width to 64 since we do not overflow on max width but
	 * instead on max count. In perf_ibs_set_period() we clear
	 * prev count manually on overflow.
	 */
	while (!perf_event_try_update(event, count, 64)) {
		rdmsrl(event->hw.config_base, *config);
		count = perf_ibs->get_count(*config);
	}
}
362
/* Program the control MSR with the event config, extra bits and EN set. */
static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
					 struct hw_perf_event *hwc, u64 config)
{
	wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
}
368
/*
 * Disable IBS with two MSR writes: first clear the count field, then
 * clear the enable bit. NOTE(review): the two-step sequence matches the
 * workaround for AMD Fam10h erratum #420 ("IBS engine may generate an
 * interrupt that cannot be cleared") — confirm against the Fam10h
 * Revision Guide before changing the order.
 */
static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
					  struct hw_perf_event *hwc, u64 config)
{
	config &= ~perf_ibs->cnt_mask;
	wrmsrl(hwc->config_base, config);
	config &= ~perf_ibs->enable_mask;
	wrmsrl(hwc->config_base, config);
}
384
/*
 * We cannot restore the ibs pmu state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus, ignoring PERF_EF_RELOAD and PERF_EF_UPDATE flags in
 * perf_ibs_start()/perf_ibs_stop() and instead always do it.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 period;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	perf_ibs_set_period(perf_ibs, hwc, &period);
	/*
	 * Set STARTED before enabling the hardware, such that a subsequent NMI
	 * must observe it.
	 */
	set_bit(IBS_STARTED, pcpu->state);
	clear_bit(IBS_STOPPING, pcpu->state);
	perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

	perf_event_update_userpage(event);
}
415
/* Stop the event, racing carefully against the NMI handler. */
static void perf_ibs_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 config;
	int stopping;

	/* Only one stopper; any concurrent/repeated stop is a no-op. */
	if (test_and_set_bit(IBS_STOPPING, pcpu->state))
		return;

	stopping = test_bit(IBS_STARTED, pcpu->state);

	if (!stopping && (hwc->state & PERF_HES_UPTODATE))
		return;

	rdmsrl(hwc->config_base, config);

	if (stopping) {
		/*
		 * Set STOPPED before disabling the hardware, such that it
		 * must be visible to NMIs the moment we clear the EN bit,
		 * at which point we can generate an !VALID sample which
		 * we need to consume.
		 */
		set_bit(IBS_STOPPED, pcpu->state);
		perf_ibs_disable_event(perf_ibs, hwc, config);
		/*
		 * Clear STARTED after disabling the hardware; if it were
		 * cleared before, an NMI hitting between the clear and the
		 * disable might be treated as spurious and go unhandled.
		 *
		 * Clearing it after, however, means the NMI handler can see
		 * STARTED without a valid sample — hence the IBS_STOPPED
		 * bit set above.
		 */
		clear_bit(IBS_STARTED, pcpu->state);
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	/*
	 * Clear valid bit to not count rollovers on update, rollovers
	 * are only updated in the irq handler.
	 */
	config &= ~perf_ibs->valid_mask;

	perf_ibs_event_update(perf_ibs, event, &config);
	hwc->state |= PERF_HES_UPTODATE;
}
469
470static int perf_ibs_add(struct perf_event *event, int flags)
471{
472 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
473 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
474
475 if (test_and_set_bit(IBS_ENABLED, pcpu->state))
476 return -ENOSPC;
477
478 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
479
480 pcpu->event = event;
481
482 if (flags & PERF_EF_START)
483 perf_ibs_start(event, PERF_EF_RELOAD);
484
485 return 0;
486}
487
488static void perf_ibs_del(struct perf_event *event, int flags)
489{
490 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
491 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
492
493 if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
494 return;
495
496 perf_ibs_stop(event, PERF_EF_UPDATE);
497
498 pcpu->event = NULL;
499
500 perf_event_update_userpage(event);
501}
502
/* No on-demand read: counts are folded in at stop and NMI time. */
static void perf_ibs_read(struct perf_event *event) { }
504
/* sysfs "format" bits: rand_en = IbsRandEn, cnt_ctl = IbsOpCntCtl. */
PMU_FORMAT_ATTR(rand_en, "config:57");
PMU_FORMAT_ATTR(cnt_ctl, "config:19");

static struct attribute *ibs_fetch_format_attrs[] = {
	&format_attr_rand_en.attr,
	NULL,
};

/*
 * The first slot is filled in with cnt_ctl at boot when the CPU
 * advertises IBS_CAPS_OPCNT — see perf_event_ibs_init().
 */
static struct attribute *ibs_op_format_attrs[] = {
	NULL,
	NULL,
};
517
/* PMU instance for IBS instruction-fetch sampling. */
static struct perf_ibs perf_ibs_fetch = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,	/* per-CPU only, no task context */

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,	/* IBS cannot filter by privilege */
	},
	.msr			= MSR_AMD64_IBSFETCHCTL,
	.config_mask		= IBS_FETCH_CONFIG_MASK,
	.cnt_mask		= IBS_FETCH_MAX_CNT,
	.enable_mask		= IBS_FETCH_ENABLE,
	.valid_mask		= IBS_FETCH_VAL,
	.max_period		= IBS_FETCH_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
	.offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,
	.format_attrs		= ibs_fetch_format_attrs,

	.get_count		= get_ibs_fetch_count,
};
542
543static struct perf_ibs perf_ibs_op = {
544 .pmu = {
545 .task_ctx_nr = perf_invalid_context,
546
547 .event_init = perf_ibs_init,
548 .add = perf_ibs_add,
549 .del = perf_ibs_del,
550 .start = perf_ibs_start,
551 .stop = perf_ibs_stop,
552 .read = perf_ibs_read,
553 },
554 .msr = MSR_AMD64_IBSOPCTL,
555 .config_mask = IBS_OP_CONFIG_MASK,
556 .cnt_mask = IBS_OP_MAX_CNT,
557 .enable_mask = IBS_OP_ENABLE,
558 .valid_mask = IBS_OP_VAL,
559 .max_period = IBS_OP_MAX_CNT << 4,
560 .offset_mask = { MSR_AMD64_IBSOP_REG_MASK },
561 .offset_max = MSR_AMD64_IBSOP_REG_COUNT,
562 .format_attrs = ibs_op_format_attrs,
563
564 .get_count = get_ibs_op_count,
565};
566
/*
 * NMI-context handler for one IBS flavor. Returns 1 when the NMI was
 * ours (consumed), 0 to let other NMI handlers look at it.
 */
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	struct perf_event *event = pcpu->event;
	struct hw_perf_event *hwc;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	struct perf_ibs_data ibs_data;
	int offset, size, check_rip, offset_max, throttle = 0;
	unsigned int msr;
	u64 *buf, *config, period;

	if (!test_bit(IBS_STARTED, pcpu->state)) {
fail:
		/*
		 * Catch spurious NMIs after stopping IBS: perf_ibs_stop()
		 * sets IBS_STOPPED before disabling the hardware, so claim
		 * exactly one such in-flight NMI as ours; anything else is
		 * not for us.
		 */
		if (test_and_clear_bit(IBS_STOPPED, pcpu->state))
			return 1;

		return 0;
	}

	if (WARN_ON_ONCE(!event))
		goto fail;

	hwc = &event->hw;
	msr = hwc->config_base;
	buf = ibs_data.regs;
	rdmsrl(msr, *buf);
	/* Hardware latched no valid sample: treat like a spurious NMI. */
	if (!(*buf++ & perf_ibs->valid_mask))
		goto fail;

	config = &ibs_data.regs[0];
	perf_ibs_event_update(perf_ibs, event, config);
	perf_sample_data_init(&data, 0, hwc->last_period);
	if (!perf_ibs_set_period(perf_ibs, hwc, &period))
		goto out;	/* no sw counter overflow: just re-arm */

	/*
	 * Read the register block following the control MSR. Only read
	 * everything when userspace asked for raw samples; otherwise
	 * stop after what is needed for the (optional) RIP check.
	 */
	ibs_data.caps = ibs_caps;
	size = 1;
	offset = 1;
	check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
	if (event->attr.sample_type & PERF_SAMPLE_RAW)
		offset_max = perf_ibs->offset_max;
	else if (check_rip)
		offset_max = 2;
	else
		offset_max = 1;
	do {
		rdmsrl(msr + offset, *buf++);
		size++;
		offset = find_next_bit(perf_ibs->offset_mask,
				       perf_ibs->offset_max,
				       offset + 1);
	} while (offset < offset_max);
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		/*
		 * IbsBrTarget and IbsOpData4 are not part of the contiguous
		 * register block, so read them separately when available.
		 */
		if (ibs_caps & IBS_CAPS_BRNTRGT) {
			rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
			size++;
		}
		if (ibs_caps & IBS_CAPS_OPDATA4) {
			rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
			size++;
		}
	}
	ibs_data.size = sizeof(u64) * size;

	regs = *iregs;
	if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
		/* Hardware flagged the sampled RIP as invalid. */
		regs.flags &= ~PERF_EFLAGS_EXACT;
	} else {
		/* regs[1] holds the precise sampled instruction pointer. */
		set_linear_ip(&regs, ibs_data.regs[1]);
		regs.flags |= PERF_EFLAGS_EXACT;
	}

	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw = (struct perf_raw_record){
			.frag = {
				.size = sizeof(u32) + ibs_data.size,
				.data = ibs_data.data,
			},
		};
		data.raw = &raw;
	}

	throttle = perf_event_overflow(event, &data, &regs);
out:
	if (throttle) {
		perf_ibs_stop(event, 0);
	} else {
		period >>= 4;

		/* Keep the randomized low CurCnt bits when supported. */
		if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
		    (*config & IBS_OP_CNT_CTL))
			period |= *config & IBS_OP_CUR_CNT_RAND;

		perf_ibs_enable_event(perf_ibs, hwc, period);
	}

	perf_event_update_userpage(event);

	return 1;
}
680
681static int
682perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
683{
684 u64 stamp = sched_clock();
685 int handled = 0;
686
687 handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
688 handled += perf_ibs_handle_irq(&perf_ibs_op, regs);
689
690 if (handled)
691 inc_irq_stat(apic_perf_irqs);
692
693 perf_sample_event_took(sched_clock() - stamp);
694
695 return handled;
696}
697NOKPROBE_SYMBOL(perf_ibs_nmi_handler);
698
/*
 * Allocate the per-CPU state for one IBS flavor, wire up its sysfs
 * "format" group (when it has attributes) and register the PMU.
 */
static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
	struct cpu_perf_ibs __percpu *pcpu;
	int ret;

	pcpu = alloc_percpu(struct cpu_perf_ibs);
	if (!pcpu)
		return -ENOMEM;

	perf_ibs->pcpu = pcpu;

	/* Register attributes and unregister on failure. */
	if (perf_ibs->format_attrs[0]) {
		memset(&perf_ibs->format_group, 0, sizeof(perf_ibs->format_group));
		perf_ibs->format_group.name = "format";
		perf_ibs->format_group.attrs = perf_ibs->format_attrs;

		memset(&perf_ibs->attr_groups, 0, sizeof(perf_ibs->attr_groups));
		perf_ibs->attr_groups[0] = &perf_ibs->format_group;
		perf_ibs->pmu.attr_groups = perf_ibs->attr_groups;
	}

	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
	if (ret) {
		/* Undo the percpu allocation on registration failure. */
		perf_ibs->pcpu = NULL;
		free_percpu(pcpu);
	}

	return ret;
}
729
/* Register both IBS PMUs and hook the shared NMI handler. */
static __init void perf_event_ibs_init(void)
{
	struct attribute **attr = ibs_op_format_attrs;

	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");

	if (ibs_caps & IBS_CAPS_OPCNT) {
		/* CPU can count dispatched ops: expose the cnt_ctl format bit. */
		perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
		*attr++ = &format_attr_cnt_ctl.attr;
	}
	perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");

	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
	pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
}
745
746#else
747
748static __init void perf_event_ibs_init(void) { }
749
750#endif
751
752
753
/* Query CPUID for the IBS capability word; 0 when IBS is unsupported. */
static __init u32 __get_ibs_caps(void)
{
	u32 caps;
	unsigned int max_level;

	if (!boot_cpu_has(X86_FEATURE_IBS))
		return 0;

	/* check IBS cpuid feature flags */
	max_level = cpuid_eax(0x80000000);
	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_DEFAULT;

	caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(caps & IBS_CAPS_AVAIL))
		/* cpuid flags not valid */
		return IBS_CAPS_DEFAULT;

	return caps;
}
774
/* Return the cached IBS capability word (0 when IBS is unavailable). */
u32 get_ibs_caps(void)
{
	return ibs_caps;
}

EXPORT_SYMBOL(get_ibs_caps);
781
/* Try to reserve the APIC EILVT entry (NMI type, masked); nonzero on success. */
static inline int get_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

/* Release a previously reserved APIC EILVT entry; nonzero on success. */
static inline int put_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, 0, 1);
}
791
792
793
794
795static inline int ibs_eilvt_valid(void)
796{
797 int offset;
798 u64 val;
799 int valid = 0;
800
801 preempt_disable();
802
803 rdmsrl(MSR_AMD64_IBSCTL, val);
804 offset = val & IBSCTL_LVT_OFFSET_MASK;
805
806 if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
807 pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
808 smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
809 goto out;
810 }
811
812 if (!get_eilvt(offset)) {
813 pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
814 smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
815 goto out;
816 }
817
818 valid = 1;
819out:
820 preempt_enable();
821
822 return valid;
823}
824
825static int setup_ibs_ctl(int ibs_eilvt_off)
826{
827 struct pci_dev *cpu_cfg;
828 int nodes;
829 u32 value = 0;
830
831 nodes = 0;
832 cpu_cfg = NULL;
833 do {
834 cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
835 PCI_DEVICE_ID_AMD_10H_NB_MISC,
836 cpu_cfg);
837 if (!cpu_cfg)
838 break;
839 ++nodes;
840 pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
841 | IBSCTL_LVT_OFFSET_VALID);
842 pci_read_config_dword(cpu_cfg, IBSCTL, &value);
843 if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
844 pci_dev_put(cpu_cfg);
845 pr_debug("Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n",
846 value);
847 return -EINVAL;
848 }
849 } while (1);
850
851 if (!nodes) {
852 pr_debug("No CPU node configured for IBS\n");
853 return -ENODEV;
854 }
855
856 return 0;
857}
858
/*
 * Assign an IBS LVT offset ourselves: grab the first free EILVT entry
 * on this CPU, program it into every node's IBSCTL register, then
 * verify the result. On any failure the EILVT entry is released again.
 * Called only for family 10h — see ibs_eilvt_setup().
 */
static void force_ibs_eilvt_setup(void)
{
	int offset;
	int ret;

	preempt_disable();
	/* Find the first free EILVT entry; offset 0 is intentionally skipped. */
	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
		if (get_eilvt(offset))
			break;
	}
	preempt_enable();

	if (offset == APIC_EILVT_NR_MAX) {
		pr_debug("No EILVT entry available\n");
		return;
	}

	ret = setup_ibs_ctl(offset);
	if (ret)
		goto out;

	if (!ibs_eilvt_valid())
		goto out;

	pr_info("LVT offset %d assigned\n", offset);

	return;
out:
	/* Failure: give the reserved EILVT entry back. */
	preempt_disable();
	put_eilvt(offset);
	preempt_enable();
	return;
}
901
static void ibs_eilvt_setup(void)
{
	/*
	 * Force LVT offset assignment for family 10h: the offsets are
	 * not assigned by the BIOS for this family, so the OS is
	 * responsible for doing it. If the OS assignment fails, fall
	 * back to whatever the BIOS/MSR already contains (validated
	 * later by ibs_eilvt_valid()).
	 */
	if (boot_cpu_data.x86 == 0x10)
		force_ibs_eilvt_setup();
}
913
914static inline int get_ibs_lvt_offset(void)
915{
916 u64 val;
917
918 rdmsrl(MSR_AMD64_IBSCTL, val);
919 if (!(val & IBSCTL_LVT_OFFSET_VALID))
920 return -EINVAL;
921
922 return val & IBSCTL_LVT_OFFSET_MASK;
923}
924
925static void setup_APIC_ibs(void)
926{
927 int offset;
928
929 offset = get_ibs_lvt_offset();
930 if (offset < 0)
931 goto failed;
932
933 if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
934 return;
935failed:
936 pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
937 smp_processor_id());
938}
939
940static void clear_APIC_ibs(void)
941{
942 int offset;
943
944 offset = get_ibs_lvt_offset();
945 if (offset >= 0)
946 setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
947}
948
/* CPU-hotplug callback: arm the IBS LVT entry on a CPU coming online. */
static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)
{
	setup_APIC_ibs();
	return 0;
}
954
955#ifdef CONFIG_PM
956
/* Mask this CPU's IBS LVT entry on suspend. */
static int perf_ibs_suspend(void)
{
	clear_APIC_ibs();
	return 0;
}

static void perf_ibs_resume(void)
{
	/*
	 * NOTE(review): re-running the eilvt assignment here suggests the
	 * IBSCTL/EILVT setup can be lost across suspend — confirm before
	 * simplifying this to setup_APIC_ibs() alone.
	 */
	ibs_eilvt_setup();
	setup_APIC_ibs();
}

static struct syscore_ops perf_ibs_syscore_ops = {
	.resume		= perf_ibs_resume,
	.suspend	= perf_ibs_suspend,
};

/* Hook IBS into the syscore suspend/resume path. */
static void perf_ibs_pm_init(void)
{
	register_syscore_ops(&perf_ibs_syscore_ops);
}
978
979#else
980
981static inline void perf_ibs_pm_init(void) { }
982
983#endif
984
/* CPU-hotplug callback: quiesce the IBS LVT entry on a CPU going down. */
static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)
{
	clear_APIC_ibs();
	return 0;
}
990
/* Driver entry point: detect IBS, set up interrupts and register PMUs. */
static __init int amd_ibs_init(void)
{
	u32 caps;

	caps = __get_ibs_caps();
	if (!caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	ibs_eilvt_setup();

	if (!ibs_eilvt_valid())
		return -EINVAL;

	perf_ibs_pm_init();

	ibs_caps = caps;
	/* make ibs_caps visible to other cpus: */
	smp_mb();
	/*
	 * x86_pmu_amd_ibs_starting_cpu will be called from core on
	 * all online cpus.
	 */
	cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
			  "perf/x86/amd/ibs:starting",
			  x86_pmu_amd_ibs_starting_cpu,
			  x86_pmu_amd_ibs_dying_cpu);

	perf_event_ibs_init();

	return 0;
}
1022
1023
1024device_initcall(amd_ibs_init);
1025