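/*
 * Performance events - AMD Instruction Based Sampling (IBS)
 *
 * For CPUs with perf support only; otherwise this falls back to
 * software events.
 */
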
#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
#include <linux/syscore_ops.h>
#include <linux/sched/clock.h>

#include <asm/apic.h>

#include "../perf_event.h"

static u32 ibs_caps;

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

#include <linux/kprobes.h>
#include <linux/hardirq.h>

#include <asm/nmi.h>

#define IBS_FETCH_CONFIG_MASK (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK IBS_OP_MAX_CNT

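/*
 * IBS states:
 *
 * ENABLED tracks the pmu::add()/pmu::del() state: while set, the (single)
 * IBS counter on this cpu is taken and any further add() must fail.
 *
 * STARTED/STOPPING/STOPPED track the pmu::start()/pmu::stop() state and
 * exist because the hardware can deliver late NMIs, i.e. NMIs that arrive
 * after the enable bit has already been cleared:
 *
 *  - STOPPED is set by perf_ibs_stop() before the enable bit is cleared;
 *    an NMI that finds STARTED clear consumes STOPPED and reports the NMI
 *    as handled (see the fail path in perf_ibs_handle_irq()).
 *
 *  - STOPPING is set first thing in perf_ibs_stop() so that a nested
 *    invocation (e.g. via the throttle path in the NMI handler) bails
 *    out early.
 *
 *  - STARTED is set by perf_ibs_start() before the hardware is enabled
 *    and cleared by perf_ibs_stop() only after it has been disabled.
 */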
enum ibs_states {
        IBS_ENABLED = 0,
        IBS_STARTED = 1,
        IBS_STOPPING = 2,
        IBS_STOPPED = 3,

        IBS_MAX_STATES,
};

struct cpu_perf_ibs {
        struct perf_event *event;
        unsigned long state[BITS_TO_LONGS(IBS_MAX_STATES)];
};

struct perf_ibs {
        struct pmu pmu;
        unsigned int msr;
        u64 config_mask;
        u64 cnt_mask;
        u64 enable_mask;
        u64 valid_mask;
        u64 max_period;
        unsigned long offset_mask[1];
        int offset_max;
        unsigned int fetch_count_reset_broken : 1;
        struct cpu_perf_ibs __percpu *pcpu;

        struct attribute **format_attrs;
        struct attribute_group format_group;
        const struct attribute_group *attr_groups[2];

        u64 (*get_count)(u64 config);
};

struct perf_ibs_data {
        u32 size;
        union {
                u32 data[0];
                u32 caps;
        };
        u64 regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};

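/*
 * Compute the next hardware period from the software period/period_left,
 * clamping the result to [min, max]. Returns non-zero when the software
 * period has elapsed, i.e. when a sample should be generated.
 */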
static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
{
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int overflow = 0;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                overflow = 1;
        }

        if (unlikely(left < (s64)min)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                overflow = 1;
        }

        /*
         * If the hw period that triggers the sw overflow is too short
         * we might hit the irq handler. This biases the results.
         * Thus we shorten the next-to-last period and set the last
         * period to the max period.
         */
        if (left > max) {
                left -= max;
                if (left > max)
                        left = max;
                else if (left < min)
                        left = min;
        }

        *hw_period = (u64)left;

        return overflow;
}

static int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
        struct hw_perf_event *hwc = &event->hw;
        int shift = 64 - width;
        u64 prev_raw_count;
        u64 delta;

        /*
         * Careful: an NMI might modify the previous event value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic event atomically:
         */
        prev_raw_count = local64_read(&hwc->prev_count);
        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                        new_raw_count) != prev_raw_count)
                return 0;

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (event-)time and add that to the generic event.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count.
         */
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return 1;
}

static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;

static struct perf_ibs *get_ibs_pmu(int type)
{
        if (perf_ibs_fetch.pmu.type == type)
                return &perf_ibs_fetch;
        if (perf_ibs_op.pmu.type == type)
                return &perf_ibs_op;
        return NULL;
}
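
/*
 * Use IBS for precise event sampling:
 *
 *  perf record -a -e cpu-cycles:p ...    # use ibs op counting cycle count
 *  perf record -a -e r076:p ...          # same as -e cpu-cycles:p
 *  perf record -a -e r0C1:p ...          # use ibs op counting micro-ops
 *
 * IbsOpCntCtl (bit 19 of the IBS execution control MSR) selects between
 * cycle and micro-op counting mode, which is why raw event 0xC1 maps to
 * a config with IBS_OP_CNT_CTL set below.
 *
 * The rip of IBS samples has skid 0, so IBS supports precise levels 1
 * and 2 and PERF_EFLAGS_EXACT is set. In rare cases the rip is invalid
 * when IBS was not able to record it correctly; then PERF_EFLAGS_EXACT
 * is cleared and the rip is taken from pt_regs instead (see
 * perf_ibs_handle_irq()).
 */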
static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
{
        switch (event->attr.precise_ip) {
        case 0:
                return -ENOENT;
        case 1:
        case 2:
                break;
        default:
                return -EOPNOTSUPP;
        }

        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
                switch (event->attr.config) {
                case PERF_COUNT_HW_CPU_CYCLES:
                        *config = 0;
                        return 0;
                }
                break;
        case PERF_TYPE_RAW:
                switch (event->attr.config) {
                case 0x0076:
                        *config = 0;
                        return 0;
                case 0x00C1:
                        *config = IBS_OP_CNT_CTL;
                        return 0;
                }
                break;
        default:
                return -ENOENT;
        }

        return -EOPNOTSUPP;
}

static int perf_ibs_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct perf_ibs *perf_ibs;
        u64 max_cnt, config;
        int ret;

        perf_ibs = get_ibs_pmu(event->attr.type);
        if (perf_ibs) {
                config = event->attr.config;
        } else {
                perf_ibs = &perf_ibs_op;
                ret = perf_ibs_precise_event(event, &config);
                if (ret)
                        return ret;
        }

        if (event->pmu != &perf_ibs->pmu)
                return -ENOENT;

        if (config & ~perf_ibs->config_mask)
                return -EINVAL;

        if (hwc->sample_period) {
                if (config & perf_ibs->cnt_mask)
                        /* raw max_cnt may not be set */
                        return -EINVAL;
                if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
                        /*
                         * lower 4 bits can not be set in ibs max cnt,
                         * but allowing it in case we adjust the
                         * sample period to set a frequency.
                         */
                        return -EINVAL;
                hwc->sample_period &= ~0x0FULL;
                if (!hwc->sample_period)
                        hwc->sample_period = 0x10;
        } else {
                max_cnt = config & perf_ibs->cnt_mask;
                config &= ~perf_ibs->cnt_mask;
                event->attr.sample_period = max_cnt << 4;
                hwc->sample_period = event->attr.sample_period;
        }

        if (!hwc->sample_period)
                return -EINVAL;

        /*
         * If we modify hwc->sample_period, we also need to update
         * hwc->last_period and hwc->period_left.
         */
        hwc->last_period = hwc->sample_period;
        local64_set(&hwc->period_left, hwc->sample_period);

        hwc->config_base = perf_ibs->msr;
        hwc->config = config;

        return 0;
}

static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
                               struct hw_perf_event *hwc, u64 *period)
{
        int overflow;

        /* ignore lower 4 bits in min count: */
        overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
        local64_set(&hwc->prev_count, 0);

        return overflow;
}

static u64 get_ibs_fetch_count(u64 config)
{
        return (config & IBS_FETCH_CNT) >> 12;
}

static u64 get_ibs_op_count(u64 config)
{
        u64 count = 0;

        /*
         * If the internal 27-bit counter rolled over, the count is MaxCnt
         * and the lower 7 bits of CurCnt are randomized.
         * Otherwise CurCnt has the full 27-bit current count.
         */
        if (config & IBS_OP_VAL) {
                count = (config & IBS_OP_MAX_CNT) << 4;
                if (ibs_caps & IBS_CAPS_OPCNTEXT)
                        count += config & IBS_OP_MAX_CNT_EXT_MASK;
        } else if (ibs_caps & IBS_CAPS_RDWROPCNT) {
                count = (config & IBS_OP_CUR_CNT) >> 32;
        }

        return count;
}

static void
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
                      u64 *config)
{
        u64 count = perf_ibs->get_count(*config);

        /*
         * Set width to 64 since we do not overflow on max width but
         * instead on max count. In perf_ibs_set_period() we clear
         * prev count manually on overflow.
         */
        while (!perf_event_try_update(event, count, 64)) {
                rdmsrl(event->hw.config_base, *config);
                count = perf_ibs->get_count(*config);
        }
}

static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
                                         struct hw_perf_event *hwc, u64 config)
{
        u64 tmp = hwc->config | config;

        if (perf_ibs->fetch_count_reset_broken)
                wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask);

        wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask);
}
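
/*
 * Family 10h erratum: the IBS engine may generate an interrupt that
 * cannot be cleared unless the counter mask bits are cleared first and
 * the enable bit is cleared afterwards (see the Family 10h revision
 * guide), hence the extra MSR write below on family 0x10 parts.
 */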
static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
                                          struct hw_perf_event *hwc, u64 config)
{
        config &= ~perf_ibs->cnt_mask;
        if (boot_cpu_data.x86 == 0x10)
                wrmsrl(hwc->config_base, config);
        config &= ~perf_ibs->enable_mask;
        wrmsrl(hwc->config_base, config);
}

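/*
 * Note the ordering below: STARTED is set before the hardware is enabled
 * and, in perf_ibs_stop(), STOPPED is set before it is disabled, so that
 * the NMI handler always sees a consistent view of the state bits
 * relative to the enable bit.
 */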
static void perf_ibs_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
        u64 period, config = 0;

        if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
                return;

        WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
        hwc->state = 0;

        perf_ibs_set_period(perf_ibs, hwc, &period);
        if (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_OPCNTEXT)) {
                config |= period & IBS_OP_MAX_CNT_EXT_MASK;
                period &= ~IBS_OP_MAX_CNT_EXT_MASK;
        }
        config |= period >> 4;

        /*
         * Set STARTED before enabling the hardware, such that a subsequent
         * NMI must observe it.
         */
        set_bit(IBS_STARTED, pcpu->state);
        clear_bit(IBS_STOPPING, pcpu->state);
        perf_ibs_enable_event(perf_ibs, hwc, config);

        perf_event_update_userpage(event);
}

static void perf_ibs_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
        u64 config;
        int stopping;

        if (test_and_set_bit(IBS_STOPPING, pcpu->state))
                return;

        stopping = test_bit(IBS_STARTED, pcpu->state);

        if (!stopping && (hwc->state & PERF_HES_UPTODATE))
                return;

        rdmsrl(hwc->config_base, config);

        if (stopping) {
                /*
                 * Set STOPPED before disabling the hardware, such that it
                 * must be visible to NMIs the moment we clear the EN bit,
                 * at which point we can generate a !valid sample which
                 * we need to consume.
                 */
                set_bit(IBS_STOPPED, pcpu->state);
                perf_ibs_disable_event(perf_ibs, hwc, config);
                /*
                 * Clear STARTED after disabling the hardware; if it were
                 * cleared before, an NMI hitting after the clear but before
                 * clearing the EN bit might think it was a spurious NMI and
                 * not handle it.
                 *
                 * Clearing it after, however, creates the problem of the NMI
                 * handler seeing STARTED but not having a valid sample.
                 */
                clear_bit(IBS_STARTED, pcpu->state);
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
        }

        if (hwc->state & PERF_HES_UPTODATE)
                return;

        /*
         * Clear valid bit to not count rollovers on update, rollovers
         * are only updated in the irq handler.
         */
        config &= ~perf_ibs->valid_mask;

        perf_ibs_event_update(perf_ibs, event, &config);
        hwc->state |= PERF_HES_UPTODATE;
}

static int perf_ibs_add(struct perf_event *event, int flags)
{
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

        if (test_and_set_bit(IBS_ENABLED, pcpu->state))
                return -ENOSPC;

        event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        pcpu->event = event;

        if (flags & PERF_EF_START)
                perf_ibs_start(event, PERF_EF_RELOAD);

        return 0;
}

static void perf_ibs_del(struct perf_event *event, int flags)
{
        struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

        if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
                return;

        perf_ibs_stop(event, PERF_EF_UPDATE);

        pcpu->event = NULL;

        perf_event_update_userpage(event);
}

static void perf_ibs_read(struct perf_event *event) { }

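/*
 * Format attributes: rand_en maps to IbsRandEn (bit 57 of IbsFetchCtl)
 * and cnt_ctl to IbsOpCntCtl (bit 19 of IbsOpCtl). The first slot of
 * ibs_op_format_attrs stays NULL and is only filled in at init time
 * when the CPU advertises IBS_CAPS_OPCNT (see perf_event_ibs_init()).
 */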
PMU_FORMAT_ATTR(rand_en, "config:57");
PMU_FORMAT_ATTR(cnt_ctl, "config:19");

static struct attribute *ibs_fetch_format_attrs[] = {
        &format_attr_rand_en.attr,
        NULL,
};

static struct attribute *ibs_op_format_attrs[] = {
        NULL,   /* &format_attr_cnt_ctl.attr if IBS_CAPS_OPCNT */
        NULL,
};

static struct perf_ibs perf_ibs_fetch = {
        .pmu = {
                .task_ctx_nr = perf_invalid_context,

                .event_init = perf_ibs_init,
                .add = perf_ibs_add,
                .del = perf_ibs_del,
                .start = perf_ibs_start,
                .stop = perf_ibs_stop,
                .read = perf_ibs_read,
                .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
        },
        .msr = MSR_AMD64_IBSFETCHCTL,
        .config_mask = IBS_FETCH_CONFIG_MASK,
        .cnt_mask = IBS_FETCH_MAX_CNT,
        .enable_mask = IBS_FETCH_ENABLE,
        .valid_mask = IBS_FETCH_VAL,
        .max_period = IBS_FETCH_MAX_CNT << 4,
        .offset_mask = { MSR_AMD64_IBSFETCH_REG_MASK },
        .offset_max = MSR_AMD64_IBSFETCH_REG_COUNT,
        .format_attrs = ibs_fetch_format_attrs,

        .get_count = get_ibs_fetch_count,
};

static struct perf_ibs perf_ibs_op = {
        .pmu = {
                .task_ctx_nr = perf_invalid_context,

                .event_init = perf_ibs_init,
                .add = perf_ibs_add,
                .del = perf_ibs_del,
                .start = perf_ibs_start,
                .stop = perf_ibs_stop,
                .read = perf_ibs_read,
                /* like the fetch PMU, IBS op cannot filter by exclude_* */
                .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
        },
        .msr = MSR_AMD64_IBSOPCTL,
        .config_mask = IBS_OP_CONFIG_MASK,
        .cnt_mask = IBS_OP_MAX_CNT | IBS_OP_CUR_CNT |
                    IBS_OP_CUR_CNT_RAND,
        .enable_mask = IBS_OP_ENABLE,
        .valid_mask = IBS_OP_VAL,
        .max_period = IBS_OP_MAX_CNT << 4,
        .offset_mask = { MSR_AMD64_IBSOP_REG_MASK },
        .offset_max = MSR_AMD64_IBSOP_REG_COUNT,
        .format_attrs = ibs_op_format_attrs,

        .get_count = get_ibs_op_count,
};

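/*
 * The NMI handler for one IBS PMU: check the state bits, read the control
 * MSR and, if the sample is valid, collect the sample registers, rearm the
 * period and push the sample out through perf_event_overflow(). Returns
 * non-zero when the NMI has been consumed.
 */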
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
        struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
        struct perf_event *event = pcpu->event;
        struct hw_perf_event *hwc;
        struct perf_sample_data data;
        struct perf_raw_record raw;
        struct pt_regs regs;
        struct perf_ibs_data ibs_data;
        int offset, size, check_rip, offset_max, throttle = 0;
        unsigned int msr;
        u64 *buf, *config, period, new_config = 0;

        if (!test_bit(IBS_STARTED, pcpu->state)) {
fail:
                /*
                 * Catch spurious NMIs after stopping IBS: After
                 * disabling IBS there could still be incoming NMIs
                 * with samples that even have the valid bit cleared.
                 * Mark all these NMIs as handled.
                 */
                if (test_and_clear_bit(IBS_STOPPED, pcpu->state))
                        return 1;

                return 0;
        }

        if (WARN_ON_ONCE(!event))
                goto fail;

        hwc = &event->hw;
        msr = hwc->config_base;
        buf = ibs_data.regs;
        rdmsrl(msr, *buf);
        if (!(*buf++ & perf_ibs->valid_mask))
                goto fail;

        config = &ibs_data.regs[0];
        perf_ibs_event_update(perf_ibs, event, config);
        perf_sample_data_init(&data, 0, hwc->last_period);
        if (!perf_ibs_set_period(perf_ibs, hwc, &period))
                goto out;       /* no sw counter overflow */

        ibs_data.caps = ibs_caps;
        size = 1;
        offset = 1;
        check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
        if (event->attr.sample_type & PERF_SAMPLE_RAW)
                offset_max = perf_ibs->offset_max;
        else if (check_rip)
                offset_max = 3;
        else
                offset_max = 1;
        do {
                rdmsrl(msr + offset, *buf++);
                size++;
                offset = find_next_bit(perf_ibs->offset_mask,
                                       perf_ibs->offset_max,
                                       offset + 1);
        } while (offset < offset_max);
        /*
         * Read IbsBrTarget, IbsOpData4, and IbsExtdCtl separately
         * depending on their availability.
         * Can't add to offset_max as they are staggered
         */
        if (event->attr.sample_type & PERF_SAMPLE_RAW) {
                if (perf_ibs == &perf_ibs_op) {
                        if (ibs_caps & IBS_CAPS_BRNTRGT) {
                                rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
                                size++;
                        }
                        if (ibs_caps & IBS_CAPS_OPDATA4) {
                                rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
                                size++;
                        }
                }
                if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) {
                        rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++);
                        size++;
                }
        }
        ibs_data.size = sizeof(u64) * size;

        regs = *iregs;
        if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
                regs.flags &= ~PERF_EFLAGS_EXACT;
        } else {
                set_linear_ip(&regs, ibs_data.regs[1]);
                regs.flags |= PERF_EFLAGS_EXACT;
        }

        if (event->attr.sample_type & PERF_SAMPLE_RAW) {
                raw = (struct perf_raw_record){
                        .frag = {
                                .size = sizeof(u32) + ibs_data.size,
                                .data = ibs_data.data,
                        },
                };
                data.raw = &raw;
        }

        throttle = perf_event_overflow(event, &data, &regs);
out:
        if (throttle) {
                perf_ibs_stop(event, 0);
        } else {
                if (perf_ibs == &perf_ibs_op) {
                        if (ibs_caps & IBS_CAPS_OPCNTEXT) {
                                new_config = period & IBS_OP_MAX_CNT_EXT_MASK;
                                period &= ~IBS_OP_MAX_CNT_EXT_MASK;
                        }
                        if ((ibs_caps & IBS_CAPS_RDWROPCNT) && (*config & IBS_OP_CNT_CTL))
                                new_config |= *config & IBS_OP_CUR_CNT_RAND;
                }
                new_config |= period >> 4;

                perf_ibs_enable_event(perf_ibs, hwc, new_config);
        }

        perf_event_update_userpage(event);

        return 1;
}

static int
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
        u64 stamp = sched_clock();
        int handled = 0;

        handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
        handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

        if (handled)
                inc_irq_stat(apic_perf_irqs);

        perf_sample_event_took(sched_clock() - stamp);

        return handled;
}
NOKPROBE_SYMBOL(perf_ibs_nmi_handler);

static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
        struct cpu_perf_ibs __percpu *pcpu;
        int ret;

        pcpu = alloc_percpu(struct cpu_perf_ibs);
        if (!pcpu)
                return -ENOMEM;

        perf_ibs->pcpu = pcpu;

        /* register attributes */
        if (perf_ibs->format_attrs[0]) {
                memset(&perf_ibs->format_group, 0, sizeof(perf_ibs->format_group));
                perf_ibs->format_group.name = "format";
                perf_ibs->format_group.attrs = perf_ibs->format_attrs;

                memset(&perf_ibs->attr_groups, 0, sizeof(perf_ibs->attr_groups));
                perf_ibs->attr_groups[0] = &perf_ibs->format_group;
                perf_ibs->pmu.attr_groups = perf_ibs->attr_groups;
        }

        ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
        if (ret) {
                perf_ibs->pcpu = NULL;
                free_percpu(pcpu);
        }

        return ret;
}

static __init void perf_event_ibs_init(void)
{
        struct attribute **attr = ibs_op_format_attrs;

        /*
         * Some chips fail to reset the fetch count when it is written;
         * instead they need a 0-1 transition of IbsFetchEn.
         */
        if (boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18)
                perf_ibs_fetch.fetch_count_reset_broken = 1;

        perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");

        if (ibs_caps & IBS_CAPS_OPCNT) {
                perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
                *attr++ = &format_attr_cnt_ctl.attr;
        }

        if (ibs_caps & IBS_CAPS_OPCNTEXT) {
                perf_ibs_op.max_period |= IBS_OP_MAX_CNT_EXT_MASK;
                perf_ibs_op.config_mask |= IBS_OP_MAX_CNT_EXT_MASK;
                perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK;
        }

        perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");

        register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
        pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
}

#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init void perf_event_ibs_init(void) { }

#endif

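/* IBS - apic initialization */
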
static __init u32 __get_ibs_caps(void)
{
        u32 caps;
        unsigned int max_level;

        if (!boot_cpu_has(X86_FEATURE_IBS))
                return 0;

        /* check IBS cpuid feature flags */
        max_level = cpuid_eax(0x80000000);
        if (max_level < IBS_CPUID_FEATURES)
                return IBS_CAPS_DEFAULT;

        caps = cpuid_eax(IBS_CPUID_FEATURES);
        if (!(caps & IBS_CAPS_AVAIL))
                /* cpuid flags not valid */
                return IBS_CAPS_DEFAULT;

        return caps;
}

u32 get_ibs_caps(void)
{
        return ibs_caps;
}
EXPORT_SYMBOL(get_ibs_caps);

static inline int get_eilvt(int offset)
{
        return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

static inline int put_eilvt(int offset)
{
        return !setup_APIC_eilvt(offset, 0, 0, 1);
}

/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if
 * available.
 */
static inline int ibs_eilvt_valid(void)
{
        int offset;
        u64 val;
        int valid = 0;

        preempt_disable();

        rdmsrl(MSR_AMD64_IBSCTL, val);
        offset = val & IBSCTL_LVT_OFFSET_MASK;

        if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
                pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
                       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
                goto out;
        }

        if (!get_eilvt(offset)) {
                pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
                       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
                goto out;
        }

        valid = 1;
out:
        preempt_enable();

        return valid;
}

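/*
 * Program the chosen LVT offset into the IBSCTL register of every
 * northbridge (one per node) through PCI config space and read it back
 * to verify that the write took effect.
 */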
static int setup_ibs_ctl(int ibs_eilvt_off)
{
        struct pci_dev *cpu_cfg;
        int nodes;
        u32 value = 0;

        nodes = 0;
        cpu_cfg = NULL;
        do {
                cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
                                         PCI_DEVICE_ID_AMD_10H_NB_MISC,
                                         cpu_cfg);
                if (!cpu_cfg)
                        break;
                ++nodes;
                pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
                                       | IBSCTL_LVT_OFFSET_VALID);
                pci_read_config_dword(cpu_cfg, IBSCTL, &value);
                if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
                        pci_dev_put(cpu_cfg);
                        pr_debug("Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n",
                                 value);
                        return -EINVAL;
                }
        } while (1);

        if (!nodes) {
                pr_debug("No CPU node configured for IBS\n");
                return -ENODEV;
        }

        return 0;
}
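
/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * setup the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset, which updates the
 * offset in the per-node IBS_CTL msr. The per-core APIC setup of the
 * IBS interrupt vector for the other cpus happens later, when they are
 * brought up (see x86_pmu_amd_ibs_starting_cpu()).
 */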
static void force_ibs_eilvt_setup(void)
{
        int offset;
        int ret;

        preempt_disable();
        /* find the next free available EILVT entry, skip offset 0 */
        for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
                if (get_eilvt(offset))
                        break;
        }
        preempt_enable();

        if (offset == APIC_EILVT_NR_MAX) {
                pr_debug("No EILVT entry available\n");
                return;
        }

        ret = setup_ibs_ctl(offset);
        if (ret)
                goto out;

        if (!ibs_eilvt_valid())
                goto out;

        pr_info("LVT offset %d assigned\n", offset);

        return;
out:
        preempt_disable();
        put_eilvt(offset);
        preempt_enable();
}

static void ibs_eilvt_setup(void)
{
        /*
         * Force LVT offset assignment for family 10h: The offsets are
         * not assigned by the BIOS for this family, so the OS is
         * responsible for doing it. If the OS assignment fails, fall
         * back to BIOS settings and try to setup this.
         */
        if (boot_cpu_data.x86 == 0x10)
                force_ibs_eilvt_setup();
}

static inline int get_ibs_lvt_offset(void)
{
        u64 val;

        rdmsrl(MSR_AMD64_IBSCTL, val);
        if (!(val & IBSCTL_LVT_OFFSET_VALID))
                return -EINVAL;

        return val & IBSCTL_LVT_OFFSET_MASK;
}

static void setup_APIC_ibs(void)
{
        int offset;

        offset = get_ibs_lvt_offset();
        if (offset < 0)
                goto failed;

        if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
                return;
failed:
        pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
                smp_processor_id());
}

static void clear_APIC_ibs(void)
{
        int offset;

        offset = get_ibs_lvt_offset();
        if (offset >= 0)
                setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}

static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)
{
        setup_APIC_ibs();
        return 0;
}

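/*
 * The APIC EILVT entry does not survive suspend: mask it on suspend and
 * redo the IBS EILVT and APIC setup on resume.
 */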
#ifdef CONFIG_PM

static int perf_ibs_suspend(void)
{
        clear_APIC_ibs();
        return 0;
}

static void perf_ibs_resume(void)
{
        ibs_eilvt_setup();
        setup_APIC_ibs();
}

static struct syscore_ops perf_ibs_syscore_ops = {
        .resume = perf_ibs_resume,
        .suspend = perf_ibs_suspend,
};

static void perf_ibs_pm_init(void)
{
        register_syscore_ops(&perf_ibs_syscore_ops);
}

#else

static inline void perf_ibs_pm_init(void) { }

#endif

static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)
{
        clear_APIC_ibs();
        return 0;
}

static __init int amd_ibs_init(void)
{
        u32 caps;

        caps = __get_ibs_caps();
        if (!caps)
                return -ENODEV; /* ibs not supported by the cpu */

        ibs_eilvt_setup();

        if (!ibs_eilvt_valid())
                return -EINVAL;

        perf_ibs_pm_init();

        ibs_caps = caps;
        /* make ibs_caps visible to other cpus: */
        smp_mb();
        /*
         * x86_pmu_amd_ibs_starting_cpu will be called from core on
         * all online cpus.
         */
        cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
                          "perf/x86/amd/ibs:starting",
                          x86_pmu_amd_ibs_starting_cpu,
                          x86_pmu_amd_ibs_dying_cpu);

        perf_event_ibs_init();

        return 0;
}

/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);