#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);

#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1

/**
 * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (select_idx == ARMV8_PMU_CYCLE_IDX &&
		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}
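
/*
 * Walk back from a counter to the vcpu that owns it: the pmc array is
 * embedded in struct kvm_pmu, which itself lives in the vcpu's arch state,
 * so container_of() arithmetic recovers the enclosing vcpu.
 */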
static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

	pmc -= pmc->idx;
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

/**
 * kvm_pmu_pmc_is_chained - determine if the pmc is chained
 * @pmc: The PMU counter pointer
 */
static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

	return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}

/**
 * kvm_pmu_idx_is_high_counter - determine if select_idx is the high half of a pair
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
{
	return select_idx & 0x1;
}

/**
 * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
 * @pmc: The PMU counter pointer
 *
 * When a pair of PMCs are chained together we use the low counter (canonical)
 * to hold the underlying perf event.
 */
static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
{
	if (kvm_pmu_pmc_is_chained(pmc) &&
	    kvm_pmu_idx_is_high_counter(pmc->idx))
		return pmc - 1;

	return pmc;
}
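
/*
 * kvm_pmu_get_alternate_pmc - return the other half of a counter pair,
 * i.e. the odd counter for an even index and vice versa.
 */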
static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
{
	if (kvm_pmu_idx_is_high_counter(pmc->idx))
		return pmc - 1;
	else
		return pmc + 1;
}

/**
 * kvm_pmu_idx_has_chain_evtype - determine if the event type is CHAIN
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 eventsel, reg;

	select_idx |= 0x1;

	if (select_idx == ARMV8_PMU_CYCLE_IDX)
		return false;

	reg = PMEVTYPER0_EL0 + select_idx;
	eventsel = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_EVENT;

	return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
}

/**
 * kvm_pmu_get_pair_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 */
static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
					  struct kvm_pmc *pmc)
{
	u64 counter, counter_high, reg, enabled, running;

	if (kvm_pmu_pmc_is_chained(pmc)) {
		pmc = kvm_pmu_get_canonical_pmc(pmc);
		reg = PMEVCNTR0_EL0 + pmc->idx;

		counter = __vcpu_sys_reg(vcpu, reg);
		counter_high = __vcpu_sys_reg(vcpu, reg + 1);

		counter = lower_32_bits(counter) | (counter_high << 32);
	} else {
		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
		counter = __vcpu_sys_reg(vcpu, reg);
	}

	/*
	 * The real counter value is equal to the value of the counter register
	 * plus the value the perf event has counted.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	return counter;
}

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (kvm_pmu_pmc_is_chained(pmc) &&
	    kvm_pmu_idx_is_high_counter(select_idx))
		counter = upper_32_bits(counter);
	else if (select_idx != ARMV8_PMU_CYCLE_IDX)
		counter = lower_32_bits(counter);

	return counter;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	u64 reg;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);

	/* Recreate the perf event to reflect the updated sample_period */
	kvm_pmu_create_perf_event(vcpu, select_idx);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
	pmc = kvm_pmu_get_canonical_pmc(pmc);
	if (pmc->perf_event) {
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 counter, reg, val;

	pmc = kvm_pmu_get_canonical_pmc(pmc);
	if (!pmc->perf_event)
		return;

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
		reg = PMCCNTR_EL0;
		val = counter;
	} else {
		reg = PMEVCNTR0_EL0 + pmc->idx;
		val = lower_32_bits(counter);
	}

	__vcpu_sys_reg(vcpu, reg) = val;

	if (kvm_pmu_pmc_is_chained(pmc))
		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);

	kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int i;

	for_each_set_bit(i, &mask, 32)
		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);

	bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
}

/**
 * kvm_pmu_vcpu_destroy - free the perf events of the PMU for this cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_release_perf_event(&pmu->pmc[i]);
}
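
/*
 * kvm_pmu_valid_counter_mask - bitmap of counters implemented for this vcpu:
 * event counters 0..N-1, as advertised by PMCR_EL0.N, plus the cycle counter.
 */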
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

		/* A change in the enable state may affect the chain state */
		kvm_pmu_update_pmc_chained(vcpu, i);
		kvm_pmu_create_perf_event(vcpu, i);

		/* At this point, pmc must be the canonical */
		if (pmc->perf_event) {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("fail to enable perf event\n");
		}
	}
}

/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

		/* A change in the enable state may affect the chain state */
		kvm_pmu_update_pmc_chained(vcpu, i);
		kvm_pmu_create_perf_event(vcpu, i);

		/* At this point, pmc must be the canonical */
		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}
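
/*
 * Compute the set of counters that have an overflow recorded and for which
 * both the counter and its overflow interrupt are enabled, i.e. the counters
 * that should currently drive the PMU interrupt line.
 */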
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
		reg &= kvm_pmu_valid_counter_mask(vcpu);
	}

	return reg;
}
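
/*
 * Recompute the PMU overflow interrupt level and, when an in-kernel irqchip
 * is used, propagate any change to the vgic.
 */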
static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					      pmu->irq_num, overflow, pmu);
		WARN_ON(ret);
	}
}
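
/*
 * With a userspace irqchip the PMU interrupt line is relayed through kvm_run;
 * report whether its level differs from what userspace last observed.
 */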
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the PMU bit of the device irq level for user space */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;
	u64 period;

	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

	/*
	 * Reset the sample period to the architectural limit,
	 * i.e. the point where the counter overflows.
	 */
	period = -(local64_read(&perf_event->count));

	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
		period &= GENMASK(31, 0);

	local64_set(&perf_event->hw.period_left, 0);
	perf_event->attr.sample_period = period;
	perf_event->hw.sample_period = period;

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

	if (kvm_pmu_overflow_status(vcpu)) {
		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int i;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
		return;

	/* Weed out disabled counters */
	val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
		u64 type, reg;

		if (!(val & BIT(i)))
			continue;

		/* PMSWINC only applies to counters programmed with SW_INCR */
		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
		type &= ARMV8_PMU_EVTYPE_EVENT;
		if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
			continue;

		/* increment this even SW_INCR counter */
		reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
		reg = lower_32_bits(reg);
		__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;

		if (reg) /* no overflow on the low part */
			continue;

		if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
			/* increment the high counter */
			reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
			reg = lower_32_bits(reg);
			__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
			if (!reg) /* mark overflow on the high counter */
				__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
		} else {
			/* mark overflow on the low counter */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
		}
	}
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
	int i;

	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter_mask(vcpu,
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
	} else {
		kvm_pmu_disable_counter_mask(vcpu, mask);
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		for_each_set_bit(i, &mask, 32)
			kvm_pmu_set_counter_value(vcpu, i, 0);
	}
}
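
/*
 * A counter only counts when the PMU as a whole is enabled (PMCR_EL0.E) and
 * its own bit is set in PMCNTENSET_EL0.
 */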
static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 */
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter, reg, data;

	/*
	 * For chained counters the event type and filtering attributes are
	 * obtained from the low/even counter. We also use this counter to
	 * determine if the event is enabled/disabled.
	 */
	pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);

	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
	data = __vcpu_sys_reg(vcpu, reg);

	kvm_pmu_stop_counter(vcpu, pmc);
	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;

	/* Software increment event doesn't need to be backed by a perf event */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
	    pmc->idx != ARMV8_PMU_CYCLE_IDX)
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = (pmc->idx == ARMV8_PMU_CYCLE_IDX) ?
		ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (kvm_pmu_pmc_is_chained(pmc)) {
		/*
		 * The initial sample period (overflow count) of an event. For
		 * chained counters we only support overflow interrupts on the
		 * high counter.
		 */
		attr.sample_period = (-counter) & GENMASK(63, 0);
		attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;

		event = perf_event_create_kernel_counter(&attr, -1, current,
							 kvm_pmu_perf_overflow,
							 pmc + 1);
	} else {
		/* The initial sample period (overflow count) of an event. */
		if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
			attr.sample_period = (-counter) & GENMASK(63, 0);
		else
			attr.sample_period = (-counter) & GENMASK(31, 0);

		event = perf_event_create_kernel_counter(&attr, -1, current,
							 kvm_pmu_perf_overflow, pmc);
	}

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

/**
 * kvm_pmu_update_pmc_chained - update chained bitmap
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 *
 * Update the chained bitmap based on the event type written in the
 * typer register and the enable state of the odd register.
 */
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
	bool new_state, old_state;

	old_state = kvm_pmu_pmc_is_chained(pmc);
	new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
		    kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);

	if (old_state == new_state)
		return;

	canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
	kvm_pmu_stop_counter(vcpu, canonical_pmc);
	if (new_state) {
		/*
		 * During promotion from !chained to chained we must ensure
		 * the adjacent counter is stopped and its event destroyed
		 */
		kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
		return;
	}
	clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the guest accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	u64 reg, event_type = data & ARMV8_PMU_EVTYPE_MASK;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;

	__vcpu_sys_reg(vcpu, reg) = event_type;

	kvm_pmu_update_pmc_chained(vcpu, select_idx);
	kvm_pmu_create_perf_event(vcpu, select_idx);
}

bool kvm_arm_support_pmu_v3(void)
{
	/*
	 * Check if HW_PERF_EVENTS are supported by checking the number of
	 * hardware performance counters. This could ensure the presence of
	 * a physical PMU and that CONFIG_PERF_EVENTS is selected.
	 */
	return (perf_num_counters() > 0);
}
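
/*
 * Validate the final PMU configuration before the vcpu first runs: with an
 * in-kernel irqchip a valid overflow interrupt must have been configured,
 * without one no interrupt may be set at all.
 */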
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.pmu.created)
		return 0;

	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly configured interrupt number and using an in-kernel
	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
	 */
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -EINVAL;

		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
		 * number against the dimensions of the vgic and make sure
		 * it's valid.
		 */
		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
			return -EINVAL;
	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
		return -EINVAL;
	}

	kvm_pmu_vcpu_reset(vcpu);
	vcpu->arch.pmu.ready = true;

	return 0;
}
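
/*
 * Finalise PMU creation for the vcpu: check host support and the vcpu feature
 * bit, claim the overflow interrupt from the vgic when an in-kernel irqchip
 * is used, and mark the PMU as created.
 */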
static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_support_pmu_v3())
		return -ENODEV;

	if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
		return -ENXIO;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	if (irqchip_in_kernel(vcpu->kvm)) {
		int ret;

		/*
		 * If using the PMU with an in-kernel virtual GIC
		 * implementation, we require the GIC to be already
		 * initialized when initializing the PMU.
		 */
		if (!vgic_initialized(vcpu->kvm))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);
		if (ret)
			return ret;
	}

	vcpu->arch.pmu.created = true;
	return 0;
}

/*
 * For one VM the interrupt type must be same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}
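
/*
 * Handle writes to the PMU device attributes: either configure the overflow
 * interrupt number or initialise the PMU for this vcpu.
 */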
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(vcpu->kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}
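
/*
 * Read back the configured PMU overflow interrupt number for userspace.
 */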
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}
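
/*
 * Report which PMU device attributes are supported for this vcpu.
 */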
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
		if (kvm_arm_support_pmu_v3() &&
		    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return 0;
	}

	return -ENXIO;
}