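// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */
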
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);

#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1

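/**
 * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */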
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (select_idx == ARMV8_PMU_CYCLE_IDX &&
		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}

static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

	pmc -= pmc->idx;
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

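/**
 * kvm_pmu_pmc_is_chained - determine if the pmc is chained
 * @pmc: The PMU counter pointer
 */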
static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

	return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}

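/**
 * kvm_pmu_idx_is_high_counter - determine if select_idx is a high/low counter
 * @select_idx: The counter index
 */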
static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
{
	return select_idx & 0x1;
}

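/**
 * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
 * @pmc: The PMU counter pointer
 *
 * When a pair of PMCs are chained together we use the low counter (canonical)
 * to hold the underlying perf event.
 */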
static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
{
	if (kvm_pmu_pmc_is_chained(pmc) &&
	    kvm_pmu_idx_is_high_counter(pmc->idx))
		return pmc - 1;

	return pmc;
}

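/**
 * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */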
static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 eventsel, reg;

	select_idx |= 0x1;

	if (select_idx == ARMV8_PMU_CYCLE_IDX)
		return false;

	reg = PMEVTYPER0_EL0 + select_idx;
	eventsel = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_EVENT;

	return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
}

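/**
 * kvm_pmu_get_pair_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 */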
static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
					  struct kvm_pmc *pmc)
{
	u64 counter, counter_high, reg, enabled, running;

	if (kvm_pmu_pmc_is_chained(pmc)) {
		pmc = kvm_pmu_get_canonical_pmc(pmc);
		reg = PMEVCNTR0_EL0 + pmc->idx;

		counter = __vcpu_sys_reg(vcpu, reg);
		counter_high = __vcpu_sys_reg(vcpu, reg + 1);

		counter = lower_32_bits(counter) | (counter_high << 32);
	} else {
		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
		counter = __vcpu_sys_reg(vcpu, reg);
	}

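	/*
	 * The real counter value is equal to the value of the counter register
	 * plus the value the perf event has counted.
	 */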
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	return counter;
}

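/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */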
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (kvm_pmu_pmc_is_chained(pmc) &&
	    kvm_pmu_idx_is_high_counter(select_idx))
		counter = upper_32_bits(counter);
	else if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
		counter = lower_32_bits(counter);

	return counter;
}

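/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */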
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	u64 reg;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);

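	/* Recreate the perf event to reflect the updated sample_period */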
	kvm_pmu_create_perf_event(vcpu, select_idx);
}

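/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */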
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
	pmc = kvm_pmu_get_canonical_pmc(pmc);
	if (pmc->perf_event) {
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

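/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */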
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 counter, reg;

	pmc = kvm_pmu_get_canonical_pmc(pmc);
	if (!pmc->perf_event)
		return;

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (kvm_pmu_pmc_is_chained(pmc)) {
		reg = PMEVCNTR0_EL0 + pmc->idx;
		__vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
	} else {
		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
		__vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
	}

	kvm_pmu_release_perf_event(pmc);
}

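/**
 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
 * @vcpu: The vcpu pointer
 */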
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		pmu->pmc[i].idx = i;
}

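/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 */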
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);

	bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
}

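/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 */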
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_release_perf_event(&pmu->pmc[i]);
}

u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

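/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */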
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

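		/*
		 * For high counters of chained events we must recreate the
		 * perf event with the bound event counter.
		 */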
		if (kvm_pmu_pmc_is_chained(pmc) &&
		    kvm_pmu_idx_is_high_counter(i)) {
			kvm_pmu_create_perf_event(vcpu, i);
			continue;
		}

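		/* At this point, pmc must be the canonical counter */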
		if (pmc->perf_event) {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("failed to enable perf event\n");
		}
	}
}

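/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */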
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

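		/*
		 * For high counters of chained events we must recreate the
		 * perf event with the bound event counter.
		 */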
		if (kvm_pmu_pmc_is_chained(pmc) &&
		    kvm_pmu_idx_is_high_counter(i)) {
			kvm_pmu_create_perf_event(vcpu, i);
			continue;
		}

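		/* At this point, pmc must be the canonical counter */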
		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}

static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
		reg &= kvm_pmu_valid_counter_mask(vcpu);
	}

	return reg;
}

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					      pmu->irq_num, overflow, pmu);
		WARN_ON(ret);
	}
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}

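/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */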
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

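	/* Populate the PMU overflow interrupt level for user space */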
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

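/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and
 * inject an interrupt if that was the case.
 */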
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

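/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */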
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

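/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */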
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

	if (kvm_pmu_overflow_status(vcpu)) {
		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
		kvm_vcpu_kick(vcpu);
	}
}

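/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */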
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	u64 type, enable, reg;

	if (val == 0)
		return;

	enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
		if (!(val & BIT(i)))
			continue;
		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
		       & ARMV8_PMU_EVTYPE_EVENT;
		if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
		    && (enable & BIT(i))) {
			reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
			reg = lower_32_bits(reg);
			__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
			if (!reg)
				__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
		}
	}
}

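/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */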
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	u64 mask;
	int i;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter_mask(vcpu,
		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
	} else {
		kvm_pmu_disable_counter_mask(vcpu, mask);
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
			kvm_pmu_set_counter_value(vcpu, i, 0);
	}
}

static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

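/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 */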
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter, reg, data;

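	/*
	 * For chained counters the event type and filtering attributes are
	 * obtained from the low/even counter. We also use this counter to
	 * determine if the event is enabled/disabled.
	 */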
	pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);

	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
	data = __vcpu_sys_reg(vcpu, reg);

	kvm_pmu_stop_counter(vcpu, pmc);
	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;

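	/* Software increment event doesn't need to be backed by a perf event */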
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
	    pmc->idx != ARMV8_PMU_CYCLE_IDX)
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1;
	attr.exclude_host = 1;
	attr.config = (pmc->idx == ARMV8_PMU_CYCLE_IDX) ?
		ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
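		/*
		 * The initial sample period (overflow count) of an event. For
		 * chained counters we only support overflow interrupts on the
		 * high counter.
		 */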
		attr.sample_period = (-counter) & GENMASK(63, 0);
		if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
			attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;

		event = perf_event_create_kernel_counter(&attr, -1, current,
							 kvm_pmu_perf_overflow,
							 pmc + 1);
	} else {
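		/* The initial sample period (overflow count) of an event. */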
		if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
			attr.sample_period = (-counter) & GENMASK(63, 0);
		else
			attr.sample_period = (-counter) & GENMASK(31, 0);

		event = perf_event_create_kernel_counter(&attr, -1, current,
							 kvm_pmu_perf_overflow, pmc);
	}

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

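/**
 * kvm_pmu_update_pmc_chained - update chained bitmap
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 *
 * Update the chained bitmap based on the event type written in the
 * typer register.
 */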
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
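		/*
		 * During promotion from !chained to chained we must ensure
		 * the adjacent counter is stopped and its event destroyed
		 */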
		if (!kvm_pmu_pmc_is_chained(pmc))
			kvm_pmu_stop_counter(vcpu, pmc);

		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
	} else {
		clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
	}
}

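/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the guest accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */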
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	u64 reg, event_type = data & ARMV8_PMU_EVTYPE_MASK;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;

	__vcpu_sys_reg(vcpu, reg) = event_type;

	kvm_pmu_update_pmc_chained(vcpu, select_idx);
	kvm_pmu_create_perf_event(vcpu, select_idx);
}

bool kvm_arm_support_pmu_v3(void)
{
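	/*
	 * Check if HW_PERF_EVENTS are supported by checking the number of
	 * hardware performance counters. This could ensure the presence of
	 * a physical PMU and CONFIG_PERF_EVENT is selected.
	 */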
	return (perf_num_counters() > 0);
}

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.pmu.created)
		return 0;

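	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly configured interrupt number and using an in-kernel
	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
	 */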
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -EINVAL;

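		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
		 * number against the dimensions of the vgic and make sure
		 * it's valid.
		 */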
		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
			return -EINVAL;
	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
		return -EINVAL;
	}

	kvm_pmu_vcpu_reset(vcpu);
	vcpu->arch.pmu.ready = true;

	return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_support_pmu_v3())
		return -ENODEV;

	if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
		return -ENXIO;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	if (irqchip_in_kernel(vcpu->kvm)) {
		int ret;

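		/*
		 * If using the PMU with an in-kernel virtual GIC
		 * implementation, we require the GIC to be already
		 * initialized when initializing the PMU.
		 */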
		if (!vgic_initialized(vcpu->kvm))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);
		if (ret)
			return ret;
	}

	vcpu->arch.pmu.created = true;
	return 0;
}

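/*
 * For one VM the interrupt type must be same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */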
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (get_user(irq, uaddr))
			return -EFAULT;

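		/* The PMU overflow interrupt can be a PPI or a valid SPI. */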
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(vcpu->kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
		if (kvm_arm_support_pmu_v3() &&
		    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return 0;
	}

	return -ENXIO;
}