linux/virt/kvm/arm/pmu.c
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
        u64 counter, reg, enabled, running;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx];

        reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
        counter = __vcpu_sys_reg(vcpu, reg);

        /*
         * The real counter value is equal to the value of the counter
         * register plus the value the perf event has counted.
         */
        if (pmc->perf_event)
                counter += perf_event_read_value(pmc->perf_event, &enabled,
                                                 &running);

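        /*
         * Mask to the counter width: 32 bits, or 64 bits for the cycle
         * counter when PMCR_EL0.LC is set (see kvm_pmu_handle_pmcr()).
         */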
        return counter & pmc->bitmask;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
        u64 reg;

        reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
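        /*
         * Store only the delta between the requested value and the
         * current logical value: a running perf event keeps counting,
         * and kvm_pmu_get_counter_value() adds its contribution back.
         */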
        __vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
        u64 counter, reg;

        if (pmc->perf_event) {
                counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
                reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
                       ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
                __vcpu_sys_reg(vcpu, reg) = counter;
                perf_event_disable(pmc->perf_event);
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
        }
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
                pmu->pmc[i].idx = i;
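                /*
                 * All counters behave as 32-bit on reset; PMCR_EL0.LC
                 * may later widen the cycle counter to 64 bits.
                 */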
                pmu->pmc[i].bitmask = 0xffffffffUL;
        }
}

/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                struct kvm_pmc *pmc = &pmu->pmc[i];

                if (pmc->perf_event) {
                        perf_event_disable(pmc->perf_event);
                        perf_event_release_kernel(pmc->perf_event);
                        pmc->perf_event = NULL;
                }
        }
}

u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
        u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

        val &= ARMV8_PMU_PMCR_N_MASK;
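        /*
         * PMCR_EL0.N holds the number of implemented event counters;
         * the cycle counter (bit 31) is always valid.
         */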
        if (val == 0)
                return BIT(ARMV8_PMU_CYCLE_IDX);
        else
                return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter - enable selected PMU counter
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

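        /* Nothing to do unless the PMU is globally enabled (PMCR_EL0.E) */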
        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
                return;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                if (!(val & BIT(i)))
                        continue;

                pmc = &pmu->pmc[i];
                if (pmc->perf_event) {
                        perf_event_enable(pmc->perf_event);
                        if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
                                kvm_debug("failed to enable perf event\n");
                }
        }
}

/**
 * kvm_pmu_disable_counter - disable selected PMU counter
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

        if (!val)
                return;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                if (!(val & BIT(i)))
                        continue;

                pmc = &pmu->pmc[i];
                if (pmc->perf_event)
                        perf_event_disable(pmc->perf_event);
        }
}

static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
        u64 reg = 0;

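        /*
         * An overflow raises the interrupt line only if the counter is
         * enabled and its overflow interrupt is enabled as well.
         */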
        if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
                reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
                reg &= kvm_pmu_valid_counter_mask(vcpu);
        }

        return reg;
}

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        bool overflow;

        if (!kvm_arm_pmu_v3_ready(vcpu))
                return;

        overflow = !!kvm_pmu_overflow_status(vcpu);
        if (pmu->irq_level == overflow)
                return;

        pmu->irq_level = overflow;

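        /*
         * With an in-kernel irqchip the new level is forwarded to the
         * vgic here; with a userspace irqchip it is propagated via
         * kvm_pmu_update_run() on the next exit instead.
         */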
        if (likely(irqchip_in_kernel(vcpu->kvm))) {
                int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                                              pmu->irq_num, overflow, pmu);
                WARN_ON(ret);
        }
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
        bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

        if (likely(irqchip_in_kernel(vcpu->kvm)))
                return false;

        return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
        struct kvm_sync_regs *regs = &vcpu->run->s.regs;

        /* Populate the PMU overflow bit for user space */
        regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
        if (vcpu->arch.pmu.irq_level)
                regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
        kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
        kvm_pmu_update_state(vcpu);
}

static inline struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu;
        struct kvm_vcpu_arch *vcpu_arch;

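        /* Step back from &pmc[idx] to &pmc[0], then find the enclosing vcpu */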
        pmc -= pmc->idx;
        pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
        vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
        return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
                                  struct perf_sample_data *data,
                                  struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
        int idx = pmc->idx;

        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

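        /* Kick the vcpu only if the overflow actually raises the line */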
        if (kvm_pmu_overflow_status(vcpu)) {
                kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
                kvm_vcpu_kick(vcpu);
        }
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        u64 type, enable, reg;

        if (val == 0)
                return;

        enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
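        /* SW_INCR applies to the event counters only, never the cycle counter */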
        for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
                if (!(val & BIT(i)))
                        continue;
                type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
                       & ARMV8_PMU_EVTYPE_EVENT;
                if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
                    && (enable & BIT(i))) {
                        reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
                        reg = lower_32_bits(reg);
                        __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
                        /* A 32-bit wrap to zero sets the overflow flag */
                        if (!reg)
                                __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
                }
        }
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;
        u64 mask;
        int i;

        mask = kvm_pmu_valid_counter_mask(vcpu);
        if (val & ARMV8_PMU_PMCR_E) {
                kvm_pmu_enable_counter(vcpu,
                       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
        } else {
                kvm_pmu_disable_counter(vcpu, mask);
        }

        if (val & ARMV8_PMU_PMCR_C)
                kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

        if (val & ARMV8_PMU_PMCR_P) {
                for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
                        kvm_pmu_set_counter_value(vcpu, i, 0);
        }

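        /* PMCR_EL0.LC makes the cycle counter a full 64-bit counter */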
        if (val & ARMV8_PMU_PMCR_LC) {
                pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX];
                pmc->bitmask = 0xffffffffffffffffUL;
        }
}

static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
               (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the guest accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
                                    u64 select_idx)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx];
        struct perf_event *event;
        struct perf_event_attr attr;
        u64 eventsel, counter;

        kvm_pmu_stop_counter(vcpu, pmc);
        eventsel = data & ARMV8_PMU_EVTYPE_EVENT;

        /* Software increment event doesn't need to be backed by a perf event */
        if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
            select_idx != ARMV8_PMU_CYCLE_IDX)
                return;

        memset(&attr, 0, sizeof(struct perf_event_attr));
        attr.type = PERF_TYPE_RAW;
        attr.size = sizeof(attr);
        attr.pinned = 1;
        attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, select_idx);
        attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
        attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
        attr.exclude_hv = 1; /* Don't count EL2 events */
        attr.exclude_host = 1; /* Don't count host events */
        attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ?
                ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;

        counter = kvm_pmu_get_counter_value(vcpu, select_idx);
        /*
         * The initial sample period (overflow count) of an event: program
         * the distance to the counter's wrap point, so the perf event
         * overflows exactly when the emulated counter would.
         */
        attr.sample_period = (-counter) & pmc->bitmask;

        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 kvm_pmu_perf_overflow, pmc);
        if (IS_ERR(event)) {
                pr_err_once("kvm: pmu event creation failed %ld\n",
                            PTR_ERR(event));
                return;
        }

        pmc->perf_event = event;
}

bool kvm_arm_support_pmu_v3(void)
{
        /*
         * Check if HW_PERF_EVENTS are supported by checking the number of
         * hardware performance counters. This ensures that a physical PMU
         * is present and that CONFIG_PERF_EVENTS is enabled.
         */
        return (perf_num_counters() > 0);
}

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
        if (!vcpu->arch.pmu.created)
                return 0;

        /*
         * A valid interrupt configuration for the PMU is either to have a
         * properly configured interrupt number and using an in-kernel
         * irqchip, or to not have an in-kernel GIC and not set an IRQ.
         */
        if (irqchip_in_kernel(vcpu->kvm)) {
                int irq = vcpu->arch.pmu.irq_num;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -EINVAL;

                /*
                 * If we are using an in-kernel vgic, at this point we know
                 * the vgic will be initialized, so we can check the PMU irq
                 * number against the dimensions of the vgic and make sure
                 * it's valid.
                 */
                if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
                        return -EINVAL;
        } else if (kvm_arm_pmu_irq_initialized(vcpu)) {
                return -EINVAL;
        }

        kvm_pmu_vcpu_reset(vcpu);
        vcpu->arch.pmu.ready = true;

        return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
        if (!kvm_arm_support_pmu_v3())
                return -ENODEV;

        if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
                return -ENXIO;

        if (vcpu->arch.pmu.created)
                return -EBUSY;

        if (irqchip_in_kernel(vcpu->kvm)) {
                int ret;

                /*
                 * If using the PMU with an in-kernel virtual GIC
                 * implementation, we require the GIC to be already
                 * initialized when initializing the PMU.
                 */
                if (!vgic_initialized(vcpu->kvm))
                        return -ENODEV;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -ENXIO;

                ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
                                         &vcpu->arch.pmu);
                if (ret)
                        return ret;
        }

        vcpu->arch.pmu.created = true;
        return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
        int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        continue;

                if (irq_is_ppi(irq)) {
                        if (vcpu->arch.pmu.irq_num != irq)
                                return false;
                } else {
                        if (vcpu->arch.pmu.irq_num == irq)
                                return false;
                }
        }

        return true;
}

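/*
 * Userspace drives the attributes below through the vcpu device attribute
 * API. A minimal sketch, assuming "vcpu_fd" was obtained via
 * KVM_CREATE_VCPU and "irq" holds the desired PPI/SPI number (illustrative
 * only; error handling omitted):
 *
 *      struct kvm_device_attr attr = {
 *              .group = KVM_ARM_VCPU_PMU_V3_CTRL,
 *              .attr  = KVM_ARM_VCPU_PMU_V3_IRQ,
 *              .addr  = (u64)&irq,
 *      };
 *      ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);  // set the IRQ first
 *      attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
 *      attr.addr = 0;
 *      ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);  // then init the PMU
 */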
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int irq;

                if (!irqchip_in_kernel(vcpu->kvm))
                        return -EINVAL;

                if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
                        return -ENODEV;

                if (get_user(irq, uaddr))
                        return -EFAULT;

                /* The PMU overflow interrupt can be a PPI or a valid SPI. */
                if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
                        return -EINVAL;

                if (!pmu_irq_is_valid(vcpu->kvm, irq))
                        return -EINVAL;

                if (kvm_arm_pmu_irq_initialized(vcpu))
                        return -EBUSY;

                kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
                vcpu->arch.pmu.irq_num = irq;
                return 0;
        }
        case KVM_ARM_VCPU_PMU_V3_INIT:
                return kvm_arm_pmu_v3_init(vcpu);
        }

        return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int irq;

                if (!irqchip_in_kernel(vcpu->kvm))
                        return -EINVAL;

                if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
                        return -ENODEV;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -ENXIO;

                irq = vcpu->arch.pmu.irq_num;
                return put_user(irq, uaddr);
        }
        }

        return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ:
        case KVM_ARM_VCPU_PMU_V3_INIT:
                if (kvm_arm_support_pmu_v3() &&
                    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
                        return 0;
        }

        return -ENXIO;
}