linux/virt/kvm/arm/psci.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>

#include <kvm/arm_psci.h>

/*
 * This is an implementation of the Power State Coordination Interface
 * as described in ARM document number ARM DEN 0022A.
 */

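/*
 * AFFINITY_MASK(n) builds a mask that keeps the affinity fields at level n
 * and above; each field is MPIDR_LEVEL_BITS wide, so e.g. AFFINITY_MASK(1)
 * zeroes the Aff0 field and keeps Aff1 and up.
 */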
#define AFFINITY_MASK(level)    ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)

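/*
 * Per the SMC Calling Convention, the function ID is passed in r0 (w0)
 * and the arguments in r1 onwards; results are returned in r0-r3.
 */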
static u32 smccc_get_function(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 0);
}

static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 1);
}

static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 2);
}

static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 3);
}

static void smccc_set_retval(struct kvm_vcpu *vcpu,
                             unsigned long a0,
                             unsigned long a1,
                             unsigned long a2,
                             unsigned long a3)
{
        vcpu_set_reg(vcpu, 0, a0);
        vcpu_set_reg(vcpu, 1, a1);
        vcpu_set_reg(vcpu, 2, a2);
        vcpu_set_reg(vcpu, 3, a3);
}

static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
        if (affinity_level <= 3)
                return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);

        return 0;
}

static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
        /*
         * NOTE: For simplicity, we treat VCPU suspend emulation the
         * same as WFI (Wait-for-interrupt) emulation.
         *
         * This means that for KVM the wakeup events are interrupts and
         * this is consistent with the intended use of StateID as
         * described in section 5.4.1 of the PSCI v0.2 specification
         * (ARM DEN 0022A).
         *
         * Further, we also treat a power-down request the same as a
         * stand-by request, as per section 5.4.2 clause 3 of the PSCI
         * v0.2 specification (ARM DEN 0022A). This means that all
         * suspend states for KVM will preserve the register state.
         */
        kvm_vcpu_block(vcpu);
        kvm_clear_request(KVM_REQ_UNHALT, vcpu);

        return PSCI_RET_SUCCESS;
}

static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
        vcpu->arch.power_off = true;
        kvm_make_request(KVM_REQ_SLEEP, vcpu);
        kvm_vcpu_kick(vcpu);
}

static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
        struct vcpu_reset_state *reset_state;
        struct kvm *kvm = source_vcpu->kvm;
        struct kvm_vcpu *vcpu = NULL;
        unsigned long cpu_id;

        cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
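        /*
         * A 32-bit caller can only pass a 32-bit MPIDR; ~((u32) 0) is
         * 0xffffffff and zero-extends to unsigned long, so the AND below
         * keeps only the low 32 bits of the requested MPIDR.
         */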
        if (vcpu_mode_is_32bit(source_vcpu))
                cpu_id &= ~((u32) 0);

        vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);

        /*
         * Make sure the caller requested a valid CPU and that the CPU is
         * turned off.
         */
        if (!vcpu)
                return PSCI_RET_INVALID_PARAMS;
        if (!vcpu->arch.power_off) {
                if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
                        return PSCI_RET_ALREADY_ON;
                else
                        return PSCI_RET_INVALID_PARAMS;
        }

        reset_state = &vcpu->arch.reset_state;

        reset_state->pc = smccc_get_arg2(source_vcpu);

        /* Propagate caller endianness */
        reset_state->be = kvm_vcpu_is_be(source_vcpu);

        /*
         * NOTE: We always update r0 (or x0) because for PSCI v0.1
         * the general purpose registers are undefined upon CPU_ON.
         */
        reset_state->r0 = smccc_get_arg3(source_vcpu);

        WRITE_ONCE(reset_state->reset, true);
        kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);

        /*
         * Make sure the reset request is observed if the change to
         * power_off is observed.
         */
        smp_wmb();

        vcpu->arch.power_off = false;
        kvm_vcpu_wake_up(vcpu);

        return PSCI_RET_SUCCESS;
}

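/*
 * AFFINITY_INFO reports ON if any VCPU whose MPIDR matches the target
 * affinity group is powered on; lowest_affinity_level selects how many
 * low-order affinity fields are ignored in the comparison.
 */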
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
        int i, matching_cpus = 0;
        unsigned long mpidr;
        unsigned long target_affinity;
        unsigned long target_affinity_mask;
        unsigned long lowest_affinity_level;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *tmp;

        target_affinity = smccc_get_arg1(vcpu);
        lowest_affinity_level = smccc_get_arg2(vcpu);

        /* Determine target affinity mask */
        target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
        if (!target_affinity_mask)
                return PSCI_RET_INVALID_PARAMS;

        /* Ignore other bits of target affinity */
        target_affinity &= target_affinity_mask;

        /*
         * If one or more VCPUs matching the target affinity are running,
         * report ON; otherwise report OFF.
         */
        kvm_for_each_vcpu(i, tmp, kvm) {
                mpidr = kvm_vcpu_get_mpidr_aff(tmp);
                if ((mpidr & target_affinity_mask) == target_affinity) {
                        matching_cpus++;
                        if (!tmp->arch.power_off)
                                return PSCI_0_2_AFFINITY_LEVEL_ON;
                }
        }

        if (!matching_cpus)
                return PSCI_RET_INVALID_PARAMS;

        return PSCI_0_2_AFFINITY_LEVEL_OFF;
}

static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
        int i;
        struct kvm_vcpu *tmp;

        /*
         * The KVM ABI specifies that a system event exit may call KVM_RUN
         * again and may perform shutdown/reboot at a later time than when
         * the actual request was made. Since we are implementing PSCI, and
         * a caller of PSCI reboot or shutdown expects the system to shut
         * down or reboot immediately, make sure that no VCPU is run after
         * this call is handled and before the VCPUs have been
         * re-initialized.
         */
        kvm_for_each_vcpu(i, tmp, vcpu->kvm)
                tmp->arch.power_off = true;
        kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

        memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
        vcpu->run->system_event.type = type;
        vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
        kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}

static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
        kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}

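/*
 * PSCI v0.2 dispatch. The return value follows the kvm_psci_call()
 * convention: 1 resumes the guest, 0 exits to user space (used for
 * SYSTEM_OFF and SYSTEM_RESET, which user space must act upon).
 */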
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        u32 psci_fn = smccc_get_function(vcpu);
        unsigned long val;
        int ret = 1;

        switch (psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                /*
                 * Bits[31:16] = Major Version = 0
                 * Bits[15:0] = Minor Version = 2
                 */
                val = KVM_ARM_PSCI_0_2;
                break;
        case PSCI_0_2_FN_CPU_SUSPEND:
        case PSCI_0_2_FN64_CPU_SUSPEND:
                val = kvm_psci_vcpu_suspend(vcpu);
                break;
        case PSCI_0_2_FN_CPU_OFF:
                kvm_psci_vcpu_off(vcpu);
                val = PSCI_RET_SUCCESS;
                break;
        case PSCI_0_2_FN_CPU_ON:
        case PSCI_0_2_FN64_CPU_ON:
                mutex_lock(&kvm->lock);
                val = kvm_psci_vcpu_on(vcpu);
                mutex_unlock(&kvm->lock);
                break;
        case PSCI_0_2_FN_AFFINITY_INFO:
        case PSCI_0_2_FN64_AFFINITY_INFO:
                val = kvm_psci_vcpu_affinity_info(vcpu);
                break;
        case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
                /*
                 * Either the Trusted OS is multicore-capable (MP) and
                 * hence does not require migration, or no Trusted OS
                 * is present.
                 */
                val = PSCI_0_2_TOS_MP;
                break;
        case PSCI_0_2_FN_SYSTEM_OFF:
                kvm_psci_system_off(vcpu);
                /*
                 * We shouldn't be going back to the guest VCPU after
                 * receiving a SYSTEM_OFF request.
                 *
                 * If user space accidentally or deliberately resumes the
                 * guest VCPU after a SYSTEM_OFF request, the guest VCPU
                 * should see an internal failure from the PSCI return
                 * value. To achieve this, we preload r0 (or x0) with the
                 * PSCI return value INTERNAL_FAILURE.
                 */
                val = PSCI_RET_INTERNAL_FAILURE;
                ret = 0;
                break;
        case PSCI_0_2_FN_SYSTEM_RESET:
                kvm_psci_system_reset(vcpu);
                /*
                 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
                 * with PSCI return value INTERNAL_FAILURE.
                 */
                val = PSCI_RET_INTERNAL_FAILURE;
                ret = 0;
                break;
        default:
                val = PSCI_RET_NOT_SUPPORTED;
                break;
        }

        smccc_set_retval(vcpu, val, 0, 0, 0);
        return ret;
}

static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
{
        u32 psci_fn = smccc_get_function(vcpu);
        u32 feature;
        unsigned long val;
        int ret = 1;

        switch (psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                val = KVM_ARM_PSCI_1_0;
                break;
        case PSCI_1_0_FN_PSCI_FEATURES:
                feature = smccc_get_arg1(vcpu);
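                /*
                 * PSCI_FEATURES advertises all of the implemented v0.2
                 * functions plus PSCI_FEATURES itself; per SMCCC v1.1,
                 * querying ARM_SMCCC_VERSION_FUNC_ID here is also how a
                 * guest discovers SMCCC v1.1 support.
                 */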
                switch (feature) {
                case PSCI_0_2_FN_PSCI_VERSION:
                case PSCI_0_2_FN_CPU_SUSPEND:
                case PSCI_0_2_FN64_CPU_SUSPEND:
                case PSCI_0_2_FN_CPU_OFF:
                case PSCI_0_2_FN_CPU_ON:
                case PSCI_0_2_FN64_CPU_ON:
                case PSCI_0_2_FN_AFFINITY_INFO:
                case PSCI_0_2_FN64_AFFINITY_INFO:
                case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
                case PSCI_0_2_FN_SYSTEM_OFF:
                case PSCI_0_2_FN_SYSTEM_RESET:
                case PSCI_1_0_FN_PSCI_FEATURES:
                case ARM_SMCCC_VERSION_FUNC_ID:
                        val = 0;
                        break;
                default:
                        val = PSCI_RET_NOT_SUPPORTED;
                        break;
                }
                break;
        default:
                return kvm_psci_0_2_call(vcpu);
        }

        smccc_set_retval(vcpu, val, 0, 0, 0);
        return ret;
}

static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        u32 psci_fn = smccc_get_function(vcpu);
        unsigned long val;

        switch (psci_fn) {
        case KVM_PSCI_FN_CPU_OFF:
                kvm_psci_vcpu_off(vcpu);
                val = PSCI_RET_SUCCESS;
                break;
        case KVM_PSCI_FN_CPU_ON:
                mutex_lock(&kvm->lock);
                val = kvm_psci_vcpu_on(vcpu);
                mutex_unlock(&kvm->lock);
                break;
        default:
                val = PSCI_RET_NOT_SUPPORTED;
                break;
        }

        smccc_set_retval(vcpu, val, 0, 0, 0);
        return 1;
}

/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC instructions.
 * The calling convention is similar to SMC calls to the secure world
 * where the function number is placed in r0.
 *
 * This function returns: > 0 (success), 0 (success but exit to user
 * space), and < 0 (errors)
 *
 * Errors:
 * -EINVAL: Unrecognized PSCI function
 */
static int kvm_psci_call(struct kvm_vcpu *vcpu)
{
        switch (kvm_psci_version(vcpu, vcpu->kvm)) {
        case KVM_ARM_PSCI_1_0:
                return kvm_psci_1_0_call(vcpu);
        case KVM_ARM_PSCI_0_2:
                return kvm_psci_0_2_call(vcpu);
        case KVM_ARM_PSCI_0_1:
                return kvm_psci_0_1_call(vcpu);
        default:
                return -EINVAL;
        }
}

int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
{
        u32 func_id = smccc_get_function(vcpu);
        u32 val = SMCCC_RET_NOT_SUPPORTED;
        u32 feature;

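        /*
         * SMCCC v1.1 arch features: WORKAROUND_1 is the Spectre-v2
         * branch predictor hardening call, WORKAROUND_2 the SSBD
         * (Spectre-v4) mitigation control. Anything else is routed to
         * the PSCI handler.
         */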
        switch (func_id) {
        case ARM_SMCCC_VERSION_FUNC_ID:
                val = ARM_SMCCC_VERSION_1_1;
                break;
        case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
                feature = smccc_get_arg1(vcpu);
                switch (feature) {
                case ARM_SMCCC_ARCH_WORKAROUND_1:
                        switch (kvm_arm_harden_branch_predictor()) {
                        case KVM_BP_HARDEN_UNKNOWN:
                                break;
                        case KVM_BP_HARDEN_WA_NEEDED:
                                val = SMCCC_RET_SUCCESS;
                                break;
                        case KVM_BP_HARDEN_NOT_REQUIRED:
                                val = SMCCC_RET_NOT_REQUIRED;
                                break;
                        }
                        break;
                case ARM_SMCCC_ARCH_WORKAROUND_2:
                        switch (kvm_arm_have_ssbd()) {
                        case KVM_SSBD_FORCE_DISABLE:
                        case KVM_SSBD_UNKNOWN:
                                break;
                        case KVM_SSBD_KERNEL:
                                val = SMCCC_RET_SUCCESS;
                                break;
                        case KVM_SSBD_FORCE_ENABLE:
                        case KVM_SSBD_MITIGATED:
                                val = SMCCC_RET_NOT_REQUIRED;
                                break;
                        }
                        break;
                }
                break;
        default:
                return kvm_psci_call(vcpu);
        }

        smccc_set_retval(vcpu, val, 0, 0, 0);
        return 1;
}

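/*
 * Firmware pseudo-registers: the PSCI version and the two workaround
 * levels are exposed to user space through the ONE_REG interface so
 * that a guest's view of the (virtual) firmware can be saved and
 * restored across migration.
 */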
int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
        return 3;               /* PSCI version and two workaround registers */
}

int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices++))
                return -EFAULT;

        if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1, uindices++))
                return -EFAULT;

        if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++))
                return -EFAULT;

        return 0;
}

#define KVM_REG_FEATURE_LEVEL_WIDTH     4
#define KVM_REG_FEATURE_LEVEL_MASK      (BIT(KVM_REG_FEATURE_LEVEL_WIDTH) - 1)

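/*
 * The low KVM_REG_FEATURE_LEVEL_WIDTH bits of a workaround register
 * encode its level; WORKAROUND_2 additionally carries the ENABLED flag
 * just above the level field.
 */
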
/*
 * Convert the workaround level into an easy-to-compare number, where higher
 * values mean better protection.
 */
static int get_kernel_wa_level(u64 regid)
{
        switch (regid) {
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
                switch (kvm_arm_harden_branch_predictor()) {
                case KVM_BP_HARDEN_UNKNOWN:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
                case KVM_BP_HARDEN_WA_NEEDED:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
                case KVM_BP_HARDEN_NOT_REQUIRED:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
                }
                return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
                switch (kvm_arm_have_ssbd()) {
                case KVM_SSBD_FORCE_DISABLE:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
                case KVM_SSBD_KERNEL:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL;
                case KVM_SSBD_FORCE_ENABLE:
                case KVM_SSBD_MITIGATED:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
                case KVM_SSBD_UNKNOWN:
                default:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN;
                }
        }

        return -EINVAL;
}

int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        void __user *uaddr = (void __user *)(long)reg->addr;
        u64 val;

        switch (reg->id) {
        case KVM_REG_ARM_PSCI_VERSION:
                val = kvm_psci_version(vcpu, vcpu->kvm);
                break;
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
                val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
                break;
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
                val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;

                if (val == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
                    kvm_arm_get_vcpu_workaround_2_flag(vcpu))
                        val |= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED;
                break;
        default:
                return -ENOENT;
        }

        if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        void __user *uaddr = (void __user *)(long)reg->addr;
        u64 val;
        int wa_level;

        if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        switch (reg->id) {
        case KVM_REG_ARM_PSCI_VERSION:
        {
                bool wants_02;

                wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);

                switch (val) {
                case KVM_ARM_PSCI_0_1:
                        if (wants_02)
                                return -EINVAL;
                        vcpu->kvm->arch.psci_version = val;
                        return 0;
                case KVM_ARM_PSCI_0_2:
                case KVM_ARM_PSCI_1_0:
                        if (!wants_02)
                                return -EINVAL;
                        vcpu->kvm->arch.psci_version = val;
                        return 0;
                }
                break;
        }

        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
                if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
                        return -EINVAL;

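                /*
                 * User space may only request a level at or below what
                 * this kernel can provide; a higher level cannot be
                 * faked.
                 */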
                if (get_kernel_wa_level(reg->id) < val)
                        return -EINVAL;

                return 0;

        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
                if (val & ~(KVM_REG_FEATURE_LEVEL_MASK |
                            KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
                        return -EINVAL;

                wa_level = val & KVM_REG_FEATURE_LEVEL_MASK;

                if (get_kernel_wa_level(reg->id) < wa_level)
                        return -EINVAL;

                /* The enabled bit must not be set unless the level is AVAIL. */
                if (wa_level != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
                    wa_level != val)
                        return -EINVAL;

                /* Are we finished or do we need to check the enable bit? */
                if (kvm_arm_have_ssbd() != KVM_SSBD_KERNEL)
                        return 0;

                /*
                 * If this kernel supports switching the workaround on or
                 * off, make sure the flag matches the requested setting.
                 */
                switch (wa_level) {
                case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
                        kvm_arm_set_vcpu_workaround_2_flag(vcpu,
                            val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED);
                        break;
                case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
                        kvm_arm_set_vcpu_workaround_2_flag(vcpu, true);
                        break;
                }

                return 0;
        default:
                return -ENOENT;
        }

        return -EINVAL;
}