linux/virt/kvm/arm/psci.c
/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <linux/wait.h>

#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>

#include <kvm/arm_psci.h>

/*
 * This is an implementation of the Power State Coordination Interface
 * as described in ARM document number ARM DEN 0022A.
 */

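/*
 * Build a mask that keeps the affinity fields at and above the given
 * level and clears everything below it. With MPIDR_LEVEL_BITS == 8
 * (as on arm and arm64), AFFINITY_MASK(1) is ~0xffUL, i.e. Aff0 is
 * masked out while Aff1 and above are preserved.
 */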
#define AFFINITY_MASK(level)    ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)

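/*
 * Per the SMC Calling Convention (ARM DEN 0028), the function ID is
 * passed in r0/x0 and the first arguments in r1-r3/x1-x3, with results
 * returned in r0-r3/x0-x3. These helpers map that convention onto the
 * guest's general purpose registers.
 */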
static u32 smccc_get_function(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 0);
}

static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 1);
}

static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 2);
}

static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
{
        return vcpu_get_reg(vcpu, 3);
}

static void smccc_set_retval(struct kvm_vcpu *vcpu,
                             unsigned long a0,
                             unsigned long a1,
                             unsigned long a2,
                             unsigned long a3)
{
        vcpu_set_reg(vcpu, 0, a0);
        vcpu_set_reg(vcpu, 1, a1);
        vcpu_set_reg(vcpu, 2, a2);
        vcpu_set_reg(vcpu, 3, a3);
}

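/*
 * Convert an AFFINITY_INFO "lowest affinity level" argument into an
 * MPIDR mask. Levels above 3 are invalid and yield an empty mask.
 */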
static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
        if (affinity_level <= 3)
                return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);

        return 0;
}

static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
        /*
         * NOTE: For simplicity, we make VCPU suspend emulation the
         * same as WFI (Wait-for-interrupt) emulation.
         *
         * This means that for KVM the wakeup events are interrupts,
         * which is consistent with the intended use of StateID as
         * described in section 5.4.1 of the PSCI v0.2 specification
         * (ARM DEN 0022A).
         *
         * Further, we also treat a power-down request the same as a
         * stand-by request, as per section 5.4.2 clause 3 of the PSCI
         * v0.2 specification (ARM DEN 0022A). This means all suspend
         * states for KVM will preserve the register state.
         */
        kvm_vcpu_block(vcpu);
        kvm_clear_request(KVM_REQ_UNHALT, vcpu);

        return PSCI_RET_SUCCESS;
}

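/*
 * Mark the calling VCPU as powered off, post a KVM_REQ_SLEEP request
 * and kick the VCPU so that it goes to sleep the next time it enters
 * the run loop.
 */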
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
        vcpu->arch.power_off = true;
        kvm_make_request(KVM_REQ_SLEEP, vcpu);
        kvm_vcpu_kick(vcpu);
}

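/*
 * Emulate CPU_ON: validate the target MPIDR, make sure the target VCPU
 * is currently off, then reset it, set its entry point and context ID,
 * and wake it up. Both callers hold kvm->lock around this function.
 */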
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
        struct kvm *kvm = source_vcpu->kvm;
        struct kvm_vcpu *vcpu = NULL;
        struct swait_queue_head *wq;
        unsigned long cpu_id;
        unsigned long context_id;
        phys_addr_t target_pc;

        cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
        if (vcpu_mode_is_32bit(source_vcpu))
                cpu_id &= ~((u32) 0);   /* a 32-bit caller passes a 32-bit MPIDR */

        vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);

        /*
         * Make sure the caller requested a valid CPU and that the CPU is
         * turned off.
         */
        if (!vcpu)
                return PSCI_RET_INVALID_PARAMS;
        if (!vcpu->arch.power_off) {
                if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
                        return PSCI_RET_ALREADY_ON;
                else
                        return PSCI_RET_INVALID_PARAMS;
        }

        target_pc = smccc_get_arg2(source_vcpu);
        context_id = smccc_get_arg3(source_vcpu);

        kvm_reset_vcpu(vcpu);

        /* Gracefully handle Thumb2 entry point */
        if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
                target_pc &= ~((phys_addr_t) 1);
                vcpu_set_thumb(vcpu);
        }

        /* Propagate caller endianness */
        if (kvm_vcpu_is_be(source_vcpu))
                kvm_vcpu_set_be(vcpu);

        *vcpu_pc(vcpu) = target_pc;
        /*
         * NOTE: We always update r0 (or x0) because for PSCI v0.1
         * the general purpose registers are undefined upon CPU_ON.
         */
        smccc_set_retval(vcpu, context_id, 0, 0, 0);
        vcpu->arch.power_off = false;
        smp_mb();               /* Make sure the above is visible */

        wq = kvm_arch_vcpu_wq(vcpu);
        swake_up(wq);

        return PSCI_RET_SUCCESS;
}

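/*
 * Emulate AFFINITY_INFO: report ON if at least one VCPU matching the
 * target affinity is running, OFF if all matching VCPUs are powered
 * off, and INVALID_PARAMS if nothing matches.
 */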
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
        int i, matching_cpus = 0;
        unsigned long mpidr;
        unsigned long target_affinity;
        unsigned long target_affinity_mask;
        unsigned long lowest_affinity_level;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *tmp;

        target_affinity = smccc_get_arg1(vcpu);
        lowest_affinity_level = smccc_get_arg2(vcpu);

        /* Determine target affinity mask */
        target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
        if (!target_affinity_mask)
                return PSCI_RET_INVALID_PARAMS;

        /* Ignore other bits of target affinity */
        target_affinity &= target_affinity_mask;

        /*
         * If one or more VCPUs matching the target affinity are
         * running, report ON, otherwise report OFF.
         */
        kvm_for_each_vcpu(i, tmp, kvm) {
                mpidr = kvm_vcpu_get_mpidr_aff(tmp);
                if ((mpidr & target_affinity_mask) == target_affinity) {
                        matching_cpus++;
                        if (!tmp->arch.power_off)
                                return PSCI_0_2_AFFINITY_LEVEL_ON;
                }
        }

        if (!matching_cpus)
                return PSCI_RET_INVALID_PARAMS;

        return PSCI_0_2_AFFINITY_LEVEL_OFF;
}

static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
        int i;
        struct kvm_vcpu *tmp;

        /*
         * The KVM ABI specifies that a system event exit may call KVM_RUN
         * again and may perform shutdown/reboot at a later time than when
         * the actual request was made.  Since we are implementing PSCI
         * and a caller of PSCI reboot and shutdown expects that the
         * system shuts down or reboots immediately, let's make sure that
         * VCPUs are not run after this call is handled and before the
         * VCPUs have been re-initialized.
         */
        kvm_for_each_vcpu(i, tmp, vcpu->kvm)
                tmp->arch.power_off = true;
        kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

        memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
        vcpu->run->system_event.type = type;
        vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
        kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}

static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
        kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}

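/*
 * Handle a PSCI v0.2 call. Returns 1 if the guest should be resumed,
 * or 0 if the call requires an exit to user space (SYSTEM_OFF and
 * SYSTEM_RESET).
 */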
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        u32 psci_fn = smccc_get_function(vcpu);
        unsigned long val;
        int ret = 1;

        switch (psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                /*
                 * Bits[31:16] = Major Version = 0
                 * Bits[15:0] = Minor Version = 2
                 */
                val = KVM_ARM_PSCI_0_2;
                break;
        case PSCI_0_2_FN_CPU_SUSPEND:
        case PSCI_0_2_FN64_CPU_SUSPEND:
                val = kvm_psci_vcpu_suspend(vcpu);
                break;
        case PSCI_0_2_FN_CPU_OFF:
                kvm_psci_vcpu_off(vcpu);
                val = PSCI_RET_SUCCESS;
                break;
        case PSCI_0_2_FN_CPU_ON:
        case PSCI_0_2_FN64_CPU_ON:
                mutex_lock(&kvm->lock);
                val = kvm_psci_vcpu_on(vcpu);
                mutex_unlock(&kvm->lock);
                break;
        case PSCI_0_2_FN_AFFINITY_INFO:
        case PSCI_0_2_FN64_AFFINITY_INFO:
                val = kvm_psci_vcpu_affinity_info(vcpu);
                break;
        case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
                /*
                 * Either the Trusted OS is MP and hence does not
                 * require migration, or no Trusted OS is present.
                 */
                val = PSCI_0_2_TOS_MP;
                break;
        case PSCI_0_2_FN_SYSTEM_OFF:
                kvm_psci_system_off(vcpu);
                /*
                 * We shouldn't be going back to the guest VCPU after
                 * receiving a SYSTEM_OFF request.
                 *
                 * If user space accidentally/deliberately resumes the
                 * guest VCPU after a SYSTEM_OFF request, the guest
                 * VCPU should see an internal failure from the PSCI
                 * return value. To achieve this, we preload r0 (or x0)
                 * with PSCI return value INTERNAL_FAILURE.
                 */
                val = PSCI_RET_INTERNAL_FAILURE;
                ret = 0;
                break;
        case PSCI_0_2_FN_SYSTEM_RESET:
                kvm_psci_system_reset(vcpu);
                /*
                 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
                 * with PSCI return value INTERNAL_FAILURE.
                 */
                val = PSCI_RET_INTERNAL_FAILURE;
                ret = 0;
                break;
        default:
                val = PSCI_RET_NOT_SUPPORTED;
                break;
        }

        smccc_set_retval(vcpu, val, 0, 0, 0);
        return ret;
}

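/*
 * Handle a PSCI v1.0 call. Only PSCI_VERSION and PSCI_FEATURES differ
 * from v0.2; everything else falls through to the v0.2 handler.
 */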
static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
{
        u32 psci_fn = smccc_get_function(vcpu);
        u32 feature;
        unsigned long val;
        int ret = 1;

        switch (psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                val = KVM_ARM_PSCI_1_0;
                break;
        case PSCI_1_0_FN_PSCI_FEATURES:
                feature = smccc_get_arg1(vcpu);
                switch (feature) {
                case PSCI_0_2_FN_PSCI_VERSION:
                case PSCI_0_2_FN_CPU_SUSPEND:
                case PSCI_0_2_FN64_CPU_SUSPEND:
                case PSCI_0_2_FN_CPU_OFF:
                case PSCI_0_2_FN_CPU_ON:
                case PSCI_0_2_FN64_CPU_ON:
                case PSCI_0_2_FN_AFFINITY_INFO:
                case PSCI_0_2_FN64_AFFINITY_INFO:
                case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
                case PSCI_0_2_FN_SYSTEM_OFF:
                case PSCI_0_2_FN_SYSTEM_RESET:
                case PSCI_1_0_FN_PSCI_FEATURES:
                case ARM_SMCCC_VERSION_FUNC_ID:
                        val = 0;
                        break;
                default:
                        val = PSCI_RET_NOT_SUPPORTED;
                        break;
                }
                break;
        default:
                return kvm_psci_0_2_call(vcpu);
        }

        smccc_set_retval(vcpu, val, 0, 0, 0);
        return ret;
}

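/*
 * Handle a PSCI v0.1 call. v0.1 does not fix the function IDs, so KVM
 * uses its own KVM_PSCI_FN_* values; only CPU_OFF and CPU_ON are
 * implemented here.
 */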
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        u32 psci_fn = smccc_get_function(vcpu);
        unsigned long val;

        switch (psci_fn) {
        case KVM_PSCI_FN_CPU_OFF:
                kvm_psci_vcpu_off(vcpu);
                val = PSCI_RET_SUCCESS;
                break;
        case KVM_PSCI_FN_CPU_ON:
                mutex_lock(&kvm->lock);
                val = kvm_psci_vcpu_on(vcpu);
                mutex_unlock(&kvm->lock);
                break;
        default:
                val = PSCI_RET_NOT_SUPPORTED;
                break;
        }

        smccc_set_retval(vcpu, val, 0, 0, 0);
        return 1;
}

/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC instructions.
 * The calling convention is similar to SMC calls to the secure world
 * where the function number is placed in r0.
 *
 * This function returns: > 0 (success), 0 (success but exit to user
 * space), and < 0 (errors)
 *
 * Errors:
 * -EINVAL: Unrecognized PSCI function
 */
static int kvm_psci_call(struct kvm_vcpu *vcpu)
{
        switch (kvm_psci_version(vcpu, vcpu->kvm)) {
        case KVM_ARM_PSCI_1_0:
                return kvm_psci_1_0_call(vcpu);
        case KVM_ARM_PSCI_0_2:
                return kvm_psci_0_2_call(vcpu);
        case KVM_ARM_PSCI_0_1:
                return kvm_psci_0_1_call(vcpu);
        default:
                return -EINVAL;
        }
}

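/*
 * Top-level handler for guest hypercalls issued via HVC. SMCCC queries
 * (SMCCC version and ARCH_FEATURES, including discovery of the
 * ARM_SMCCC_ARCH_WORKAROUND_1 branch predictor hardening) are answered
 * directly; anything else is treated as a PSCI call.
 */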
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
{
        u32 func_id = smccc_get_function(vcpu);
        u32 val = PSCI_RET_NOT_SUPPORTED;
        u32 feature;

        switch (func_id) {
        case ARM_SMCCC_VERSION_FUNC_ID:
                val = ARM_SMCCC_VERSION_1_1;
                break;
        case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
                feature = smccc_get_arg1(vcpu);
                switch (feature) {
                case ARM_SMCCC_ARCH_WORKAROUND_1:
                        if (kvm_arm_harden_branch_predictor())
                                val = 0;
                        break;
                }
                break;
        default:
                return kvm_psci_call(vcpu);
        }

        smccc_set_retval(vcpu, val, 0, 0, 0);
        return 1;
}