/* linux/arch/arm64/kvm/guest.c */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>

#include "trace.h"

  37#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
  38#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
  39
  40struct kvm_stats_debugfs_item debugfs_entries[] = {
  41        VCPU_STAT(hvc_exit_stat),
  42        VCPU_STAT(wfe_exit_stat),
  43        VCPU_STAT(wfi_exit_stat),
  44        VCPU_STAT(mmio_exit_user),
  45        VCPU_STAT(mmio_exit_kernel),
  46        VCPU_STAT(exits),
  47        { NULL }
  48};
  49
  50int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
  51{
  52        return 0;
  53}
  54
  55static u64 core_reg_offset_from_id(u64 id)
  56{
  57        return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
  58}
  59
  60static int validate_core_offset(const struct kvm_one_reg *reg)
  61{
  62        u64 off = core_reg_offset_from_id(reg->id);
  63        int size;
  64
  65        switch (off) {
  66        case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
  67             KVM_REG_ARM_CORE_REG(regs.regs[30]):
  68        case KVM_REG_ARM_CORE_REG(regs.sp):
  69        case KVM_REG_ARM_CORE_REG(regs.pc):
  70        case KVM_REG_ARM_CORE_REG(regs.pstate):
  71        case KVM_REG_ARM_CORE_REG(sp_el1):
  72        case KVM_REG_ARM_CORE_REG(elr_el1):
  73        case KVM_REG_ARM_CORE_REG(spsr[0]) ...
  74             KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
  75                size = sizeof(__u64);
  76                break;
  77
  78        case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
  79             KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
  80                size = sizeof(__uint128_t);
  81                break;
  82
  83        case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
  84        case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
  85                size = sizeof(__u32);
  86                break;
  87
  88        default:
  89                return -EINVAL;
  90        }
  91
  92        if (KVM_REG_SIZE(reg->id) == size &&
  93            IS_ALIGNED(off, size / sizeof(__u32)))
  94                return 0;
  95
  96        return -EINVAL;
  97}
  98
  99static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 100{
 101        /*
 102         * Because the kvm_regs structure is a mix of 32, 64 and
 103         * 128bit fields, we index it as if it was a 32bit
 104         * array. Hence below, nr_regs is the number of entries, and
 105         * off the index in the "array".
 106         */
 107        __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
 108        struct kvm_regs *regs = vcpu_gp_regs(vcpu);
 109        int nr_regs = sizeof(*regs) / sizeof(__u32);
 110        u32 off;
 111
 112        /* Our ID is an index into the kvm_regs struct. */
 113        off = core_reg_offset_from_id(reg->id);
 114        if (off >= nr_regs ||
 115            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 116                return -ENOENT;
 117
 118        if (validate_core_offset(reg))
 119                return -EINVAL;
 120
 121        if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
 122                return -EFAULT;
 123
 124        return 0;
 125}
 126
 127static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 128{
 129        __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
 130        struct kvm_regs *regs = vcpu_gp_regs(vcpu);
 131        int nr_regs = sizeof(*regs) / sizeof(__u32);
 132        __uint128_t tmp;
 133        void *valp = &tmp;
 134        u64 off;
 135        int err = 0;
 136
 137        /* Our ID is an index into the kvm_regs struct. */
 138        off = core_reg_offset_from_id(reg->id);
 139        if (off >= nr_regs ||
 140            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
 141                return -ENOENT;
 142
 143        if (validate_core_offset(reg))
 144                return -EINVAL;
 145
 146        if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
 147                return -EINVAL;
 148
 149        if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
 150                err = -EFAULT;
 151                goto out;
 152        }
 153
 154        if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
 155                u64 mode = (*(u64 *)valp) & COMPAT_PSR_MODE_MASK;
 156                switch (mode) {
 157                case COMPAT_PSR_MODE_USR:
 158                        if (!system_supports_32bit_el0())
 159                                return -EINVAL;
 160                        break;
 161                case COMPAT_PSR_MODE_FIQ:
 162                case COMPAT_PSR_MODE_IRQ:
 163                case COMPAT_PSR_MODE_SVC:
 164                case COMPAT_PSR_MODE_ABT:
 165                case COMPAT_PSR_MODE_UND:
 166                        if (!vcpu_el1_is_32bit(vcpu))
 167                                return -EINVAL;
 168                        break;
 169                case PSR_MODE_EL0t:
 170                case PSR_MODE_EL1t:
 171                case PSR_MODE_EL1h:
 172                        if (vcpu_el1_is_32bit(vcpu))
 173                                return -EINVAL;
 174                        break;
 175                default:
 176                        err = -EINVAL;
 177                        goto out;
 178                }
 179        }
 180
 181        memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
 182out:
 183        return err;
 184}
 185
 186int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 187{
 188        return -EINVAL;
 189}
 190
 191int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 192{
 193        return -EINVAL;
 194}
 195
 196static unsigned long num_core_regs(void)
 197{
 198        return sizeof(struct kvm_regs) / sizeof(__u32);
 199}
 200
 201/**
 202 * ARM64 versions of the TIMER registers, always available on arm64
 203 */
 204
 205#define NUM_TIMER_REGS 3
 206
 207static bool is_timer_reg(u64 index)
 208{
 209        switch (index) {
 210        case KVM_REG_ARM_TIMER_CTL:
 211        case KVM_REG_ARM_TIMER_CNT:
 212        case KVM_REG_ARM_TIMER_CVAL:
 213                return true;
 214        }
 215        return false;
 216}
 217
 218static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 219{
 220        if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
 221                return -EFAULT;
 222        uindices++;
 223        if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
 224                return -EFAULT;
 225        uindices++;
 226        if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
 227                return -EFAULT;
 228
 229        return 0;
 230}
 231
 232static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 233{
 234        void __user *uaddr = (void __user *)(long)reg->addr;
 235        u64 val;
 236        int ret;
 237
 238        ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
 239        if (ret != 0)
 240                return -EFAULT;
 241
 242        return kvm_arm_timer_set_reg(vcpu, reg->id, val);
 243}
 244
 245static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 246{
 247        void __user *uaddr = (void __user *)(long)reg->addr;
 248        u64 val;
 249
 250        val = kvm_arm_timer_get_reg(vcpu, reg->id);
 251        return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
 252}
 253
 254/**
 255 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 256 *
 257 * This is for all registers.
 258 */
 259unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
 260{
 261        return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
 262                + kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS;
 263}
 264
 265/**
 266 * kvm_arm_copy_reg_indices - get indices of all registers.
 267 *
 268 * We do core registers right here, then we append system regs.
 269 */
 270int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 271{
 272        unsigned int i;
 273        const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
 274        int ret;
 275
 276        for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
 277                if (put_user(core_reg | i, uindices))
 278                        return -EFAULT;
 279                uindices++;
 280        }
 281
 282        ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
 283        if (ret)
 284                return ret;
 285        uindices += kvm_arm_get_fw_num_regs(vcpu);
 286
 287        ret = copy_timer_indices(vcpu, uindices);
 288        if (ret)
 289                return ret;
 290        uindices += NUM_TIMER_REGS;
 291
 292        return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
 293}
 294
 295int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 296{
 297        /* We currently use nothing arch-specific in upper 32 bits */
 298        if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
 299                return -EINVAL;
 300
 301        /* Register group 16 means we want a core register. */
 302        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 303                return get_core_reg(vcpu, reg);
 304
 305        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
 306                return kvm_arm_get_fw_reg(vcpu, reg);
 307
 308        if (is_timer_reg(reg->id))
 309                return get_timer_reg(vcpu, reg);
 310
 311        return kvm_arm_sys_reg_get_reg(vcpu, reg);
 312}
 313
 314int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 315{
 316        /* We currently use nothing arch-specific in upper 32 bits */
 317        if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
 318                return -EINVAL;
 319
 320        /* Register group 16 means we set a core register. */
 321        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
 322                return set_core_reg(vcpu, reg);
 323
 324        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
 325                return kvm_arm_set_fw_reg(vcpu, reg);
 326
 327        if (is_timer_reg(reg->id))
 328                return set_timer_reg(vcpu, reg);
 329
 330        return kvm_arm_sys_reg_set_reg(vcpu, reg);
 331}
 332
 333int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 334                                  struct kvm_sregs *sregs)
 335{
 336        return -EINVAL;
 337}
 338
 339int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 340                                  struct kvm_sregs *sregs)
 341{
 342        return -EINVAL;
 343}
 344
 345int __attribute_const__ kvm_target_cpu(void)
 346{
 347        unsigned long implementor = read_cpuid_implementor();
 348        unsigned long part_number = read_cpuid_part_number();
 349
 350        switch (implementor) {
 351        case ARM_CPU_IMP_ARM:
 352                switch (part_number) {
 353                case ARM_CPU_PART_AEM_V8:
 354                        return KVM_ARM_TARGET_AEM_V8;
 355                case ARM_CPU_PART_FOUNDATION:
 356                        return KVM_ARM_TARGET_FOUNDATION_V8;
 357                case ARM_CPU_PART_CORTEX_A53:
 358                        return KVM_ARM_TARGET_CORTEX_A53;
 359                case ARM_CPU_PART_CORTEX_A57:
 360                        return KVM_ARM_TARGET_CORTEX_A57;
 361                };
 362                break;
 363        case ARM_CPU_IMP_APM:
 364                switch (part_number) {
 365                case APM_CPU_PART_POTENZA:
 366                        return KVM_ARM_TARGET_XGENE_POTENZA;
 367                };
 368                break;
 369        };
 370
 371        /* Return a default generic target */
 372        return KVM_ARM_TARGET_GENERIC_V8;
 373}
 374
 375int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
 376{
 377        int target = kvm_target_cpu();
 378
 379        if (target < 0)
 380                return -ENODEV;
 381
 382        memset(init, 0, sizeof(*init));
 383
 384        /*
 385         * For now, we don't return any features.
 386         * In future, we might use features to return target
 387         * specific features available for the preferred
 388         * target type.
 389         */
 390        init->target = (__u32)target;
 391
 392        return 0;
 393}
 394
 395int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 396{
 397        return -EINVAL;
 398}
 399
 400int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 401{
 402        return -EINVAL;
 403}
 404
 405int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 406                                  struct kvm_translation *tr)
 407{
 408        return -EINVAL;
 409}
 410
 411#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
 412                            KVM_GUESTDBG_USE_SW_BP | \
 413                            KVM_GUESTDBG_USE_HW | \
 414                            KVM_GUESTDBG_SINGLESTEP)
 415
 416/**
 417 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 418 * @kvm:        pointer to the KVM struct
 419 * @kvm_guest_debug: the ioctl data buffer
 420 *
 421 * This sets up and enables the VM for guest debugging. Userspace
 422 * passes in a control flag to enable different debug types and
 423 * potentially other architecture specific information in the rest of
 424 * the structure.
 425 */
 426int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 427                                        struct kvm_guest_debug *dbg)
 428{
 429        int ret = 0;
 430
 431        trace_kvm_set_guest_debug(vcpu, dbg->control);
 432
 433        if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
 434                ret = -EINVAL;
 435                goto out;
 436        }
 437
 438        if (dbg->control & KVM_GUESTDBG_ENABLE) {
 439                vcpu->guest_debug = dbg->control;
 440
 441                /* Hardware assisted Break and Watch points */
 442                if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
 443                        vcpu->arch.external_debug_state = dbg->arch;
 444                }
 445
 446        } else {
 447                /* If not enabled clear all flags */
 448                vcpu->guest_debug = 0;
 449        }
 450
 451out:
 452        return ret;
 453}
 454
 455int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
 456                               struct kvm_device_attr *attr)
 457{
 458        int ret;
 459
 460        switch (attr->group) {
 461        case KVM_ARM_VCPU_PMU_V3_CTRL:
 462                ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
 463                break;
 464        case KVM_ARM_VCPU_TIMER_CTRL:
 465                ret = kvm_arm_timer_set_attr(vcpu, attr);
 466                break;
 467        default:
 468                ret = -ENXIO;
 469                break;
 470        }
 471
 472        return ret;
 473}
 474
 475int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
 476                               struct kvm_device_attr *attr)
 477{
 478        int ret;
 479
 480        switch (attr->group) {
 481        case KVM_ARM_VCPU_PMU_V3_CTRL:
 482                ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
 483                break;
 484        case KVM_ARM_VCPU_TIMER_CTRL:
 485                ret = kvm_arm_timer_get_attr(vcpu, attr);
 486                break;
 487        default:
 488                ret = -ENXIO;
 489                break;
 490        }
 491
 492        return ret;
 493}
 494
 495int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
 496                               struct kvm_device_attr *attr)
 497{
 498        int ret;
 499
 500        switch (attr->group) {
 501        case KVM_ARM_VCPU_PMU_V3_CTRL:
 502                ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
 503                break;
 504        case KVM_ARM_VCPU_TIMER_CTRL:
 505                ret = kvm_arm_timer_has_attr(vcpu, attr);
 506                break;
 507        default:
 508                ret = -ENXIO;
 509                break;
 510        }
 511
 512        return ret;
 513}
 514