linux/arch/arm64/kvm/guest.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/sigcontext.h>

#include "trace.h"

struct kvm_stats_debugfs_item debugfs_entries[] = {
        VCPU_STAT("halt_successful_poll", halt_successful_poll),
        VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
        VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
        VCPU_STAT("halt_wakeup", halt_wakeup),
        VCPU_STAT("hvc_exit_stat", hvc_exit_stat),
        VCPU_STAT("wfe_exit_stat", wfe_exit_stat),
        VCPU_STAT("wfi_exit_stat", wfi_exit_stat),
        VCPU_STAT("mmio_exit_user", mmio_exit_user),
        VCPU_STAT("mmio_exit_kernel", mmio_exit_kernel),
        VCPU_STAT("exits", exits),
        VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
        VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
        { NULL }
};

static bool core_reg_offset_is_vreg(u64 off)
{
        return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
                off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}

static u64 core_reg_offset_from_id(u64 id)
{
        return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
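
/*
 * Core register IDs, as defined in the KVM UAPI headers, are built as
 * KVM_REG_ARM64 | KVM_REG_SIZE_* | KVM_REG_ARM_CORE | index, where the
 * index produced by KVM_REG_ARM_CORE_REG() is the register's offset into
 * struct kvm_regs counted in 32-bit words.  For example, userspace names
 * the PC as:
 *
 *   KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
 *           KVM_REG_ARM_CORE_REG(regs.pc)
 *
 * core_reg_offset_from_id() recovers that word offset, and the helpers
 * below validate it and map it back to storage in the vcpu context.
 */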

static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
        int size;

        switch (off) {
        case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
             KVM_REG_ARM_CORE_REG(regs.regs[30]):
        case KVM_REG_ARM_CORE_REG(regs.sp):
        case KVM_REG_ARM_CORE_REG(regs.pc):
        case KVM_REG_ARM_CORE_REG(regs.pstate):
        case KVM_REG_ARM_CORE_REG(sp_el1):
        case KVM_REG_ARM_CORE_REG(elr_el1):
        case KVM_REG_ARM_CORE_REG(spsr[0]) ...
             KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
                size = sizeof(__u64);
                break;

        case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
             KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
                size = sizeof(__uint128_t);
                break;

        case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
        case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
                size = sizeof(__u32);
                break;

        default:
                return -EINVAL;
        }

        if (!IS_ALIGNED(off, size / sizeof(__u32)))
                return -EINVAL;

        /*
         * The KVM_REG_ARM64_SVE regs must be used instead of
         * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
         * SVE-enabled vcpus:
         */
        if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
                return -EINVAL;

        return size;
}

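/*
 * Map a validated core register ID to its backing storage in the vcpu
 * context.  Returns NULL if the ID is malformed, carries the wrong size,
 * or names a register that this vcpu does not expose.
 */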
static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        u64 off = core_reg_offset_from_id(reg->id);
        int size = core_reg_size_from_offset(vcpu, off);

        if (size < 0)
                return NULL;

        if (KVM_REG_SIZE(reg->id) != size)
                return NULL;

        switch (off) {
        case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
             KVM_REG_ARM_CORE_REG(regs.regs[30]):
                off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
                off /= 2;
                return &vcpu->arch.ctxt.regs.regs[off];

        case KVM_REG_ARM_CORE_REG(regs.sp):
                return &vcpu->arch.ctxt.regs.sp;

        case KVM_REG_ARM_CORE_REG(regs.pc):
                return &vcpu->arch.ctxt.regs.pc;

        case KVM_REG_ARM_CORE_REG(regs.pstate):
                return &vcpu->arch.ctxt.regs.pstate;

        case KVM_REG_ARM_CORE_REG(sp_el1):
                return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);

        case KVM_REG_ARM_CORE_REG(elr_el1):
                return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);

        case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):
                return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);

        case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):
                return &vcpu->arch.ctxt.spsr_abt;

        case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_UND]):
                return &vcpu->arch.ctxt.spsr_und;

        case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_IRQ]):
                return &vcpu->arch.ctxt.spsr_irq;

        case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_FIQ]):
                return &vcpu->arch.ctxt.spsr_fiq;

        case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
             KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
                off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
                off /= 4;
                return &vcpu->arch.ctxt.fp_regs.vregs[off];

        case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
                return &vcpu->arch.ctxt.fp_regs.fpsr;

        case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
                return &vcpu->arch.ctxt.fp_regs.fpcr;

        default:
                return NULL;
        }
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        /*
         * Because the kvm_regs structure is a mix of 32, 64 and
         * 128bit fields, we index it as if it was a 32bit
         * array. Hence below, nr_regs is the number of entries, and
         * off the index in the "array".
         */
        __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
        int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
        void *addr;
        u32 off;

        /* Our ID is an index into the kvm_regs struct. */
        off = core_reg_offset_from_id(reg->id);
        if (off >= nr_regs ||
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                return -ENOENT;

        addr = core_reg_addr(vcpu, reg);
        if (!addr)
                return -EINVAL;

        if (copy_to_user(uaddr, addr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
        int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
        __uint128_t tmp;
        void *valp = &tmp, *addr;
        u64 off;
        int err = 0;

        /* Our ID is an index into the kvm_regs struct. */
        off = core_reg_offset_from_id(reg->id);
        if (off >= nr_regs ||
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                return -ENOENT;

        addr = core_reg_addr(vcpu, reg);
        if (!addr)
                return -EINVAL;

        if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
                return -EINVAL;

        if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
                err = -EFAULT;
                goto out;
        }

        if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
                u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
                switch (mode) {
                case PSR_AA32_MODE_USR:
                        if (!system_supports_32bit_el0())
                                return -EINVAL;
                        break;
                case PSR_AA32_MODE_FIQ:
                case PSR_AA32_MODE_IRQ:
                case PSR_AA32_MODE_SVC:
                case PSR_AA32_MODE_ABT:
                case PSR_AA32_MODE_UND:
                        if (!vcpu_el1_is_32bit(vcpu))
                                return -EINVAL;
                        break;
                case PSR_MODE_EL0t:
                case PSR_MODE_EL1t:
                case PSR_MODE_EL1h:
                        if (vcpu_el1_is_32bit(vcpu))
                                return -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                        goto out;
                }
        }

        memcpy(addr, valp, KVM_REG_SIZE(reg->id));

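        /*
         * When the vCPU is in an AArch32 mode only 32 bits of each of the
         * AArch32 registers r0-r15 are architecturally visible, so truncate
         * them and drop any stale upper halves written via the 64-bit view:
         */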
        if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
                int i;

                for (i = 0; i < 16; i++)
                        *vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
        }
out:
        return err;
}

#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))
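
/*
 * The KVM_REG_ARM64_SVE_VLS pseudo-register is a bitmap of supported
 * vector lengths: bit (vq - SVE_VQ_MIN) is set when vectors of vq
 * quadwords (vq * 128 bits) are available, packed 64 bits per __u64
 * word.  The helpers above locate and test the bit for a given vq.
 */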

static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        unsigned int max_vq, vq;
        u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

        if (!vcpu_has_sve(vcpu))
                return -ENOENT;

        if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
                return -EINVAL;

        memset(vqs, 0, sizeof(vqs));

        max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
        for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
                if (sve_vq_available(vq))
                        vqs[vq_word(vq)] |= vq_mask(vq);

        if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
                return -EFAULT;

        return 0;
}

static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        unsigned int max_vq, vq;
        u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

        if (!vcpu_has_sve(vcpu))
                return -ENOENT;

        if (kvm_arm_vcpu_sve_finalized(vcpu))
                return -EPERM; /* too late! */

        if (WARN_ON(vcpu->arch.sve_state))
                return -EINVAL;

        if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
                return -EFAULT;

        max_vq = 0;
        for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
                if (vq_present(vqs, vq))
                        max_vq = vq;

        if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
                return -EINVAL;

        /*
         * Vector lengths supported by the host can't currently be
         * hidden from the guest individually: instead we can only set a
         * maximum via ZCR_EL2.LEN.  So, make sure the available vector
         * lengths match the set requested exactly up to the requested
         * maximum:
         */
        for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
                if (vq_present(vqs, vq) != sve_vq_available(vq))
                        return -EINVAL;

        /* Can't run with no vector lengths at all: */
        if (max_vq < SVE_VQ_MIN)
                return -EINVAL;

        /* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
        vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

        return 0;
}

#define SVE_REG_SLICE_SHIFT     0
#define SVE_REG_SLICE_BITS      5
#define SVE_REG_ID_SHIFT        (SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS         5

#define SVE_REG_SLICE_MASK                                      \
        GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,   \
                SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK                                                 \
        GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)
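
/*
 * In a KVM_REG_ARM64_SVE Z-, P- or FFR register ID, bits [4:0] select the
 * slice and bits [9:5] select the register number, matching the
 * KVM_REG_ARM64_SVE_ZREG()/_PREG()/_FFR() encodings in the UAPI header.
 */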

#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))

/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1

/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
        unsigned int koffset;   /* offset into sve_state in kernel memory */
        unsigned int klen;      /* length in kernel memory */
        unsigned int upad;      /* extra trailing padding in user memory */
};

/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
                             struct kvm_vcpu *vcpu,
                             const struct kvm_one_reg *reg)
{
        /* reg ID ranges for Z- registers */
        const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
        const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
                                                       SVE_NUM_SLICES - 1);

        /* reg ID ranges for P- registers and FFR (which are contiguous) */
        const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
        const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

        unsigned int vq;
        unsigned int reg_num;

        unsigned int reqoffset, reqlen; /* User-requested offset and length */
        unsigned int maxlen; /* Maximum permitted length */

        size_t sve_state_size;

        const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
                                                        SVE_NUM_SLICES - 1);

        /* Verify that the P-regs and FFR really do have contiguous IDs: */
        BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

        /* Verify that we match the UAPI header: */
        BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

        reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

        if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
                if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
                        return -ENOENT;

                vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

                reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
                                SVE_SIG_REGS_OFFSET;
                reqlen = KVM_SVE_ZREG_SIZE;
                maxlen = SVE_SIG_ZREG_SIZE(vq);
        } else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
                if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
                        return -ENOENT;

                vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

                reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
                                SVE_SIG_REGS_OFFSET;
                reqlen = KVM_SVE_PREG_SIZE;
                maxlen = SVE_SIG_PREG_SIZE(vq);
        } else {
                return -EINVAL;
        }

        sve_state_size = vcpu_sve_state_size(vcpu);
        if (WARN_ON(!sve_state_size))
                return -EINVAL;

        region->koffset = array_index_nospec(reqoffset, sve_state_size);
        region->klen = min(maxlen, reqlen);
        region->upad = reqlen - region->klen;

        return 0;
}

static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        int ret;
        struct sve_state_reg_region region;
        char __user *uptr = (char __user *)reg->addr;

        /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
        if (reg->id == KVM_REG_ARM64_SVE_VLS)
                return get_sve_vls(vcpu, reg);

        /* Try to interpret reg ID as an architectural SVE register... */
        ret = sve_reg_to_region(&region, vcpu, reg);
        if (ret)
                return ret;

        if (!kvm_arm_vcpu_sve_finalized(vcpu))
                return -EPERM;

        if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
                         region.klen) ||
            clear_user(uptr + region.klen, region.upad))
                return -EFAULT;

        return 0;
}

static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        int ret;
        struct sve_state_reg_region region;
        const char __user *uptr = (const char __user *)reg->addr;

        /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
        if (reg->id == KVM_REG_ARM64_SVE_VLS)
                return set_sve_vls(vcpu, reg);

        /* Try to interpret reg ID as an architectural SVE register... */
        ret = sve_reg_to_region(&region, vcpu, reg);
        if (ret)
                return ret;

        if (!kvm_arm_vcpu_sve_finalized(vcpu))
                return -EPERM;

        if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
                           region.klen))
                return -EFAULT;

        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        return -EINVAL;
}

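/*
 * Enumerate the core register IDs this vcpu exposes.  If @uindices is
 * non-NULL, the IDs are also copied out to userspace.  Returns the number
 * of registers enumerated, or -EFAULT if copying an ID to userspace fails.
 */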
static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
                                 u64 __user *uindices)
{
        unsigned int i;
        int n = 0;

        for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
                u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
                int size = core_reg_size_from_offset(vcpu, i);

                if (size < 0)
                        continue;

                switch (size) {
                case sizeof(__u32):
                        reg |= KVM_REG_SIZE_U32;
                        break;

                case sizeof(__u64):
                        reg |= KVM_REG_SIZE_U64;
                        break;

                case sizeof(__uint128_t):
                        reg |= KVM_REG_SIZE_U128;
                        break;

                default:
                        WARN_ON(1);
                        continue;
                }

                if (uindices) {
                        if (put_user(reg, uindices))
                                return -EFAULT;
                        uindices++;
                }

                n++;
        }

        return n;
}

static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
        return copy_core_reg_indices(vcpu, NULL);
}

/*
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
        switch (index) {
        case KVM_REG_ARM_TIMER_CTL:
        case KVM_REG_ARM_TIMER_CNT:
        case KVM_REG_ARM_TIMER_CVAL:
                return true;
        }
        return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
                return -EFAULT;
        uindices++;
        if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
                return -EFAULT;
        uindices++;
        if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
                return -EFAULT;

        return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        void __user *uaddr = (void __user *)(long)reg->addr;
        u64 val;
        int ret;

        ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
        if (ret != 0)
                return -EFAULT;

        return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        void __user *uaddr = (void __user *)(long)reg->addr;
        u64 val;

        val = kvm_arm_timer_get_reg(vcpu, reg->id);
        return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
        const unsigned int slices = vcpu_sve_slices(vcpu);

        if (!vcpu_has_sve(vcpu))
                return 0;

        /* Policed by KVM_GET_REG_LIST: */
        WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

        return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
                + 1; /* KVM_REG_ARM64_SVE_VLS */
}

static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
                                u64 __user *uindices)
{
        const unsigned int slices = vcpu_sve_slices(vcpu);
        u64 reg;
        unsigned int i, n;
        int num_regs = 0;

        if (!vcpu_has_sve(vcpu))
                return 0;

        /* Policed by KVM_GET_REG_LIST: */
        WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

        /*
         * Enumerate this first, so that userspace can save/restore in
         * the order reported by KVM_GET_REG_LIST:
         */
        reg = KVM_REG_ARM64_SVE_VLS;
        if (put_user(reg, uindices++))
                return -EFAULT;
        ++num_regs;

        for (i = 0; i < slices; i++) {
                for (n = 0; n < SVE_NUM_ZREGS; n++) {
                        reg = KVM_REG_ARM64_SVE_ZREG(n, i);
                        if (put_user(reg, uindices++))
                                return -EFAULT;
                        num_regs++;
                }

                for (n = 0; n < SVE_NUM_PREGS; n++) {
                        reg = KVM_REG_ARM64_SVE_PREG(n, i);
                        if (put_user(reg, uindices++))
                                return -EFAULT;
                        num_regs++;
                }

                reg = KVM_REG_ARM64_SVE_FFR(i);
                if (put_user(reg, uindices++))
                        return -EFAULT;
                num_regs++;
        }

        return num_regs;
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 * @vcpu: the vCPU being queried
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
        unsigned long res = 0;

        res += num_core_regs(vcpu);
        res += num_sve_regs(vcpu);
        res += kvm_arm_num_sys_reg_descs(vcpu);
        res += kvm_arm_get_fw_num_regs(vcpu);
        res += NUM_TIMER_REGS;

        return res;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 * @vcpu: the vCPU whose registers are enumerated
 * @uindices: userspace buffer that receives the register indices
 *
 * We do the core registers first, then the SVE, firmware and timer
 * registers, and finally append the system registers.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        int ret;

        ret = copy_core_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += ret;

        ret = copy_sve_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += ret;

        ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += kvm_arm_get_fw_num_regs(vcpu);

        ret = copy_timer_indices(vcpu, uindices);
        if (ret < 0)
                return ret;
        uindices += NUM_TIMER_REGS;

        return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}

int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        /* We currently use nothing arch-specific in upper 32 bits */
        if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
                return -EINVAL;

        switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
        case KVM_REG_ARM_CORE:  return get_core_reg(vcpu, reg);
        case KVM_REG_ARM_FW:    return kvm_arm_get_fw_reg(vcpu, reg);
        case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg);
        }

        if (is_timer_reg(reg->id))
                return get_timer_reg(vcpu, reg);

        return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        /* We currently use nothing arch-specific in upper 32 bits */
        if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
                return -EINVAL;

        switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
        case KVM_REG_ARM_CORE:  return set_core_reg(vcpu, reg);
        case KVM_REG_ARM_FW:    return kvm_arm_set_fw_reg(vcpu, reg);
        case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg);
        }

        if (is_timer_reg(reg->id))
                return set_timer_reg(vcpu, reg);

        return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -EINVAL;
}

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events)
{
        events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
        events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

        if (events->exception.serror_pending && events->exception.serror_has_esr)
                events->exception.serror_esr = vcpu_get_vsesr(vcpu);

        /*
         * We never return a pending ext_dabt here because we deliver it to
         * the virtual CPU directly when setting the event and it's no longer
         * 'pending' at this point.
         */

        return 0;
}

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events)
{
        bool serror_pending = events->exception.serror_pending;
        bool has_esr = events->exception.serror_has_esr;
        bool ext_dabt_pending = events->exception.ext_dabt_pending;

        if (serror_pending && has_esr) {
                if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
                        return -EINVAL;

                if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
                        kvm_set_sei_esr(vcpu, events->exception.serror_esr);
                else
                        return -EINVAL;
        } else if (serror_pending) {
                kvm_inject_vabt(vcpu);
        }

        if (ext_dabt_pending)
                kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));

        return 0;
}

int __attribute_const__ kvm_target_cpu(void)
{
        unsigned long implementor = read_cpuid_implementor();
        unsigned long part_number = read_cpuid_part_number();

        switch (implementor) {
        case ARM_CPU_IMP_ARM:
                switch (part_number) {
                case ARM_CPU_PART_AEM_V8:
                        return KVM_ARM_TARGET_AEM_V8;
                case ARM_CPU_PART_FOUNDATION:
                        return KVM_ARM_TARGET_FOUNDATION_V8;
                case ARM_CPU_PART_CORTEX_A53:
                        return KVM_ARM_TARGET_CORTEX_A53;
                case ARM_CPU_PART_CORTEX_A57:
                        return KVM_ARM_TARGET_CORTEX_A57;
                }
                break;
        case ARM_CPU_IMP_APM:
                switch (part_number) {
                case APM_CPU_PART_POTENZA:
                        return KVM_ARM_TARGET_XGENE_POTENZA;
                }
                break;
        }

        /* Return a default generic target */
        return KVM_ARM_TARGET_GENERIC_V8;
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
        int target = kvm_target_cpu();

        if (target < 0)
                return -ENODEV;

        memset(init, 0, sizeof(*init));

        /*
         * For now, we don't return any features.
         * In future, we might use features to return target
         * specific features available for the preferred
         * target type.
         */
        init->target = (__u32)target;

        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL;
}

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
                            KVM_GUESTDBG_USE_SW_BP | \
                            KVM_GUESTDBG_USE_HW | \
                            KVM_GUESTDBG_SINGLESTEP)

/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu: the vCPU being debugged
 * @dbg: the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        int ret = 0;

        trace_kvm_set_guest_debug(vcpu, dbg->control);

        if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
                ret = -EINVAL;
                goto out;
        }

        if (dbg->control & KVM_GUESTDBG_ENABLE) {
                vcpu->guest_debug = dbg->control;

                /* Hardware assisted Break and Watch points */
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
                        vcpu->arch.external_debug_state = dbg->arch;
                }

        } else {
                /* If not enabled clear all flags */
                vcpu->guest_debug = 0;
        }

out:
        return ret;
}

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_ARM_VCPU_PMU_V3_CTRL:
                ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
                break;
        case KVM_ARM_VCPU_TIMER_CTRL:
                ret = kvm_arm_timer_set_attr(vcpu, attr);
                break;
        case KVM_ARM_VCPU_PVTIME_CTRL:
                ret = kvm_arm_pvtime_set_attr(vcpu, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_ARM_VCPU_PMU_V3_CTRL:
                ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
                break;
        case KVM_ARM_VCPU_TIMER_CTRL:
                ret = kvm_arm_timer_get_attr(vcpu, attr);
                break;
        case KVM_ARM_VCPU_PVTIME_CTRL:
                ret = kvm_arm_pvtime_get_attr(vcpu, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_ARM_VCPU_PMU_V3_CTRL:
                ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
                break;
        case KVM_ARM_VCPU_TIMER_CTRL:
                ret = kvm_arm_timer_has_attr(vcpu, attr);
                break;
        case KVM_ARM_VCPU_PVTIME_CTRL:
                ret = kvm_arm_pvtime_has_attr(vcpu, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}