linux/arch/arm64/include/asm/kvm_emulate.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

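/*
 * The guest's EL1 runs in AArch32 when HCR_EL2.RW is clear (RW selects the
 * execution state for EL1 and below).
 */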
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

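/*
 * Establish the baseline HCR_EL2 configuration for this vCPU: VHE vs nVHE
 * behaviour, RAS error trapping, FWB vs TVM-based cache maintenance tracking,
 * AArch32 vs AArch64 EL1, ID/cache register trapping, and MTE tag access.
 */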
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

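/*
 * Relax WFx trapping: stop trapping WFE, and stop trapping WFI as well when
 * GICv4 vLPIs are mapped to this vCPU or vSGIs are in use (a directly
 * injected interrupt can then wake the vCPU without KVM's involvement);
 * otherwise keep trapping WFI.
 */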
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

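/*
 * HCR_EL2.API and HCR_EL2.APK, when set, let the guest use pointer
 * authentication instructions and access the key registers without trapping
 * to EL2; clearing them re-enables the traps.
 */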
static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}
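
/*
 * Illustrative use in an MMIO trap handler: fetch the transfer register from
 * the fault syndrome, then read the value of a guest store or set the result
 * of a guest load, e.g.
 *
 *	int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *	unsigned long data = vcpu_get_reg(vcpu, rt);
 *	...
 *	vcpu_set_reg(vcpu, rt, result);
 *
 * Register 31 is treated as XZR/WZR: reads return 0 and writes are discarded.
 */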

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

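/*
 * HPFAR_EL2.FIPA (starting at register bit 4) holds the faulting IPA shifted
 * right by 12, so masking and shifting left by 8 rebuilds the page-aligned
 * IPA. The offset within the page, when needed, comes from FAR_EL2.
 */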
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

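/*
 * ESR_ELx.ISV: the ISS holds a valid instruction syndrome (register, access
 * size, sign extension, read/write), which is what allows KVM to emulate an
 * MMIO access without fetching and decoding the guest instruction.
 */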
static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

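/*
 * ESR_ELx.S1PTW: the abort was taken on a stage 2 fault during a stage 1
 * translation table walk, not on the access itself.
 */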
static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

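/*
 * Access size of the faulting data access in bytes (1, 2, 4 or 8), decoded
 * from ESR_ELx.SAS.
 */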
static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
}

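/*
 * True when the fault status code indicates a synchronous external abort or
 * a parity/ECC error, either on the access itself or on a translation table
 * walk, rather than a translation/access/permission fault.
 */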
static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}

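/*
 * Faults on stage 1 page table walks are treated as writes so that the page
 * can be mapped writable at stage 2 (e.g. for hardware access flag / dirty
 * state updates); instruction aborts are reads; everything else follows
 * ESR_ELx.WnR.
 */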
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu))
		return true;

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

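/*
 * Force the vCPU to big-endian data accesses: CPSR.E for an AArch32 EL1,
 * otherwise SCTLR_EL1.EE (bit 25) for an AArch64 EL1.
 */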
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= (1 << 25);
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

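/*
 * Convert the register value of a guest store into host byte order for MMIO
 * emulation, honouring the guest's current data endianness and the access
 * width. The inverse of vcpu_data_host_to_guest().
 */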
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

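/*
 * Convert a value produced by the host (typically the result of an emulated
 * MMIO load) into the guest's current data endianness before it is written
 * back to the guest register.
 */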
static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}

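/*
 * Request that the PC be advanced past the trapped instruction; the pending
 * adjustment is applied before the vCPU next enters the guest.
 */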
static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
}

static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
	return test_bit(feature, vcpu->arch.features);
}

#endif /* __ARM64_KVM_EMULATE_H__ */