linux/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_SYSREG_SR_H__
#define __ARM64_KVM_HYP_SYSREG_SR_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>

#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

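/*
 * Descriptive note (not in the original source): these helpers are shared
 * by the VHE and nVHE world-switch code. "Common" state (MDSCR_EL1) and
 * EL0 "user" state (TPIDR_EL0, TPIDRRO_EL0) are kept in separate helpers
 * from the bulk of the EL1 state so callers can switch each group at
 * different points.
 */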
static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
        ctxt_sys_reg(ctxt, MDSCR_EL1)   = read_sysreg(mdscr_el1);
}

static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
{
        ctxt_sys_reg(ctxt, TPIDR_EL0)   = read_sysreg(tpidr_el0);
        ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
}

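/*
 * Descriptive note (not in the original source): resolve the vcpu that owns
 * @ctxt so the per-VM MTE feature can be queried. The host context has
 * __hyp_running_vcpu set while a guest is running; a guest context is
 * embedded in its vcpu and is recovered with container_of().
 */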
static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
{
        struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;

        if (!vcpu)
                vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);

        return kvm_has_mte(kern_hyp_va(vcpu->kvm));
}

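/*
 * Descriptive note (not in the original source): save the EL1 system
 * register state of this context. The MTE tag fault status registers are
 * only context-switched when the VM actually has MTE enabled.
 */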
static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
        ctxt_sys_reg(ctxt, CSSELR_EL1)  = read_sysreg(csselr_el1);
        ctxt_sys_reg(ctxt, SCTLR_EL1)   = read_sysreg_el1(SYS_SCTLR);
        ctxt_sys_reg(ctxt, CPACR_EL1)   = read_sysreg_el1(SYS_CPACR);
        ctxt_sys_reg(ctxt, TTBR0_EL1)   = read_sysreg_el1(SYS_TTBR0);
        ctxt_sys_reg(ctxt, TTBR1_EL1)   = read_sysreg_el1(SYS_TTBR1);
        ctxt_sys_reg(ctxt, TCR_EL1)     = read_sysreg_el1(SYS_TCR);
        ctxt_sys_reg(ctxt, ESR_EL1)     = read_sysreg_el1(SYS_ESR);
        ctxt_sys_reg(ctxt, AFSR0_EL1)   = read_sysreg_el1(SYS_AFSR0);
        ctxt_sys_reg(ctxt, AFSR1_EL1)   = read_sysreg_el1(SYS_AFSR1);
        ctxt_sys_reg(ctxt, FAR_EL1)     = read_sysreg_el1(SYS_FAR);
        ctxt_sys_reg(ctxt, MAIR_EL1)    = read_sysreg_el1(SYS_MAIR);
        ctxt_sys_reg(ctxt, VBAR_EL1)    = read_sysreg_el1(SYS_VBAR);
        ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR);
        ctxt_sys_reg(ctxt, AMAIR_EL1)   = read_sysreg_el1(SYS_AMAIR);
        ctxt_sys_reg(ctxt, CNTKCTL_EL1) = read_sysreg_el1(SYS_CNTKCTL);
        ctxt_sys_reg(ctxt, PAR_EL1)     = read_sysreg_par();
        ctxt_sys_reg(ctxt, TPIDR_EL1)   = read_sysreg(tpidr_el1);

        if (ctxt_has_mte(ctxt)) {
                ctxt_sys_reg(ctxt, TFSR_EL1) = read_sysreg_el1(SYS_TFSR);
                ctxt_sys_reg(ctxt, TFSRE0_EL1) = read_sysreg_s(SYS_TFSRE0_EL1);
        }

        ctxt_sys_reg(ctxt, SP_EL1)      = read_sysreg(sp_el1);
        ctxt_sys_reg(ctxt, ELR_EL1)     = read_sysreg_el1(SYS_ELR);
        ctxt_sys_reg(ctxt, SPSR_EL1)    = read_sysreg_el1(SYS_SPSR);
}

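/*
 * Descriptive note (not in the original source): capture the exception
 * return state of this context, i.e. the PC and PSTATE it will resume
 * with (taken from ELR_EL2/SPSR_EL2), plus any deferred virtual SError
 * syndrome (VDISR_EL2 saved into DISR_EL1) when the RAS extension is
 * present.
 */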
static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
        ctxt->regs.pc                   = read_sysreg_el2(SYS_ELR);
        ctxt->regs.pstate               = read_sysreg_el2(SYS_SPSR);

        if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
                ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
}

static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
        write_sysreg(ctxt_sys_reg(ctxt, MDSCR_EL1),  mdscr_el1);
}

static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
        write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL0),     tpidr_el0);
        write_sysreg(ctxt_sys_reg(ctxt, TPIDRRO_EL0),   tpidrro_el0);
}

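/*
 * Descriptive note (not in the original source): restore the EL1 state
 * saved above. On nVHE systems affected by
 * ARM64_WORKAROUND_SPECULATIVE_AT, SCTLR_EL1 and TCR_EL1 must be written
 * in a careful order relative to the stage-2 configuration; see the
 * inline comments below and the pairing with __activate_traps() and
 * __deactivate_traps().
 */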
static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
{
        write_sysreg(ctxt_sys_reg(ctxt, MPIDR_EL1),     vmpidr_el2);
        write_sysreg(ctxt_sys_reg(ctxt, CSSELR_EL1),    csselr_el1);

        if (has_vhe() ||
            !cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
                write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1),   SYS_TCR);
        } else if (!ctxt->__hyp_running_vcpu) {
                /*
                 * Must only be done for guest registers, hence the context
                 * test. We're coming from the host, so SCTLR.M is already
                 * set. Pairs with nVHE's __activate_traps().
                 */
                write_sysreg_el1((ctxt_sys_reg(ctxt, TCR_EL1) |
                                  TCR_EPD1_MASK | TCR_EPD0_MASK),
                                 SYS_TCR);
                isb();
        }

        write_sysreg_el1(ctxt_sys_reg(ctxt, CPACR_EL1), SYS_CPACR);
        write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR0_EL1), SYS_TTBR0);
        write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR1_EL1), SYS_TTBR1);
        write_sysreg_el1(ctxt_sys_reg(ctxt, ESR_EL1),   SYS_ESR);
        write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR0_EL1), SYS_AFSR0);
        write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR1_EL1), SYS_AFSR1);
        write_sysreg_el1(ctxt_sys_reg(ctxt, FAR_EL1),   SYS_FAR);
        write_sysreg_el1(ctxt_sys_reg(ctxt, MAIR_EL1),  SYS_MAIR);
        write_sysreg_el1(ctxt_sys_reg(ctxt, VBAR_EL1),  SYS_VBAR);
        write_sysreg_el1(ctxt_sys_reg(ctxt, CONTEXTIDR_EL1), SYS_CONTEXTIDR);
        write_sysreg_el1(ctxt_sys_reg(ctxt, AMAIR_EL1), SYS_AMAIR);
        write_sysreg_el1(ctxt_sys_reg(ctxt, CNTKCTL_EL1), SYS_CNTKCTL);
        write_sysreg(ctxt_sys_reg(ctxt, PAR_EL1),       par_el1);
        write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL1),     tpidr_el1);

        if (ctxt_has_mte(ctxt)) {
                write_sysreg_el1(ctxt_sys_reg(ctxt, TFSR_EL1), SYS_TFSR);
                write_sysreg_s(ctxt_sys_reg(ctxt, TFSRE0_EL1), SYS_TFSRE0_EL1);
        }

        if (!has_vhe() &&
            cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
            ctxt->__hyp_running_vcpu) {
                /*
                 * Must only be done for host registers, hence the context
                 * test. Pairs with nVHE's __deactivate_traps().
                 */
                isb();
                /*
                 * At this stage, and thanks to the above isb(), S2 is
                 * deconfigured and disabled. We can now restore the host's
                 * S1 configuration: SCTLR, and only then TCR.
                 */
                write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
                isb();
                write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1),   SYS_TCR);
        }

        write_sysreg(ctxt_sys_reg(ctxt, SP_EL1),        sp_el1);
        write_sysreg_el1(ctxt_sys_reg(ctxt, ELR_EL1),   SYS_ELR);
        write_sysreg_el1(ctxt_sys_reg(ctxt, SPSR_EL1),  SYS_SPSR);
}

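/*
 * Descriptive note (not in the original source): program the EL2 exception
 * return state (ELR_EL2/SPSR_EL2) consumed by the subsequent ERET into the
 * guest, sanitising the requested mode first, and restore the deferred
 * virtual SError state (VDISR_EL2) when the RAS extension is present.
 */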
static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
{
        u64 pstate = ctxt->regs.pstate;
        u64 mode = pstate & PSR_AA32_MODE_MASK;

        /*
         * Safety check to ensure we're setting the CPU up to enter the guest
         * in a less privileged mode.
         *
         * If we are attempting a return to EL2 or higher in AArch64 state,
         * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that
         * we'll take an illegal exception state exception immediately after
         * the ERET to the guest.  Attempts to return to AArch32 Hyp will
         * result in an illegal exception return because EL2's execution state
         * is determined by SCR_EL3.RW.
         */
        if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
                pstate = PSR_MODE_EL2h | PSR_IL_BIT;

        write_sysreg_el2(ctxt->regs.pc,                 SYS_ELR);
        write_sysreg_el2(pstate,                        SYS_SPSR);

        if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
                write_sysreg_s(ctxt_sys_reg(ctxt, DISR_EL1), SYS_VDISR_EL2);
}

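/*
 * Descriptive note (not in the original source): save the AArch32-specific
 * state (banked SPSRs, DACR32_EL2, IFSR32_EL2) for a 32-bit EL1 guest.
 * DBGVCR32_EL2 is only saved when the debug registers are live: always on
 * VHE, otherwise only when the debug state is marked dirty.
 */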
static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
{
        if (!vcpu_el1_is_32bit(vcpu))
                return;

        vcpu->arch.ctxt.spsr_abt = read_sysreg(spsr_abt);
        vcpu->arch.ctxt.spsr_und = read_sysreg(spsr_und);
        vcpu->arch.ctxt.spsr_irq = read_sysreg(spsr_irq);
        vcpu->arch.ctxt.spsr_fiq = read_sysreg(spsr_fiq);

        __vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2);
        __vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2);

        if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
                __vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2);
}

static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
{
        if (!vcpu_el1_is_32bit(vcpu))
                return;

        write_sysreg(vcpu->arch.ctxt.spsr_abt, spsr_abt);
        write_sysreg(vcpu->arch.ctxt.spsr_und, spsr_und);
        write_sysreg(vcpu->arch.ctxt.spsr_irq, spsr_irq);
        write_sysreg(vcpu->arch.ctxt.spsr_fiq, spsr_fiq);

        write_sysreg(__vcpu_sys_reg(vcpu, DACR32_EL2), dacr32_el2);
        write_sysreg(__vcpu_sys_reg(vcpu, IFSR32_EL2), ifsr32_el2);

        if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
                write_sysreg(__vcpu_sys_reg(vcpu, DBGVCR32_EL2), dbgvcr32_el2);
}

#endif /* __ARM64_KVM_HYP_SYSREG_SR_H__ */