linux/arch/arm64/kvm/hyp/entry.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ptrauth.h>

        .text

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu);
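 *
 * Returns an ARM_EXCEPTION_* code in x0, with ARM_EXIT_WITH_SERROR_BIT
 * ORed in when a pending SError was consumed on the way out.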
 */
SYM_FUNC_START(__guest_enter)
        // x0: vcpu
        // x1-x17: clobbered by macros
        // x29: guest context

        adr_this_cpu x1, kvm_hyp_ctxt, x2

        // Store the hyp regs
        save_callee_saved_regs x1

        // Save hyp's sp_el0
        save_sp_el0     x1, x2

        // Now that the hyp state is stored, if we have a pending RAS SError
        // it must affect the host or hyp. If any asynchronous exception is
        // pending we defer the guest entry. The DSB isn't necessary before
        // v8.2 as any SError would be fatal.
alternative_if ARM64_HAS_RAS_EXTN
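        // Complete the stores above and synchronize the context so that a
        // deferred SError, if there is one, shows up in the ISR_EL1 read
        // below.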
        dsb     nshst
        isb
alternative_else_nop_endif
        mrs     x1, isr_el1
        cbz     x1, 1f
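        // An asynchronous exception is already pending: return to the host
        // with an IRQ exit code so it is handled before we try again.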
        mov     x0, #ARM_EXCEPTION_IRQ
        ret

1:
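        // Record this vcpu as loaded: __guest_exit_panic relies on this to
        // tell a guest-context panic from a hyp-context one.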
        set_loaded_vcpu x0, x1, x2

        add     x29, x0, #VCPU_CONTEXT

        // Macro ptrauth_switch_to_guest format:
        //      ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
        // The below macro to restore guest keys is not implemented in C code
        // as it may cause Pointer Authentication key signing mismatch errors
        // when this feature is enabled for kernel code.
        ptrauth_switch_to_guest x29, x0, x1, x2

        // Restore the guest's sp_el0
        restore_sp_el0 x29, x0

        // Restore guest regs x0-x17
        ldp     x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
        ldp     x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
        ldp     x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
        ldp     x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
        ldp     x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
        ldp     x10, x11, [x29, #CPU_XREG_OFFSET(10)]
        ldp     x12, x13, [x29, #CPU_XREG_OFFSET(12)]
        ldp     x14, x15, [x29, #CPU_XREG_OFFSET(14)]
        ldp     x16, x17, [x29, #CPU_XREG_OFFSET(16)]

        // Restore guest regs x18-x29, lr
        restore_callee_saved_regs x29

        // Do not touch any register after this!
        eret
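        // ... and guard against straight-line speculation past the eret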
        sb

SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
        // x2-x29,lr: vcpu regs
        // vcpu x0-x1 on the stack

        // If the hyp context is loaded, go straight to hyp_panic
        get_loaded_vcpu x0, x1
        cbnz    x0, 1f
        b       hyp_panic

1:
        // The hyp context is saved so make sure it is restored to allow
        // hyp_panic to run at hyp and, subsequently, panic to run in the host.
        // This makes use of __guest_exit to avoid duplication but sets the
        // return address to tail call into hyp_panic. As a side effect, the
        // current state is saved to the guest context but it will only be
        // accurate if the guest had been completely restored.
        adr_this_cpu x0, kvm_hyp_ctxt, x1
        adr_l   x1, hyp_panic
        str     x1, [x0, #CPU_XREG_OFFSET(30)]

        get_vcpu_ptr    x1, x0

SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
        // x0: return code
        // x1: vcpu
        // x2-x29,lr: vcpu regs
        // vcpu x0-x1 on the stack
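        // (The exception vectors stash the guest's x0/x1 on the stack before
        // branching here, which is what frees x0 for the return code and x1
        // for the vcpu pointer.)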

        add     x1, x1, #VCPU_CONTEXT

        ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
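        // PSTATE.PAN may still hold whatever value the guest was running
        // with; set it again so the rest of the exit path runs with PAN
        // enabled.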

        // Store the guest regs x2 and x3
        stp     x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

        // Retrieve the guest regs x0-x1 from the stack
        ldp     x2, x3, [sp], #16       // x0, x1

        // Store the guest regs x0-x1 and x4-x17
        stp     x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
        stp     x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
        stp     x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
        stp     x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
        stp     x10, x11, [x1, #CPU_XREG_OFFSET(10)]
        stp     x12, x13, [x1, #CPU_XREG_OFFSET(12)]
        stp     x14, x15, [x1, #CPU_XREG_OFFSET(14)]
        stp     x16, x17, [x1, #CPU_XREG_OFFSET(16)]

        // Store the guest regs x18-x29, lr
        save_callee_saved_regs x1

        // Store the guest's sp_el0
        save_sp_el0     x1, x2

        adr_this_cpu x2, kvm_hyp_ctxt, x3

        // Macro ptrauth_switch_to_hyp format:
        //      ptrauth_switch_to_hyp(guest cxt, host cxt, tmp1, tmp2, tmp3)
        // The below macro to save/restore keys is not implemented in C code
        // as it may cause Pointer Authentication key signing mismatch errors
        // when this feature is enabled for kernel code.
        ptrauth_switch_to_hyp x1, x2, x3, x4, x5

        // Restore hyp's sp_el0
        restore_sp_el0 x2, x3

        // Now restore the hyp regs
        restore_callee_saved_regs x2

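        // Clear the loaded-vcpu marker: from here on, a panic is treated as
        // a hyp-context panic.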
        set_loaded_vcpu xzr, x2, x3

alternative_if ARM64_HAS_RAS_EXTN
        // If we have the RAS extensions we can consume a pending error
        // without an unmask-SError and isb. The ESB-instruction consumed any
        // pending guest error when we took the exception from the guest.
        mrs_s   x2, SYS_DISR_EL1
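        // x1 still points at the guest context, so index back from it to
        // record DISR_EL1 in the vcpu's fault information.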
        str     x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
        cbz     x2, 1f
        msr_s   SYS_DISR_EL1, xzr
        orr     x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:      ret
alternative_else
        dsb     sy              // Synchronize against in-flight ld/st
        isb                     // Prevent an early read of side-effect free ISR
        mrs     x2, isr_el1
        tbnz    x2, #8, 2f      // ISR_EL1.A
        ret
        nop
2:
alternative_endif
        // We know we have a pending asynchronous abort, now is the
        // time to flush it out. From your VAXorcist book, page 666:
        // "Threaten me not, oh Evil one!  For I speak with
        // the power of DEC, and I command thee to show thyself!"
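        // Stash the exit context first: taking the SError clobbers ELR_EL2,
        // ESR_EL2 and SPSR_EL2, and the exit code is moved to x5 out of
        // harm's way.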
        mrs     x2, elr_el2
        mrs     x3, esr_el2
        mrs     x4, spsr_el2
        mov     x5, x0

        msr     daifclr, #4     // Unmask aborts

        // This is our single instruction exception window. A pending
        // SError is guaranteed to occur at the earliest when we unmask
        // it, and at the latest just after the ISB.
abort_guest_exit_start:

        isb

abort_guest_exit_end:

        msr     daifset, #4     // Mask aborts
        ret

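        // If the SError fires inside the window above, the extable entries
        // below redirect the exception to the fixup at 9997 instead of the
        // normal return path.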
        _kvm_extable    abort_guest_exit_start, 9997f
        _kvm_extable    abort_guest_exit_end, 9997f
9997:
        msr     daifset, #4     // Mask aborts
        mov     x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)

        // Restore the EL2 exception context so that we can report some
        // information. Merge the exception code with the SError pending bit.
        msr     elr_el2, x2
        msr     esr_el2, x3
        msr     spsr_el2, x4
        orr     x0, x0, x5
1:      ret
SYM_FUNC_END(__guest_enter)