linux/arch/x86/kvm/vmx/vmenter.S
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include <asm/segment.h>

#define WORD_SIZE (BITS_PER_LONG / 8)

#define VCPU_RAX        __VCPU_REGS_RAX * WORD_SIZE
#define VCPU_RCX        __VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX        __VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX        __VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP        __VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI        __VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI        __VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8         __VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9         __VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10        __VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11        __VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12        __VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13        __VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14        __VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15        __VCPU_REGS_R15 * WORD_SIZE
#endif
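/*
 * Worked example (for illustration only, not part of the build): with
 * CONFIG_X86_64, BITS_PER_LONG is 64, so WORD_SIZE is 8 and e.g. VCPU_RCX
 * expands to __VCPU_REGS_RCX * 8, i.e. the byte offset of RCX within the
 * register array passed in via @regs below.
 */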

.section .noinstr.text, "ax"

/**
 * vmx_vmenter - VM-Enter the current loaded VMCS
 *
 * %RFLAGS.ZF:  !VMCS.LAUNCHED, i.e. controls VMLAUNCH vs. VMRESUME
 *
 * Returns:
 *      %RFLAGS.CF is set on VM-Fail Invalid
 *      %RFLAGS.ZF is set on VM-Fail Valid
 *      %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
 *
 * Note that VMRESUME/VMLAUNCH fall through and return directly if
 * they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump
 * to vmx_vmexit.
 */
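/*
 * Per the contract above, CF and ZF are both clear only on VM-Success, so a
 * caller can catch either VM-Fail case with a single "jbe" (jump if CF=1 or
 * ZF=1) after the CALL, which is what __vmx_vcpu_run does below.
 */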
SYM_FUNC_START_LOCAL(vmx_vmenter)
        /* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */
        je 2f

1:      vmresume
        ret

2:      vmlaunch
        ret

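        /*
         * A fault on VMRESUME/VMLAUNCH above, e.g. because VMX was disabled
         * by an emergency reboot, is redirected here by the exception fixup
         * entries below: swallow the fault if a reboot is in progress,
         * otherwise escalate to ud2/BUG.
         */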
3:      cmpb $0, kvm_rebooting
        je 4f
        ret
4:      ud2

        _ASM_EXTABLE(1b, 3b)
        _ASM_EXTABLE(2b, 3b)

SYM_FUNC_END(vmx_vmenter)

/**
 * vmx_vmexit - Handle a VMX VM-Exit
 *
 * Returns:
 *      %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
 *
 * This is vmx_vmenter's partner in crime.  On a VM-Exit, control will jump
 * here after hardware loads the host's state, i.e. this is the destination
 * referred to by VMCS.HOST_RIP.
 */
SYM_FUNC_START(vmx_vmexit)
#ifdef CONFIG_RETPOLINE
        ALTERNATIVE "jmp .Lvmexit_skip_rsb", "", X86_FEATURE_RETPOLINE
        /* Preserve guest's RAX; it's used to stuff the RSB. */
        push %_ASM_AX

        /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
        FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
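        /*
         * Note, the stuffing loop above clobbers RFLAGS, hence the OR below
         * to re-establish the !VM-Fail state: OR clears CF, and OR'ing in a
         * nonzero value guarantees a nonzero result, which clears ZF.
         */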

        /* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */
        or $1, %_ASM_AX

        pop %_ASM_AX
.Lvmexit_skip_rsb:
#endif
        ret
SYM_FUNC_END(vmx_vmexit)

/**
 * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
 * @vmx:        struct vcpu_vmx * (forwarded to vmx_update_host_rsp)
 * @regs:       unsigned long * (to guest registers)
 * @launched:   %true if the VMCS has been launched
 *
 * Returns:
 *      0 on VM-Exit, 1 on VM-Fail
 */
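/*
 * For reference, a sketch of the C-side declaration and call in vmx.c
 * (details may differ slightly by kernel version):
 *
 *      bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
 *                          bool launched);
 *
 *      vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
 *                                 vmx->loaded_vmcs->launched);
 */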
SYM_FUNC_START(__vmx_vcpu_run)
        push %_ASM_BP
        mov  %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
        push %r15
        push %r14
        push %r13
        push %r12
#else
        push %edi
        push %esi
#endif
        push %_ASM_BX

        /*
         * Save @regs; _ASM_ARG2 may be modified by vmx_update_host_rsp() and
         * @regs is needed after VM-Exit to save the guest's register values.
         */
        push %_ASM_ARG2

        /* Copy @launched to BL, _ASM_ARG3 is volatile. */
        mov %_ASM_ARG3B, %bl

        /* Adjust RSP to account for the CALL to vmx_vmenter(). */
        lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
        call vmx_update_host_rsp
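        /*
         * vmx_update_host_rsp() records the adjusted value as the VMCS host
         * RSP (only when it changed), so that after VM-Exit the RET in
         * vmx_vmexit consumes the return address pushed by the CALL to
         * vmx_vmenter and execution resumes below with RSP pointing at the
         * saved @regs.
         */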

        /* Load @regs to RAX. */
        mov (%_ASM_SP), %_ASM_AX

        /* Check if vmlaunch or vmresume is needed */
        testb %bl, %bl

        /* Load guest registers.  Don't clobber flags. */
        mov VCPU_RCX(%_ASM_AX), %_ASM_CX
        mov VCPU_RDX(%_ASM_AX), %_ASM_DX
        mov VCPU_RBX(%_ASM_AX), %_ASM_BX
        mov VCPU_RBP(%_ASM_AX), %_ASM_BP
        mov VCPU_RSI(%_ASM_AX), %_ASM_SI
        mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
        mov VCPU_R8 (%_ASM_AX),  %r8
        mov VCPU_R9 (%_ASM_AX),  %r9
        mov VCPU_R10(%_ASM_AX), %r10
        mov VCPU_R11(%_ASM_AX), %r11
        mov VCPU_R12(%_ASM_AX), %r12
        mov VCPU_R13(%_ASM_AX), %r13
        mov VCPU_R14(%_ASM_AX), %r14
        mov VCPU_R15(%_ASM_AX), %r15
#endif
        /* Load guest RAX.  This kills the @regs pointer! */
        mov VCPU_RAX(%_ASM_AX), %_ASM_AX

        /* Enter guest mode */
        call vmx_vmenter

        /* Jump on VM-Fail. */
        jbe 2f

        /* Temporarily save guest's RAX. */
        push %_ASM_AX
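        /*
         * Stack layout at this point, top down: guest RAX (just pushed), the
         * saved @regs pointer at WORD_SIZE(%_ASM_SP), then the callee-saved
         * registers pushed in the prologue.
         */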

        /* Reload @regs to RAX. */
        mov WORD_SIZE(%_ASM_SP), %_ASM_AX

        /* Save all guest registers, including RAX from the stack */
        pop           VCPU_RAX(%_ASM_AX)
        mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
        mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
        mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
        mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
        mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
        mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
        mov %r8,  VCPU_R8 (%_ASM_AX)
        mov %r9,  VCPU_R9 (%_ASM_AX)
        mov %r10, VCPU_R10(%_ASM_AX)
        mov %r11, VCPU_R11(%_ASM_AX)
        mov %r12, VCPU_R12(%_ASM_AX)
        mov %r13, VCPU_R13(%_ASM_AX)
        mov %r14, VCPU_R14(%_ASM_AX)
        mov %r15, VCPU_R15(%_ASM_AX)
#endif

        /* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
        xor %eax, %eax

        /*
         * Clear all general purpose registers except RSP and RAX to prevent
         * speculative use of the guest's values, even those that are reloaded
         * via the stack.  In theory, an L1 cache miss when restoring registers
         * could lead to speculative execution with the guest's values.
         * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
         * free.  RSP and RAX are exempt as RSP is restored by hardware during
         * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
         */
1:      xor %ecx, %ecx
        xor %edx, %edx
        xor %ebx, %ebx
        xor %ebp, %ebp
        xor %esi, %esi
        xor %edi, %edi
#ifdef CONFIG_X86_64
        xor %r8d,  %r8d
        xor %r9d,  %r9d
        xor %r10d, %r10d
        xor %r11d, %r11d
        xor %r12d, %r12d
        xor %r13d, %r13d
        xor %r14d, %r14d
        xor %r15d, %r15d
#endif

        /* "POP" @regs. */
        add $WORD_SIZE, %_ASM_SP
        pop %_ASM_BX

#ifdef CONFIG_X86_64
        pop %r12
        pop %r13
        pop %r14
        pop %r15
#else
        pop %esi
        pop %edi
#endif
        pop %_ASM_BP
        ret

        /* VM-Fail.  Out-of-line to avoid a taken Jcc after VM-Exit. */
2:      mov $1, %eax
        jmp 1b
SYM_FUNC_END(__vmx_vcpu_run)


.section .text, "ax"

/**
 * vmread_error_trampoline - Trampoline from inline asm to vmread_error()
 * @field:      VMCS field encoding that failed
 * @fault:      %true if the VMREAD faulted, %false if it failed
 *
 * Save and restore volatile registers across a call to vmread_error().  Note,
 * all parameters are passed on the stack.
 */
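/*
 * Reached from the inline asm in KVM's VMREAD helpers: on a failed VMREAD,
 * the asm pushes @fault and @field and CALLs this trampoline, then pops the
 * (zeroed, see below) @fault slot into the instruction's result register.
 */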
SYM_FUNC_START(vmread_error_trampoline)
        push %_ASM_BP
        mov  %_ASM_SP, %_ASM_BP

        push %_ASM_AX
        push %_ASM_CX
        push %_ASM_DX
#ifdef CONFIG_X86_64
        push %rdi
        push %rsi
        push %r8
        push %r9
        push %r10
        push %r11
#endif
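        /*
         * Relative to the frame pointer: 0(%_ASM_BP) holds the saved RBP,
         * WORD_SIZE(%_ASM_BP) the return address, 2*WORD_SIZE(%_ASM_BP) the
         * @field argument and 3*WORD_SIZE(%_ASM_BP) the @fault argument.
         */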
#ifdef CONFIG_X86_64
        /* Load @field and @fault to arg1 and arg2 respectively. */
        mov 3*WORD_SIZE(%rbp), %_ASM_ARG2
        mov 2*WORD_SIZE(%rbp), %_ASM_ARG1
#else
        /* Parameters are passed on the stack for 32-bit (see asmlinkage). */
        push 3*WORD_SIZE(%ebp)
        push 2*WORD_SIZE(%ebp)
#endif

        call vmread_error

#ifndef CONFIG_X86_64
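        /* Discard the two 32-bit parameters pushed for the call above. */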
        add $8, %esp
#endif

        /* Zero out @fault, which will be popped into the result register. */
        _ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP)

#ifdef CONFIG_X86_64
        pop %r11
        pop %r10
        pop %r9
        pop %r8
        pop %rsi
        pop %rdi
#endif
        pop %_ASM_DX
        pop %_ASM_CX
        pop %_ASM_AX
        pop %_ASM_BP

        ret
SYM_FUNC_END(vmread_error_trampoline)

SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff)
        /*
         * Unconditionally create a stack frame; getting the correct RSP on
         * the stack (for x86-64) would take two instructions anyway, and RBP
         * can be used to restore RSP to make objtool happy (see below).
         */
        push %_ASM_BP
        mov %_ASM_SP, %_ASM_BP

#ifdef CONFIG_X86_64
        /*
         * Align RSP to a 16-byte boundary (to emulate CPU behavior) before
         * creating the synthetic interrupt stack frame for the IRQ/NMI.
         */
        and  $-16, %rsp
        push $__KERNEL_DS
        push %rbp
#endif
        pushf
        push $__KERNEL_CS
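        /*
         * Together with the return address pushed by the CALL below, this
         * forms a synthetic interrupt frame (SS, RSP, RFLAGS, CS, RIP on
         * 64-bit; RFLAGS, CS, RIP on 32-bit) for the handler's IRET to
         * unwind.
         */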
        CALL_NOSPEC _ASM_ARG1

        /*
         * "Restore" RSP from RBP, even though IRET has already unwound RSP to
         * the correct value.  objtool doesn't know the callee will IRET and,
         * without the explicit restore, thinks the stack is getting walloped.
         * Using an unwind hint is problematic due to x86-64's dynamic alignment.
         */
        mov %_ASM_BP, %_ASM_SP
        pop %_ASM_BP
        ret
SYM_FUNC_END(vmx_do_interrupt_nmi_irqoff)