linux/arch/arm64/kvm/hyp/hyp-entry.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>

        .text
        .pushsection    .hyp.text, "ax"

.macro do_el2_call
        /*
         * Shuffle the parameters before calling the function
         * pointed to in x0. Assumes parameters in x[1,2,3].
         */
        str     lr, [sp, #-16]!
        mov     lr, x0
        mov     x0, x1
        mov     x1, x2
        mov     x2, x3
        blr     lr
        ldr     lr, [sp], #16
.endm
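
/*
 * Note: the non-VHE host reaches this path via __kvm_call_hyp()
 * (arch/arm64/kvm/hyp.S in this kernel), which simply issues "hvc #0"
 * with the target function's kernel VA in x0 and up to three arguments
 * in x1-x3; el1_sync below turns x0 into a HYP VA with kern_hyp_va
 * before using this macro to branch to it.
 */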

el1_sync:                               // Guest trapped into EL2

        mrs     x0, esr_el2
        lsr     x0, x0, #ESR_ELx_EC_SHIFT
        cmp     x0, #ESR_ELx_EC_HVC64           // EC == HVC64?
        ccmp    x0, #ESR_ELx_EC_HVC32, #4, ne   // if not, EC == HVC32? (else keep Z set)
        b.ne    el1_trap                        // neither: handle as a regular trap

        mrs     x1, vttbr_el2           // If vttbr is valid, the guest
        cbnz    x1, el1_hvc_guest       // called HVC
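        /*
         * The host runs with VTTBR_EL2 cleared (it is zeroed on guest
         * exit, see __deactivate_vm() in switch.c), so a non-zero value
         * here means the HVC came from a guest.
         */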

        /* Here, we're pretty sure the host called HVC. */
        ldp     x0, x1, [sp], #16

        /* Check for a stub HVC call */
        cmp     x0, #HVC_STUB_HCALL_NR
        b.hs    1f

        /*
         * Compute the idmap address of __kvm_handle_stub_hvc and
         * jump there. Since we use kimage_voffset, do not use the
         * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
         * (by loading it from the constant pool).
         *
         * Preserve x0-x4, which may contain stub parameters.
         */
        ldr     x5, =__kvm_handle_stub_hvc
        ldr_l   x6, kimage_voffset

        /* x5 = __pa(x5) */
        sub     x5, x5, x6
        br      x5

1:
        /*
         * Perform the EL2 call
         */
        kern_hyp_va     x0
        do_el2_call

        eret
        sb
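
        /*
         * The "sb" macro (asm/assembler.h) emits a speculation barrier
         * so that nothing can be speculatively executed past the eret;
         * the same pattern follows every eret in this file.
         */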

el1_hvc_guest:
        /*
         * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
         * The workaround has already been applied on the host,
         * so let's quickly get back to the guest. We don't bother
         * restoring x1, as it can be clobbered anyway.
         */
        ldr     x1, [sp]                                // Guest's x0
        eor     w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
        cbz     w1, wa_epilogue

        /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
        eor     w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
                          ARM_SMCCC_ARCH_WORKAROUND_2)
        cbnz    w1, el1_trap
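
        /*
         * The first eor left w1 == guest_x0 ^ ARM_SMCCC_ARCH_WORKAROUND_1;
         * xoring with (WORKAROUND_1 ^ WORKAROUND_2) cancels the first
         * constant, so falling through here (w1 == 0) means the guest's
         * x0 was exactly ARM_SMCCC_ARCH_WORKAROUND_2.
         */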

#ifdef CONFIG_ARM64_SSBD
alternative_cb  arm64_enable_wa2_handling
        b       wa2_end
alternative_cb_end
        get_vcpu_ptr    x2, x0
        ldr     x0, [x2, #VCPU_WORKAROUND_FLAGS]

        // Sanitize the argument and update the guest flags
        ldr     x1, [sp, #8]                    // Guest's x1
        clz     w1, w1                          // Murphy's device:
        lsr     w1, w1, #5                      // w1 = !!w1 without using
        eor     w1, w1, #1                      // the flags...
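        // clz yields 32 when w1 == 0 and less than 32 otherwise;
        // ">> 5" turns that into 1 or 0, and the eor inverts it, so
        // w1 = !!(guest x1), the sanitized enable/disable argument
        // expected by WORKAROUND_2, computed without touching NZCV.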
        bfi     x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
        str     x0, [x2, #VCPU_WORKAROUND_FLAGS]

        /* Check that we actually need to perform the call */
        hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
        cbz     x0, wa2_end

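        /*
         * The sanitized bit is still in w1, which is exactly the
         * argument ARM_SMCCC_ARCH_WORKAROUND_2 takes; hand the request
         * to the firmware over the SMC conduit.
         */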
        mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2
        smc     #0

        /* Don't leak data from the SMC call */
        mov     x3, xzr
wa2_end:
        mov     x2, xzr
        mov     x1, xzr
#endif

wa_epilogue:
        mov     x0, xzr
        add     sp, sp, #16
        eret
        sb

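/*
 * All of the handlers below tail-call __guest_exit (entry.S), which
 * expects the exit code in x0, the vcpu pointer in x1, and the guest's
 * x0/x1 still on the stack where the vector preamble pushed them.
 */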
el1_trap:
        get_vcpu_ptr    x1, x0
        mov     x0, #ARM_EXCEPTION_TRAP
        b       __guest_exit

el1_irq:
        get_vcpu_ptr    x1, x0
        mov     x0, #ARM_EXCEPTION_IRQ
        b       __guest_exit

el1_error:
        get_vcpu_ptr    x1, x0
        mov     x0, #ARM_EXCEPTION_EL1_SERROR
        b       __guest_exit

el2_sync:
        /* Check for illegal exception return, otherwise panic */
        mrs     x0, spsr_el2

 148        /* if this was something else, then panic! */
 149        tst     x0, #PSR_IL_BIT
 150        b.eq    __hyp_panic
 151
 152        /* Let's attempt a recovery from the illegal exception return */
 153        get_vcpu_ptr    x1, x0
 154        mov     x0, #ARM_EXCEPTION_IL
 155        b       __guest_exit
 156
 157
 158el2_error:
 159        ldp     x0, x1, [sp], #16
 160
 161        /*
 162         * Only two possibilities:
 163         * 1) Either we come from the exit path, having just unmasked
 164         *    PSTATE.A: change the return code to an EL2 fault, and
 165         *    carry on, as we're already in a sane state to handle it.
 166         * 2) Or we come from anywhere else, and that's a bug: we panic.
 167         *
 168         * For (1), x0 contains the original return code and x1 doesn't
 169         * contain anything meaningful at that stage. We can reuse them
 170         * as temp registers.
 171         * For (2), who cares?
 172         */
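        /*
         * Case (1) is identified by ELR_EL2 pointing at one of the two
         * labels that bracket the isb executed right after PSTATE.A is
         * unmasked in __guest_exit (see abort_guest_exit_start/end in
         * entry.S).
         */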
        mrs     x0, elr_el2
        adr     x1, abort_guest_exit_start
        cmp     x0, x1
        adr     x1, abort_guest_exit_end
        ccmp    x0, x1, #4, ne
        b.ne    __hyp_panic
        mov     x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
        eret
        sb

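/*
 * Build a fake exception return: SPSR_EL2 selects EL1h with all of DAIF
 * masked and ELR_EL2 points at the kernel's panic(), so the eret drops
 * back into the host to die there. This is used by hyp_panic() on the
 * non-VHE path.
 */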
ENTRY(__hyp_do_panic)
        mov     lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
                      PSR_MODE_EL1h)
        msr     spsr_el2, lr
        ldr     lr, =panic
        msr     elr_el2, lr
        eret
        sb
ENDPROC(__hyp_do_panic)

ENTRY(__hyp_panic)
        get_host_ctxt x0, x1
        b       hyp_panic
ENDPROC(__hyp_panic)

.macro invalid_vector   label, target = __hyp_panic
        .align  2
\label:
        b \target
ENDPROC(\label)
.endm

        /* None of these should ever happen */
        invalid_vector  el2t_sync_invalid
        invalid_vector  el2t_irq_invalid
        invalid_vector  el2t_fiq_invalid
        invalid_vector  el2t_error_invalid
        invalid_vector  el2h_sync_invalid
        invalid_vector  el2h_irq_invalid
        invalid_vector  el2h_fiq_invalid
        invalid_vector  el1_fiq_invalid

        .ltorg

        .align 11

.macro check_preamble_length start, end
/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
        .error "KVM vector preamble length mismatch"
.endif
.endm
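
/*
 * The "preamble" is the two instructions (esb + stp) that open every
 * valid_vect entry below; KVM_VECTOR_PREAMBLE must match their size so
 * that kvm_patch_vector_branch() can emit a replacement preamble and
 * then branch past this one (see the hyp_ventry comment further down).
 */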

.macro valid_vect target
        .align 7
661:
        esb
        stp     x0, x1, [sp, #-16]!
662:
        b       \target

check_preamble_length 661b, 662b
.endm

.macro invalid_vect target
        .align 7
661:
        b       \target
        nop
662:
        ldp     x0, x1, [sp], #16
        b       \target

check_preamble_length 661b, 662b
.endm

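/*
 * Standard AArch64 vector layout: 16 entries of 128 bytes each
 * (.align 7), grouped as EL2t, EL2h, 64-bit EL1 and 32-bit EL1. The
 * table itself must be 2KB aligned (the .align 11 above) before its
 * address can be programmed into VBAR_EL2.
 */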
ENTRY(__kvm_hyp_vector)
        invalid_vect    el2t_sync_invalid       // Synchronous EL2t
        invalid_vect    el2t_irq_invalid        // IRQ EL2t
        invalid_vect    el2t_fiq_invalid        // FIQ EL2t
        invalid_vect    el2t_error_invalid      // Error EL2t

        valid_vect      el2_sync                // Synchronous EL2h
        invalid_vect    el2h_irq_invalid        // IRQ EL2h
        invalid_vect    el2h_fiq_invalid        // FIQ EL2h
        valid_vect      el2_error               // Error EL2h

        valid_vect      el1_sync                // Synchronous 64-bit EL1
        valid_vect      el1_irq                 // IRQ 64-bit EL1
        invalid_vect    el1_fiq_invalid         // FIQ 64-bit EL1
        valid_vect      el1_error               // Error 64-bit EL1

        valid_vect      el1_sync                // Synchronous 32-bit EL1
        valid_vect      el1_irq                 // IRQ 32-bit EL1
        invalid_vect    el1_fiq_invalid         // FIQ 32-bit EL1
        valid_vect      el1_error               // Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
        .align 7
1:      esb
        .rept 26
        nop
        .endr
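/*
 * 1 (esb) + 26 (nops) + 5 (patchable sequence below) = 32 instructions,
 * i.e. exactly one 128-byte vector entry. The esb/nop area at the head
 * of each entry is also where the cpu_errata code may copy a BP
 * hardening sequence such as __smccc_workaround_1_smc, see the end of
 * this file.
 */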
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS. The first vector must always run the preamble.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp  x0, x1, [sp, #-16]!
 * movz x0, #(addr & 0xffff)
 * movk x0, #((addr >> 16) & 0xffff), lsl #16
 * movk x0, #((addr >> 32) & 0xffff), lsl #32
 * br   x0
 *
 * Where:
 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb  kvm_patch_vector_branch
        stp     x0, x1, [sp, #-16]!
        b       __kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
        nop
        nop
        nop
alternative_cb_end
.endm

.macro generate_vectors
0:
        .rept 16
        hyp_ventry
        .endr
        .org 0b + SZ_2K         // Safety measure
.endm

        .align  11
ENTRY(__bp_harden_hyp_vecs_start)
        .rept BP_HARDEN_EL2_SLOTS
        generate_vectors
        .endr
ENTRY(__bp_harden_hyp_vecs_end)

        .popsection

ENTRY(__smccc_workaround_1_smc_start)
        esb
        sub     sp, sp, #(8 * 4)
        stp     x2, x3, [sp, #(8 * 0)]
        stp     x0, x1, [sp, #(8 * 2)]
        mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_1
        smc     #0
        ldp     x2, x3, [sp, #(8 * 0)]
        ldp     x0, x1, [sp, #(8 * 2)]
        add     sp, sp, #(8 * 4)
ENTRY(__smccc_workaround_1_smc_end)
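
/*
 * This sequence is never executed in place: the cpu_errata code copies
 * everything between the _start and _end labels over the head of each
 * 128-byte entry of a __bp_harden_hyp_vecs slot, from where it falls
 * through the remaining nops into the patched branch at the end of the
 * entry. It must therefore stay position independent and fit within the
 * esb/nop area reserved by hyp_ventry.
 */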
#endif