linux/arch/arm64/kvm/hyp/nvhe/hyp-init.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

        .text
        .pushsection    .hyp.idmap.text, "ax"

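        /*
         * The low 11 bits of VBAR_EL2 are RES0, so the vector table
         * below must be 2kB-aligned.
         */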
        .align  11

SYM_CODE_START(__kvm_hyp_init)
        ventry  __invalid               // Synchronous EL2t
        ventry  __invalid               // IRQ EL2t
        ventry  __invalid               // FIQ EL2t
        ventry  __invalid               // Error EL2t

        ventry  __invalid               // Synchronous EL2h
        ventry  __invalid               // IRQ EL2h
        ventry  __invalid               // FIQ EL2h
        ventry  __invalid               // Error EL2h

        ventry  __do_hyp_init           // Synchronous 64-bit EL1
        ventry  __invalid               // IRQ 64-bit EL1
        ventry  __invalid               // FIQ 64-bit EL1
        ventry  __invalid               // Error 64-bit EL1

        ventry  __invalid               // Synchronous 32-bit EL1
        ventry  __invalid               // IRQ 32-bit EL1
        ventry  __invalid               // FIQ 32-bit EL1
        ventry  __invalid               // Error 32-bit EL1

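        /* None of these exceptions can be handled this early; spin forever */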
__invalid:
        b       .

        /*
         * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
         *
         * x0: SMCCC function ID
         * x1: struct kvm_nvhe_init_params PA
         */
__do_hyp_init:
        /* Check for a stub HVC call */
        cmp     x0, #HVC_STUB_HCALL_NR
        b.lo    __kvm_handle_stub_hvc

        // We only actively check bits [31:24], and everything
        // else has to be zero, which we check at build time.
#if (KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) & 0xFFFFFFFF00FFFFFF)
#error Unexpected __KVM_HOST_SMCCC_FUNC___kvm_hyp_init value
#endif

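        // The ROR/EOR sequence below is equivalent to the C check
        //
        //      if (x0 != KVM_HOST_SMCCC_FUNC(__kvm_hyp_init))
        //              return SMCCC_RET_NOT_SUPPORTED;
        //
        // done one nibble at a time: each of the ID's two non-zero
        // nibbles is rotated into bits [3:0] and cleared with EOR,
        // since the full constant cannot be encoded as a single
        // logical immediate. x0 ends up zero iff all 64 bits matched.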
        ror     x0, x0, #24
        eor     x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 24) & 0xF)
        ror     x0, x0, #4
        eor     x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 28) & 0xF)
        cbz     x0, 1f
        mov     x0, #SMCCC_RET_NOT_SUPPORTED
        eret

1:      mov     x0, x1
        mov     x3, lr
        bl      ___kvm_hyp_init                 // Clobbers x0..x2
        mov     lr, x3

        /* Hello, World! */
        mov     x0, #SMCCC_RET_SUCCESS
        eret
SYM_CODE_END(__kvm_hyp_init)

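/*
 * Host-side usage (sketch): once this vector table has been installed
 * via the stub's HVC_SET_VECTORS, the kernel issues something like
 *
 *      arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init),
 *                        virt_to_phys(params), &res);
 *
 * with params pointing at this CPU's struct kvm_nvhe_init_params
 * (see cpu_init_hyp_mode() in arch/arm64/kvm/arm.c).
 */
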
/*
 * Initialize the hypervisor in EL2.
 *
 * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
 * and leave x3 for the caller.
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START_LOCAL(___kvm_hyp_init)
alternative_if ARM64_KVM_PROTECTED_MODE
        mov_q   x1, HCR_HOST_NVHE_PROTECTED_FLAGS
        msr     hcr_el2, x1
alternative_else_nop_endif

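        /*
         * The NVHE_INIT_* offsets below are generated by asm-offsets.c
         * from struct kvm_nvhe_init_params. The fields consumed here
         * look roughly like this (see asm/kvm_asm.h for the real,
         * complete layout):
         *
         *      struct kvm_nvhe_init_params {
         *              unsigned long mair_el2;
         *              unsigned long tcr_el2;
         *              unsigned long tpidr_el2;
         *              unsigned long stack_hyp_va;
         *              phys_addr_t pgd_pa;
         *              ...
         *      };
         */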
        ldr     x1, [x0, #NVHE_INIT_TPIDR_EL2]
        msr     tpidr_el2, x1

        ldr     x1, [x0, #NVHE_INIT_STACK_HYP_VA]
        mov     sp, x1

        ldr     x1, [x0, #NVHE_INIT_MAIR_EL2]
        msr     mair_el2, x1

        ldr     x1, [x0, #NVHE_INIT_PGD_PA]
        phys_to_ttbr x2, x1
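        /* TTBR_CNP_BIT marks the page tables as Common-not-Private (FEAT_TTCNP) */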
alternative_if ARM64_HAS_CNP
        orr     x2, x2, #TTBR_CNP_BIT
alternative_else_nop_endif
        msr     ttbr0_el2, x2

 114        /*
 115         * Set the PS bits in TCR_EL2.
 116         */
 117        ldr     x0, [x0, #NVHE_INIT_TCR_EL2]
 118        tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
 119        msr     tcr_el2, x0
 120
        isb

        /* Invalidate any stale EL2 TLB entries left by the bootloader */
        tlbi    alle2
        dsb     sy

        /*
         * Preserve all the RES1 bits while setting the default flags,
         * as well as the EE bit on BE. Drop the A flag since the compiler
         * is allowed to generate unaligned accesses.
         */
        mov_q   x0, (SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
CPU_BE( orr     x0, x0, #SCTLR_ELx_EE)
alternative_if ARM64_HAS_ADDRESS_AUTH
        mov_q   x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
                     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
        orr     x0, x0, x1
alternative_else_nop_endif
        msr     sctlr_el2, x0
        isb
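        /* The MMU is now on; this code keeps executing via the identity map */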

        /*
         * Set the host vector. kimg_hyp_va converts the kernel image
         * address of __kvm_hyp_host_vector into its hyp VA.
         */
        ldr     x0, =__kvm_hyp_host_vector
        kimg_hyp_va x0, x1
        msr     vbar_el2, x0

        ret
SYM_CODE_END(___kvm_hyp_init)

/*
 * PSCI CPU_ON entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_entry)
        mov     x1, #1                          // is_cpu_on = true
        b       __kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_entry)

/*
 * PSCI CPU_SUSPEND / SYSTEM_SUSPEND entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_resume)
        mov     x1, #0                          // is_cpu_on = false
        b       __kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_resume)

/*
 * Common code for CPU entry points. Initializes EL2 state and
 * installs the hypervisor before handing over to a C handler.
 *
 * x0: struct kvm_nvhe_init_params PA
 * x1: bool is_cpu_on
 */
SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
        mov     x28, x0                         // Stash arguments
        mov     x29, x1

        /* Check that the core was booted in EL2. */
        mrs     x0, CurrentEL
        cmp     x0, #CurrentEL_EL2
        b.eq    2f

        /* The core booted in EL1. KVM cannot be initialized on it. */
1:      wfe
        wfi
        b       1b

2:      msr     SPsel, #1                       // We want to use SP_EL{1,2}

        /* Initialize EL2 CPU state to sane values. */
        init_el2_state nvhe                     // Clobbers x0..x2
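        // init_el2_state (asm/el2_setup.h) programs sane defaults into
        // the EL2 system registers (debug, timers, GICv3 traps, ...).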

        /* Enable MMU, set vectors and stack. */
        mov     x0, x28
        bl      ___kvm_hyp_init                 // Clobbers x0..x2

        /*
         * Leave the idmap: branch to kvm_host_psci_cpu_entry at its
         * hyp VA, with is_cpu_on passed in x0.
         */
        mov     x0, x29
        ldr     x1, =kvm_host_psci_cpu_entry
        kimg_hyp_va x1, x2
        br      x1
SYM_CODE_END(__kvm_hyp_init_cpu)

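/*
 * Stub hypercall handler: implements HVC_SOFT_RESTART and
 * HVC_RESET_VECTORS on behalf of the host.
 *
 * x0: stub hypercall number
 */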
SYM_CODE_START(__kvm_handle_stub_hvc)
        cmp     x0, #HVC_SOFT_RESTART
        b.ne    1f

        /* This is where we're about to jump, staying at EL2 */
        msr     elr_el2, x1
        mov     x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
        msr     spsr_el2, x0

        /* Shuffle the arguments, and don't come back */
        mov     x0, x2
        mov     x1, x3
        mov     x2, x4
        b       reset

1:      cmp     x0, #HVC_RESET_VECTORS
        b.ne    1f

        /*
         * Set the HVC_RESET_VECTORS return code before entering the common
         * path so that we do not clobber x0-x2 in case we are coming via
         * HVC_SOFT_RESTART.
         */
        mov     x0, xzr
reset:
        /* Reset kvm back to the hyp stub. */
        mrs     x5, sctlr_el2
        mov_q   x6, SCTLR_ELx_FLAGS
        bic     x5, x5, x6              // Clear SCTLR_EL2.M and friends
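        /*
         * pre_disable_mmu_workaround (asm/assembler.h) inserts an extra
         * ISB on cores whose errata require one immediately before the
         * SCTLR write that turns the MMU off.
         */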
        pre_disable_mmu_workaround
        msr     sctlr_el2, x5
        isb

alternative_if ARM64_KVM_PROTECTED_MODE
        mov_q   x5, HCR_HOST_NVHE_FLAGS
        msr     hcr_el2, x5
alternative_else_nop_endif

        /* Install stub vectors */
        adr_l   x5, __hyp_stub_vectors
        msr     vbar_el2, x5
        eret

1:      /* Bad stub call */
        mov_q   x0, HVC_STUB_ERR
        eret

SYM_CODE_END(__kvm_handle_stub_hvc)

        .popsection