linux/arch/arm64/kvm/hyp/nvhe/hyp-init.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

        .text
        .pushsection    .idmap.text, "ax"

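        /*
         * Initial EL2 vector table. Only the 64-bit EL1 synchronous entry
         * (the host's HVC) is expected to be taken; VBAR_EL2 requires the
         * table to be 2kB-aligned, hence the .align directive below.
         */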
        .align  11

SYM_CODE_START(__kvm_hyp_init)
        ventry  __invalid               // Synchronous EL2t
        ventry  __invalid               // IRQ EL2t
        ventry  __invalid               // FIQ EL2t
        ventry  __invalid               // Error EL2t

        ventry  __invalid               // Synchronous EL2h
        ventry  __invalid               // IRQ EL2h
        ventry  __invalid               // FIQ EL2h
        ventry  __invalid               // Error EL2h

        ventry  __do_hyp_init           // Synchronous 64-bit EL1
        ventry  __invalid               // IRQ 64-bit EL1
        ventry  __invalid               // FIQ 64-bit EL1
        ventry  __invalid               // Error 64-bit EL1

        ventry  __invalid               // Synchronous 32-bit EL1
        ventry  __invalid               // IRQ 32-bit EL1
        ventry  __invalid               // FIQ 32-bit EL1
        ventry  __invalid               // Error 32-bit EL1

__invalid:
        b       .

        /*
         * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
         *
         * x0: SMCCC function ID
         * x1: struct kvm_nvhe_init_params PA
         */
__do_hyp_init:
        /* Check for a stub HVC call */
        cmp     x0, #HVC_STUB_HCALL_NR
        b.lo    __kvm_handle_stub_hvc

        mov     x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
        cmp     x0, x3
        b.eq    1f

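        /* Any other function ID is not supported. */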
        mov     x0, #SMCCC_RET_NOT_SUPPORTED
        eret

1:      mov     x0, x1
        mov     x3, lr
        bl      ___kvm_hyp_init                 // Clobbers x0..x2
        mov     lr, x3

        /* Hello, World! */
        mov     x0, #SMCCC_RET_SUCCESS
        eret
SYM_CODE_END(__kvm_hyp_init)

/*
 * Initialize the hypervisor in EL2.
 *
 * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
 * and leave x3 for the caller.
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START_LOCAL(___kvm_hyp_init)
        ldr     x1, [x0, #NVHE_INIT_TPIDR_EL2]
        msr     tpidr_el2, x1

        ldr     x1, [x0, #NVHE_INIT_STACK_HYP_VA]
        mov     sp, x1

        ldr     x1, [x0, #NVHE_INIT_MAIR_EL2]
        msr     mair_el2, x1

        ldr     x1, [x0, #NVHE_INIT_HCR_EL2]
        msr     hcr_el2, x1

        ldr     x1, [x0, #NVHE_INIT_VTTBR]
        msr     vttbr_el2, x1

        ldr     x1, [x0, #NVHE_INIT_VTCR]
        msr     vtcr_el2, x1

        ldr     x1, [x0, #NVHE_INIT_PGD_PA]
        phys_to_ttbr x2, x1
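        /* Set the CnP bit if the CPU supports Common-not-Private translations. */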
alternative_if ARM64_HAS_CNP
        orr     x2, x2, #TTBR_CNP_BIT
alternative_else_nop_endif
        msr     ttbr0_el2, x2

        /*
         * Set the PS bits in TCR_EL2 to match the supported physical
         * address size.
         */
        ldr     x0, [x0, #NVHE_INIT_TCR_EL2]
        tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
        msr     tcr_el2, x0

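        /* Ensure the system register writes above have taken effect. */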
        isb

        /* Invalidate any stale TLB entries left by the bootloader */
        tlbi    alle2
        tlbi    vmalls12e1
        dsb     sy

        mov_q   x0, INIT_SCTLR_EL2_MMU_ON
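        /* Also enable pointer authentication at EL2 if the CPU supports it. */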
alternative_if ARM64_HAS_ADDRESS_AUTH
        mov_q   x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
                     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
        orr     x0, x0, x1
alternative_else_nop_endif
        msr     sctlr_el2, x0
        isb

        /* Set the host vector */
        ldr     x0, =__kvm_hyp_host_vector
        msr     vbar_el2, x0

        ret
SYM_CODE_END(___kvm_hyp_init)

/*
 * PSCI CPU_ON entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_entry)
        mov     x1, #1                          // is_cpu_on = true
        b       __kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_entry)

/*
 * PSCI CPU_SUSPEND / SYSTEM_SUSPEND entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_resume)
        mov     x1, #0                          // is_cpu_on = false
        b       __kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_resume)

/*
 * Common code for CPU entry points. Initializes EL2 state and
 * installs the hypervisor before handing over to a C handler.
 *
 * x0: struct kvm_nvhe_init_params PA
 * x1: bool is_cpu_on
 */
SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
        mov     x28, x0                         // Stash arguments
        mov     x29, x1

        /* Check that the core was booted in EL2. */
        mrs     x0, CurrentEL
        cmp     x0, #CurrentEL_EL2
        b.eq    2f

        /* The core booted in EL1. KVM cannot be initialized on it. */
1:      wfe
        wfi
        b       1b

2:      msr     SPsel, #1                       // We want to use SP_EL{1,2}

        /* Initialize EL2 CPU state to sane values. */
        init_el2_state                          // Clobbers x0..x2

        /* Enable MMU, set vectors and stack. */
        mov     x0, x28
        bl      ___kvm_hyp_init                 // Clobbers x0..x2

        /* Leave the idmap and hand over to the C handler. */
        mov     x0, x29
        ldr     x1, =kvm_host_psci_cpu_entry
        br      x1
SYM_CODE_END(__kvm_hyp_init_cpu)

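/*
 * Handle the stub hypercalls (function IDs below HVC_STUB_HCALL_NR)
 * issued by the host.
 *
 * x0: HVC_SOFT_RESTART or HVC_RESET_VECTORS
 * x1: restart address (HVC_SOFT_RESTART only)
 * x2..x4: values handed over in x0..x2 at the restart address
 *         (HVC_SOFT_RESTART only)
 */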
SYM_CODE_START(__kvm_handle_stub_hvc)
        cmp     x0, #HVC_SOFT_RESTART
        b.ne    1f

        /* This is where we're about to jump, staying at EL2 */
        msr     elr_el2, x1
        mov     x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
        msr     spsr_el2, x0

        /* Shuffle the arguments, and don't come back */
        mov     x0, x2
        mov     x1, x3
        mov     x2, x4
        b       reset

1:      cmp     x0, #HVC_RESET_VECTORS
        b.ne    1f

        /*
         * Set the HVC_RESET_VECTORS return code before entering the common
         * path so that we do not clobber x0-x2 in case we are coming via
         * HVC_SOFT_RESTART.
         */
        mov     x0, xzr
reset:
        /* Reset kvm back to the hyp stub. */
        mov_q   x5, INIT_SCTLR_EL2_MMU_OFF
        pre_disable_mmu_workaround
        msr     sctlr_el2, x5
        isb

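        /* In protected mode, reset HCR_EL2 back to the standard host flags. */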
alternative_if ARM64_KVM_PROTECTED_MODE
        mov_q   x5, HCR_HOST_NVHE_FLAGS
        msr     hcr_el2, x5
alternative_else_nop_endif

        /* Install stub vectors */
        adr_l   x5, __hyp_stub_vectors
        msr     vbar_el2, x5
        eret

1:      /* Bad stub call */
        mov_q   x0, HVC_STUB_ERR
        eret

SYM_CODE_END(__kvm_handle_stub_hvc)

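/*
 * Switch to the pKVM hypervisor page-tables while running from the idmap.
 *
 * x0: struct kvm_nvhe_init_params PA
 * x1: address to return to once the new page-tables are in place
 */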
SYM_FUNC_START(__pkvm_init_switch_pgd)
        /* Turn the MMU off */
        pre_disable_mmu_workaround
        mrs     x2, sctlr_el2
        bic     x3, x2, #SCTLR_ELx_M
        msr     sctlr_el2, x3
        isb

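        /* Flush any stale EL2 TLB entries before installing the new tables. */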
        tlbi    alle2

        /* Install the new pgtables */
        ldr     x3, [x0, #NVHE_INIT_PGD_PA]
        phys_to_ttbr x4, x3
alternative_if ARM64_HAS_CNP
        orr     x4, x4, #TTBR_CNP_BIT
alternative_else_nop_endif
        msr     ttbr0_el2, x4

        /* Set the new stack pointer */
        ldr     x0, [x0, #NVHE_INIT_STACK_HYP_VA]
        mov     sp, x0

        /* And turn the MMU back on! */
        set_sctlr_el2   x2
        ret     x1
SYM_FUNC_END(__pkvm_init_switch_pgd)

        .popsection