linux/arch/arm64/include/asm/kvm_asm.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/hyp_image.h>
#include <asm/virt.h>

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)     ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)     !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ         0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP        2
#define ARM_EXCEPTION_IL          3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE    HVC_STUB_ERR
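
/*
 * For example, an exit code of
 * (ARM_EXCEPTION_TRAP | (1U << ARM_EXIT_WITH_SERROR_BIT)) decodes as:
 *
 *   ARM_SERROR_PENDING(x)    -> 1 (an SError was pending on exit)
 *   ARM_EXCEPTION_CODE(x)    -> ARM_EXCEPTION_TRAP
 *   ARM_EXCEPTION_IS_TRAP(x) -> true
 */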

#define kvm_arm_exception_type                                  \
        {ARM_EXCEPTION_IRQ,             "IRQ"           },      \
        {ARM_EXCEPTION_EL1_SERROR,      "SERROR"        },      \
        {ARM_EXCEPTION_TRAP,            "TRAP"          },      \
        {ARM_EXCEPTION_HYP_GONE,        "HYP_GONE"      }

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE     (2 * AARCH64_INSN_SIZE)

#define __SMCCC_WORKAROUND_1_SMC_SZ 36

#define KVM_HOST_SMCCC_ID(id)                                           \
        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
                           ARM_SMCCC_SMC_64,                            \
                           ARM_SMCCC_OWNER_VENDOR_HYP,                  \
                           (id))

#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)

#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init                    0
#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run                    1
#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context            2
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa          3
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid              4
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid        5
#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff           6
#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs                 7
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2         8
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr               9
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr              10
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs                11
#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2                12
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs               13
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs            14
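
/*
 * A minimal sketch of how the host is assumed to reach one of the functions
 * above on nVHE (the real dispatch helper is kvm_call_hyp_nvhe(), in
 * asm/kvm_host.h): issue an HVC whose function ID is built with
 * KVM_HOST_SMCCC_FUNC(), e.g.
 *
 *   struct arm_smccc_res res;
 *
 *   arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context), &res);
 *   // res.a0 is expected to be SMCCC_RET_SUCCESS; res.a1 carries any
 *   // return value
 */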

#ifndef __ASSEMBLY__

#include <linux/mm.h>

#define DECLARE_KVM_VHE_SYM(sym)        extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)       extern char kvm_nvhe_sym(sym)[]

/*
 * Define a pair of symbols sharing the same name but one defined in
 * VHE and the other in nVHE hyp implementations.
 */
#define DECLARE_KVM_HYP_SYM(sym)                \
        DECLARE_KVM_VHE_SYM(sym);               \
        DECLARE_KVM_NVHE_SYM(sym)

#define DECLARE_KVM_VHE_PER_CPU(type, sym)      \
        DECLARE_PER_CPU(type, sym)
#define DECLARE_KVM_NVHE_PER_CPU(type, sym)     \
        DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

#define DECLARE_KVM_HYP_PER_CPU(type, sym)      \
        DECLARE_KVM_VHE_PER_CPU(type, sym);     \
        DECLARE_KVM_NVHE_PER_CPU(type, sym)

/*
 * Compute pointer to a symbol defined in nVHE percpu region.
 * Returns NULL if percpu memory has not been allocated yet.
 */
#define this_cpu_ptr_nvhe_sym(sym)      per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
#define per_cpu_ptr_nvhe_sym(sym, cpu)                                          \
        ({                                                                      \
                unsigned long base, off;                                        \
                base = kvm_arm_hyp_percpu_base[cpu];                            \
                off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -                    \
                      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);         \
                base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;      \
        })
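
/*
 * A minimal usage sketch (assuming a datum declared with
 * DECLARE_KVM_NVHE_PER_CPU(), e.g. kvm_host_data):
 *
 *   struct kvm_host_data *hd = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu);
 *
 *   if (!hd)
 *           return -ENOMEM; // hyp percpu pages not allocated yet
 */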

#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_NVHE_SYM(sym)    sym
#define CHOOSE_HYP_SYM(sym)     CHOOSE_NVHE_SYM(sym)

/* The nVHE hypervisor shouldn't even try to access VHE symbols */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)             __nvhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)       (&__nvhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)   (&__nvhe_undefined_symbol)

#elif defined(__KVM_VHE_HYPERVISOR__)

#define CHOOSE_VHE_SYM(sym)     sym
#define CHOOSE_HYP_SYM(sym)     CHOOSE_VHE_SYM(sym)

/* The VHE hypervisor shouldn't even try to access nVHE symbols */
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)            __vhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)       (&__vhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)   (&__vhe_undefined_symbol)

#else

/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet....
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)             (is_kernel_in_hyp_mode()        \
                                           ? CHOOSE_VHE_SYM(sym)        \
                                           : CHOOSE_NVHE_SYM(sym))

#define this_cpu_ptr_hyp_sym(sym)       (is_kernel_in_hyp_mode()        \
                                           ? this_cpu_ptr(&sym)         \
                                           : this_cpu_ptr_nvhe_sym(sym))

#define per_cpu_ptr_hyp_sym(sym, cpu)   (is_kernel_in_hyp_mode()        \
                                           ? per_cpu_ptr(&sym, cpu)     \
                                           : per_cpu_ptr_nvhe_sym(sym, cpu))

#define CHOOSE_VHE_SYM(sym)     sym
#define CHOOSE_NVHE_SYM(sym)    kvm_nvhe_sym(sym)

#endif
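
/*
 * For example, since kvm_nvhe_sym() prefixes its argument with __kvm_nvhe_
 * (see asm/hyp_image.h), a kernel-proper CHOOSE_HYP_SYM(__kvm_hyp_vector)
 * resolves at run time to __kvm_hyp_vector on VHE and to
 * __kvm_nvhe___kvm_hyp_vector on nVHE.
 */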

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)                                               \
        ({                                                              \
                void *val = (ptr);                                      \
                if (!is_kernel_in_hyp_mode())                           \
                        val = lm_alias((ptr));                          \
                val;                                                    \
         })
#define kvm_ksym_ref_nvhe(sym)  kvm_ksym_ref(kvm_nvhe_sym(sym))
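
/*
 * A usage sketch: on nVHE, the kernel VA of a hyp symbol is meaningless at
 * EL2, so callers hand out its linear alias instead, e.g.
 *
 *   void *vect = kvm_ksym_ref(__kvm_hyp_vector);
 *
 * which is lm_alias(__kvm_hyp_vector) on nVHE and the plain kernel VA on VHE.
 */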

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_NVHE_SYM(__kvm_hyp_host_vector);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init          CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_host_vector   CHOOSE_NVHE_SYM(__kvm_hyp_host_vector)
#define __kvm_hyp_vector        CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

extern atomic_t arm64_el2_vector_last_slot;
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs    CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
                                     int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_enable_ssbs(void);

extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);

extern u32 __kvm_get_mdcr_el2(void);

extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];

/*
 * Obtain the PC-relative address of a kernel symbol
 * s: symbol
 *
 * The goal of this macro is to return a symbol's address based on a
 * PC-relative computation, as opposed to loading the VA from a
 * constant pool or something similar. This works well for HYP, as an
 * absolute VA is guaranteed to be wrong. Only use this if trying to
 * obtain the address of a symbol (i.e. not something you obtained by
 * following a pointer).
 */
#define hyp_symbol_addr(s)                                              \
        ({                                                              \
                typeof(s) *addr;                                        \
                asm("adrp       %0, %1\n"                               \
                    "add        %0, %0, :lo12:%1\n"                     \
                    : "=r" (addr) : "S" (&s));                          \
                addr;                                                   \
        })
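
/*
 * An illustrative usage sketch: from hyp code, take the run-time EL2
 * address of a symbol rather than its (wrong at EL2) link-time kernel VA,
 * e.g.
 *
 *   unsigned long vecs = (unsigned long)hyp_symbol_addr(__bp_harden_hyp_vecs);
 */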

#define __KVM_EXTABLE(from, to)                                         \
        "       .pushsection    __kvm_ex_table, \"a\"\n"                \
        "       .align          3\n"                                    \
        "       .long           (" #from " - .), (" #to " - .)\n"       \
        "       .popsection\n"

#define __kvm_at(at_op, addr)                                           \
( {                                                                     \
        int __kvm_at_err = 0;                                           \
        u64 spsr, elr;                                                  \
        asm volatile(                                                   \
        "       mrs     %1, spsr_el2\n"                                 \
        "       mrs     %2, elr_el2\n"                                  \
        "1:     at      "at_op", %3\n"                                  \
        "       isb\n"                                                  \
        "       b       9f\n"                                           \
        "2:     msr     spsr_el2, %1\n"                                 \
        "       msr     elr_el2, %2\n"                                  \
        "       mov     %w0, %4\n"                                      \
        "9:\n"                                                          \
        __KVM_EXTABLE(1b, 2b)                                           \
        : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)                \
        : "r" (addr), "i" (-EFAULT));                                   \
        __kvm_at_err;                                                   \
} )
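
/*
 * A usage sketch (assumed, mirroring the hyp fault path): translate a guest
 * fault address at EL1 stage 1, surviving a fault on the AT instruction
 * itself via the extable fixup above:
 *
 *   if (__kvm_at("s1e1r", far))
 *           return false; // the AT instruction itself faulted
 *   par = read_sysreg(par_el1);
 */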

#else /* __ASSEMBLY__ */

.macro get_host_ctxt reg, tmp
        adr_this_cpu \reg, kvm_host_data, \tmp
        add     \reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
        get_host_ctxt \ctxt, \vcpu
        ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro get_loaded_vcpu vcpu, ctxt
        adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
        ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
        adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
        str     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm
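
// A usage sketch (assumed; the label name is illustrative): from an EL2
// exception vector, decide whether the exception interrupted a guest or
// the host:
//
//	get_loaded_vcpu x0, x1
//	cbz	x0, handle_from_host	// no vCPU loaded: came from the host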

/*
 * KVM extable for unexpected exceptions.
 * In the same format as _asm_extable, but output to a different section so
 * that it can be mapped to EL2. The KVM version is not sorted. The caller
 * must ensure that x18 holds the hypervisor value (so that any
 * Shadow-Call-Stack instrumented code can write to it), and that SPSR_EL2
 * and ELR_EL2 are restored by the fixup.
 */
.macro  _kvm_extable, from, to
        .pushsection    __kvm_ex_table, "a"
        .align          3
        .long           (\from - .), (\to - .)
        .popsection
.endm

#define CPU_XREG_OFFSET(x)      (CPU_USER_PT_REGS + 8*(x))
#define CPU_LR_OFFSET           CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET       (CPU_LR_OFFSET + 8)

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
        str     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
        stp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
        stp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
        stp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
        stp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
        stp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
        stp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
        // We require \ctxt is not x18-x28
        ldr     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
        ldp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
        ldp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
        ldp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
        ldp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
        ldp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
        ldp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm
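
// A pairing sketch (assumed, in the spirit of __guest_enter/__guest_exit):
// the host's callee-saved registers live in its context across a guest run:
//
//	save_callee_saved_regs x1	// x1 = host context
//	...				// switch to the guest
//	restore_callee_saved_regs x1	// back on the host path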

.macro save_sp_el0 ctxt, tmp
        mrs     \tmp,   sp_el0
        str     \tmp,   [\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
        ldr     \tmp,     [\ctxt, #CPU_SP_EL0_OFFSET]
        msr     sp_el0, \tmp
.endm

#endif

#endif /* __ARM_KVM_ASM_H__ */