linux/arch/arm64/kvm/hyp/nvhe/hyp-main.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <ascull@google.com>
 */

#include <hyp/switch.h>

#include <asm/pgtable-types.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/trap_handler.h>

DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);

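/*
 * The hypercall handlers below share one convention: arguments arrive in
 * the host context's GPRs and are fetched with DECLARE_REG() (a wrapper
 * around cpu_reg(), from nvhe/trap_handler.h), results are written back
 * through cpu_reg(), and host kernel pointers are converted to hyp VAs
 * with kern_hyp_va() before being dereferenced at EL2.
 */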
static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
{
        DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

        cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
}

static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
{
        DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

        __kvm_adjust_pc(kern_hyp_va(vcpu));
}

static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
{
        __kvm_flush_vm_context();
}

static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)
{
        DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
        DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
        DECLARE_REG(int, level, host_ctxt, 3);

        __kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
}

static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
{
        DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

        __kvm_tlb_flush_vmid(kern_hyp_va(mmu));
}

static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
{
        DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

        __kvm_flush_cpu_context(kern_hyp_va(mmu));
}

static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
{
        __kvm_timer_set_cntvoff(cpu_reg(host_ctxt, 1));
}

static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
{
        u64 tmp;

        tmp = read_sysreg_el2(SYS_SCTLR);
        tmp |= SCTLR_ELx_DSSBS;
        write_sysreg_el2(tmp, SYS_SCTLR);
}

static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
{
        cpu_reg(host_ctxt, 1) = __vgic_v3_get_gic_config();
}

static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)
{
        cpu_reg(host_ctxt, 1) = __vgic_v3_read_vmcr();
}

static void handle___vgic_v3_write_vmcr(struct kvm_cpu_context *host_ctxt)
{
        __vgic_v3_write_vmcr(cpu_reg(host_ctxt, 1));
}

static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
{
        __vgic_v3_init_lrs();
}

static void handle___kvm_get_mdcr_el2(struct kvm_cpu_context *host_ctxt)
{
        cpu_reg(host_ctxt, 1) = __kvm_get_mdcr_el2();
}

static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
{
        DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

        __vgic_v3_save_aprs(kern_hyp_va(cpu_if));
}

static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
{
        DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

        __vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
}

static void handle___pkvm_init(struct kvm_cpu_context *host_ctxt)
{
        DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
        DECLARE_REG(unsigned long, size, host_ctxt, 2);
        DECLARE_REG(unsigned long, nr_cpus, host_ctxt, 3);
        DECLARE_REG(unsigned long *, per_cpu_base, host_ctxt, 4);
        DECLARE_REG(u32, hyp_va_bits, host_ctxt, 5);

        /*
         * __pkvm_init() will return only if an error occurred, otherwise it
         * will tail-call into __pkvm_init_finalise(), which will have to
         * deal with the host context directly.
         */
        cpu_reg(host_ctxt, 1) = __pkvm_init(phys, size, nr_cpus, per_cpu_base,
                                            hyp_va_bits);
}
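
/*
 * Note on the success path (an assumption based on the comment above, not
 * verified in this file): __pkvm_init_finalise() is expected to store its
 * own result in the host context and re-enter the host directly, so the
 * cpu_reg() assignment above only ever publishes an error code.
 */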

static void handle___pkvm_cpu_set_vector(struct kvm_cpu_context *host_ctxt)
{
        DECLARE_REG(enum arm64_hyp_spectre_vector, slot, host_ctxt, 1);

        cpu_reg(host_ctxt, 1) = pkvm_cpu_set_vector(slot);
}

static void handle___pkvm_host_share_hyp(struct kvm_cpu_context *host_ctxt)
{
        DECLARE_REG(u64, pfn, host_ctxt, 1);

        cpu_reg(host_ctxt, 1) = __pkvm_host_share_hyp(pfn);
}

static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt)
{
        DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
        DECLARE_REG(size_t, size, host_ctxt, 2);
        DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);

        cpu_reg(host_ctxt, 1) = __pkvm_create_private_mapping(phys, size, prot);
}

static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
{
        cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
}

typedef void (*hcall_t)(struct kvm_cpu_context *);

#define HANDLE_FUNC(x)  [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x

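/*
 * For illustration, HANDLE_FUNC(__kvm_vcpu_run) expands to:
 *
 *      [__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run] = (hcall_t)handle___kvm_vcpu_run,
 *
 * i.e. a designated initializer, so every entry in host_hcall[] lives at
 * the index of its SMCCC function ID and unimplemented IDs remain NULL.
 */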
static const hcall_t host_hcall[] = {
        HANDLE_FUNC(__kvm_vcpu_run),
        HANDLE_FUNC(__kvm_adjust_pc),
        HANDLE_FUNC(__kvm_flush_vm_context),
        HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
        HANDLE_FUNC(__kvm_tlb_flush_vmid),
        HANDLE_FUNC(__kvm_flush_cpu_context),
        HANDLE_FUNC(__kvm_timer_set_cntvoff),
        HANDLE_FUNC(__kvm_enable_ssbs),
        HANDLE_FUNC(__vgic_v3_get_gic_config),
        HANDLE_FUNC(__vgic_v3_read_vmcr),
        HANDLE_FUNC(__vgic_v3_write_vmcr),
        HANDLE_FUNC(__vgic_v3_init_lrs),
        HANDLE_FUNC(__kvm_get_mdcr_el2),
        HANDLE_FUNC(__vgic_v3_save_aprs),
        HANDLE_FUNC(__vgic_v3_restore_aprs),
        HANDLE_FUNC(__pkvm_init),
        HANDLE_FUNC(__pkvm_cpu_set_vector),
        HANDLE_FUNC(__pkvm_host_share_hyp),
        HANDLE_FUNC(__pkvm_create_private_mapping),
        HANDLE_FUNC(__pkvm_prot_finalize),
};

static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{
        DECLARE_REG(unsigned long, id, host_ctxt, 0);
        hcall_t hfn;

        id -= KVM_HOST_SMCCC_ID(0);

        if (unlikely(id >= ARRAY_SIZE(host_hcall)))
                goto inval;

        hfn = host_hcall[id];
        if (unlikely(!hfn))
                goto inval;

        cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
        hfn(host_ctxt);

        return;
inval:
        cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
}
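
/*
 * Host-side view, as a sketch rather than code from this file: with the
 * generic SMCCC 1.1 helpers, a hypercall such as __kvm_get_mdcr_el2 would
 * look like:
 *
 *      struct arm_smccc_res res;
 *
 *      arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_get_mdcr_el2), &res);
 *
 * with res.a0 holding SMCCC_RET_SUCCESS (or SMCCC_RET_NOT_SUPPORTED) and
 * res.a1 the handler's result, mirroring the cpu_reg() writes above.
 */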

static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
{
        __kvm_hyp_host_forward_smc(host_ctxt);
}

static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
        bool handled;

        handled = kvm_host_psci_handler(host_ctxt);
        if (!handled)
                default_host_smc_handler(host_ctxt);

        /*
         * SMC was trapped: unlike HVC, a trapped SMC leaves ELR_EL2
         * pointing at the SMC instruction itself, so move ELR past the
         * current PC before returning to the host.
         */
        kvm_skip_host_instr();
}

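/*
 * handle_trap() is the C entry point for host exceptions taken to EL2; it
 * is assumed to be reached from the nVHE host vectors (host.S) once the
 * host context has been saved. Dispatch is on the exception class in
 * ESR_EL2.
 */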
void handle_trap(struct kvm_cpu_context *host_ctxt)
{
        u64 esr = read_sysreg_el2(SYS_ESR);

        switch (ESR_ELx_EC(esr)) {
        case ESR_ELx_EC_HVC64:
                handle_host_hcall(host_ctxt);
                break;
        case ESR_ELx_EC_SMC64:
                handle_host_smc(host_ctxt);
                break;
        case ESR_ELx_EC_SVE:
                /*
                 * The host used SVE: stop trapping SVE/ZCR accesses by
                 * clearing CPTR_EL2.TZ, then restore the maximum vector
                 * length for EL2.
                 */
                sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
                isb();
                sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
                break;
        case ESR_ELx_EC_IABT_LOW:
        case ESR_ELx_EC_DABT_LOW:
                handle_host_mem_abort(host_ctxt);
                break;
        default:
                BUG();
        }
}