/* linux/arch/arm64/kvm/hyp/nvhe/hyp-main.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2020 - Google Inc
   4 * Author: Andrew Scull <ascull@google.com>
   5 */
   6
   7#include <hyp/switch.h>
   8
   9#include <asm/kvm_asm.h>
  10#include <asm/kvm_emulate.h>
  11#include <asm/kvm_host.h>
  12#include <asm/kvm_hyp.h>
  13#include <asm/kvm_mmu.h>
  14
  15#include <kvm/arm_hypercalls.h>
  16
  17static void handle_host_hcall(unsigned long func_id,
  18                              struct kvm_cpu_context *host_ctxt)
  19{
  20        unsigned long ret = 0;
  21
  22        switch (func_id) {
  23        case KVM_HOST_SMCCC_FUNC(__kvm_vcpu_run): {
  24                unsigned long r1 = host_ctxt->regs.regs[1];
  25                struct kvm_vcpu *vcpu = (struct kvm_vcpu *)r1;
  26
  27                ret = __kvm_vcpu_run(kern_hyp_va(vcpu));
  28                break;
  29        }
  30        case KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context):
  31                __kvm_flush_vm_context();
  32                break;
  33        case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_vmid_ipa): {
  34                unsigned long r1 = host_ctxt->regs.regs[1];
  35                struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;
  36                phys_addr_t ipa = host_ctxt->regs.regs[2];
  37                int level = host_ctxt->regs.regs[3];
  38
  39                __kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
  40                break;
  41        }
  42        case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_vmid): {
  43                unsigned long r1 = host_ctxt->regs.regs[1];
  44                struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;
  45
  46                __kvm_tlb_flush_vmid(kern_hyp_va(mmu));
  47                break;
  48        }
  49        case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_local_vmid): {
  50                unsigned long r1 = host_ctxt->regs.regs[1];
  51                struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;
  52
  53                __kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
  54                break;
  55        }
  56        case KVM_HOST_SMCCC_FUNC(__kvm_timer_set_cntvoff): {
  57                u64 cntvoff = host_ctxt->regs.regs[1];
  58
  59                __kvm_timer_set_cntvoff(cntvoff);
  60                break;
  61        }
  62        case KVM_HOST_SMCCC_FUNC(__kvm_enable_ssbs):
  63                __kvm_enable_ssbs();
  64                break;
  65        case KVM_HOST_SMCCC_FUNC(__vgic_v3_get_ich_vtr_el2):
  66                ret = __vgic_v3_get_ich_vtr_el2();
  67                break;
  68        case KVM_HOST_SMCCC_FUNC(__vgic_v3_read_vmcr):
  69                ret = __vgic_v3_read_vmcr();
  70                break;
  71        case KVM_HOST_SMCCC_FUNC(__vgic_v3_write_vmcr): {
  72                u32 vmcr = host_ctxt->regs.regs[1];
  73
  74                __vgic_v3_write_vmcr(vmcr);
  75                break;
  76        }
  77        case KVM_HOST_SMCCC_FUNC(__vgic_v3_init_lrs):
  78                __vgic_v3_init_lrs();
  79                break;
  80        case KVM_HOST_SMCCC_FUNC(__kvm_get_mdcr_el2):
  81                ret = __kvm_get_mdcr_el2();
  82                break;
  83        case KVM_HOST_SMCCC_FUNC(__vgic_v3_save_aprs): {
  84                unsigned long r1 = host_ctxt->regs.regs[1];
  85                struct vgic_v3_cpu_if *cpu_if = (struct vgic_v3_cpu_if *)r1;
  86
  87                __vgic_v3_save_aprs(kern_hyp_va(cpu_if));
  88                break;
  89        }
  90        case KVM_HOST_SMCCC_FUNC(__vgic_v3_restore_aprs): {
  91                unsigned long r1 = host_ctxt->regs.regs[1];
  92                struct vgic_v3_cpu_if *cpu_if = (struct vgic_v3_cpu_if *)r1;
  93
  94                __vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
  95                break;
  96        }
  97        default:
  98                /* Invalid host HVC. */
  99                host_ctxt->regs.regs[0] = SMCCC_RET_NOT_SUPPORTED;
 100                return;
 101        }
 102
 103        host_ctxt->regs.regs[0] = SMCCC_RET_SUCCESS;
 104        host_ctxt->regs.regs[1] = ret;
 105}
 106
 107void handle_trap(struct kvm_cpu_context *host_ctxt)
 108{
 109        u64 esr = read_sysreg_el2(SYS_ESR);
 110        unsigned long func_id;
 111
 112        if (ESR_ELx_EC(esr) != ESR_ELx_EC_HVC64)
 113                hyp_panic();
 114
 115        func_id = host_ctxt->regs.regs[0];
 116        handle_host_hcall(func_id, host_ctxt);
 117}
 118