linux/virt/kvm/arm/hyp/vgic-v2-sr.c
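
World-switch save/restore for the GICv2 virtual CPU interface. Everything
here runs at HYP (EL2): hence the __hyp_text section annotations and the
kern_hyp_va() translations before any kernel pointer is dereferenced.
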
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

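/*
 * Snapshot the Empty List Register Status Registers. A set bit in
 * GICH_ELRSR{0,1} marks a list register that holds no valid interrupt;
 * GICH_ELRSR1 is only read when more than 32 list registers exist.
 */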
static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
        int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
        u32 elrsr0, elrsr1;

        elrsr0 = readl_relaxed(base + GICH_ELRSR0);
        if (unlikely(nr_lr > 32))
                elrsr1 = readl_relaxed(base + GICH_ELRSR1);
        else
                elrsr1 = 0;

        cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
}

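/*
 * Copy the live list registers into the shadow copy and zero them in
 * hardware so that no stale state survives into the next world switch.
 */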
static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
        int i;
        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

        for (i = 0; i < used_lrs; i++) {
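                /*
                 * A list register flagged empty in ELRSR needs no MMIO
                 * read-back; just clear the state bits in the cached copy.
                 */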
                if (cpu_if->vgic_elrsr & (1UL << i))
                        cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
                else
                        cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));

                writel_relaxed(0, base + GICH_LR0 + (i * 4));
        }
}

/* vcpu is already in the HYP VA space */
void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = kern_hyp_va(vcpu->kvm);
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
        struct vgic_dist *vgic = &kvm->arch.vgic;
        void __iomem *base = kern_hyp_va(vgic->vctrl_base);
        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

        if (!base)
                return;

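        /*
         * There is nothing to save unless interrupts were actually
         * queued into the list registers for this run.
         */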
        if (used_lrs) {
                cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);

                save_elrsr(vcpu, base);
                save_lrs(vcpu, base);

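                /* Disable the virtual interface until the next entry. */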
                writel_relaxed(0, base + GICH_HCR);
        } else {
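                /* No LRs in use: mark them all empty and clear the cached APR. */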
                cpu_if->vgic_elrsr = ~0UL;
                cpu_if->vgic_apr = 0;
        }
}

/* vcpu is already in the HYP VA space */
void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = kern_hyp_va(vcpu->kvm);
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
        struct vgic_dist *vgic = &kvm->arch.vgic;
        void __iomem *base = kern_hyp_va(vgic->vctrl_base);
        int i;
        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

        if (!base)
                return;

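        /*
         * Mirror of the save path: re-enable the interface through
         * GICH_HCR, then reload the active priorities and the list
         * registers.
         */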
        if (used_lrs) {
                writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
                writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
                for (i = 0; i < used_lrs; i++) {
                        writel_relaxed(cpu_if->vgic_lr[i],
                                       base + GICH_LR0 + (i * 4));
                }
        }
}

#ifdef CONFIG_ARM64
/*
 * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
 *                                   guest.
 *
 * @vcpu: the offending vcpu
 *
 * Returns:
 *  1: GICV access successfully performed
 *  0: Not a GICV access
 * -1: Illegal GICV access
 */
int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = kern_hyp_va(vcpu->kvm);
        struct vgic_dist *vgic = &kvm->arch.vgic;
        phys_addr_t fault_ipa;
        void __iomem *addr;
        int rd;

        /*
         * Build the full address: the IPA from HPFAR is page-aligned, so
         * the offset within the page comes from the faulting VA in HFAR.
         */
        fault_ipa  = kvm_vcpu_get_fault_ipa(vcpu);
        fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

        /* If not for GICV, move on */
        if (fault_ipa <  vgic->vgic_cpu_base ||
            fault_ipa >= (vgic->vgic_cpu_base + KVM_VGIC_V2_CPU_SIZE))
                return 0;

        /* Reject anything but a 32-bit access */
        if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32))
                return -1;

        /* Not aligned? Don't bother */
        if (fault_ipa & 3)
                return -1;

        rd = kvm_vcpu_dabt_get_rd(vcpu);
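        /*
         * Turn the guest's offset within the GICV frame into the same
         * offset in the hypervisor's own mapping of the GICV interface.
         */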
        addr  = kern_hyp_va((kern_hyp_va(&kvm_vgic_global_state))->vcpu_base_va);
        addr += fault_ipa - vgic->vgic_cpu_base;

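        /*
         * Replay the access against the real GICV; the data conversion
         * helpers handle any guest/host endianness mismatch.
         */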
        if (kvm_vcpu_dabt_iswrite(vcpu)) {
                u32 data = vcpu_data_guest_to_host(vcpu,
                                                   vcpu_get_reg(vcpu, rd),
                                                   sizeof(u32));
                writel_relaxed(data, addr);
        } else {
                u32 data = readl_relaxed(addr);
                vcpu_set_reg(vcpu, rd, vcpu_data_host_to_guest(vcpu, data,
                                                               sizeof(u32)));
        }

        return 1;
}
#endif