linux/virt/kvm/arm/hyp/vgic-v2-sr.c
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/kvm_host.h>

#include <asm/kvm_hyp.h>

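/*
 * Save the maintenance interrupt state (MISR/EISR). Reading these
 * registers over MMIO is costly, so only do it when a maintenance
 * interrupt can actually be pending.
 */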
static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu,
					    void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
	u32 eisr0, eisr1;
	int i;
	bool expect_mi;

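	/*
	 * A maintenance interrupt is expected if underflow signalling is
	 * enabled, or if any live, software-originated LR requests an EOI
	 * maintenance interrupt.
	 */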
	expect_mi = !!(cpu_if->vgic_hcr & GICH_HCR_UIE);

	for (i = 0; i < nr_lr; i++) {
		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
			continue;

		expect_mi |= (!(cpu_if->vgic_lr[i] & GICH_LR_HW) &&
			      (cpu_if->vgic_lr[i] & GICH_LR_EOI));
	}

	if (expect_mi) {
		cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);

		if (cpu_if->vgic_misr & GICH_MISR_EOI) {
			eisr0 = readl_relaxed(base + GICH_EISR0);
			if (unlikely(nr_lr > 32))
				eisr1 = readl_relaxed(base + GICH_EISR1);
			else
				eisr1 = 0;
		} else {
			eisr0 = eisr1 = 0;
		}
	} else {
		cpu_if->vgic_misr = 0;
		eisr0 = eisr1 = 0;
	}

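	/*
	 * Fold the two banked 32-bit status words into the single 64-bit
	 * vgic_eisr field, taking the CPU's byte order into account.
	 */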
#ifdef CONFIG_CPU_BIG_ENDIAN
	cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1;
#else
	cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0;
#endif
}

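/*
 * Snapshot the "empty LR" status registers, so that save_lrs() below can
 * avoid reading back list registers that no longer hold any state.
 */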
static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
	u32 elrsr0, elrsr1;

	elrsr0 = readl_relaxed(base + GICH_ELRSR0);
	if (unlikely(nr_lr > 32))
		elrsr1 = readl_relaxed(base + GICH_ELRSR1);
	else
		elrsr1 = 0;

#ifdef CONFIG_CPU_BIG_ENDIAN
	cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
#else
	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
#endif
}

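/*
 * Save the live list registers: an LR that ELRSR flags as empty only needs
 * its state bits cleared in the shadow copy, while the others are read
 * back from the hardware and then zeroed.
 */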
static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
	int i;

	for (i = 0; i < nr_lr; i++) {
		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
			continue;

		if (cpu_if->vgic_elrsr & (1UL << i)) {
			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
			continue;
		}

		cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
		writel_relaxed(0, base + GICH_LR0 + (i * 4));
	}
}

/* Save the GICv2 CPU interface state. vcpu is already in the HYP VA space. */
void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	void __iomem *base = kern_hyp_va(vgic->vctrl_base);

	/* Nothing to do if the GICH control interface was never mapped */
	if (!base)
		return;

	cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);

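	/*
	 * Only poke the rest of the MMIO interface if some LRs were live
	 * on the last world switch; otherwise just reset the shadow state.
	 */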
	if (vcpu->arch.vgic_cpu.live_lrs) {
		cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);

		save_maint_int_state(vcpu, base);
		save_elrsr(vcpu, base);
		save_lrs(vcpu, base);

		/* Disable the virtual interface until the next vcpu run */
		writel_relaxed(0, base + GICH_HCR);

		vcpu->arch.vgic_cpu.live_lrs = 0;
	} else {
		cpu_if->vgic_eisr = 0;
		cpu_if->vgic_elrsr = ~0UL;
		cpu_if->vgic_misr = 0;
		cpu_if->vgic_apr = 0;
	}
}

/* Restore the GICv2 CPU interface state. vcpu is already in the HYP VA space. */
void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
	int i, nr_lr;
	u64 live_lrs = 0;

	if (!base)
		return;

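	/* Work out which LRs actually contain pending or active state */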
	nr_lr = vcpu->arch.vgic_cpu.nr_lr;

	for (i = 0; i < nr_lr; i++)
		if (cpu_if->vgic_lr[i] & GICH_LR_STATE)
			live_lrs |= 1UL << i;

	/*
	 * Only re-enable the virtual interface and reload the LRs if there
	 * is actually some state to inject.
	 */
	if (live_lrs) {
		writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
		writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
		for (i = 0; i < nr_lr; i++) {
			if (!(live_lrs & (1UL << i)))
				continue;

			writel_relaxed(cpu_if->vgic_lr[i],
				       base + GICH_LR0 + (i * 4));
		}
	}

	writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
	vcpu->arch.vgic_cpu.live_lrs = live_lrs;
}