/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/compiler.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
26static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
27{
28 struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
29 int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
30 u32 elrsr0, elrsr1;
31
32 elrsr0 = readl_relaxed(base + GICH_ELRSR0);
33 if (unlikely(nr_lr > 32))
34 elrsr1 = readl_relaxed(base + GICH_ELRSR1);
35 else
36 elrsr1 = 0;
37
38 cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
39}
41static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
42{
43 struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
44 int i;
45 u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
46
47 for (i = 0; i < used_lrs; i++) {
48 if (cpu_if->vgic_elrsr & (1UL << i))
49 cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
50 else
51 cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
52
53 writel_relaxed(0, base + GICH_LR0 + (i * 4));
54 }
55}


58void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
59{
60 struct kvm *kvm = kern_hyp_va(vcpu->kvm);
61 struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
62 struct vgic_dist *vgic = &kvm->arch.vgic;
63 void __iomem *base = kern_hyp_va(vgic->vctrl_base);
64 u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
65
66 if (!base)
67 return;
68
69 if (used_lrs) {
70 cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);
71
72 save_elrsr(vcpu, base);
73 save_lrs(vcpu, base);
74
75 writel_relaxed(0, base + GICH_HCR);
76 } else {
77 cpu_if->vgic_elrsr = ~0UL;
78 cpu_if->vgic_apr = 0;
79 }
80}


83void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
84{
85 struct kvm *kvm = kern_hyp_va(vcpu->kvm);
86 struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
87 struct vgic_dist *vgic = &kvm->arch.vgic;
88 void __iomem *base = kern_hyp_va(vgic->vctrl_base);
89 int i;
90 u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
91
92 if (!base)
93 return;
94
95 if (used_lrs) {
96 writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
97 writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
98 for (i = 0; i < used_lrs; i++) {
99 writel_relaxed(cpu_if->vgic_lr[i],
100 base + GICH_LR0 + (i * 4));
101 }
102 }
103}
#ifdef CONFIG_ARM64
/*
 * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of
 *				     the guest.
 *
 * @vcpu: the offending vcpu
 *
 * Returns:
 *  1: GICV access successfully performed
 *  0: Not a GICV access
 * -1: Illegal GICV access
 */
117int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
118{
119 struct kvm *kvm = kern_hyp_va(vcpu->kvm);
120 struct vgic_dist *vgic = &kvm->arch.vgic;
121 phys_addr_t fault_ipa;
122 void __iomem *addr;
123 int rd;
124
125
126 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
127 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
128
129
130 if (fault_ipa < vgic->vgic_cpu_base ||
131 fault_ipa >= (vgic->vgic_cpu_base + KVM_VGIC_V2_CPU_SIZE))
132 return 0;
133
134
135 if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32))
136 return -1;
137
138
139 if (fault_ipa & 3)
140 return -1;
141
142 rd = kvm_vcpu_dabt_get_rd(vcpu);
143 addr = kern_hyp_va((kern_hyp_va(&kvm_vgic_global_state))->vcpu_base_va);
144 addr += fault_ipa - vgic->vgic_cpu_base;
145
146 if (kvm_vcpu_dabt_iswrite(vcpu)) {
147 u32 data = vcpu_data_guest_to_host(vcpu,
148 vcpu_get_reg(vcpu, rd),
149 sizeof(u32));
150 writel_relaxed(data, addr);
151 } else {
152 u32 data = readl_relaxed(addr);
153 vcpu_set_reg(vcpu, rd, vcpu_data_host_to_guest(vcpu, data,
154 sizeof(u32)));
155 }
156
157 return 1;
158}
#endif
