1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/jump_label.h>
18
19#include <asm/kvm_asm.h>
20#include <asm/kvm_hyp.h>
21
22__asm__(".arch_extension virt");
23
24
25
26
27
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
{
	u32 val;

	/*
	 * Configure HYP-mode traps for guest entry.
	 *
	 * HCPTR.TCP(10)/TCP(11) set below trap VFP/Advanced SIMD accesses
	 * to HYP so FP state can be switched lazily.  NOTE(review): on
	 * ARMv7 such CP10/11 traps are presumably only taken to HYP when
	 * the access would not already trap to SVC, which is why FPEXC.EN
	 * must be forced on here — confirm against the ARM ARM.
	 *
	 * The host's FPEXC is returned through *fpexc_host so the caller
	 * can restore it on exit (see __kvm_vcpu_run).
	 */
	val = read_sysreg(VFP_FPEXC);
	*fpexc_host = val;
	if (!(val & FPEXC_EN)) {
		write_sysreg(val | FPEXC_EN, VFP_FPEXC);
		/* Make the FPEXC.EN write take effect before the traps below. */
		isb();
	}

	/* Guest hypervisor configuration plus any pended virtual IRQ/FIQ lines. */
	write_sysreg(vcpu->arch.hcr | vcpu->arch.irq_lines, HCR);

	/* Trap guest accesses to coprocessor 15 (HSTR) and trace/FP (HCPTR). */
	write_sysreg(HSTR_T(15), HSTR);
	write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
	/* Trap performance-monitor accesses; preserve the other HDCR bits. */
	val = read_sysreg(HDCR);
	write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR);
}
53
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	u32 val;

	/*
	 * Undo the trap configuration installed by __activate_traps,
	 * returning the CPU to its host (non-trapping) HYP configuration.
	 *
	 * If we entered the guest with a virtual abort pending (HCR.VA),
	 * re-read HCR first: the hardware may have cleared VA when the
	 * abort was delivered, and the cached copy in vcpu->arch.hcr must
	 * reflect that so the abort is not re-injected on the next entry.
	 */
	if (vcpu->arch.hcr & HCR_VA)
		vcpu->arch.hcr = read_sysreg(HCR);

	write_sysreg(0, HCR);
	write_sysreg(0, HSTR);
	/* Clear only the PMU trap bits we set; leave the rest of HDCR alone. */
	val = read_sysreg(HDCR);
	write_sysreg(val & ~(HDCR_TPM | HDCR_TPMCR), HDCR);
	write_sysreg(0, HCPTR);
}
73
/*
 * Point stage-2 translation at this guest (VTTBR) and expose the
 * virtualized main ID register (VPIDR) cached in the vcpu.
 * The kvm pointer is a kernel VA and must be converted to a HYP VA
 * before it can be dereferenced in HYP mode.
 */
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	write_sysreg(kvm->arch.vttbr, VTTBR);
	write_sysreg(vcpu->arch.midr, VPIDR);
}
80
/*
 * Detach the guest's stage-2 translation (VTTBR = 0 also serves as the
 * "no VM loaded" marker tested in __hyp_panic) and restore VPIDR to the
 * physical MIDR value.
 */
static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, VTTBR);
	write_sysreg(read_sysreg(MIDR), VPIDR);
}
86
87
88static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
89{
90 if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
91 __vgic_v3_save_state(vcpu);
92 else
93 __vgic_v2_save_state(vcpu);
94}
95
96static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
97{
98 if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
99 __vgic_v3_restore_state(vcpu);
100 else
101 __vgic_v2_restore_state(vcpu);
102}
103
/*
 * Capture the fault syndrome/address registers for the exit we just took
 * so the (non-HYP) exit handler can decode it later.
 *
 * Returns true when the fault information was recorded (or none was
 * needed), false when the guest must be re-entered because the fault
 * has been resolved underneath us (see the translation race below).
 */
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u32 hsr = read_sysreg(HSR);
	u8 ec = hsr >> HSR_EC_SHIFT;
	u32 hpfar, far;

	vcpu->arch.fault.hsr = hsr;

	/* Only instruction/data aborts carry a fault address to record. */
	if (ec == HSR_EC_IABT)
		far = read_sysreg(HIFAR);
	else if (ec == HSR_EC_DABT)
		far = read_sysreg(HDFAR);
	else
		return true;

	/*
	 * HPFAR is only guaranteed valid for stage-2 faults where the
	 * hardware performed the stage-1 walk.  For a stage-1 permission
	 * fault (not on a page-table walk) we must re-derive the IPA by
	 * performing a stage-1 address translation ourselves (ATS1CPR).
	 *
	 * PAR is saved and restored around the AT operation because the
	 * guest owns its contents.  If the translation now fails
	 * (PAR.F, bit 0, set) the guest's stage-1 tables changed under
	 * us — tell the caller to simply re-enter the guest.
	 */
	if (!(hsr & HSR_DABT_S1PTW) && (hsr & HSR_FSC_TYPE) == FSC_PERM) {
		u64 par, tmp;

		par = read_sysreg(PAR);
		write_sysreg(far, ATS1CPR);
		/* The AT result is only visible in PAR after an ISB. */
		isb();

		tmp = read_sysreg(PAR);
		write_sysreg(par, PAR);

		if (unlikely(tmp & 1))
			return false;

		/* Repack the 64-bit PAR's PA[39:12] into HPFAR layout (PA >> 8). */
		hpfar = ((tmp >> 12) & ((1UL << 28) - 1)) << 4;
	} else {
		hpfar = read_sysreg(HPFAR);
	}

	vcpu->arch.fault.hxfar = far;
	vcpu->arch.fault.hpfar = hpfar;
	return true;
}
153
/*
 * The world switch: save host state, install the guest's traps, VM
 * context and register state, run the guest until it traps, then
 * restore the host.  Runs entirely in HYP mode.
 *
 * Returns the exit code produced by __guest_enter (ARM_EXCEPTION_*).
 */
int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;
	u32 fpexc;

	/* All pointers must be HYP VAs from here on. */
	vcpu = kern_hyp_va(vcpu);
	/* Stash the vcpu so __hyp_panic can find it if things go wrong. */
	write_sysreg(vcpu, HTPIDR);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_state(host_ctxt);
	__banked_save_state(host_ctxt);

	/* fpexc receives the host's FPEXC for restoration on the way out. */
	__activate_traps(vcpu, &fpexc);
	__activate_vm(vcpu);

	__vgic_restore_state(vcpu);
	__timer_restore_state(vcpu);

	__sysreg_restore_state(guest_ctxt);
	__banked_restore_state(guest_ctxt);

	/* Jump in the fire! */
again:
	exit_code = __guest_enter(vcpu, host_ctxt);

	/*
	 * A false return from __populate_fault_info means the fault
	 * resolved itself (guest page tables changed under the stage-1
	 * walk) — re-enter the guest without going back to the host.
	 */
	if (exit_code == ARM_EXCEPTION_HVC && !__populate_fault_info(vcpu))
		goto again;

	/* Sample before __deactivate_traps re-enables FP trapping config. */
	fp_enabled = __vfp_enabled();

	__banked_save_state(guest_ctxt);
	__sysreg_save_state(guest_ctxt);
	__timer_save_state(vcpu);
	__vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__banked_restore_state(host_ctxt);
	__sysreg_restore_state(host_ctxt);

	/*
	 * Lazy VFP switch: only if the guest actually touched the FP unit
	 * (and thus un-trapped it) do we need to swap VFP register state.
	 */
	if (fp_enabled) {
		__vfp_save_state(&guest_ctxt->vfp);
		__vfp_restore_state(&host_ctxt->vfp);
	}

	/* Put back the host's FPEXC saved by __activate_traps. */
	write_sysreg(fpexc, VFP_FPEXC);

	return exit_code;
}
210
/*
 * Panic format strings, indexed by the ARM_EXCEPTION_* cause handed to
 * __hyp_panic.  Each takes two %08x arguments: the faulting PC (ELR_hyp)
 * and either SPSR or, for data aborts, the fault address (HDFAR).
 */
static const char * const __hyp_panic_string[] = {
	[ARM_EXCEPTION_RESET] = "\nHYP panic: RST PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_UNDEFINED] = "\nHYP panic: UNDEF PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_SOFTWARE] = "\nHYP panic: SVC PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_PREF_ABORT] = "\nHYP panic: PABRT PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_DATA_ABORT] = "\nHYP panic: DABRT PC:%08x ADDR:%08x",
	[ARM_EXCEPTION_IRQ] = "\nHYP panic: IRQ PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_FIQ] = "\nHYP panic: FIQ PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_HVC] = "\nHYP panic: HVC PC:%08x CPSR:%08x",
};
221
/*
 * Fatal-error path for exceptions taken while in HYP mode.  Collects the
 * faulting PC and a second diagnostic word, unwinds any guest context
 * that was loaded, and hands off to __hyp_do_panic.  Never returns.
 */
void __hyp_text __noreturn __hyp_panic(int cause)
{
	u32 elr = read_special(ELR_hyp);
	u32 val;

	/* Data aborts report the fault address; everything else reports SPSR. */
	if (cause == ARM_EXCEPTION_DATA_ABORT)
		val = read_sysreg(HDFAR);
	else
		val = read_special(SPSR);

	/*
	 * A non-zero VTTBR means we panicked with a guest loaded
	 * (__deactivate_vm zeroes it): recover the vcpu stashed in HTPIDR
	 * by __kvm_vcpu_run and restore the host context so the panic
	 * path runs in a sane environment.
	 */
	if (read_sysreg(VTTBR)) {
		struct kvm_vcpu *vcpu;
		struct kvm_cpu_context *host_ctxt;

		vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_state(host_ctxt);
	}

	/* Call panic for real */
	__hyp_do_panic(__hyp_panic_string[cause], elr, val);

	unreachable();
}
248