#ifndef __ARM_KVM_HOST_H__
#define __ARM_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/fpstate.h>
#include <kvm/arm_arch_timer.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 32
#define KVM_HAVE_ONE_REG
#define KVM_HALT_POLL_NS_DEFAULT 500000	/* 500us */

#define KVM_VCPU_MAX_FEATURES 2

#include <kvm/arm_vgic.h>
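/*
 * The maximum number of VCPUs depends on the interrupt controller model:
 * a virtual GICv2 is architecturally limited to 8 CPU interfaces, while a
 * virtual GICv3 supports many more.
 */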
#ifdef CONFIG_ARM_GIC_V3
#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
#else
#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
#endif

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)

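/*
 * Static key tracking whether any VM on this host uses a userspace irqchip
 * instead of the in-kernel VGIC, so the common paths stay cheap otherwise.
 */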
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);

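/* Per-VM state: the stage-2 translation context plus the in-kernel VGIC. */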
struct kvm_arch {
	/* VTTBR value associated with the below pgd and vmid */
	u64	vttbr;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/* The VMID generation used for the virt. memory system */
	u64	vmid_gen;
	u32	vmid;

	/* Stage-2 page table */
	pgd_t *pgd;

	/* Interrupt controller */
	struct vgic_dist vgic;
	int max_vcpus;
};

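/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */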
#define KVM_NR_MEM_OBJS	40

struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

struct kvm_vcpu_fault_info {
	u32 hsr;		/* Hyp Syndrome Register */
	u32 hxfar;		/* Hyp Data/Inst. Fault Address Register */
	u32 hpfar;		/* Hyp IPA Fault Address Register */
};

/* 0 is reserved as an invalid value. */
enum vcpu_sysreg {
	__INVALID_SYSREG__,
	c0_MPIDR,		/* MultiProcessor ID Register */
	c0_CSSELR,		/* Cache Size Selection Register */
	c1_SCTLR,		/* System Control Register */
	c1_ACTLR,		/* Auxiliary Control Register */
	c1_CPACR,		/* Coprocessor Access Control */
	c2_TTBR0,		/* Translation Table Base Register 0 */
	c2_TTBR0_high,		/* TTBR0 top 32 bits */
	c2_TTBR1,		/* Translation Table Base Register 1 */
	c2_TTBR1_high,		/* TTBR1 top 32 bits */
	c2_TTBCR,		/* Translation Table Base Control Register */
	c3_DACR,		/* Domain Access Control Register */
	c5_DFSR,		/* Data Fault Status Register */
	c5_IFSR,		/* Instruction Fault Status Register */
	c5_ADFSR,		/* Auxiliary Data Fault Status Register */
	c5_AIFSR,		/* Auxiliary Instruction Fault Status Register */
	c6_DFAR,		/* Data Fault Address Register */
	c6_IFAR,		/* Instruction Fault Address Register */
	c7_PAR,			/* Physical Address Register */
	c7_PAR_high,		/* PAR top 32 bits */
	c9_L2CTLR,		/* Cortex A15/A7 L2 Control Register */
	c10_PRRR,		/* Primary Region Remap Register */
	c10_NMRR,		/* Normal Memory Remap Register */
	c12_VBAR,		/* Vector Base Address Register */
	c13_CID,		/* Context ID Register */
	c13_TID_URW,		/* Thread ID, User R/W */
	c13_TID_URO,		/* Thread ID, User R/O */
	c13_TID_PRIV,		/* Thread ID, Privileged */
	c14_CNTKCTL,		/* Timer Control Register (PL1) */
	c10_AMAIR0,		/* Auxiliary Memory Attribute Indirection Reg0 */
	c10_AMAIR1,		/* Auxiliary Memory Attribute Indirection Reg1 */
	NR_CP15_REGS		/* Number of regs (last + 1) */
};

struct kvm_cpu_context {
	struct kvm_regs	gp_regs;
	struct vfp_hard_struct vfp;
	u32 cp15[NR_CP15_REGS];
};

typedef struct kvm_cpu_context kvm_cpu_context_t;

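/* Per-VCPU state: guest context, trap configuration, and device state. */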
struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	int target; /* Processor target */
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* The CPU type we expose to the VM */
	u32 midr;

	/* HYP trapping configuration */
	u32 hcr;

	/* Interrupt related fields */
	u32 irq_lines;		/* IRQ and FIQ levels */

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Host FP context */
	kvm_cpu_context_t *host_cpu_context;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* IO related fields */
	struct kvm_decode mmio_decode;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Detect first run of a vcpu */
	bool has_run_once;
};

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

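/* Shorthand accessor for a vcpu's shadow cp15 register file. */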
#define vcpu_cp15(v,r)	(v)->arch.ctxt.cp15[r]

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
unsigned long kvm_call_hyp(void *hypfn, ...);
void force_vm_exit(const cpumask_t *mask);

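/*
 * MMU notifier callbacks: keep the stage-2 mappings in sync with changes
 * to the host userspace mappings backing guest memory.
 */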
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		int exception_index);

static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
				     int exception_index) {}

static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
				       unsigned long hyp_stack_ptr,
				       unsigned long vector_ptr)
{
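	/*
	 * Call the HYP initialization code, then switch to the full blown
	 * HYP code.  The stack pointer is cast to a void * to fit
	 * kvm_call_hyp()'s prototype, and the PGD pointer is passed last
	 * so that it lands in r2/r3 for the init code (it is a 64-bit
	 * phys_addr_t under LPAE).
	 */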
	kvm_call_hyp((void *)hyp_stack_ptr, vector_ptr, pgd_ptr);
}

static inline void __cpu_init_stage2(void)
{
	kvm_call_hyp(__init_stage2_translation);
}

static inline int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
{
	return 0;
}

int kvm_perf_init(void);
int kvm_perf_teardown(void);

void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

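/* Guest debug support is not implemented on 32-bit ARM; these are stubs. */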
static inline void kvm_arm_init_debug(void) {}
static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
static inline bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu,
					     struct kvm_run *run)
{
	return false;
}

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

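/*
 * VFP/NEON switching is all done by the hyp switch code, so no extra
 * host-side FP state flushing is needed here.
 */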
static inline void kvm_fpsimd_flush_cpu_state(void) {}

/* VHE is an ARMv8.1 feature; there is nothing to do on 32-bit ARM. */
static inline void kvm_arm_vhe_guest_enter(void) {}
static inline void kvm_arm_vhe_guest_exit(void) {}

static inline bool kvm_arm_harden_branch_predictor(void)
{
	/* No way to detect it yet, pretend it is not there. */
	return false;
}

#endif /* __ARM_KVM_HOST_H__ */