/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
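
/*
 * A hedged usage sketch (names taken from the vCPU creation path, shown
 * here only for illustration): with SEV active, __sme_page_pa() folds the
 * SME C-bit into the page's physical address so that hardware treats
 * accesses through it as encrypted:
 *
 *	svm->vmcb01.pa = __sme_page_pa(vmcb01_page);
 */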

#define IOPM_SIZE (PAGE_SIZE * 3)
#define MSRPM_SIZE (PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS	20
#define MSRPM_OFFSETS	16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern bool intercept_smi;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,	 /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_SW = 31,	 /* SW defined bits */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/* A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.  */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_control_area ctl;

	bool initialized;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;

	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	struct svm_cpu_data *svm_data;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt-remapping information,
	 * which is consulted when updating vCPU affinity. This avoids the
	 * need to scan for the IRTE and match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	/* SEV-ES support */
	struct vmcb_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;

	bool guest_state_loaded;
};

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev_guest(kvm) && sev->es_active;
#else
	return false;
#endif
}
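
/*
 * Illustrative check, assuming a struct kvm_vcpu *vcpu in scope: SEV-ES
 * guests need separate handling in many paths because their register
 * state lives in an encrypted VMSA:
 *
 *	if (sev_es_guest(vcpu->kvm))
 *		return;		// leave encrypted state untouched
 */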

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			      & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline bool vmcb_is_clean(struct vmcb *vmcb, int bit)
{
	return (vmcb->control.clean & (1 << bit));
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
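
/*
 * Sketch of the clean-bits protocol these helpers implement (illustrative,
 * mirroring how callers such as the CR-write paths use it): clear the
 * matching clean bit whenever VMCB state is changed so hardware reloads
 * that state on the next VMRUN:
 *
 *	svm->vmcb->save.cr4 = cr4;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
 */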

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}
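
/*
 * Intercept numbers index linearly into control->intercepts[]; exception
 * intercepts sit at INTERCEPT_EXCEPTION_OFFSET plus the exception vector.
 * A hedged example (DB_VECTOR as defined in asm/kvm_host.h):
 *
 *	vmcb_set_intercept(&svm->vmcb->control,
 *			   INTERCEPT_EXCEPTION_OFFSET + DB_VECTOR);
 */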

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
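
/*
 * Illustrative pattern (a sketch of how blocking checks consult GIF, not a
 * verbatim quote): event injection paths test GIF before delivering NMIs
 * or interrupts:
 *
 *	if (!gif_set(svm))
 *		return true;	// events are blocked while GIF is clear
 */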

/* svm.c */
#define MSR_INVALID			0xffffffffU

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}
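
/*
 * Hedged example, assuming SVM_EXIT_SMI from asm/svm.h as the exit code:
 * synthesize an exit to L1 that carries no exit_info payload:
 *
 *	return nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
 */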

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
				     struct vmcb_control_area *control);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK				0xFFFFFFFFFF000ULL

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}
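
/*
 * Recap of the physical APIC ID entry layout implied by the masks above:
 * bits 7:0 hold the host physical APIC ID, bits 51:12 the backing-page
 * address, bit 62 the is-running flag tested here, and bit 63 marks the
 * entry valid.
 */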

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_create_vcpu(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif