#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "cpuid.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

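/* Architecturally defined bits in a page-table entry. */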
#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

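/*
 * With PSE-36, bits 16:13 of a 4MB page-directory entry hold bits 35:32
 * of the physical frame address.
 */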
#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

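/* Page-table root depths for the paging modes KVM supports. */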
#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

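/*
 * Return a mask with bits s..e (inclusive) set.  A nonsensical range is
 * rejected at build time when both bounds are compile-time constants;
 * at run time an empty range simply yields 0.
 */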
static __always_inline u64 rsvd_bits(int s, int e)
{
	BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);

	if (__builtin_constant_p(e))
		BUILD_BUG_ON(e > 63);
	else
		e &= 63;

	if (e < s)
		return 0;

	return ((2ULL << (e - s)) - 1) << s;
}

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);

void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
			     gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     bool accessed_dirty, gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
			  u64 fault_address, char *insn, int insn_len);

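/* Reload the MMU only if no valid root page is currently installed. */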
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

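/*
 * The PCID occupies the low 12 bits of CR3 and is only meaningful when
 * CR4.PCIDE is set.
 */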
static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
	BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

	return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
	       ? cr3 & X86_CR3_PCID_MASK
	       : 0;
}

static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}

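/* Hand the current root HPA, tagged with the active PCID, to vendor code. */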
static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
	u64 root_hpa = vcpu->arch.mmu->root_hpa;

	if (!VALID_PAGE(root_hpa))
		return;

	kvm_x86_ops.load_mmu_pgd(vcpu, root_hpa | kvm_get_active_pcid(vcpu),
				 vcpu->arch.mmu->shadow_root_level);
}

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		       bool prefault);

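/*
 * When retpolines are enabled, call the TDP fault handler directly for
 * the common case to avoid the overhead of an indirect branch.
 */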
static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u32 err, bool prefault)
{
#ifdef CONFIG_RETPOLINE
	if (likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault))
		return kvm_tdp_page_fault(vcpu, cr2_or_gpa, err, prefault);
#endif
	return vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, err, prefault);
}

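/*
 * KVM write-protects sptes for two distinct purposes: to synchronize
 * guest modifications of its own page tables, and to track writes for
 * dirty logging.  The first case must flush TLBs immediately under
 * mmu_lock; dirty logging may defer the flush until the dirty bitmap
 * is handed back to userspace.  To test whether a writable TLB entry
 * may exist, check SPTE_MMU_WRITEABLE; when fixing a page fault or
 * write-protecting for dirty logging, check PT_WRITABLE_MASK.
 */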
static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

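/*
 * Check whether an access described by the I/D, W/R and U/S bits of the
 * page fault error code (pfec) is permitted given the PTE's access
 * rights and protection key.
 *
 * Return zero if the access does not fault; return the page fault error
 * code if the access faults.
 */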
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  unsigned pfec)
{
	int cpl = kvm_x86_ops.get_cpl(vcpu);
	unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);

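	/*
	 * If CPL < 3, SMAP checks are disabled when EFLAGS.AC is set; at
	 * CPL 3, SMAP always applies to implicit supervisor accesses.
	 * The expression below computes (cpl < 3) && (rflags & AC)
	 * branchlessly: (cpl - 3) has all bits set iff cpl < 3, so "smap"
	 * retains X86_EFLAGS_AC exactly when SMAP is being overridden.
	 * That bit is then shifted into the (always zero) PFERR_RSVD
	 * position of the permissions index.
	 */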
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	u32 errcode = PFERR_PRESENT_MASK;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

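		/*
		 * PKRU holds two attribute bits (AD and WD) for each of the
		 * 16 protection keys, so pte_pkey * 2 indexes the first bit
		 * for this page's key.
		 */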
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

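		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK */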
		offset = (pfec & ~1) +
			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}

void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);

#endif