1
2#ifndef __KVM_X86_MMU_H
3#define __KVM_X86_MMU_H
4
5#include <linux/kvm_host.h>
6#include "kvm_cache_regs.h"
7#include "cpuid.h"
8
9#define PT64_PT_BITS 9
10#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
11#define PT32_PT_BITS 10
12#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
13
14#define PT_WRITABLE_SHIFT 1
15#define PT_USER_SHIFT 2
16
17#define PT_PRESENT_MASK (1ULL << 0)
18#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
19#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
20#define PT_PWT_MASK (1ULL << 3)
21#define PT_PCD_MASK (1ULL << 4)
22#define PT_ACCESSED_SHIFT 5
23#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
24#define PT_DIRTY_SHIFT 6
25#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
26#define PT_PAGE_SIZE_SHIFT 7
27#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
28#define PT_PAT_MASK (1ULL << 7)
29#define PT_GLOBAL_MASK (1ULL << 8)
30#define PT64_NX_SHIFT 63
31#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)
32
33#define PT_PAT_SHIFT 7
34#define PT_DIR_PAT_SHIFT 12
35#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
36
37#define PT32_DIR_PSE36_SIZE 4
38#define PT32_DIR_PSE36_SHIFT 13
39#define PT32_DIR_PSE36_MASK \
40 (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
41
42#define PT64_ROOT_5LEVEL 5
43#define PT64_ROOT_4LEVEL 4
44#define PT32_ROOT_LEVEL 2
45#define PT32E_ROOT_LEVEL 3
46
47#define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | \
48 X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE | \
49 X86_CR4_LA57)
50
51#define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)
52
/*
 * Build a mask with bits [s, e] (inclusive) set, e.g. for computing the
 * reserved bits in a paging-structure entry.
 *
 * When both bounds are compile-time constants the sanity checks are done
 * at build time: e < s or e > 63 fails the build via BUILD_BUG_ON.  A
 * non-constant e is masked to 63 so the shift below stays well-defined.
 */
static __always_inline u64 rsvd_bits(int s, int e)
{
	BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);

	if (__builtin_constant_p(e))
		BUILD_BUG_ON(e > 63);
	else
		e &= 63;

	/* Runtime (non-constant) bounds may still be empty: no bits set. */
	if (e < s)
		return 0;

	/*
	 * (2ULL << (e - s)) - 1 sets the low (e - s + 1) bits.  For the
	 * full-width case e - s == 63, 2ULL << 63 wraps to 0 (defined for
	 * unsigned types) and the -1 yields all ones, avoiding the UB that
	 * a "1ULL << (e - s + 1)" formulation would hit.
	 */
	return ((2ULL << (e - s)) - 1) << s;
}
67
68void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
69void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);
70
71void kvm_init_mmu(struct kvm_vcpu *vcpu);
72void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
73 unsigned long cr4, u64 efer, gpa_t nested_cr3);
74void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
75 bool accessed_dirty, gpa_t new_eptp);
76bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
77int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
78 u64 fault_address, char *insn, int insn_len);
79
80int kvm_mmu_load(struct kvm_vcpu *vcpu);
81void kvm_mmu_unload(struct kvm_vcpu *vcpu);
82void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
83
84static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
85{
86 if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
87 return 0;
88
89 return kvm_mmu_load(vcpu);
90}
91
92static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
93{
94 BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);
95
96 return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
97 ? cr3 & X86_CR3_PCID_MASK
98 : 0;
99}
100
/* PCID currently in use by the vCPU, i.e. the PCID of its current CR3. */
static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
	unsigned long cr3 = kvm_read_cr3(vcpu);

	return kvm_get_pcid(vcpu, cr3);
}
105
106static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
107{
108 u64 root_hpa = vcpu->arch.mmu->root_hpa;
109
110 if (!VALID_PAGE(root_hpa))
111 return;
112
113 static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa,
114 vcpu->arch.mmu->shadow_root_level);
115}
116
117int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
118 bool prefault);
119
/*
 * Dispatch a guest page fault to the vCPU's installed page-fault handler.
 *
 * With retpolines enabled, an indirect call goes through the retpoline
 * thunk and is expensive.  TDP is the common case, so compare the
 * function pointer against kvm_tdp_page_fault and call it directly to
 * keep the hot path free of the indirect-branch penalty.
 */
static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u32 err, bool prefault)
{
#ifdef CONFIG_RETPOLINE
	if (likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault))
		return kvm_tdp_page_fault(vcpu, cr2_or_gpa, err, prefault);
#endif
	return vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, err, prefault);
}
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163static inline bool is_writable_pte(unsigned long pte)
164{
165 return pte & PT_WRITABLE_MASK;
166}
167
168
169
170
171
172
173
174
175
/*
 * Check whether an access with permissions @pte_access and protection key
 * @pte_pkey is allowed given the page-fault error code @pfec.  The caller
 * must not set PFERR_PK_MASK or PFERR_RSVD_MASK in @pfec (WARNed below).
 *
 * Returns 0 if the access is permitted, otherwise a non-zero error code
 * containing PFERR_PRESENT_MASK and, for a protection-key denial,
 * PFERR_PK_MASK as well.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  unsigned pfec)
{
	int cpl = static_call(kvm_x86_get_cpl)(vcpu);
	unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);

	/*
	 * Branchless SMAP-override computation:
	 *
	 * (cpl - 3) is negative for CPL < 3 (all bits set after the AND's
	 * perspective, in particular the X86_EFLAGS_AC bit) and zero for
	 * CPL == 3.  So "smap" has X86_EFLAGS_AC set iff CPL < 3 and
	 * EFLAGS.AC = 1, i.e. iff SMAP is being overridden for an explicit
	 * supervisor access.
	 *
	 * "index" is built from pfec with the low (present) bit dropped
	 * (pfec >> 1) plus the AC bit shifted down so it lands in the slot
	 * that PFERR_RSVD occupies in (pfec >> 1) — RSVD is always clear
	 * in pfec, so that slot is free to encode the SMAP override.
	 * mmu->permissions[index] then yields a per-access-type fault
	 * bitmap which is indexed by pte_access.
	 *
	 * NOTE(review): keeping this branchless appears deliberate (it is
	 * on every shadow-walk permission check) — do not "simplify" it
	 * into conditionals.
	 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	u32 errcode = PFERR_PRESENT_MASK;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU holds two bits per protection key (access-disable,
		 * write-disable); pte_pkey * 2 selects the pair belonging
		 * to this page's key.
		 */
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/*
		 * Build the shift into mmu->pkru_mask: pfec with its low
		 * (present) bit cleared, plus the page's user/supervisor
		 * bit relocated into the PFERR_RSVD_BIT position.
		 */
		offset = (pfec & ~1) +
			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;

		/* -pkru_bits is all-ones iff any relevant PKRU bit denies. */
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	/* -(u32)fault is all-ones on fault, zero otherwise. */
	return -(u32)fault & errcode;
}
225
226void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
227
228int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
229
230int kvm_mmu_post_init_vm(struct kvm *kvm);
231void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
232
/*
 * Returns true once rmaps have been allocated for this VM's memslots.
 *
 * The acquire load ensures that a reader observing the flag as set also
 * observes the rmap allocations that were published before it.
 * NOTE(review): the paired release store is outside this file — confirm
 * the writer uses smp_store_release() (or an equivalent barrier) when
 * setting memslots_have_rmaps.
 */
static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
{
	return smp_load_acquire(&kvm->arch.memslots_have_rmaps);
}
242
243static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
244{
245
246 return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
247 (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
248}
249
250static inline unsigned long
251__kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, unsigned long npages,
252 int level)
253{
254 return gfn_to_index(slot->base_gfn + npages - 1,
255 slot->base_gfn, level) + 1;
256}
257
/* Number of @level-sized huge pages needed to cover the whole memslot. */
static inline unsigned long
kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, int level)
{
	return __kvm_mmu_slot_lpages(slot, slot->npages, level);
}
263
/*
 * Adjust the per-level mapped-page statistic by @count (may be negative
 * when pages are unmapped).  @level is 1-based, hence the level - 1
 * index into stat.pages[].
 */
static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
{
	atomic64_add(count, &kvm->stat.pages[level - 1]);
}
268#endif
269