// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Macros and functions to access KVM PTEs (also known as SPTEs)
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_host.h>
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "spte.h"

#include <asm/e820/api.h>
#include <asm/vmx.h>

static bool __read_mostly enable_mmio_caching = true;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);

u64 __read_mostly shadow_host_writable_mask;
u64 __read_mostly shadow_mmu_writable_mask;
u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask;
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_mask;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;

u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

u8 __read_mostly shadow_phys_bits;

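/*
 * The memslots generation is stored in MMIO SPTE bits that are ignored
 * by hardware.  The generation is split in two: the low bits land in
 * MMIO_SPTE_GEN_LOW_MASK and the remaining bits in
 * MMIO_SPTE_GEN_HIGH_MASK.
 */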
static u64 generation_mmio_spte_mask(u64 gen)
{
        u64 mask;

        WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);

        mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
        mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
        return mask;
}

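/*
 * Build an MMIO SPTE: a not-present SPTE carrying the MMIO signature
 * (shadow_mmio_value), the cached access bits, the gpa, and the current
 * memslots generation.  GPA bits that collide with the L1TF mitigation
 * mask (shadow_nonpresent_or_rsvd_mask) are additionally stored shifted
 * left by SHADOW_NONPRESENT_OR_RSVD_MASK_LEN so the gfn can be recovered.
 */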
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
        u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
        u64 spte = generation_mmio_spte_mask(gen);
        u64 gpa = gfn << PAGE_SHIFT;

        WARN_ON_ONCE(!shadow_mmio_value);

        access &= shadow_mmio_access_mask;
        spte |= shadow_mmio_value | access;
        spte |= gpa | shadow_nonpresent_or_rsvd_mask;
        spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
                << SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

        return spte;
}

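/*
 * Returns true if the pfn should be treated as MMIO, i.e. is not backed
 * by ordinary, cacheable RAM.  PFNs that lack a struct page are checked
 * against the raw e820 map, as they may still be RAM that simply isn't
 * page-backed.
 */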
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
        if (pfn_valid(pfn))
                return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
                        /*
                         * Some reserved pages, such as those from NVDIMM
                         * DAX devices, are not for MMIO, and can be mapped
                         * with cached memory type for better performance.
                         * However, the above check misconceives those pages
                         * as MMIO, and results in KVM mapping them with UC
                         * memory type, which would hurt the performance.
                         * Therefore, we check the host memory type in addition
                         * and only treat UC/UC-/WC pages as MMIO.
                         */
                        (!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));

        return !e820__mapped_raw_any(pfn_to_hpa(pfn),
                                     pfn_to_hpa(pfn + 1) - 1,
                                     E820_TYPE_RAM);
}

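/*
 * Create a leaf SPTE for the given gfn->pfn mapping and store it in
 * @new_spte.  Returns a bitmask of SET_SPTE_* flags describing side
 * effects, e.g. SET_SPTE_WRITE_PROTECTED_PT when the gfn could not be
 * made writable and was write-protected instead.
 */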
int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
              gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
              bool can_unsync, bool host_writable, bool ad_disabled,
              u64 *new_spte)
{
        u64 spte = SPTE_MMU_PRESENT_MASK;
        int ret = 0;

        if (ad_disabled)
                spte |= SPTE_TDP_AD_DISABLED_MASK;
        else if (kvm_vcpu_ad_need_write_protect(vcpu))
                spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;

        /*
         * For the EPT case, shadow_present_mask is 0 if hardware
         * supports exec-only page table entries.  In that case,
         * ACC_USER_MASK and shadow_user_mask are used to represent
         * read access.  See FNAME(gpte_access) in paging_tmpl.h.
         */
        spte |= shadow_present_mask;
        if (!speculative)
                spte |= spte_shadow_accessed_mask(spte);

        if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
            is_nx_huge_page_enabled()) {
                pte_access &= ~ACC_EXEC_MASK;
        }

        if (pte_access & ACC_EXEC_MASK)
                spte |= shadow_x_mask;
        else
                spte |= shadow_nx_mask;

        if (pte_access & ACC_USER_MASK)
                spte |= shadow_user_mask;

        if (level > PG_LEVEL_4K)
                spte |= PT_PAGE_SIZE_MASK;
        if (tdp_enabled)
                spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
                        kvm_is_mmio_pfn(pfn));

        if (host_writable)
                spte |= shadow_host_writable_mask;
        else
                pte_access &= ~ACC_WRITE_MASK;

        if (!kvm_is_mmio_pfn(pfn))
                spte |= shadow_me_mask;

        spte |= (u64)pfn << PAGE_SHIFT;

        if (pte_access & ACC_WRITE_MASK) {
                spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask;

                /*
                 * Optimization: for pte sync, if spte was writable the hash
                 * lookup is unnecessary (and expensive). Write protection
                 * is responsibility of kvm_mmu_get_page / kvm_mmu_sync_roots.
                 * Same reasoning can be applied to dirty page accounting.
                 */
                if (!can_unsync && is_writable_pte(old_spte))
                        goto out;

                /*
                 * Unsync shadow pages that are reachable by the new, writable
                 * SPTE.  If the page can't be made unsync, e.g. because it is
                 * write-protected for dirty logging, write-protect the SPTE
                 * instead and report that back to the caller.
                 */
                if (mmu_try_to_unsync_pages(vcpu, gfn, can_unsync)) {
                        pgprintk("%s: found shadow page for %llx, marking ro\n",
                                 __func__, gfn);
                        ret |= SET_SPTE_WRITE_PROTECTED_PT;
                        pte_access &= ~ACC_WRITE_MASK;
                        spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
                }
        }

        if (pte_access & ACC_WRITE_MASK)
                spte |= spte_shadow_dirty_mask(spte);

        if (speculative)
                spte = mark_spte_for_access_track(spte);

out:
        WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
                  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
                  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));

        *new_spte = spte;
        return ret;
}

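/*
 * Create a non-leaf SPTE pointing at the child page table @child_pt.
 * Non-leaf SPTEs are unconditionally writable, executable and
 * user-accessible; protections are enforced by the leaf SPTEs.
 */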
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{
        u64 spte = SPTE_MMU_PRESENT_MASK;

        spte |= __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
                shadow_user_mask | shadow_x_mask | shadow_me_mask;

        if (ad_disabled)
                spte |= SPTE_TDP_AD_DISABLED_MASK;
        else
                spte |= shadow_accessed_mask;

        return spte;
}

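/*
 * Build the replacement SPTE used by the mmu_notifier change_pte
 * callback: retarget the old SPTE at @new_pfn, remove host and MMU
 * write access, and mark the new SPTE as not accessed.
 */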
u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
{
        u64 new_spte;

        new_spte = old_spte & ~PT64_BASE_ADDR_MASK;
        new_spte |= (u64)new_pfn << PAGE_SHIFT;

        new_spte &= ~PT_WRITABLE_MASK;
        new_spte &= ~shadow_host_writable_mask;

        new_spte = mark_spte_for_access_track(new_spte);

        return new_spte;
}

static u8 kvm_get_shadow_phys_bits(void)
{
        /*
         * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are
         * detected in CPU detection code, but the processor treats those
         * reduced bits as 'keyID', so they are not reserved bits.
         * Therefore KVM needs to look at the physical address bits
         * reported by CPUID.
         */
        if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
                return cpuid_eax(0x80000008) & 0xff;

        /*
         * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM
         * with custom CPUID.  Proceed with whatever the kernel found since
         * these features aren't virtualizable (SME/SEV also require CPUIDs
         * higher than 0x80000008).
         */
        return boot_cpu_data.x86_phys_bits;
}

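/*
 * Mark the SPTE as not accessed.  If A/D bits are in use this simply
 * clears the Accessed bit.  Otherwise the R/W/X bits are stashed in the
 * SHADOW_ACC_TRACK_SAVED_BITS area and cleared, which makes the SPTE
 * non-present to hardware; the next access faults, and the fault
 * handler restores the saved bits.
 */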
u64 mark_spte_for_access_track(u64 spte)
{
        if (spte_ad_enabled(spte))
                return spte & ~shadow_accessed_mask;

        if (is_access_track_spte(spte))
                return spte;

        /*
         * Making an Access Tracking PTE will result in removal of write
         * access from the PTE. So, verify that we will be able to restore
         * the write access in the fast page fault path later on.
         */
        WARN_ONCE((spte & PT_WRITABLE_MASK) &&
                  !spte_can_locklessly_be_made_writable(spte),
                  "kvm: Writable SPTE is not locklessly dirty-trackable\n");

        WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
                          SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
                  "kvm: Access Tracking saved bit locations are not zero\n");

        spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
                SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
        spte &= ~shadow_acc_track_mask;

        return spte;
}

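/*
 * Configure how MMIO SPTEs are recognized: an SPTE is treated as MMIO
 * when (spte & mmio_mask) == mmio_value.  @access_mask selects which
 * guest access bits are cached in the SPTE.  Forcing mmio_value to 0
 * (below, or via the mmio_caching module param) disables MMIO caching.
 */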
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
{
        BUG_ON((u64)(unsigned)access_mask != access_mask);
        WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);

        if (!enable_mmio_caching)
                mmio_value = 0;

        /*
         * Disable MMIO caching if the MMIO value collides with the bits
         * that are used to hold the relocated GFN when the L1TF mitigation
         * is active, as a collision would corrupt the shifted GFN and/or
         * cause MMIO SPTEs to be misidentified.
         */
        if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask <<
                                  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
                mmio_value = 0;

        /*
         * The masked MMIO value must obviously match itself and a removed
         * SPTE must not get a false positive.  Removed SPTEs and MMIO SPTEs
         * should never collide as MMIO must set some RWX bits, and removed
         * SPTEs must not set any RWX bits.
         */
        if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||
            WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value))
                mmio_value = 0;

        shadow_mmio_value = mmio_value;
        shadow_mmio_mask = mmio_mask;
        shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

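/*
 * Override the default masks with EPT encodings.  With exec-only
 * support there is no single bit that is set in every present EPT
 * entry, hence shadow_present_mask is zero in that case.
 */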
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
{
        shadow_user_mask = VMX_EPT_READABLE_MASK;
        shadow_accessed_mask = has_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull;
        shadow_dirty_mask = has_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull;
        shadow_nx_mask = 0ull;
        shadow_x_mask = VMX_EPT_EXECUTABLE_MASK;
        shadow_present_mask = has_exec_only ? 0ull : VMX_EPT_READABLE_MASK;
        shadow_acc_track_mask = VMX_EPT_RWX_MASK;
        shadow_me_mask = 0ull;

        shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
        shadow_mmu_writable_mask = EPT_SPTE_MMU_WRITABLE;

        /*
         * EPT Misconfigurations are generated if the value of bits 2:0
         * of an EPT paging-structure entry is 110b (write/execute).
         */
        kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
                                   VMX_EPT_RWX_MASK, 0);
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);

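/*
 * (Re)set all SPTE masks to the defaults for legacy shadow paging.
 * Vendor code overrides the masks as needed, e.g. kvm_mmu_set_ept_masks()
 * when VMX enables EPT.
 */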
void kvm_mmu_reset_all_pte_masks(void)
{
        u8 low_phys_bits;
        u64 mask;

        shadow_phys_bits = kvm_get_shadow_phys_bits();

        /*
         * If the CPU has 46 or less physical address bits, then set an
         * appropriate mask to guard against L1TF attacks. Otherwise, it is
         * assumed that the CPU is not vulnerable to L1TF.
         *
         * Some Intel CPUs address the L1 cache using more PA bits than are
         * reported by CPUID. Use the PA width of the L1 cache when possible
         * to achieve more effective mitigation, e.g. if system RAM overlaps
         * the most significant bits of legal physical address space.
         */
        shadow_nonpresent_or_rsvd_mask = 0;
        low_phys_bits = boot_cpu_data.x86_phys_bits;
        if (boot_cpu_has_bug(X86_BUG_L1TF) &&
            !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
                          52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
                low_phys_bits = boot_cpu_data.x86_cache_bits
                        - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
                shadow_nonpresent_or_rsvd_mask =
                        rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
        }

        shadow_nonpresent_or_rsvd_lower_gfn_mask =
                GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);

        shadow_user_mask = PT_USER_MASK;
        shadow_accessed_mask = PT_ACCESSED_MASK;
        shadow_dirty_mask = PT_DIRTY_MASK;
        shadow_nx_mask = PT64_NX_MASK;
        shadow_x_mask = 0;
        shadow_present_mask = PT_PRESENT_MASK;
        shadow_acc_track_mask = 0;
        shadow_me_mask = sme_me_mask;

        shadow_host_writable_mask = DEFAULT_SPTE_HOST_WRITEABLE;
        shadow_mmu_writable_mask = DEFAULT_SPTE_MMU_WRITEABLE;

        /*
         * Set a reserved PA bit in MMIO SPTEs to generate page faults with
         * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
         * paging) support a maximum of 52 bits of PA.  If the CPU supports
         * the full 52 bits, no PA bit is reserved and MMIO caching via
         * reserved bits is disabled (mask = 0).
         */
        if (shadow_phys_bits < 52)
                mask = BIT_ULL(51) | PT_PRESENT_MASK;
        else
                mask = 0;

        kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
}