/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "cpuid.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

static inline u64 rsvd_bits(int s, int e)
{
	if (e < s)
		return 0;

	return ((1ULL << (e - s + 1)) - 1) << s;
}
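
/*
 * Worked example (illustrative): on a CPU with MAXPHYADDR = 52, bits 52..62
 * of a 64-bit PTE are reserved (bit 63 is NX), and
 * rsvd_bits(52, 62) == ((1ULL << 11) - 1) << 52 == 0x7ff0000000000000.
 * A range with e < s deliberately yields 0, meaning "no reserved bits".
 */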

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);

void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
			     gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     bool accessed_dirty, gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
			  u64 fault_address, char *insn, int insn_len);

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
	BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

	return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
	       ? cr3 & X86_CR3_PCID_MASK
	       : 0;
}
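
/*
 * Example (illustrative values): X86_CR3_PCID_MASK covers CR3 bits 11:0, so
 * with CR4.PCIDE = 1 and CR3 = 0x123456005 this returns PCID 0x005; with
 * CR4.PCIDE = 0 the field is architecturally undefined and 0 is returned.
 */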

static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}

static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
	u64 root_hpa = vcpu->arch.mmu->root_hpa;

	if (!VALID_PAGE(root_hpa))
		return;

	kvm_x86_ops.load_mmu_pgd(vcpu, root_hpa | kvm_get_active_pcid(vcpu),
				 vcpu->arch.mmu->shadow_root_level);
}
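
/*
 * Note: root_hpa is page aligned, so its low 12 bits are clear and OR-ing in
 * the active PCID builds a valid CR3-style value, e.g. (illustrative)
 * 0x123456000 | 0x5 == 0x123456005.
 */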

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		       bool prefault);

static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u32 err, bool prefault)
{
#ifdef CONFIG_RETPOLINE
	if (likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault))
		return kvm_tdp_page_fault(vcpu, cr2_or_gpa, err, prefault);
#endif
	return vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, err, prefault);
}
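
/*
 * The CONFIG_RETPOLINE case above exists because retpolines make the
 * ->page_fault() indirect call expensive; comparing the handler against
 * kvm_tdp_page_fault() turns the common TDP path into a direct call.
 */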

/*
 * There are two flavours of write-protection: a) write-protecting guest pages
 * to sync guest modifications of the shadow page tables, and b)
 * write-protecting pages to sync the dirty bitmap for KVM_GET_DIRTY_LOG.
 * The differences between the two are:
 * 1) the first case clears the SPTE_MMU_WRITEABLE bit.
 * 2) the first case requires flushing the TLB immediately to avoid corrupting
 *    shadow page tables across vCPUs, so it must run under the protection of
 *    mmu_lock.  The second case need not flush the TLB until the dirty bitmap
 *    is returned to userspace, since it only write-protects pages logged in
 *    the bitmap; those pages cannot be missed, so the flush can happen
 *    outside mmu_lock.
 *
 * The first case can therefore observe a stale, still-writable TLB entry
 * created by the second case, which write-protects without an immediate
 * flush.  To make the first case aware of this, it flushes the TLB whenever
 * it write-protects an SPTE whose SPTE_MMU_WRITEABLE bit is set; this works
 * because the second case never touches SPTE_MMU_WRITEABLE.
 *
 * Whenever an SPTE is updated (only permission and status bits change), check
 * whether an SPTE with SPTE_MMU_WRITEABLE became read-only; if so, the TLB
 * must be flushed.
 *
 * Rules for using the SPTE_MMU_WRITEABLE bit:
 * - to check whether a writable TLB entry may exist, or whether the SPTE can
 *   be made writable on the MMU mapping (the common case), check
 *   SPTE_MMU_WRITEABLE; otherwise
 * - when fixing a page fault on the SPTE, or write-protecting for dirty
 *   logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

static inline bool kvm_mmu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return (gpa >= BIT_ULL(cpuid_maxphyaddr(vcpu)));
}

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  unsigned pfec)
{
	int cpl = kvm_x86_ops.get_cpl(vcpu);
	unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);

	/*
	 * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
	 *
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (these are implicit supervisor accesses) regardless of the value
	 * of EFLAGS.AC.
	 *
	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
	 * the result in X86_EFLAGS_AC.  We then insert it in place of
	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
	 * but it will be one in index if SMAP checks are being overridden.
	 * It is important to keep this branchless.
	 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	u32 errcode = PFERR_PRESENT_MASK;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU defines 32 bits; there are 16 domains and 2 attribute
		 * bits per domain in the register.  pte_pkey is the index of
		 * the page's protection domain, so pte_pkey * 2 is the index
		 * of the first PKRU bit for that domain.
		 */
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
		offset = (pfec & ~1) +
			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}
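
/*
 * Worked example (illustrative): a user-mode write to a present page has
 * pfec == PFERR_USER_MASK | PFERR_WRITE_MASK == 6.  With the SMAP override
 * inactive (smap == 0), index == pfec >> 1 == 3, and bit "pte_access" of
 * mmu->permissions[3] decides whether that access class faults under the
 * current paging mode.
 */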

void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);

#endif