#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

/*
 * Shadow page table geometry: 64-bit (and PAE) page tables hold 2^9 = 512
 * entries per page, legacy 32-bit page tables hold 2^10 = 1024.
 */
#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

/* Architectural x86 PTE bits (Intel SDM Vol. 3A, paging structure formats). */
#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
/* Bit 7 is PAT in 4K leaf PTEs but PS in directory entries; both named here. */
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
/* In large-page (PSE/PS=1) entries the PAT bit moves to bit 12. */
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

/*
 * PSE-36: bits 13..16 of a 32-bit 4M directory entry hold physical address
 * bits 32..35 (4 bits starting at bit 13).
 */
#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

/* Paging-mode root levels: 4-level long mode, 2-level legacy, 3-level PAE. */
#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

/* Walk levels, counted from the leaf (4K page table) upward. */
#define PT_PDPE_LEVEL 3
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1
#define PT_MAX_HUGEPAGE_LEVEL (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1)
48
/*
 * Build a mask with bits [s, e] (inclusive) set; used to describe reserved
 * bits in guest/shadow paging structures.
 *
 * Written as ((2ULL << (e - s)) - 1) rather than ((1ULL << (e - s + 1)) - 1)
 * so that a full-width range (s = 0, e = 63) does not shift a 64-bit value
 * by 64 bits, which is undefined behavior in C.  An empty range (e < s)
 * yields an empty mask rather than garbage.
 */
static inline u64 rsvd_bits(int s, int e)
{
	if (e < s)
		return 0;

	return ((2ULL << (e - s)) - 1) << s;
}
53
/* Set the SPTE bit pattern used to mark MMIO mappings (defined in mmu.c). */
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);

/* Recompute @context's reserved-bit masks for shadow page tables. */
void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

/*
 * Return values of handle_mmio_page_fault():
 * RET_MMIO_PF_EMULATE: the access was to MMIO space; emulate the instruction.
 * RET_MMIO_PF_INVALID: the cached MMIO SPTE is stale; fall back to the
 *                      normal page fault path (semantics live in mmu.c —
 *                      confirm there).
 * RET_MMIO_PF_RETRY:   let the vCPU retry the faulting access.
 * RET_MMIO_PF_BUG:     internal inconsistency was detected.
 */
enum {
	RET_MMIO_PF_EMULATE = 1,
	RET_MMIO_PF_INVALID = 2,
	RET_MMIO_PF_RETRY = 0,
	RET_MMIO_PF_BUG = -1
};

int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
78
79static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
80{
81 if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
82 return kvm->arch.n_max_mmu_pages -
83 kvm->arch.n_used_mmu_pages;
84
85 return 0;
86}
87
88static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
89{
90 if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
91 return 0;
92
93 return kvm_mmu_load(vcpu);
94}
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
/*
 * Test the R/W bit of @pte.  Returns the raw masked value (PT_WRITABLE_MASK,
 * i.e. 2, when writable; 0 otherwise), not a normalized 0/1 boolean.
 */
static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}
133
134static inline bool is_write_protection(struct kvm_vcpu *vcpu)
135{
136 return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
137}
138
139
140
141
142
143
144
145
146
/*
 * Check whether an access described by the W/R, U/S, ... bits of the page
 * fault error code @pfec is permitted by the PTE access rights @pte_access
 * (plus protection key @pte_pkey).
 *
 * Returns 0 if the access is allowed, or a page fault error code
 * (PFERR_PRESENT_MASK, possibly with PFERR_PK_MASK) if it faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  unsigned pfec)
{
	int cpl = kvm_x86_ops->get_cpl(vcpu);
	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * If CPL < 3, SMAP enforcement is disabled when EFLAGS.AC = 1.
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (implicit supervisor accesses) regardless of EFLAGS.AC.
	 *
	 * "smap" below computes (cpl < 3) && (rflags & X86_EFLAGS_AC)
	 * branchlessly: for cpl 0..2, (cpl - 3) is negative so every bit —
	 * including the AC bit — survives the AND; for cpl == 3 it is zero.
	 * The result is left in the X86_EFLAGS_AC bit position and then
	 * shifted into the PFERR_RSVD bit of "index" (that bit is always
	 * zero in pfec itself, as the WARN_ON below checks), so the
	 * permission bitmap can encode the SMAP-override case separately.
	 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
	/* One precomputed allow/deny bit per access type, indexed by pfec. */
	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	u32 errcode = PFERR_PRESENT_MASK;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU holds 16 protection domains with 2 attribute bits
		 * (AD, WD) each; pte_pkey selects the domain, so
		 * pte_pkey * 2 is the first bit of its pair.
		 */
		pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;

		/*
		 * Index into pkru_mask: pfec with the present bit cleared,
		 * plus the PTE's user bit relocated into the PFERR_RSVD
		 * position (mirroring the SMAP trick above).
		 */
		offset = (pfec & ~1) +
			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		/* Any surviving AD/WD bit is a protection-key fault. */
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	/* -(u32)fault is all-ones if fault, 0 otherwise: errcode or 0. */
	return -(u32)fault & errcode;
}
196
void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
/* Zap all shadow mappings covering guest frames [gfn_start, gfn_end). */
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

/* Forbid/re-allow large-page mappings for @gfn within @slot. */
void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn);
#endif
205