#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

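/*
 * PSE-36 layout, for reference: in a 32-bit PSE page directory entry,
 * bits 13..16 hold physical address bits 32..35, so the mask below
 * evaluates to 0x1e000.
 */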
#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
        (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_PDPE_LEVEL 3
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1
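/*
 * With KVM_NR_PAGE_SIZES == 3 (4K, 2M and 1G pages), this evaluates to
 * PT_PDPE_LEVEL, i.e. 1G is the largest supported huge page.
 */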
#define PT_MAX_HUGEPAGE_LEVEL (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1)

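/*
 * Build a mask covering the inclusive bit range [s, e]; returns 0 for an
 * empty range.  For example, rsvd_bits(52, 62) == 0x7ff0000000000000ULL.
 */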
static inline u64 rsvd_bits(int s, int e)
{
        if (e < s)
                return 0;

        return ((1ULL << (e - s + 1)) - 1) << s;
}

void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value);

void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
                             bool accessed_dirty);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
                          u64 fault_address, char *insn, int insn_len,
                          bool need_unprotect);

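/* How many more shadow pages the VM may allocate before hitting its cap. */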
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
        if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
                return kvm->arch.n_max_mmu_pages -
                        kvm->arch.n_used_mmu_pages;

        return 0;
}

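/* Load a new MMU root only if none is currently loaded. */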
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
        if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
                return 0;

        return kvm_mmu_load(vcpu);
}

/*
 * Currently there are two sorts of write-protection: a) write-protecting a
 * guest page to sync guest modifications of it, and b) write-protecting
 * pages to sync the dirty bitmap for KVM_GET_DIRTY_LOG.  The differences
 * between the two are:
 * 1) the first case clears the SPTE_MMU_WRITEABLE bit;
 * 2) the first case requires flushing the TLB immediately, to avoid
 *    corrupting shadow page tables between vcpus, so it must run under
 *    mmu-lock.  The second case does not need to flush the TLB until the
 *    dirty bitmap is returned to userspace, since it only write-protects
 *    pages logged in the bitmap (so no page is missed), and can therefore
 *    flush the TLB outside of mmu-lock.
 *
 * The problem is that the first case can observe a stale TLB entry created
 * by the second case, which write-protects pages without flushing the TLB
 * immediately.  To make the first case aware of this, the SPTE_MMU_WRITEABLE
 * bit is kept on the spte:
 * - to check whether the spte may have a writable TLB entry, or whether the
 *   spte can be made writable in the MMU mapping, test SPTE_MMU_WRITEABLE
 *   (the common case);
 * - when fixing a page fault on the spte, or write-protecting it for dirty
 *   logging, test PT_WRITABLE_MASK; the spte must be read-only afterwards.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline int is_writable_pte(unsigned long pte)
{
        return pte & PT_WRITABLE_MASK;
}

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Returns zero if the access does not fault; returns the page fault error
 * code if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                  unsigned pte_access, unsigned pte_pkey,
                                  unsigned pfec)
{
        int cpl = kvm_x86_ops->get_cpl(vcpu);
        unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);

        /*
         * If CPL < 3, SMAP protection is disabled if EFLAGS.AC = 1.
         *
         * If CPL = 3, SMAP applies to all supervisor-mode data accesses
         * (these are implicit supervisor accesses) regardless of the value
         * of EFLAGS.AC.
         *
         * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
         * the result in X86_EFLAGS_AC.  It is then inserted in place of
         * the PFERR_RSVD_MASK bit; this bit is always zero in pfec, but
         * it will be one in index if SMAP checks are being overridden.
         * It is important to keep this branchless.
         */
        unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
        int index = (pfec >> 1) +
                    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
        bool fault = (mmu->permissions[index] >> pte_access) & 1;
        u32 errcode = PFERR_PRESENT_MASK;

        WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
        if (unlikely(mmu->pkru_mask)) {
                u32 pkru_bits, offset;

                /*
                 * PKRU defines 32 bits: there are 16 domains and 2
                 * attribute bits per domain in pkru.  pte_pkey is the
                 * index of the protection domain, so pte_pkey * 2 is
                 * the index of the first bit for the domain.
                 */
                pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

                /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK */
                offset = (pfec & ~1) +
                        ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

                pkru_bits &= mmu->pkru_mask >> offset;
                errcode |= -pkru_bits & PFERR_PK_MASK;
                fault |= (pkru_bits != 0);
        }

        return -(u32)fault & errcode;
}
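/*
 * Illustrative walk-through (not from the original source): a user-mode
 * write has pfec == PFERR_WRITE_MASK | PFERR_USER_MASK == 0x6, so
 * pfec >> 1 == 3; with CPL == 3 the smap term is zero, giving index == 3,
 * and bit pte_access of mmu->permissions[3] decides the fault.
 */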

void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
                                    struct kvm_memory_slot *slot, u64 gfn);
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
#endif