/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#undef MMU_DEBUG

#ifdef MMU_DEBUG
extern bool dbg;

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#define MMU_WARN_ON(x) do { } while (0)
#endif

/*
 * Unlike regular MMU roots, PAE "roots", a.k.a. PDPTEs/PDPTRs, have a PRESENT
 * bit, and thus are guaranteed to be non-zero when valid.  And, when a guest
 * PDPTR is !PRESENT, its corresponding PAE root cannot be set to INVALID_PAGE,
 * as the CPU would treat that as a PRESENT PDPTR with reserved bits set.  Use
 * '0' instead of INVALID_PAGE to indicate an invalid PAE root.
 */
#define INVALID_PAE_ROOT	0
#define IS_VALID_PAE_ROOT(x)	(!!(x))

struct kvm_mmu_page {
	/*
	 * Note, "link" through "spt" fit in a single 64 byte cache line on
	 * 64-bit kernels, keep it that way unless there's a reason not to.
	 */
	struct list_head link;
	struct hlist_node hash_link;

	bool tdp_mmu_page;
	bool unsync;
	u8 mmu_valid_gen;
	bool lpage_disallowed; /* Can't be replaced by an equiv large page */

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	/* Currently serving as active root */
	union {
		int root_count;
		refcount_t tdp_mmu_root_count;
	};
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

	struct list_head lpage_disallowed_link;
#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;

#ifdef CONFIG_X86_64
	/* Used for freeing the page asynchronously if it is a TDP MMU page. */
	struct rcu_head rcu_head;
#endif
};

extern struct kmem_cache *mmu_page_header_cache;

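/*
 * The kvm_mmu_page of a shadow page table is stashed in the "private" field
 * of the struct page backing the table, which allows converting the table's
 * physical address (or a pointer to one of its SPTEs) back to kvm_mmu_page.
 */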
static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
{
	return to_shadow_page(__pa(sptep));
}

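/*
 * KVM keeps a separate set of memslots, and thus shadow pages, for SMM;
 * the role's smm bit selects between address space 0 (normal) and 1 (SMM).
 */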
static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
{
	return role.smm ? 1 : 0;
}

static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
	return kvm_mmu_role_as_id(sp->role);
}

static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the CPU dirty
	 * log would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages, which bypasses PML, since
	 * writes now result in a vmexit.  Note, the check on CPU dirty logging
	 * being enabled is mandatory as the bits used to denote WP-only SPTEs
	 * are reserved for NPT w/ PAE (32-bit KVM).
	 */
	return vcpu->arch.mmu == &vcpu->arch.guest_mmu &&
	       kvm_x86_ops.cpu_dirty_log_size;
}
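/*
 * nx_huge_pages reflects the module param for the iTLB multihit mitigation;
 * when enabled, KVM disallows huge pages that would be executable by the
 * guest, forcing executable mappings down to 4KiB pages.
 */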
extern int nx_huge_pages;
static inline bool is_nx_huge_page_enabled(void)
{
	return READ_ONCE(nx_huge_pages);
}
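/*
 * Attempt to unsync the shadow page(s) for @gfn so the guest can write its
 * page tables without taking write-protection faults; returns non-zero if
 * the gfn must instead remain write-protected.
 */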
int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync);

void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn,
				    int min_level);
void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
					u64 start_gfn, u64 pages);
unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);

/*
 * Return values of handle_mmio_page_fault(), mmu.page_fault(), and
 * fast_page_fault().
 *
 * RET_PF_RETRY: let CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 * RET_PF_FIXED: The faulting entry has been fixed.
 * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
 *
 * Any names added to this enum should be exported to userspace for use in
 * tracepoints via TRACE_DEFINE_ENUM() in mmutrace.h.
 */
enum {
	RET_PF_RETRY = 0,
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};

/* Bits which may be returned by set_spte() */
#define SET_SPTE_WRITE_PROTECTED_PT	BIT(0)
#define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)
#define SET_SPTE_SPURIOUS		BIT(2)

int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn,
			      kvm_pfn_t pfn, int max_level);
int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
			    int max_level, kvm_pfn_t *pfnp,
			    bool huge_page_disallowed, int *req_level);
void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
				kvm_pfn_t *pfnp, int *goal_levelp);

void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

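/*
 * Track shadow pages that had to disallow a huge page due to the NX huge
 * page mitigation, so the recovery worker can later zap them and restore
 * huge mappings.
 */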
void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */