1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#ifndef __ARM64_S2_PGTABLE_H_
20#define __ARM64_S2_PGTABLE_H_
21
22#include <linux/hugetlb.h>
23#include <asm/pgtable.h>
24
25
26
27
28
29
/* Shift covered by the top-level table of a stage-2 walk with @lvls levels. */
#define pt_levels_pgdir_shift(lvls)	ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls))
31
32
33
34
35
36
37
38
39
40
41
/*
 * Levels needed for a stage-2 walk of an (ipa)-bit address space.  The
 * initial level can be concatenated, resolving 4 extra bits, hence the
 * "(ipa) - 4" below.
 */
#define stage2_pgtable_levels(ipa)	ARM64_HW_PGTABLE_LEVELS((ipa) - 4)
/* Number of stage-2 levels this VM uses, as encoded in its VTCR_EL2. */
#define kvm_stage2_levels(kvm)		VTCR_EL2_LVLS(kvm->arch.vtcr)

/* Per-VM geometry of the top-level stage-2 table. */
#define stage2_pgdir_shift(kvm)		pt_levels_pgdir_shift(kvm_stage2_levels(kvm))
#define stage2_pgdir_size(kvm)		(1ULL << stage2_pgdir_shift(kvm))
#define stage2_pgdir_mask(kvm)		~(stage2_pgdir_size(kvm) - 1)
49
50
51
52
53
54
55
/*
 * Number of entries in (and byte size of) the top-level stage-2 table.
 * With a concatenated initial level the table may span more than one
 * page, so these are computed from the IPA size rather than fixed.
 */
#define pgd_ptrs_shift(ipa, pgdir_shift)	\
	((ipa) > (pgdir_shift) ? ((ipa) - (pgdir_shift)) : 0)
#define __s2_pgd_ptrs(ipa, lvls)		\
	(1 << (pgd_ptrs_shift((ipa), pt_levels_pgdir_shift(lvls))))
#define __s2_pgd_size(ipa, lvls)	(__s2_pgd_ptrs((ipa), (lvls)) * sizeof(pgd_t))

#define stage2_pgd_ptrs(kvm)	__s2_pgd_ptrs(kvm_phys_shift(kvm), kvm_stage2_levels(kvm))
#define stage2_pgd_size(kvm)	__s2_pgd_size(kvm_phys_shift(kvm), kvm_stage2_levels(kvm))
64
65
66
67
68
69
/*
 * Worst-case number of table pages needed to map one page at stage 2:
 * one page per level below the (separately allocated) top-level table.
 */
#define kvm_mmu_cache_min_pages(kvm)	(kvm_stage2_levels(kvm) - 1)
71
72
73static inline bool kvm_stage2_has_pud(struct kvm *kvm)
74{
75 return (CONFIG_PGTABLE_LEVELS > 3) && (kvm_stage2_levels(kvm) > 3);
76}
77
/* Geometry of a stage-2 PUD entry (level 1), when that level exists. */
#define S2_PUD_SHIFT	ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
#define S2_PUD_SIZE	(1UL << S2_PUD_SHIFT)
#define S2_PUD_MASK	(~(S2_PUD_SIZE - 1))
81
82static inline bool stage2_pgd_none(struct kvm *kvm, pgd_t pgd)
83{
84 if (kvm_stage2_has_pud(kvm))
85 return pgd_none(pgd);
86 else
87 return 0;
88}
89
90static inline void stage2_pgd_clear(struct kvm *kvm, pgd_t *pgdp)
91{
92 if (kvm_stage2_has_pud(kvm))
93 pgd_clear(pgdp);
94}
95
96static inline bool stage2_pgd_present(struct kvm *kvm, pgd_t pgd)
97{
98 if (kvm_stage2_has_pud(kvm))
99 return pgd_present(pgd);
100 else
101 return 1;
102}
103
104static inline void stage2_pgd_populate(struct kvm *kvm, pgd_t *pgd, pud_t *pud)
105{
106 if (kvm_stage2_has_pud(kvm))
107 pgd_populate(NULL, pgd, pud);
108}
109
110static inline pud_t *stage2_pud_offset(struct kvm *kvm,
111 pgd_t *pgd, unsigned long address)
112{
113 if (kvm_stage2_has_pud(kvm))
114 return pud_offset(pgd, address);
115 else
116 return (pud_t *)pgd;
117}
118
119static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud)
120{
121 if (kvm_stage2_has_pud(kvm))
122 free_page((unsigned long)pud);
123}
124
125static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp)
126{
127 if (kvm_stage2_has_pud(kvm))
128 return kvm_page_empty(pudp);
129 else
130 return false;
131}
132
133static inline phys_addr_t
134stage2_pud_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
135{
136 if (kvm_stage2_has_pud(kvm)) {
137 phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK;
138
139 return (boundary - 1 < end - 1) ? boundary : end;
140 } else {
141 return end;
142 }
143}
144
145
146static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
147{
148 return (CONFIG_PGTABLE_LEVELS > 2) && (kvm_stage2_levels(kvm) > 2);
149}
150
/* Geometry of a stage-2 PMD entry (level 2), when that level exists. */
#define S2_PMD_SHIFT	ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
#define S2_PMD_SIZE	(1UL << S2_PMD_SHIFT)
#define S2_PMD_MASK	(~(S2_PMD_SIZE - 1))
154
155static inline bool stage2_pud_none(struct kvm *kvm, pud_t pud)
156{
157 if (kvm_stage2_has_pmd(kvm))
158 return pud_none(pud);
159 else
160 return 0;
161}
162
163static inline void stage2_pud_clear(struct kvm *kvm, pud_t *pud)
164{
165 if (kvm_stage2_has_pmd(kvm))
166 pud_clear(pud);
167}
168
169static inline bool stage2_pud_present(struct kvm *kvm, pud_t pud)
170{
171 if (kvm_stage2_has_pmd(kvm))
172 return pud_present(pud);
173 else
174 return 1;
175}
176
177static inline void stage2_pud_populate(struct kvm *kvm, pud_t *pud, pmd_t *pmd)
178{
179 if (kvm_stage2_has_pmd(kvm))
180 pud_populate(NULL, pud, pmd);
181}
182
183static inline pmd_t *stage2_pmd_offset(struct kvm *kvm,
184 pud_t *pud, unsigned long address)
185{
186 if (kvm_stage2_has_pmd(kvm))
187 return pmd_offset(pud, address);
188 else
189 return (pmd_t *)pud;
190}
191
192static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd)
193{
194 if (kvm_stage2_has_pmd(kvm))
195 free_page((unsigned long)pmd);
196}
197
198static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud)
199{
200 if (kvm_stage2_has_pmd(kvm))
201 return pud_huge(pud);
202 else
203 return 0;
204}
205
206static inline bool stage2_pmd_table_empty(struct kvm *kvm, pmd_t *pmdp)
207{
208 if (kvm_stage2_has_pmd(kvm))
209 return kvm_page_empty(pmdp);
210 else
211 return 0;
212}
213
214static inline phys_addr_t
215stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
216{
217 if (kvm_stage2_has_pmd(kvm)) {
218 phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK;
219
220 return (boundary - 1 < end - 1) ? boundary : end;
221 } else {
222 return end;
223 }
224}
225
226static inline bool stage2_pte_table_empty(struct kvm *kvm, pte_t *ptep)
227{
228 return kvm_page_empty(ptep);
229}
230
231static inline unsigned long stage2_pgd_index(struct kvm *kvm, phys_addr_t addr)
232{
233 return (((addr) >> stage2_pgdir_shift(kvm)) & (stage2_pgd_ptrs(kvm) - 1));
234}
235
236static inline phys_addr_t
237stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
238{
239 phys_addr_t boundary = (addr + stage2_pgdir_size(kvm)) & stage2_pgdir_mask(kvm);
240
241 return (boundary - 1 < end - 1) ? boundary : end;
242}
243
244#endif
245