#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such
			 * a page is not CPU accessible and thus is mapped as
			 * a special swap entry; nonetheless it still counts
			 * as a valid regular mapping for the page (and is
			 * accounted as such in the page's map count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry) &&
				    !is_device_exclusive_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}
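
/*
 * A rough sketch of map_pte()'s contract under the three walk modes (a
 * reader's summary of the checks above, not authoritative documentation):
 *
 *	PVMW_SYNC:	map and lock unconditionally; check_pte() filters.
 *	PVMW_MIGRATION:	only a swap-style PTE can hold a migration entry,
 *			so any other PTE is rejected before locking.
 *	(neither):	present PTEs and device-private/device-exclusive
 *			swap entries are candidates; everything else is
 *			rejected without taking the PTL.
 */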

/**
 * check_pte - check if [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) is
 * mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes a pair pte and pfn range
 * for checking
 *
 * page_vma_mapped_walk() found a place where the pfn range is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to
 * an arbitrary page.
 *
 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages).
 *
 * If PVMW_MIGRATION flag is not set, returns true if pvmw->pte points to
 * [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages).
 *
 * Otherwise, return false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	return (pfn - pvmw->pfn) < pvmw->nr_pages;
}
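
/*
 * Note on the final comparison in check_pte(): it relies on unsigned
 * arithmetic. If pfn is below pvmw->pfn, the subtraction wraps around to a
 * huge value and the test fails, so a single compare covers both ends of
 * the range [pvmw->pfn, pvmw->pfn + nr_pages). For example, with
 * pvmw->pfn == 0x1000 and nr_pages == 4, pfn == 0xfff wraps to ULONG_MAX
 * and is rejected, while pfn == 0x1003 yields 3 and is accepted.
 */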

/* Returns true if the two ranges overlap.  Careful to not overflow. */
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{
	if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
		return false;
	if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
		return false;
	return true;
}
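
/*
 * Worked example (assuming HPAGE_PMD_NR == 512, i.e. 2MB PMDs with 4K
 * pages): a PMD entry mapping pfns [pfn, pfn + 511] overlaps the target
 * range [pvmw->pfn, pvmw->pfn + nr_pages - 1] exactly when neither range
 * ends before the other begins, which is what the two comparisons above
 * test, each written so that the addition cannot wrap for valid pfns.
 */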

static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	/* Round up to the next boundary of @size (PMD/PUD/P4D/PGDIR). */
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	/* A wrap to zero means we stepped past the end of the address space. */
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}
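
/*
 * For example, stepping an address of 0x201000 forward by PMD_SIZE (2MB on
 * typical 4K-page configurations) yields
 * (0x201000 + 0x200000) & ~0x1fffff == 0x400000, the start of the next PMD.
 * If the addition wraps past the top of the address space the result is 0,
 * which is replaced by ULONG_MAX so the caller's "address < end" loop
 * condition terminates the walk.
 */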

/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. pfn, nr_pages, vma,
 * address and flags must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to relevant page table entries. @pvmw->ptl is locked. @pvmw->address
 * is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually THP). For PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returned false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long end;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(is_vm_hugetlb_page(vma))) {
		struct hstate *hstate = hstate_vma(vma);
		unsigned long size = huge_page_size(hstate);

		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);

		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, size);
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(hstate, mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	end = vma_address_end(pvmw);
	if (pvmw->pte)
		goto next_pte;
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = READ_ONCE(*pvmw->pmd);

		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
		    (pmd_present(pmde) && pmd_devmap(pmde))) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    !check_pmd(swp_offset(entry), pvmw))
					return not_found(pvmw);
				return true;
			}
			if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (!check_pmd(pmd_pfn(pmde), pvmw))
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    transparent_hugepage_active(vma) &&
			    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw))
			goto next_pte;
this_pte:
		if (check_pte(pvmw))
			return true;
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
			if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
				pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
				spin_lock(pvmw->ptl);
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}
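
/*
 * Example usage (a sketch, not code from this file): rmap walkers such as
 * those in mm/rmap.c typically drive the iterator in a loop, roughly:
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.pfn = folio_pfn(folio),
 *		.nr_pages = folio_nr_pages(folio),
 *		.vma = vma,
 *		.address = address,
 *		.flags = 0,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		... pvmw.pte (or pvmw.pmd for a PMD-level mapping) is
 *		... valid here, with pvmw.ptl held; inspect the entry
 *	}
 *
 * Breaking out of the loop early requires page_vma_mapped_walk_done() to
 * drop the lock and unmap the PTE.
 */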

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn = page_to_pfn(page),
		.nr_pages = 1,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
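
/*
 * Example usage (a sketch with hypothetical context): a caller holding the
 * appropriate rmap locks might test one page against one VMA:
 *
 *	if (page_mapped_in_vma(page, vma))
 *		... the page is currently mapped by this vma ...
 *
 * Note the walk runs with PVMW_SYNC, so map_pte() locks each candidate PTE
 * unconditionally instead of filtering racing entries first, serializing
 * the check against concurrent page-table updates.
 */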