#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;

	pte = pte_offset_map(pmd, addr);
	for (;;) {
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || !walk->vma) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}

		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if (!walk->pte_entry)
			continue;

		split_huge_pmd(walk->vma, pmd, addr);
		if (pmd_trans_unstable(pmd))
			goto again;
		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}

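/*
 * Illustrative sketch, not part of this file: walk_pmd_range() above calls
 * ->pmd_entry() before any THP splitting, so a pmd_entry handler must cope
 * with trans-huge pmds itself. The name thp_aware_pmd_entry is hypothetical.
 *
 *	static int thp_aware_pmd_entry(pmd_t *pmd, unsigned long addr,
 *				       unsigned long end, struct mm_walk *walk)
 *	{
 *		spinlock_t *ptl = pmd_trans_huge_lock(pmd, walk->vma);
 *
 *		if (ptl) {
 *			// the pmd maps a huge page: handle it as one unit
 *			spin_unlock(ptl);
 *			return 0;
 *		}
 *		// a normal pmd: returning 0 lets the walker descend to the
 *		// ptes when ->pte_entry is also set
 *		return 0;
 *	}
 */
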
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(p4d, addr);
	do {
 again:
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || !walk->vma) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}

		if (walk->pud_entry) {
			spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);

			if (ptl) {
				err = walk->pud_entry(pud, addr, next, walk);
				spin_unlock(ptl);
				if (err)
					break;
				continue;
			}
		}

		split_huge_pud(walk->vma, pud, addr);
		if (pud_none(*pud))
			goto again;

		if (walk->pmd_entry || walk->pte_entry)
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;
	int err = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry || walk->pte_entry)
			err = walk_pud_range(p4d, addr, next, walk);
		if (err)
			break;
	} while (p4d++, addr = next, addr != end);

	return err;
}

static int walk_pgd_range(unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;

	pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry || walk->pte_entry)
			err = walk_p4d_range(pgd, addr, next, walk);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	unsigned long sz = huge_page_size(h);
	pte_t *pte;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask, sz);

		if (pte)
			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
		else if (walk->pte_hole)
			err = walk->pte_hole(addr, next, walk);

		if (err)
			break;
	} while (addr = next, addr != end);

	return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */
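
/*
 * Illustrative sketch, not part of this file: a minimal ->hugetlb_entry
 * callback. Each call covers one huge page and hmask is the mask for that
 * page size. The names count_hugetlb and nr_present are hypothetical.
 *
 *	static int count_hugetlb(pte_t *pte, unsigned long hmask,
 *				 unsigned long addr, unsigned long end,
 *				 struct mm_walk *walk)
 *	{
 *		pte_t entry = huge_ptep_get(pte);
 *		unsigned long *nr_present = walk->private;
 *
 *		if (pte_present(entry))
 *			(*nr_present)++;
 *		return 0;
 *	}
 */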

/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. A negative value means
 * an error, where we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
			  struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;

	if (walk->test_walk)
		return walk->test_walk(start, end, walk);

	/*
	 * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
	 * range, so we don't walk over it as we do for normal vmas. However,
	 * some callers are interested in handling hole range and they don't
	 * want to just ignore any single address range. Such users certainly
	 * define their ->pte_hole() callbacks, so let's delegate them to handle
	 * vma(VM_PFNMAP).
	 */
	if (vma->vm_flags & VM_PFNMAP) {
		int err = 1;
		if (walk->pte_hole)
			err = walk->pte_hole(start, end, walk);
		return err ? err : 1;
	}
	return 0;
}

static int __walk_page_range(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	int err = 0;
	struct vm_area_struct *vma = walk->vma;

	if (vma && is_vm_hugetlb_page(vma)) {
		if (walk->hugetlb_entry)
			err = walk_hugetlb_range(start, end, walk);
	} else
		err = walk_pgd_range(start, end, walk);

	return err;
}

/**
 * walk_page_range - walk page table with caller specific callbacks
 * @start: start address of the virtual address range
 * @end: end address of the virtual address range
 * @walk: mm_walk structure defining the callbacks and the target address space
 *
 * Recursively walk the page table tree of the process represented by @walk->mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific works for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up for some of these
 * callbacks, the associated entries/pages are just skipped.
 * The callbacks are called based on the way the page table has the mapping,
 * so a range backed by a hugetlb vma is handed to hugetlb_entry() while a
 * normal range goes down to pmd_entry()/pte_entry(). pte_hole() is called
 * for address ranges where no page table entry (or no vma) exists.
 *
 * Before starting to walk page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @walk->test_walk() are used for this
 * purpose.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for the access from callbacks. If you want to pass some
 * caller-specific data to callbacks, @walk->private should be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold
 *   @walk->mm->mmap_sem, because these functions traverse the vma list
 *   and/or access the vma's data.
 */
int walk_page_range(unsigned long start, unsigned long end,
		    struct mm_walk *walk)
{
	int err = 0;
	unsigned long next;
	struct vm_area_struct *vma;

	if (start >= end)
		return -EINVAL;

	if (!walk->mm)
		return -EINVAL;

	VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);

	vma = find_vma(walk->mm, start);
	do {
		if (!vma) { /* after the last vma */
			walk->vma = NULL;
			next = end;
		} else if (start < vma->vm_start) { /* outside vmas */
			walk->vma = NULL;
			next = min(end, vma->vm_start);
		} else { /* inside vma */
			walk->vma = vma;
			next = min(end, vma->vm_end);
			vma = vma->vm_next;

			err = walk_page_test(start, next, walk);
			if (err > 0) {
				/*
				 * positive return values are purely for
				 * user-defined purposes, just for special
				 * cases and the walk should continue with
				 * the next vma.
				 */
				err = 0;
				continue;
			}
			if (err < 0)
				break;
		}
		if (walk->vma || walk->pte_hole)
			err = __walk_page_range(start, next, walk);
		if (err)
			break;
	} while (start = next, start < end);
	return err;
}
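
/*
 * Illustrative sketch, not part of this file: a minimal caller that counts
 * the ptes present in a range. The names count_pte_entry, count_present
 * and nr_present are hypothetical.
 *
 *	static int count_pte_entry(pte_t *pte, unsigned long addr,
 *				   unsigned long end, struct mm_walk *walk)
 *	{
 *		unsigned long *nr_present = walk->private;
 *
 *		if (pte_present(*pte))
 *			(*nr_present)++;
 *		return 0;
 *	}
 *
 *	static unsigned long count_present(struct mm_struct *mm,
 *					   unsigned long start,
 *					   unsigned long end)
 *	{
 *		unsigned long nr_present = 0;
 *		struct mm_walk walk = {
 *			.pte_entry = count_pte_entry,
 *			.mm	   = mm,
 *			.private   = &nr_present,
 *		};
 *
 *		down_read(&mm->mmap_sem);
 *		walk_page_range(start, end, &walk);
 *		up_read(&mm->mmap_sem);
 *		return nr_present;
 *	}
 */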

int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
{
	int err;

	if (!walk->mm)
		return -EINVAL;

	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
	VM_BUG_ON(!vma);
	walk->vma = vma;
	err = walk_page_test(vma->vm_start, vma->vm_end, walk);
	if (err > 0)
		return 0;
	if (err < 0)
		return err;
	return __walk_page_range(vma->vm_start, vma->vm_end, walk);
}
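
/*
 * Illustrative sketch, not part of this file: applying one walk to every
 * vma of an mm. walk_page_vma() sets walk->vma itself, so a hypothetical
 * caller only needs the vma list and the read side of mmap_sem:
 *
 *	struct vm_area_struct *vma;
 *	int err = 0;
 *
 *	down_read(&mm->mmap_sem);
 *	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 *		err = walk_page_vma(vma, &walk);
 *		if (err)
 *			break;
 *	}
 *	up_read(&mm->mmap_sem);
 */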