// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>
#include "swap.h"
#include "internal.h"

static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned char present;
	unsigned char *vec = walk->private;
	spinlock_t *ptl;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	/*
	 * Hugepages under user process are always in RAM and never
	 * swapped out, but theoretically it needs to be checked.
	 */
	present = pte && !huge_pte_none_mostly(huge_ptep_get(walk->mm, addr, pte));
	for (; addr != end; vec++, addr += PAGE_SIZE)
		*vec = present;
	walk->private = vec;
	spin_unlock(ptl);
#else
	BUG();
#endif
	return 0;
}
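
/*
 * Example (informal, not from the original sources): with 4 KiB base
 * pages, a single resident 2 MiB hugetlb page spans 512 PAGE_SIZE units,
 * so the loop above fills 512 consecutive bytes of the output vector
 * with the same residency value.
 */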

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
{
	unsigned char present = 0;
	struct folio *folio;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
	folio = filemap_get_incore_folio(mapping, index);
	if (!IS_ERR(folio)) {
		present = folio_test_uptodate(folio);
		folio_put(folio);
	}

	return present;
}
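
/*
 * Note (informal): a folio that is in the page cache but not yet uptodate
 * (e.g. a read still in flight) is reported as not resident above, since
 * mapping and accessing it would still have to wait for I/O.
 */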
75
76static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
77 struct vm_area_struct *vma, unsigned char *vec)
78{
79 unsigned long nr = (end - addr) >> PAGE_SHIFT;
80 int i;
81
82 if (vma->vm_file) {
83 pgoff_t pgoff;
84
85 pgoff = linear_page_index(vma, addr);
86 for (i = 0; i < nr; i++, pgoff++)
87 vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
88 } else {
89 for (i = 0; i < nr; i++)
90 vec[i] = 0;
91 }
92 return nr;
93}
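
/*
 * Note (informal): for a file-backed VMA, a hole in the page tables does
 * not mean the data is absent; the page may sit in the page cache without
 * being mapped into this process, so the cache is consulted above.  For
 * anonymous memory an unmapped page cannot be resident, hence the zeroes.
 */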

static int mincore_unmapped_range(unsigned long addr, unsigned long end,
				   __always_unused int depth,
				   struct mm_walk *walk)
{
	walk->private += __mincore_unmapped_range(addr, end,
						  walk->vma, walk->private);
	return 0;
}

static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			struct mm_walk *walk)
{
	spinlock_t *ptl;
	struct vm_area_struct *vma = walk->vma;
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;
	int step, i;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		memset(vec, 1, nr);
		spin_unlock(ptl);
		goto out;
	}

	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!ptep) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; ptep += step, addr += step * PAGE_SIZE) {
		pte_t pte = ptep_get(ptep);

		step = 1;
		/* We need to do cache lookup, not just seek the pte */
		if (pte_none_mostly(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
		else if (pte_present(pte)) {
			unsigned int batch = pte_batch_hint(ptep, pte);

			if (batch > 1) {
				unsigned int max_nr = (end - addr) >> PAGE_SHIFT;

				step = min_t(unsigned int, batch, max_nr);
			}

			for (i = 0; i < step; i++)
				vec[i] = 1;
		} else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
				/*
				 * migration or hwpoison entries are always
				 * uptodate
				 */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				*vec = mincore_page(swap_address_space(entry),
						    swap_cache_index(entry));
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec += step;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	walk->private += nr;
	cond_resched();
	return 0;
}
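
/*
 * Example (informal): on arm64 with contiguous PTEs, pte_batch_hint() can
 * report e.g. 16 consecutive entries backed by one large folio; step then
 * becomes 16 (clamped to the end of the range) and 16 vector bytes are
 * filled per loop iteration instead of one.
 */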

static inline bool can_do_mincore(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * Reveal pagecache information only for non-anonymous mappings that
	 * correspond to the files the calling process could (if tried) open
	 * for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel.
	 */
	return inode_owner_or_capable(&nop_mnt_idmap,
				      file_inode(vma->vm_file)) ||
		file_permission(vma->vm_file, MAY_WRITE) == 0;
}

static const struct mm_walk_ops mincore_walk_ops = {
	.pmd_entry = mincore_pte_range,
	.pte_hole = mincore_unmapped_range,
	.hugetlb_entry = mincore_hugetlb,
	.walk_lock = PGWALK_RDLOCK,
};
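
/*
 * Note (informal): the page-table walk dispatches mapped PMD/PTE ranges to
 * mincore_pte_range(), unpopulated ranges to mincore_unmapped_range(), and
 * hugetlb VMAs to mincore_hugetlb().  PGWALK_RDLOCK means the walk expects
 * the mmap read lock, which the mincore(2) loop below takes around each
 * do_mincore() call.
 */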

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int err;

	vma = vma_lookup(current->mm, addr);
	if (!vma)
		return -ENOMEM;
	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
	if (!can_do_mincore(vma)) {
		unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
		memset(vec, 1, pages);
		return pages;
	}
	err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec);
	if (err < 0)
		return err;
	return (end - addr) >> PAGE_SHIFT;
}
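
/*
 * Note (informal): do_mincore() never crosses a VMA boundary; end is
 * clamped to vma->vm_end, so one call fills at most the remainder of the
 * current VMA, and the syscall loop below advances start by the returned
 * page count until the whole range is covered.
 */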

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - len is greater than the length of the mapping,
 *		or any of the specified addresses is not a valid
 *		memory address.
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	start = untagged_addr(start);

	/* Check the start address: needs to be page-aligned.. */
	if (unlikely(start & ~PAGE_MASK))
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok((void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (offset_in_page(len)) != 0;

	if (!access_ok(vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		mmap_read_lock(current->mm);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		mmap_read_unlock(current->mm);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}
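
/*
 * Userspace usage sketch (illustrative only, not part of this file's
 * code): query which pages of a private file mapping are resident.
 * Error handling is omitted; fd is assumed to be an open file descriptor.
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	size_t len = 4 * sysconf(_SC_PAGESIZE);
 *	void *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	unsigned char vec[4];
 *	int rc = mincore(p, len, vec);
 *
 * On success (rc == 0), vec[i] & 1 is set iff page i is resident.
 */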