// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>

static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned char present;
	unsigned char *vec = walk->private;

	/*
	 * Hugepages under user process are always in RAM and never
	 * swapped out, but theoretically it needs to be checked.
	 */
	present = pte && !huge_pte_none(huge_ptep_get(pte));
	for (; addr != end; vec++, addr += PAGE_SIZE)
		*vec = present;
	walk->private = vec;
#else
	BUG();
#endif
	return 0;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
{
	unsigned char present = 0;
	struct page *page;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
#ifdef CONFIG_SWAP
	if (shmem_mapping(mapping)) {
		page = find_get_entry(mapping, pgoff);
		/*
		 * shmem/tmpfs may return swap: account for swapcache
		 * page too.
		 */
		if (xa_is_value(page)) {
			swp_entry_t swp = radix_to_swp_entry(page);
			page = find_get_page(swap_address_space(swp),
					     swp_offset(swp));
		}
	} else
		page = find_get_page(mapping, pgoff);
#else
	page = find_get_page(mapping, pgoff);
#endif
	if (page) {
		present = PageUptodate(page);
		put_page(page);
	}

	return present;
}

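/*
 * Fill the residency vector for a range with no page table entries.
 * File-backed pages may still be resident in the page cache; anonymous
 * pages with no pte cannot be resident.  Returns the number of pages covered.
 */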
static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
				   struct vm_area_struct *vma, unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
	return nr;
}

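/* ->pte_hole callback for walk_page_range(): no page tables cover this range. */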
static int mincore_unmapped_range(unsigned long addr, unsigned long end,
				   struct mm_walk *walk)
{
	walk->private += __mincore_unmapped_range(addr, end,
						  walk->vma, walk->private);
	return 0;
}

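/*
 * ->pmd_entry callback for walk_page_range(): a mapped huge pmd is reported
 * as fully resident; otherwise each pte in the range is examined in turn.
 */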
static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			struct mm_walk *walk)
{
	spinlock_t *ptl;
	struct vm_area_struct *vma = walk->vma;
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		memset(vec, 1, nr);
		spin_unlock(ptl);
		goto out;
	}

	if (pmd_trans_unstable(pmd)) {
		__mincore_unmapped_range(addr, end, vma, vec);
		goto out;
	}

	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		pte_t pte = *ptep;

		if (pte_none(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
		else if (pte_present(pte))
			*vec = 1;
		else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
				/*
				 * migration or hwpoison entries are always
				 * uptodate
				 */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				*vec = mincore_page(swap_address_space(entry),
						    swp_offset(entry));
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	walk->private += nr;
	cond_resched();
	return 0;
}

static inline bool can_do_mincore(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * Reveal pagecache information only for non-anonymous mappings that
	 * correspond to the files the calling process could (if tried) open
	 * for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel.
	 */
	return inode_owner_or_capable(file_inode(vma->vm_file)) ||
		inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
}

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int err;
	struct mm_walk mincore_walk = {
		.pmd_entry = mincore_pte_range,
		.pte_hole = mincore_unmapped_range,
		.hugetlb_entry = mincore_hugetlb,
		.private = vec,
	};

	vma = find_vma(current->mm, addr);
	if (!vma || addr < vma->vm_start)
		return -ENOMEM;
	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
	if (!can_do_mincore(vma)) {
		/* Avoid the pagecache side channel: report every page as resident */
		unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
		memset(vec, 1, pages);
		return pages;
	}
	mincore_walk.mm = vma->vm_mm;
	err = walk_page_range(addr, end, &mincore_walk);
	if (err < 0)
		return err;
	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok((void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (offset_in_page(len)) != 0;

	if (!access_ok(vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		down_read(&current->mm->mmap_sem);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		up_read(&current->mm->mmap_sem);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}
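
/*
 * Illustrative userspace sketch (not part of this file) showing how the
 * ABI described above is typically consumed.  "fd" is a hypothetical open
 * file descriptor and error handling is omitted:
 *
 *	long psz = sysconf(_SC_PAGESIZE);
 *	size_t len = 16 * psz;
 *	unsigned char vec[16];
 *	void *buf = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 *	if (mincore(buf, len, vec) == 0) {
 *		int i;
 *
 *		for (i = 0; i < 16; i++)
 *			printf("page %d: %s\n", i,
 *			       (vec[i] & 1) ? "resident" : "not resident");
 *	}
 */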