linux/mm/mincore.c
// SPDX-License-Identifier: GPL-2.0
/*
 *      linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>

static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
                        unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
        unsigned char present;
        unsigned char *vec = walk->private;

        /*
         * Huge pages mapped by a user process are always resident in RAM
         * and never swapped out, but in theory that should still be checked.
         */
        present = pte && !huge_pte_none(huge_ptep_get(pte));
        for (; addr != end; vec++, addr += PAGE_SIZE)
                *vec = present;
        walk->private = vec;
#else
        BUG();
#endif
        return 0;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
{
        unsigned char present = 0;
        struct page *page;

        /*
         * When tmpfs swaps out a page from a file, any process mapping that
         * file will not get a swp_entry_t in its pte, but rather it is like
         * any other file mapping (ie. marked !present and faulted in with
         * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
         */
#ifdef CONFIG_SWAP
        if (shmem_mapping(mapping)) {
                page = find_get_entry(mapping, pgoff);
                /*
                 * shmem/tmpfs may return swap: account for swapcache
                 * page too.
                 */
                if (radix_tree_exceptional_entry(page)) {
                        swp_entry_t swp = radix_to_swp_entry(page);
                        page = find_get_page(swap_address_space(swp),
                                             swp_offset(swp));
                }
        } else
                page = find_get_page(mapping, pgoff);
#else
        page = find_get_page(mapping, pgoff);
#endif
        if (page) {
                present = PageUptodate(page);
                put_page(page);
        }

        return present;
}

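/*
 * Report residency for a range that has no page-table entries: file-backed
 * pages may still be resident in the page cache, so consult it via
 * mincore_page(); anonymous pages without a pte cannot be resident.
 */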
static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
                                struct vm_area_struct *vma, unsigned char *vec)
{
        unsigned long nr = (end - addr) >> PAGE_SHIFT;
        int i;

        if (vma->vm_file) {
                pgoff_t pgoff;

                pgoff = linear_page_index(vma, addr);
                for (i = 0; i < nr; i++, pgoff++)
                        vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
        } else {
                for (i = 0; i < nr; i++)
                        vec[i] = 0;
        }
        return nr;
}

static int mincore_unmapped_range(unsigned long addr, unsigned long end,
                                   struct mm_walk *walk)
{
        walk->private += __mincore_unmapped_range(addr, end,
                                                  walk->vma, walk->private);
        return 0;
}

static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                        struct mm_walk *walk)
{
        spinlock_t *ptl;
        struct vm_area_struct *vma = walk->vma;
        pte_t *ptep;
        unsigned char *vec = walk->private;
        int nr = (end - addr) >> PAGE_SHIFT;

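        /*
         * If the pmd maps a transparent huge page, the whole range is
         * backed by that single resident huge page, so every byte of the
         * vector can be reported as in core.
         */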
        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                memset(vec, 1, nr);
                spin_unlock(ptl);
                goto out;
        }

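        /*
         * A pmd that is none, bad, or a transparent huge pmd in flux can
         * change under us here; rather than walk potentially unstable page
         * tables, report the whole range as if it were an unmapped hole.
         */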
        if (pmd_trans_unstable(pmd)) {
                __mincore_unmapped_range(addr, end, vma, vec);
                goto out;
        }

        ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        for (; addr != end; ptep++, addr += PAGE_SIZE) {
                pte_t pte = *ptep;

                if (pte_none(pte))
                        __mincore_unmapped_range(addr, addr + PAGE_SIZE,
                                                 vma, vec);
                else if (pte_present(pte))
                        *vec = 1;
                else { /* pte is a swap entry */
                        swp_entry_t entry = pte_to_swp_entry(pte);

                        if (non_swap_entry(entry)) {
                                /*
                                 * migration or hwpoison entries are always
                                 * uptodate
                                 */
                                *vec = 1;
                        } else {
#ifdef CONFIG_SWAP
                                *vec = mincore_page(swap_address_space(entry),
                                                    swp_offset(entry));
#else
                                WARN_ON(1);
                                *vec = 1;
#endif
                        }
                }
                vec++;
        }
        pte_unmap_unlock(ptep - 1, ptl);
out:
        walk->private += nr;
        cond_resched();
        return 0;
}

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
        struct vm_area_struct *vma;
        unsigned long end;
        int err;
        struct mm_walk mincore_walk = {
                .pmd_entry = mincore_pte_range,
                .pte_hole = mincore_unmapped_range,
                .hugetlb_entry = mincore_hugetlb,
                .private = vec,
        };

        vma = find_vma(current->mm, addr);
        if (!vma || addr < vma->vm_start)
                return -ENOMEM;
        mincore_walk.mm = vma->vm_mm;
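        /*
         * Walk no further than the end of this VMA; the syscall loop calls
         * do_mincore() again for whatever follows, so an unmapped gap after
         * the VMA turns into -ENOMEM on the next call via find_vma() above.
         */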
        end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
        err = walk_page_range(addr, end, &mincore_walk);
        if (err < 0)
                return err;
        return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *              invalid for the address space of this process, or
 *              specify one or more pages which are not currently
 *              mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
                unsigned char __user *, vec)
{
        long retval;
        unsigned long pages;
        unsigned char *tmp;

        /* Check the start address: needs to be page-aligned.. */
        if (start & ~PAGE_MASK)
                return -EINVAL;

        /* ..and we need to be passed a valid user-space range */
        if (!access_ok(VERIFY_READ, (void __user *) start, len))
                return -ENOMEM;

        /* This also avoids any overflows on PAGE_ALIGN */
        pages = len >> PAGE_SHIFT;
        pages += (offset_in_page(len)) != 0;
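        /*
         * The two lines above are simply DIV_ROUND_UP(len, PAGE_SIZE),
         * written so that len + PAGE_SIZE - 1 can never overflow.
         */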

        if (!access_ok(VERIFY_WRITE, vec, pages))
                return -EFAULT;

        tmp = (void *) __get_free_page(GFP_USER);
        if (!tmp)
                return -EAGAIN;

        retval = 0;
        while (pages) {
                /*
                 * Do at most PAGE_SIZE entries per iteration, due to
                 * the temporary buffer size.
                 */
                down_read(&current->mm->mmap_sem);
                retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
                up_read(&current->mm->mmap_sem);

                if (retval <= 0)
                        break;
                if (copy_to_user(vec, tmp, retval)) {
                        retval = -EFAULT;
                        break;
                }
                pages -= retval;
                vec += retval;
                start += retval << PAGE_SHIFT;
                retval = 0;
        }
        free_page((unsigned long) tmp);
        return retval;
}
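
/*
 * Illustrative userspace sketch (not part of the kernel source): one way an
 * application might call mincore(2) on an anonymous mapping.  The mapping
 * size and the touch pattern below are made up for the example.
 *
 *      #define _DEFAULT_SOURCE
 *      #include <stdio.h>
 *      #include <stdlib.h>
 *      #include <string.h>
 *      #include <sys/mman.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              long page = sysconf(_SC_PAGESIZE);
 *              size_t len = 16 * page;
 *              size_t npages = (len + page - 1) / page;
 *              unsigned char *vec;
 *              unsigned char *buf;
 *              size_t i, resident = 0;
 *
 *              buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *              if (buf == MAP_FAILED)
 *                      return 1;
 *              memset(buf, 0, len / 2);        // fault in the first half
 *
 *              vec = malloc(npages);
 *              if (!vec || mincore(buf, len, vec) != 0) {
 *                      perror("mincore");
 *                      return 1;
 *              }
 *              for (i = 0; i < npages; i++)
 *                      resident += vec[i] & 1; // only bit 0 is defined
 *              printf("%zu of %zu pages resident\n", resident, npages);
 *              return 0;
 *      }
 */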