linux/mm/mincore.c
/*
 *      linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>

static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
                        unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
        unsigned char present;
        unsigned char *vec = walk->private;

        /*
         * Hugepages mapped by a user process are always resident in RAM
         * and never swapped out, but in theory the entry still needs to
         * be checked.
         */
        present = pte && !huge_pte_none(huge_ptep_get(pte));
        for (; addr != end; vec++, addr += PAGE_SIZE)
                *vec = present;
        walk->private = vec;
#else
        BUG();
#endif
        return 0;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
{
        unsigned char present = 0;
        struct page *page;

        /*
         * When tmpfs swaps out a page from a file, any process mapping that
         * file will not get a swp_entry_t in its pte, but rather it is like
         * any other file mapping (i.e. marked !present and faulted in with
         * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
         */
#ifdef CONFIG_SWAP
        if (shmem_mapping(mapping)) {
                page = find_get_entry(mapping, pgoff);
                /*
                 * shmem/tmpfs may return swap: account for swapcache
                 * page too.
                 */
                if (radix_tree_exceptional_entry(page)) {
                        swp_entry_t swp = radix_to_swp_entry(page);
                        page = find_get_page(swap_address_space(swp),
                                             swp_offset(swp));
                }
        } else
                page = find_get_page(mapping, pgoff);
#else
        page = find_get_page(mapping, pgoff);
#endif
        if (page) {
                present = PageUptodate(page);
                put_page(page);
        }

        return present;
}

/*
 * For ranges with no page table entries, residency is decided by the
 * page cache: a file-backed page may be resident even though nothing
 * maps it here, while an anonymous page with no pte cannot be resident.
 */
static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
                                struct vm_area_struct *vma, unsigned char *vec)
{
        unsigned long nr = (end - addr) >> PAGE_SHIFT;
        int i;

        if (vma->vm_file) {
                pgoff_t pgoff;

                pgoff = linear_page_index(vma, addr);
                for (i = 0; i < nr; i++, pgoff++)
                        vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
        } else {
                for (i = 0; i < nr; i++)
                        vec[i] = 0;
        }
        return nr;
}

static int mincore_unmapped_range(unsigned long addr, unsigned long end,
                                   struct mm_walk *walk)
{
        walk->private += __mincore_unmapped_range(addr, end,
                                                  walk->vma, walk->private);
        return 0;
}

static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                        struct mm_walk *walk)
{
        spinlock_t *ptl;
        struct vm_area_struct *vma = walk->vma;
        pte_t *ptep;
        unsigned char *vec = walk->private;
        int nr = (end - addr) >> PAGE_SHIFT;

        /* A present transparent huge pmd maps the whole range at once. */
        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                memset(vec, 1, nr);
                spin_unlock(ptl);
                goto out;
        }

        if (pmd_trans_unstable(pmd)) {
                __mincore_unmapped_range(addr, end, vma, vec);
                goto out;
        }

        ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        for (; addr != end; ptep++, addr += PAGE_SIZE) {
                pte_t pte = *ptep;

                if (pte_none(pte))
                        __mincore_unmapped_range(addr, addr + PAGE_SIZE,
                                                 vma, vec);
                else if (pte_present(pte))
                        *vec = 1;
                else { /* pte is a swap entry */
                        swp_entry_t entry = pte_to_swp_entry(pte);

                        if (non_swap_entry(entry)) {
                                /*
                                 * migration or hwpoison entries are always
                                 * uptodate
                                 */
                                *vec = 1;
                        } else {
#ifdef CONFIG_SWAP
                                *vec = mincore_page(swap_address_space(entry),
                                                    swp_offset(entry));
#else
                                WARN_ON(1);
                                *vec = 1;
#endif
                        }
                }
                vec++;
        }
        pte_unmap_unlock(ptep - 1, ptl);
out:
        walk->private += nr;
        cond_resched();
        return 0;
}

/*
 * Do a chunk of "sys_mincore()". We've already checked all the
 * arguments and we hold the mmap semaphore: we should just return
 * the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
        struct vm_area_struct *vma;
        unsigned long end;
        int err;
        struct mm_walk mincore_walk = {
                .pmd_entry = mincore_pte_range,
                .pte_hole = mincore_unmapped_range,
                .hugetlb_entry = mincore_hugetlb,
                .private = vec,
        };

        vma = find_vma(current->mm, addr);
        if (!vma || addr < vma->vm_start)
                return -ENOMEM;
        mincore_walk.mm = vma->vm_mm;
        end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
        err = walk_page_range(addr, end, &mincore_walk);
        if (err < 0)
                return err;
        return (end - addr) >> PAGE_SHIFT;
}

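/*
 * Note: the walk above is clamped to the current vma, so a single call
 * may cover fewer pages than requested.  Worked example (hypothetical
 * layout): if [addr, addr + 3 * PAGE_SIZE) starts two pages before
 * vma->vm_end, this returns 2; the caller below then advances start
 * and vec by two pages and calls again for the remaining page.
 */
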
/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len) are
 *              invalid for the address space of this process, or
 *              specify one or more pages which are not currently
 *              mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
                unsigned char __user *, vec)
{
        long retval;
        unsigned long pages;
        unsigned char *tmp;

        /* Check the start address: needs to be page-aligned.. */
        if (start & ~PAGE_MASK)
                return -EINVAL;

        /* ..and we need to be passed a valid user-space range */
        if (!access_ok(VERIFY_READ, (void __user *) start, len))
                return -ENOMEM;

        /* This also avoids any overflows on PAGE_ALIGN */
        pages = len >> PAGE_SHIFT;
        pages += (offset_in_page(len)) != 0;

        if (!access_ok(VERIFY_WRITE, vec, pages))
                return -EFAULT;

        tmp = (void *) __get_free_page(GFP_USER);
        if (!tmp)
                return -EAGAIN;

        retval = 0;
        while (pages) {
                /*
                 * Do at most PAGE_SIZE entries per iteration, due to
                 * the temporary buffer size.
                 */
                down_read(&current->mm->mmap_sem);
                retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
                up_read(&current->mm->mmap_sem);

                if (retval <= 0)
                        break;
                if (copy_to_user(vec, tmp, retval)) {
                        retval = -EFAULT;
                        break;
                }
                pages -= retval;
                vec += retval;
                start += retval << PAGE_SHIFT;
                retval = 0;
        }
        free_page((unsigned long) tmp);
        return retval;
}

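/*
 * Illustrative userspace sketch (not part of this kernel file): one
 * plausible way a program might call mincore(2) as documented above.
 * The mapping size and the MAP_ANONYMOUS buffer are assumptions made
 * for the example, not anything this file prescribes.  Extracted on
 * its own, it should build with a plain C compiler on Linux:
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long page = sysconf(_SC_PAGESIZE);
 *		size_t len = 16 * page;
 *		size_t npages = len / page, i, resident = 0;
 *		unsigned char *vec = malloc(npages);
 *		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (!vec || buf == MAP_FAILED)
 *			return 1;
 *
 *		// Touch the first half so those pages become resident.
 *		memset(buf, 0, len / 2);
 *
 *		if (mincore(buf, len, vec) != 0) {
 *			perror("mincore");
 *			return 1;
 *		}
 *		// Only the least significant bit of each byte is defined.
 *		for (i = 0; i < npages; i++)
 *			resident += vec[i] & 1;
 *		printf("%zu of %zu pages resident\n", resident, npages);
 *		return 0;
 *	}
 */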