/*
 *      linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

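/*
 * All the page walk handlers below share one convention: walk->private
 * points at the next byte to fill in the caller's output vector.  Each
 * handler stores one byte per base page it covers and advances the
 * cursor past them.
 */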
static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
                        unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
        unsigned char present;
        unsigned char *vec = walk->private;

        /*
         * Hugepages mapped into a user process are always resident in
         * RAM and never swapped out, but in theory the pte still needs
         * to be checked.
         */
        present = pte && !huge_pte_none(huge_ptep_get(pte));
        for (; addr != end; vec++, addr += PAGE_SIZE)
                *vec = present;
        walk->private = vec;
#else
        BUG();
#endif
        return 0;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
{
        unsigned char present = 0;
        struct page *page;

        /*
         * When tmpfs swaps out a page from a file, any process mapping that
         * file will not get a swp_entry_t in its pte, but rather it is like
         * any other file mapping (i.e. marked !present and faulted in with
         * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
         */
#ifdef CONFIG_SWAP
        if (shmem_mapping(mapping)) {
                page = find_get_entry(mapping, pgoff);
                /*
                 * shmem/tmpfs may return swap: account for swapcache
                 * page too.
                 */
                if (radix_tree_exceptional_entry(page)) {
                        swp_entry_t swp = radix_to_swp_entry(page);
                        page = find_get_page(swap_address_space(swp), swp.val);
                }
        } else
                page = find_get_page(mapping, pgoff);
#else
        page = find_get_page(mapping, pgoff);
#endif
        if (page) {
                present = PageUptodate(page);
                page_cache_release(page);
        }

        return present;
}

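/*
 * The page-cache definition above is visible from userspace: pages of
 * a file mapping can already report as resident before the mapping is
 * ever touched, provided the file data sits in the page cache.  An
 * abridged userspace sketch ("somefile" is a placeholder; headers and
 * error handling omitted):
 *
 *      char buf[4096];
 *      unsigned char vec;
 *      int fd = open("somefile", O_RDONLY);
 *      void *map;
 *
 *      read(fd, buf, sizeof(buf));     // pulls page 0 into the page cache
 *      map = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
 *      mincore(map, 4096, &vec);       // vec & 1 is now likely 1
 *
 * "Likely", because reclaim may evict the page between the read() and
 * the mincore() call.
 */
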
static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
                                struct vm_area_struct *vma, unsigned char *vec)
{
        unsigned long nr = (end - addr) >> PAGE_SHIFT;
        int i;

        if (vma->vm_file) {
                pgoff_t pgoff;

                pgoff = linear_page_index(vma, addr);
                for (i = 0; i < nr; i++, pgoff++)
                        vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
        } else {
                for (i = 0; i < nr; i++)
                        vec[i] = 0;
        }
        return nr;
}

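/*
 * Illustration of the index math above: linear_page_index() turns a
 * virtual address into a file page offset.  If a vma maps a file
 * starting at vm_pgoff = 16 and addr is 3 pages past vma->vm_start,
 * the page cache is probed at pgoff 16 + 3 = 19.
 */
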
static int mincore_unmapped_range(unsigned long addr, unsigned long end,
                                   struct mm_walk *walk)
{
        walk->private += __mincore_unmapped_range(addr, end,
                                                  walk->vma, walk->private);
        return 0;
}

static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                        struct mm_walk *walk)
{
        spinlock_t *ptl;
        struct vm_area_struct *vma = walk->vma;
        pte_t *ptep;
        unsigned char *vec = walk->private;
        int nr = (end - addr) >> PAGE_SHIFT;

        if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
                memset(vec, 1, nr);
                spin_unlock(ptl);
                goto out;
        }

        if (pmd_trans_unstable(pmd)) {
                __mincore_unmapped_range(addr, end, vma, vec);
                goto out;
        }

        ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        for (; addr != end; ptep++, addr += PAGE_SIZE) {
                pte_t pte = *ptep;

                if (pte_none(pte))
                        __mincore_unmapped_range(addr, addr + PAGE_SIZE,
                                                 vma, vec);
                else if (pte_present(pte))
                        *vec = 1;
                else { /* pte is a swap entry */
                        swp_entry_t entry = pte_to_swp_entry(pte);

                        if (non_swap_entry(entry)) {
                                /*
                                 * migration or hwpoison entries are always
                                 * uptodate
                                 */
                                *vec = 1;
                        } else {
#ifdef CONFIG_SWAP
                                *vec = mincore_page(swap_address_space(entry),
                                        entry.val);
#else
                                WARN_ON(1);
                                *vec = 1;
#endif
                        }
                }
                vec++;
        }
        pte_unmap_unlock(ptep - 1, ptl);
out:
        walk->private += nr;
        cond_resched();
        return 0;
}

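/*
 * Note on the huge pmd case in mincore_pte_range(): a pmd-mapped
 * transparent huge page is resident as a whole, so the memset() marks
 * every base page in the range at once.  On x86-64 (4 KiB base pages,
 * 2 MiB THP), that is 2 MiB / 4 KiB = 512 vector bytes answered by a
 * single pmd entry.
 */
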
/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
        struct vm_area_struct *vma;
        unsigned long end;
        int err;
        struct mm_walk mincore_walk = {
                .pmd_entry = mincore_pte_range,
                .pte_hole = mincore_unmapped_range,
                .hugetlb_entry = mincore_hugetlb,
                .private = vec,
        };

        vma = find_vma(current->mm, addr);
        if (!vma || addr < vma->vm_start)
                return -ENOMEM;
        mincore_walk.mm = vma->vm_mm;
        end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
        err = walk_page_range(addr, end, &mincore_walk);
        if (err < 0)
                return err;
        return (end - addr) >> PAGE_SHIFT;
}

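/*
 * Worked example of the chunking: if addr lies in a vma with only two
 * pages left before vma->vm_end and the caller asks for five pages,
 * do_mincore() clamps end to vma->vm_end and returns 2; the loop in
 * sys_mincore() below then retries at the old end.  If that address is
 * not covered by any vma, the next call returns -ENOMEM, matching the
 * documented behaviour for unmapped ranges.
 */
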
/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len) are
 *              invalid for the address space of this process, or
 *              specify one or more pages which are not currently
 *              mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
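/*
 * A minimal userspace sketch of the call (illustration only, not part
 * of the kernel; error handling trimmed):
 *
 *      #include <stdio.h>
 *      #include <sys/mman.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              long psz = sysconf(_SC_PAGESIZE);
 *              unsigned char vec[4];
 *              char *buf = mmap(NULL, 4 * psz, PROT_READ | PROT_WRITE,
 *                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *              int i;
 *
 *              buf[0] = 1;     // fault in the first page only
 *              if (mincore(buf, 4 * psz, vec) == 0)
 *                      for (i = 0; i < 4; i++)
 *                              printf("page %d: %d\n", i, vec[i] & 1);
 *              munmap(buf, 4 * psz);
 *              return 0;
 *      }
 *
 * Expected output is 1 for page 0 and 0 for the untouched pages,
 * though any result can go stale as soon as it is reported (see above).
 */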
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
                unsigned char __user *, vec)
{
        long retval;
        unsigned long pages;
        unsigned char *tmp;

        /* Check the start address: needs to be page-aligned.. */
        if (start & ~PAGE_CACHE_MASK)
                return -EINVAL;

        /* ..and we need to be passed a valid user-space range */
        if (!access_ok(VERIFY_READ, (void __user *) start, len))
                return -ENOMEM;

        /* This also avoids any overflows on PAGE_CACHE_ALIGN */
        pages = len >> PAGE_SHIFT;
        pages += (offset_in_page(len)) != 0;

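        /*
         * The vector needs one byte per page, i.e. ceil(len / PAGE_SIZE)
         * entries as computed above: with 4 KiB pages, a len of 10000
         * bytes gives 10000 >> 12 = 2, plus one for the 1808-byte tail,
         * so 3 bytes of vec must be writable.
         */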
        if (!access_ok(VERIFY_WRITE, vec, pages))
                return -EFAULT;

        tmp = (void *) __get_free_page(GFP_USER);
        if (!tmp)
                return -EAGAIN;

        retval = 0;
        while (pages) {
                /*
                 * Do at most PAGE_SIZE entries per iteration, due to
                 * the temporary buffer size.
                 */
                down_read(&current->mm->mmap_sem);
                retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
                up_read(&current->mm->mmap_sem);

                if (retval <= 0)
                        break;
                if (copy_to_user(vec, tmp, retval)) {
                        retval = -EFAULT;
                        break;
                }
                pages -= retval;
                vec += retval;
                start += retval << PAGE_SHIFT;
                retval = 0;
        }
        free_page((unsigned long) tmp);
        return retval;
}