linux/mm/fremap.c
/*
 *	linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

/*
 * Tear down whatever is currently mapped at @addr - a present page, a
 * swap entry, or an old file pte - so a new file pte can be installed.
 */
static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_present(pte)) {
		struct page *page;

		flush_cache_page(vma, addr, pte_pfn(pte));
		pte = ptep_clear_flush(vma, addr, ptep);
		page = vm_normal_page(vma, addr, pte);
		if (page) {
			if (pte_dirty(pte))
				set_page_dirty(page);
			page_remove_rmap(page);
			page_cache_release(page);
			update_hiwater_rss(mm);
			dec_mm_counter(mm, MM_FILEPAGES);
		}
	} else {
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear_not_present_full(mm, addr, ptep, 0);
	}
}

/*
 * Install a file pte at the given virtual memory address, releasing any
 * previously existing mapping.
 */
static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte, ptfile;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;

	ptfile = pgoff_to_pte(pgoff);

	if (!pte_none(*pte)) {
		/* pte_file_mksoft_dirty() returns the updated pte */
		if (pte_present(*pte) && pte_soft_dirty(*pte))
			ptfile = pte_file_mksoft_dirty(ptfile);
		zap_pte(mm, vma, addr, pte);
	}

	set_pte_at(mm, addr, pte, ptfile);
	/*
	 * We don't need to run update_mmu_cache() here because the "file pte"
	 * being installed by install_file_pte() is not a real pte - it's a
	 * non-present entry (like a swap entry), noting what file offset should
	 * be mapped there when there's a fault (in a non-linear vma where
	 * that's not obvious).
	 */
	pte_unmap_unlock(pte, ptl);
	err = 0;
out:
	return err;
}
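
/*
 * For reference, a sketch of the consumer side (based on this era's
 * fault path in mm/memory.c): when a fault later hits one of the
 * non-present "file pte" entries installed above in a VM_NONLINEAR
 * vma, do_nonlinear_fault() decodes the file offset that
 * install_file_pte() encoded, roughly:
 *
 *	pgoff_t pgoff = pte_to_pgoff(orig_pte);
 *
 * and hands that pgoff to the file's ->fault handler to bring the
 * right page in.
 */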

int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
			     unsigned long size, pgoff_t pgoff)
{
	struct mm_struct *mm = vma->vm_mm;
	int err;

	do {
		err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
		if (err)
			return err;

		size -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	} while (size);

	return 0;
}
EXPORT_SYMBOL(generic_file_remap_pages);
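
/*
 * Illustrative sketch (not part of this file; "example_file_vm_ops" is
 * a hypothetical name): a filesystem opts in to nonlinear remapping by
 * pointing ->remap_pages at the generic helper from its
 * vm_operations_struct, next to its fault handler:
 *
 *	static const struct vm_operations_struct example_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.remap_pages	= generic_file_remap_pages,
 *	};
 *
 * sys_remap_file_pages() below rejects vmas whose vm_ops lack a
 * ->remap_pages hook.
 */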

/**
 * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range (see NOTE)
 * @pgoff: to-be-mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter will cause no IO.
 *
 * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
 * (shared backing store file).
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the @prot parameter right now is ignored (but must be zero),
 * and the vma's default protection is used. Arbitrary protections
 * might be implemented in the future.
 */
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;
	vm_flags_t vm_flags = 0;

	if (prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Does pgoff wrap? */
	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
		return err;

	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif
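
	/*
	 * Worked example of the address-range wrap check above (assuming
	 * 32-bit longs and 4K pages): start = 0xfffff000 and size = 0x2000
	 * give start + size = 0x1000, which is <= start, so the wrapped
	 * range is rejected with -EINVAL.
	 */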

	/*
	 * We need down_write() to change vma->vm_flags, but start with
	 * down_read(); the get_write_lock path below drops the read lock
	 * and retries with the write lock only when it is actually needed.
	 */
	down_read(&mm->mmap_sem);
retry:
	vma = find_vma(mm, start);

	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma.
	 */
	if (!vma || !(vma->vm_flags & VM_SHARED))
		goto out;

	if (!vma->vm_ops || !vma->vm_ops->remap_pages)
		goto out;

	if (start < vma->vm_start || start + size > vma->vm_end)
		goto out;

	/* Must set VM_NONLINEAR before any pages are populated. */
	if (!(vma->vm_flags & VM_NONLINEAR)) {
		/*
		 * vm_private_data is used as a swapout cursor
		 * in a VM_NONLINEAR vma.
		 */
		if (vma->vm_private_data)
			goto out;

		/* Don't need a nonlinear mapping, exit success */
		if (pgoff == linear_page_index(vma, start)) {
			err = 0;
			goto out;
		}

		if (!has_write_lock) {
get_write_lock:
			up_read(&mm->mmap_sem);
			down_write(&mm->mmap_sem);
			has_write_lock = 1;
			goto retry;
		}
		mapping = vma->vm_file->f_mapping;
		/*
		 * page_mkclean doesn't work on nonlinear vmas, so if
		 * dirty pages need to be accounted, emulate with linear
		 * vmas.
		 */
		if (mapping_cap_account_dirty(mapping)) {
			unsigned long addr;
			struct file *file = get_file(vma->vm_file);

			addr = mmap_region(file, start, size,
					vma->vm_flags, pgoff);
			fput(file);
			if (IS_ERR_VALUE(addr)) {
				err = addr;
			} else {
				BUG_ON(addr != start);
				err = 0;
			}
			goto out;
		}
		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma->vm_flags |= VM_NONLINEAR;
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	if (vma->vm_flags & VM_LOCKED) {
		/*
		 * Drop the PG_mlocked flag for the over-mapped range.
		 */
		if (!has_write_lock)
			goto get_write_lock;
		vm_flags = vma->vm_flags;
		munlock_vma_pages_range(vma, start, start + size);
		vma->vm_flags = vm_flags;
	}

	mmu_notifier_invalidate_range_start(mm, start, start + size);
	err = vma->vm_ops->remap_pages(vma, start, size, pgoff);
	mmu_notifier_invalidate_range_end(mm, start, start + size);

	/*
	 * We can't clear VM_NONLINEAR because we'd have to do
	 * it after ->populate completes, and that would prevent
	 * downgrading the lock.  (Locks can't be upgraded).
	 */

out:
	if (vma)
		vm_flags = vma->vm_flags;
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);
	if (!err && ((vm_flags & VM_LOCKED) || !(flags & MAP_NONBLOCK)))
		mm_populate(start, size);

	return err;
}
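
/*
 * Userspace usage sketch (illustrative only; error handling elided, and
 * "fd"/"NPAGES" are assumptions: an open file descriptor backed by at
 * least NPAGES pages). The syscall above is normally reached through
 * the remap_file_pages(2) wrapper, e.g. to reverse the page order of a
 * shared mapping in place without creating one vma per page:
 *
 *	long psz = sysconf(_SC_PAGESIZE);
 *	char *win = mmap(NULL, NPAGES * psz, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	for (size_t i = 0; i < NPAGES; i++)
 *		remap_file_pages(win + i * psz, psz, 0,
 *				 NPAGES - 1 - i, 0);
 *
 * After the loop, the first page of the window maps the last page of
 * the file.
 */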