linux/mm/nommu.c
   1/*
   2 *  linux/mm/nommu.c
   3 *
   4 *  Replacement code for mm functions to support CPUs that don't
   5 *  have any form of memory management unit (thus no virtual memory).
   6 *
   7 *  See Documentation/nommu-mmap.txt
   8 *
   9 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
  10 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
  11 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
  12 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
  13 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
  14 */
  15
  16#include <linux/export.h>
  17#include <linux/mm.h>
  18#include <linux/mman.h>
  19#include <linux/swap.h>
  20#include <linux/file.h>
  21#include <linux/highmem.h>
  22#include <linux/pagemap.h>
  23#include <linux/slab.h>
  24#include <linux/vmalloc.h>
  25#include <linux/blkdev.h>
  26#include <linux/backing-dev.h>
  27#include <linux/mount.h>
  28#include <linux/personality.h>
  29#include <linux/security.h>
  30#include <linux/syscalls.h>
  31#include <linux/audit.h>
  32#include <linux/sched/sysctl.h>
  33
  34#include <asm/uaccess.h>
  35#include <asm/tlb.h>
  36#include <asm/tlbflush.h>
  37#include <asm/mmu_context.h>
  38#include "internal.h"
  39
  40#if 0
  41#define kenter(FMT, ...) \
  42        printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
  43#define kleave(FMT, ...) \
  44        printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
  45#define kdebug(FMT, ...) \
  46        printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
  47#else
  48#define kenter(FMT, ...) \
  49        no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
  50#define kleave(FMT, ...) \
  51        no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
  52#define kdebug(FMT, ...) \
  53        no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
  54#endif
  55
  56void *high_memory;
  57struct page *mem_map;
  58unsigned long max_mapnr;
  59unsigned long num_physpages;
  60unsigned long highest_memmap_pfn;
  61struct percpu_counter vm_committed_as;
  62int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
  63int sysctl_overcommit_ratio = 50; /* default is 50% */
  64unsigned long sysctl_overcommit_kbytes __read_mostly;
  65int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
  66int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
  67unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
  68unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
  69int heap_stack_gap = 0;
  70
  71atomic_long_t mmap_pages_allocated;
  72
  73/*
  74 * The global memory commitment made in the system is a metric that
  75 * can be used to drive ballooning decisions when Linux is hosted as
  76 * a guest. On Hyper-V, the host implements a policy engine for
  77 * dynamically balancing memory across the virtual machines it hosts.
  78 * Several metrics drive this policy engine, including the
  79 * guest-reported memory commitment.
  80 */
  81unsigned long vm_memory_committed(void)
  82{
  83        return percpu_counter_read_positive(&vm_committed_as);
  84}
  85
  86EXPORT_SYMBOL_GPL(vm_memory_committed);
  87
  88EXPORT_SYMBOL(mem_map);
  89EXPORT_SYMBOL(num_physpages);
  90
  91/* list of mapped, potentially shareable regions */
  92static struct kmem_cache *vm_region_jar;
  93struct rb_root nommu_region_tree = RB_ROOT;
  94DECLARE_RWSEM(nommu_region_sem);
  95
  96const struct vm_operations_struct generic_file_vm_ops = {
  97};
  98
  99/*
 100 * Return the total memory allocated for this pointer, not
 101 * just what the caller asked for.
 102 *
 103 * Doesn't have to be accurate, i.e. may have races.
 104 */
 105unsigned int kobjsize(const void *objp)
 106{
 107        struct page *page;
 108
 109        /*
 110         * If the object we have should not have ksize performed on it,
 111         * return size of 0
 112         */
 113        if (!objp || !virt_addr_valid(objp))
 114                return 0;
 115
 116        page = virt_to_head_page(objp);
 117
 118        /*
 119         * If the allocator sets PageSlab, we know the pointer came from
 120         * kmalloc().
 121         */
 122        if (PageSlab(page))
 123                return ksize(objp);
 124
 125        /*
 126         * If it's not a compound page, see if we have a matching VMA
 127         * region. This test is intentionally done in reverse order,
 128         * so if there's no VMA, we still fall through and hand back
 129         * PAGE_SIZE for 0-order pages.
 130         */
 131        if (!PageCompound(page)) {
 132                struct vm_area_struct *vma;
 133
 134                vma = find_vma(current->mm, (unsigned long)objp);
 135                if (vma)
 136                        return vma->vm_end - vma->vm_start;
 137        }
 138
 139        /*
 140         * The ksize() function is only guaranteed to work for pointers
 141         * returned by kmalloc(). So handle arbitrary pointers here.
 142         */
 143        return PAGE_SIZE << compound_order(page);
 144}
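
/*
 * Usage sketch (illustrative only): a caller can compare the size it
 * asked for with the size kobjsize() reports; a 60 byte kmalloc(), for
 * instance, is usually backed by a 64 byte slab object:
 *
 *	void *p = kmalloc(60, GFP_KERNEL);
 *
 *	if (p)
 *		printk(KERN_DEBUG "asked for 60, kobjsize() = %u\n",
 *		       kobjsize(p));
 *	kfree(p);
 */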
 145
 146long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 147                      unsigned long start, unsigned long nr_pages,
 148                      unsigned int foll_flags, struct page **pages,
 149                      struct vm_area_struct **vmas, int *nonblocking)
 150{
 151        struct vm_area_struct *vma;
 152        unsigned long vm_flags;
 153        int i;
 154
 155        /* calculate required read or write permissions.
 156         * If FOLL_FORCE is set, we only require the "MAY" flags.
 157         */
 158        vm_flags  = (foll_flags & FOLL_WRITE) ?
 159                        (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
 160        vm_flags &= (foll_flags & FOLL_FORCE) ?
 161                        (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 162
 163        for (i = 0; i < nr_pages; i++) {
 164                vma = find_vma(mm, start);
 165                if (!vma)
 166                        goto finish_or_fault;
 167
 168                /* protect what we can, including chardevs */
 169                if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
 170                    !(vm_flags & vma->vm_flags))
 171                        goto finish_or_fault;
 172
 173                if (pages) {
 174                        pages[i] = virt_to_page(start);
 175                        if (pages[i])
 176                                page_cache_get(pages[i]);
 177                }
 178                if (vmas)
 179                        vmas[i] = vma;
 180                start = (start + PAGE_SIZE) & PAGE_MASK;
 181        }
 182
 183        return i;
 184
 185finish_or_fault:
 186        return i ? : -EFAULT;
 187}
 188
 189/*
 190 * get a list of pages in an address range belonging to the specified process
 191 * and indicate the VMA that covers each page
 192 * - this is potentially dodgy as we may end up incrementing the page count of a
 193 *   slab page or a secondary page from a compound page
 194 * - don't permit access to VMAs that don't support it, such as I/O mappings
 195 */
 196long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 197                    unsigned long start, unsigned long nr_pages,
 198                    int write, int force, struct page **pages,
 199                    struct vm_area_struct **vmas)
 200{
 201        int flags = 0;
 202
 203        if (write)
 204                flags |= FOLL_WRITE;
 205        if (force)
 206                flags |= FOLL_FORCE;
 207
 208        return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
 209                                NULL);
 210}
 211EXPORT_SYMBOL(get_user_pages);
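
/*
 * Usage sketch (illustrative only): a driver pinning a single page of
 * the calling process might do something like the following, where
 * "uaddr" is a hypothetical user address and error handling is trimmed:
 *
 *	struct page *page;
 *	long n;
 *
 *	down_read(&current->mm->mmap_sem);
 *	n = get_user_pages(current, current->mm, uaddr, 1, 1, 0,
 *			   &page, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (n == 1) {
 *		... access page_address(page) ...
 *		page_cache_release(page);
 *	}
 */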
 212
 213long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
 214                           unsigned long start, unsigned long nr_pages,
 215                           int write, int force, struct page **pages,
 216                           int *locked)
 217{
 218        return get_user_pages(tsk, mm, start, nr_pages, write, force,
 219                              pages, NULL);
 220}
 221EXPORT_SYMBOL(get_user_pages_locked);
 222
 223long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 224                               unsigned long start, unsigned long nr_pages,
 225                               int write, int force, struct page **pages,
 226                               unsigned int gup_flags)
 227{
 228        long ret;
 229        down_read(&mm->mmap_sem);
 230        ret = get_user_pages(tsk, mm, start, nr_pages, write, force,
 231                             pages, NULL);
 232        up_read(&mm->mmap_sem);
 233        return ret;
 234}
 235EXPORT_SYMBOL(__get_user_pages_unlocked);
 236
 237long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 238                             unsigned long start, unsigned long nr_pages,
 239                             int write, int force, struct page **pages)
 240{
 241        return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
 242                                         force, pages, 0);
 243}
 244EXPORT_SYMBOL(get_user_pages_unlocked);
 245
 246/**
 247 * follow_pfn - look up PFN at a user virtual address
 248 * @vma: memory mapping
 249 * @address: user virtual address
 250 * @pfn: location to store found PFN
 251 *
 252 * Only IO mappings and raw PFN mappings are allowed.
 253 *
 254 * Returns zero and stores the PFN at @pfn on success, or a negative error otherwise.
 255 */
 256int follow_pfn(struct vm_area_struct *vma, unsigned long address,
 257        unsigned long *pfn)
 258{
 259        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
 260                return -EINVAL;
 261
 262        *pfn = address >> PAGE_SHIFT;
 263        return 0;
 264}
 265EXPORT_SYMBOL(follow_pfn);
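
/*
 * Usage sketch (illustrative only): with no MMU the PFN is simply the
 * address shifted down, so a caller holding mmap_sem might do the
 * following ("addr" is a hypothetical user address):
 *
 *	struct vm_area_struct *vma;
 *	unsigned long pfn;
 *
 *	vma = find_vma(current->mm, addr);
 *	if (vma && follow_pfn(vma, addr, &pfn) == 0)
 *		printk(KERN_DEBUG "%lx -> pfn %lx\n", addr, pfn);
 */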
 266
 267LIST_HEAD(vmap_area_list);
 268
 269void vfree(const void *addr)
 270{
 271        kfree(addr);
 272}
 273EXPORT_SYMBOL(vfree);
 274
 275void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 276{
 277        /*
 278         *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
 279         * returns only a logical address.
 280         */
 281        return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 282}
 283EXPORT_SYMBOL(__vmalloc);
 284
 285void *vmalloc_user(unsigned long size)
 286{
 287        void *ret;
 288
 289        ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
 290                        PAGE_KERNEL);
 291        if (ret) {
 292                struct vm_area_struct *vma;
 293
 294                down_write(&current->mm->mmap_sem);
 295                vma = find_vma(current->mm, (unsigned long)ret);
 296                if (vma)
 297                        vma->vm_flags |= VM_USERMAP;
 298                up_write(&current->mm->mmap_sem);
 299        }
 300
 301        return ret;
 302}
 303EXPORT_SYMBOL(vmalloc_user);
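
/*
 * Usage sketch (illustrative only): a driver buffer that is meant to be
 * mmap()ed into userspace is typically allocated this way and then
 * exposed from the driver's ->mmap() handler, e.g. with
 * remap_vmalloc_range(); "buf" and "buf_size" are hypothetical:
 *
 *	buf = vmalloc_user(buf_size);
 *	if (!buf)
 *		return -ENOMEM;
 */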
 304
 305struct page *vmalloc_to_page(const void *addr)
 306{
 307        return virt_to_page(addr);
 308}
 309EXPORT_SYMBOL(vmalloc_to_page);
 310
 311unsigned long vmalloc_to_pfn(const void *addr)
 312{
 313        return page_to_pfn(virt_to_page(addr));
 314}
 315EXPORT_SYMBOL(vmalloc_to_pfn);
 316
 317long vread(char *buf, char *addr, unsigned long count)
 318{
 319        memcpy(buf, addr, count);
 320        return count;
 321}
 322
 323long vwrite(char *buf, char *addr, unsigned long count)
 324{
 325        /* Don't allow overflow */
 326        if ((unsigned long) addr + count < count)
 327                count = -(unsigned long) addr;
 328
 329        memcpy(addr, buf, count);
 330        return count;
 331}
 332
 333/*
 334 *      vmalloc  -  allocate virtually contiguous memory
 335 *
 336 *      @size:          allocation size
 337 *
 338 *      Allocate enough pages to cover @size from the page level
 339 *      allocator and map them into contiguous kernel virtual space.
 340 *
 341 *      For tight control over page level allocator and protection flags
 342 *      use __vmalloc() instead.
 343 */
 344void *vmalloc(unsigned long size)
 345{
 346       return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 347}
 348EXPORT_SYMBOL(vmalloc);
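
/*
 * Usage sketch (illustrative only): callers use the usual
 * vmalloc()/vfree() pair; under !MMU both fall through to
 * kmalloc()/kfree() above, so large allocations still need physically
 * contiguous memory ("tbl_size" is hypothetical):
 *
 *	void *tbl = vmalloc(tbl_size);
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 */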
 349
 350/*
 351 *      vzalloc - allocate virtually contiguous memory with zero fill
 352 *
 353 *      @size:          allocation size
 354 *
 355 *      Allocate enough pages to cover @size from the page level
 356 *      allocator and map them into contiguous kernel virtual space.
 357 *      The memory allocated is set to zero.
 358 *
 359 *      For tight control over page level allocator and protection flags
 360 *      use __vmalloc() instead.
 361 */
 362void *vzalloc(unsigned long size)
 363{
 364        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
 365                        PAGE_KERNEL);
 366}
 367EXPORT_SYMBOL(vzalloc);
 368
 369/**
 370 * vmalloc_node - allocate memory on a specific node
 371 * @size:       allocation size
 372 * @node:       numa node
 373 *
 374 * Allocate enough pages to cover @size from the page level
 375 * allocator and map them into contiguous kernel virtual space.
 376 *
 377 * For tight control over page level allocator and protection flags
 378 * use __vmalloc() instead.
 379 */
 380void *vmalloc_node(unsigned long size, int node)
 381{
 382        return vmalloc(size);
 383}
 384EXPORT_SYMBOL(vmalloc_node);
 385
 386/**
 387 * vzalloc_node - allocate memory on a specific node with zero fill
 388 * @size:       allocation size
 389 * @node:       numa node
 390 *
 391 * Allocate enough pages to cover @size from the page level
 392 * allocator and map them into contiguous kernel virtual space.
 393 * The memory allocated is set to zero.
 394 *
 395 * For tight control over page level allocator and protection flags
 396 * use __vmalloc() instead.
 397 */
 398void *vzalloc_node(unsigned long size, int node)
 399{
 400        return vzalloc(size);
 401}
 402EXPORT_SYMBOL(vzalloc_node);
 403
 404#ifndef PAGE_KERNEL_EXEC
 405# define PAGE_KERNEL_EXEC PAGE_KERNEL
 406#endif
 407
 408/**
 409 *      vmalloc_exec  -  allocate virtually contiguous, executable memory
 410 *      @size:          allocation size
 411 *
 412 *      Kernel-internal function to allocate enough pages to cover @size
 413 *      from the page level allocator and map them into contiguous and
 414 *      executable kernel virtual space.
 415 *
 416 *      For tight control over page level allocator and protection flags
 417 *      use __vmalloc() instead.
 418 */
 419
 420void *vmalloc_exec(unsigned long size)
 421{
 422        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
 423}
 424
 425/**
 426 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 427 *      @size:          allocation size
 428 *
 429 *      Allocate enough 32bit PA addressable pages to cover @size from the
 430 *      page level allocator and map them into contiguous kernel virtual space.
 431 */
 432void *vmalloc_32(unsigned long size)
 433{
 434        return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
 435}
 436EXPORT_SYMBOL(vmalloc_32);
 437
 438/**
 439 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 440 *      @size:          allocation size
 441 *
 442 * The resulting memory area is 32bit addressable and zeroed so it can be
 443 * mapped to userspace without leaking data.
 444 *
 445 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 446 * remap_vmalloc_range() are permissible.
 447 */
 448void *vmalloc_32_user(unsigned long size)
 449{
 450        /*
 451         * We'll have to sort out the ZONE_DMA bits for 64-bit,
 452         * but for now this can simply use vmalloc_user() directly.
 453         */
 454        return vmalloc_user(size);
 455}
 456EXPORT_SYMBOL(vmalloc_32_user);
 457
 458void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
 459{
 460        BUG();
 461        return NULL;
 462}
 463EXPORT_SYMBOL(vmap);
 464
 465void vunmap(const void *addr)
 466{
 467        BUG();
 468}
 469EXPORT_SYMBOL(vunmap);
 470
 471void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 472{
 473        BUG();
 474        return NULL;
 475}
 476EXPORT_SYMBOL(vm_map_ram);
 477
 478void vm_unmap_ram(const void *mem, unsigned int count)
 479{
 480        BUG();
 481}
 482EXPORT_SYMBOL(vm_unmap_ram);
 483
 484void vm_unmap_aliases(void)
 485{
 486}
 487EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 488
 489/*
 490 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 491 * have one.
 492 */
 493void  __attribute__((weak)) vmalloc_sync_all(void)
 494{
 495}
 496
 497/**
 498 *      alloc_vm_area - allocate a range of kernel address space
 499 *      @size:          size of the area
 500 *
 501 *      Returns:        NULL on failure, vm_struct on success
 502 *
 503 *      This function reserves a range of kernel address space, and
 504 *      allocates pagetables to map that range.  No actual mappings
 505 *      are created.  If the kernel address space is not shared
 506 *      between processes, it syncs the pagetable across all
 507 *      processes.
 508 */
 509struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
 510{
 511        BUG();
 512        return NULL;
 513}
 514EXPORT_SYMBOL_GPL(alloc_vm_area);
 515
 516void free_vm_area(struct vm_struct *area)
 517{
 518        BUG();
 519}
 520EXPORT_SYMBOL_GPL(free_vm_area);
 521
 522int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 523                   struct page *page)
 524{
 525        return -EINVAL;
 526}
 527EXPORT_SYMBOL(vm_insert_page);
 528
 529/*
 530 *  sys_brk() for the most part doesn't need the global kernel
 531 *  lock, except when an application is doing something nasty
 532 *  like trying to un-brk an area that has already been mapped
 533 *  to a regular file.  In this case, the unmapping will need
 534 *  to invoke file system routines that need the global lock.
 535 */
 536SYSCALL_DEFINE1(brk, unsigned long, brk)
 537{
 538        struct mm_struct *mm = current->mm;
 539
 540        if (brk < mm->start_brk || brk > mm->context.end_brk)
 541                return mm->brk;
 542
 543        if (mm->brk == brk)
 544                return mm->brk;
 545
 546        /*
 547         * Always allow shrinking brk
 548         */
 549        if (brk <= mm->brk) {
 550                mm->brk = brk;
 551                return brk;
 552        }
 553
 554        /*
 555         * Ok, looks good - let it rip.
 556         */
 557        flush_icache_range(mm->brk, brk);
 558        return mm->brk = brk;
 559}
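
/*
 * Usage sketch (illustrative only, userspace view): the heap can only
 * grow up to mm->context.end_brk, so an sbrk() beyond that limit fails
 * and the caller falls back to mmap() ("increment" and "buf" are
 * hypothetical):
 *
 *	if (sbrk(increment) == (void *)-1)
 *		buf = mmap(NULL, increment, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 */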
 560
 561/*
 562 * initialise the VMA and region record slabs
 563 */
 564void __init mmap_init(void)
 565{
 566        int ret;
 567
 568        ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
 569        VM_BUG_ON(ret);
 570        vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
 571}
 572
 573/*
 574 * validate the region tree
 575 * - the caller must hold the region lock
 576 */
 577#ifdef CONFIG_DEBUG_NOMMU_REGIONS
 578static noinline void validate_nommu_regions(void)
 579{
 580        struct vm_region *region, *last;
 581        struct rb_node *p, *lastp;
 582
 583        lastp = rb_first(&nommu_region_tree);
 584        if (!lastp)
 585                return;
 586
 587        last = rb_entry(lastp, struct vm_region, vm_rb);
 588        BUG_ON(unlikely(last->vm_end <= last->vm_start));
 589        BUG_ON(unlikely(last->vm_top < last->vm_end));
 590
 591        while ((p = rb_next(lastp))) {
 592                region = rb_entry(p, struct vm_region, vm_rb);
 593                last = rb_entry(lastp, struct vm_region, vm_rb);
 594
 595                BUG_ON(unlikely(region->vm_end <= region->vm_start));
 596                BUG_ON(unlikely(region->vm_top < region->vm_end));
 597                BUG_ON(unlikely(region->vm_start < last->vm_top));
 598
 599                lastp = p;
 600        }
 601}
 602#else
 603static void validate_nommu_regions(void)
 604{
 605}
 606#endif
 607
 608/*
 609 * add a region into the global tree
 610 */
 611static void add_nommu_region(struct vm_region *region)
 612{
 613        struct vm_region *pregion;
 614        struct rb_node **p, *parent;
 615
 616        validate_nommu_regions();
 617
 618        parent = NULL;
 619        p = &nommu_region_tree.rb_node;
 620        while (*p) {
 621                parent = *p;
 622                pregion = rb_entry(parent, struct vm_region, vm_rb);
 623                if (region->vm_start < pregion->vm_start)
 624                        p = &(*p)->rb_left;
 625                else if (region->vm_start > pregion->vm_start)
 626                        p = &(*p)->rb_right;
 627                else if (pregion == region)
 628                        return;
 629                else
 630                        BUG();
 631        }
 632
 633        rb_link_node(&region->vm_rb, parent, p);
 634        rb_insert_color(&region->vm_rb, &nommu_region_tree);
 635
 636        validate_nommu_regions();
 637}
 638
 639/*
 640 * delete a region from the global tree
 641 */
 642static void delete_nommu_region(struct vm_region *region)
 643{
 644        BUG_ON(!nommu_region_tree.rb_node);
 645
 646        validate_nommu_regions();
 647        rb_erase(&region->vm_rb, &nommu_region_tree);
 648        validate_nommu_regions();
 649}
 650
 651/*
 652 * free a contiguous series of pages
 653 */
 654static void free_page_series(unsigned long from, unsigned long to)
 655{
 656        for (; from < to; from += PAGE_SIZE) {
 657                struct page *page = virt_to_page(from);
 658
 659                kdebug("- free %lx", from);
 660                atomic_long_dec(&mmap_pages_allocated);
 661                if (page_count(page) != 1)
 662                        kdebug("free page %p: refcount not one: %d",
 663                               page, page_count(page));
 664                put_page(page);
 665        }
 666}
 667
 668/*
 669 * release a reference to a region
 670 * - the caller must hold the region semaphore for writing, which this releases
 671 * - the region may not have been added to the tree yet, in which case vm_top
 672 *   will equal vm_start
 673 */
 674static void __put_nommu_region(struct vm_region *region)
 675        __releases(nommu_region_sem)
 676{
 677        kenter("%p{%d}", region, region->vm_usage);
 678
 679        BUG_ON(!nommu_region_tree.rb_node);
 680
 681        if (--region->vm_usage == 0) {
 682                if (region->vm_top > region->vm_start)
 683                        delete_nommu_region(region);
 684                up_write(&nommu_region_sem);
 685
 686                if (region->vm_file)
 687                        fput(region->vm_file);
 688
 689                /* IO memory and memory shared directly out of the pagecache
 690                 * from ramfs/tmpfs mustn't be released here */
 691                if (region->vm_flags & VM_MAPPED_COPY) {
 692                        kdebug("free series");
 693                        free_page_series(region->vm_start, region->vm_top);
 694                }
 695                kmem_cache_free(vm_region_jar, region);
 696        } else {
 697                up_write(&nommu_region_sem);
 698        }
 699}
 700
 701/*
 702 * release a reference to a region
 703 */
 704static void put_nommu_region(struct vm_region *region)
 705{
 706        down_write(&nommu_region_sem);
 707        __put_nommu_region(region);
 708}
 709
 710/*
 711 * update protection on a vma
 712 */
 713static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
 714{
 715#ifdef CONFIG_MPU
 716        struct mm_struct *mm = vma->vm_mm;
 717        long start = vma->vm_start & PAGE_MASK;
 718        while (start < vma->vm_end) {
 719                protect_page(mm, start, flags);
 720                start += PAGE_SIZE;
 721        }
 722        update_protections(mm);
 723#endif
 724}
 725
 726/*
 727 * add a VMA into a process's mm_struct in the appropriate place in the list
 728 * and tree, and also add it to the address space's page tree if it is not
 729 * an anonymous page
 730 * - should be called with mm->mmap_sem held writelocked
 731 */
 732static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 733{
 734        struct vm_area_struct *pvma, *prev;
 735        struct address_space *mapping;
 736        struct rb_node **p, *parent, *rb_prev;
 737
 738        kenter(",%p", vma);
 739
 740        BUG_ON(!vma->vm_region);
 741
 742        mm->map_count++;
 743        vma->vm_mm = mm;
 744
 745        protect_vma(vma, vma->vm_flags);
 746
 747        /* add the VMA to the mapping */
 748        if (vma->vm_file) {
 749                mapping = vma->vm_file->f_mapping;
 750
 751                mutex_lock(&mapping->i_mmap_mutex);
 752                flush_dcache_mmap_lock(mapping);
 753                vma_interval_tree_insert(vma, &mapping->i_mmap);
 754                flush_dcache_mmap_unlock(mapping);
 755                mutex_unlock(&mapping->i_mmap_mutex);
 756        }
 757
 758        /* add the VMA to the tree */
 759        parent = rb_prev = NULL;
 760        p = &mm->mm_rb.rb_node;
 761        while (*p) {
 762                parent = *p;
 763                pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
 764
 765                /* sort by: start addr, end addr, VMA struct addr in that order
 766                 * (the latter is necessary as we may get identical VMAs) */
 767                if (vma->vm_start < pvma->vm_start)
 768                        p = &(*p)->rb_left;
 769                else if (vma->vm_start > pvma->vm_start) {
 770                        rb_prev = parent;
 771                        p = &(*p)->rb_right;
 772                } else if (vma->vm_end < pvma->vm_end)
 773                        p = &(*p)->rb_left;
 774                else if (vma->vm_end > pvma->vm_end) {
 775                        rb_prev = parent;
 776                        p = &(*p)->rb_right;
 777                } else if (vma < pvma)
 778                        p = &(*p)->rb_left;
 779                else if (vma > pvma) {
 780                        rb_prev = parent;
 781                        p = &(*p)->rb_right;
 782                } else
 783                        BUG();
 784        }
 785
 786        rb_link_node(&vma->vm_rb, parent, p);
 787        rb_insert_color(&vma->vm_rb, &mm->mm_rb);
 788
 789        /* add VMA to the VMA list also */
 790        prev = NULL;
 791        if (rb_prev)
 792                prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
 793
 794        __vma_link_list(mm, vma, prev, parent);
 795}
 796
 797/*
 798 * delete a VMA from its owning mm_struct and address space
 799 */
 800static void delete_vma_from_mm(struct vm_area_struct *vma)
 801{
 802        struct address_space *mapping;
 803        struct mm_struct *mm = vma->vm_mm;
 804
 805        kenter("%p", vma);
 806
 807        protect_vma(vma, 0);
 808
 809        mm->map_count--;
 810        if (mm->mmap_cache == vma)
 811                mm->mmap_cache = NULL;
 812
 813        /* remove the VMA from the mapping */
 814        if (vma->vm_file) {
 815                mapping = vma->vm_file->f_mapping;
 816
 817                mutex_lock(&mapping->i_mmap_mutex);
 818                flush_dcache_mmap_lock(mapping);
 819                vma_interval_tree_remove(vma, &mapping->i_mmap);
 820                flush_dcache_mmap_unlock(mapping);
 821                mutex_unlock(&mapping->i_mmap_mutex);
 822        }
 823
 824        /* remove from the MM's tree and list */
 825        rb_erase(&vma->vm_rb, &mm->mm_rb);
 826
 827        if (vma->vm_prev)
 828                vma->vm_prev->vm_next = vma->vm_next;
 829        else
 830                mm->mmap = vma->vm_next;
 831
 832        if (vma->vm_next)
 833                vma->vm_next->vm_prev = vma->vm_prev;
 834}
 835
 836/*
 837 * destroy a VMA record
 838 */
 839static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 840{
 841        kenter("%p", vma);
 842        if (vma->vm_ops && vma->vm_ops->close)
 843                vma->vm_ops->close(vma);
 844        if (vma->vm_file)
 845                fput(vma->vm_file);
 846        put_nommu_region(vma->vm_region);
 847        kmem_cache_free(vm_area_cachep, vma);
 848}
 849
 850/*
 851 * look up the first VMA in which addr resides, NULL if none
 852 * - should be called with mm->mmap_sem at least held readlocked
 853 */
 854struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 855{
 856        struct vm_area_struct *vma;
 857
 858        /* check the cache first */
 859        vma = ACCESS_ONCE(mm->mmap_cache);
 860        if (vma && vma->vm_start <= addr && vma->vm_end > addr)
 861                return vma;
 862
 863        /* trawl the list (there may be multiple mappings in which addr
 864         * resides) */
 865        for (vma = mm->mmap; vma; vma = vma->vm_next) {
 866                if (vma->vm_start > addr)
 867                        return NULL;
 868                if (vma->vm_end > addr) {
 869                        mm->mmap_cache = vma;
 870                        return vma;
 871                }
 872        }
 873
 874        return NULL;
 875}
 876EXPORT_SYMBOL(find_vma);
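
/*
 * Usage sketch: find_vma() is called with mmap_sem held at least for
 * reading and, unlike the MMU version, only returns a VMA that actually
 * contains addr ("mm" and "addr" stand in for the caller's values):
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma)
 *		... vma->vm_start <= addr < vma->vm_end ...
 *	up_read(&mm->mmap_sem);
 */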
 877
 878/*
 879 * find a VMA
 880 * - we don't extend stack VMAs under NOMMU conditions
 881 */
 882struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
 883{
 884        return find_vma(mm, addr);
 885}
 886
 887/*
 888 * expand a stack to a given address
 889 * - not supported under NOMMU conditions
 890 */
 891int expand_stack(struct vm_area_struct *vma, unsigned long address)
 892{
 893        return -ENOMEM;
 894}
 895
 896/*
 897 * look up the first VMA that exactly matches addr
 898 * - should be called with mm->mmap_sem at least held readlocked
 899 */
 900static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
 901                                             unsigned long addr,
 902                                             unsigned long len)
 903{
 904        struct vm_area_struct *vma;
 905        unsigned long end = addr + len;
 906
 907        /* check the cache first */
 908        vma = mm->mmap_cache;
 909        if (vma && vma->vm_start == addr && vma->vm_end == end)
 910                return vma;
 911
 912        /* trawl the list (there may be multiple mappings in which addr
 913         * resides) */
 914        for (vma = mm->mmap; vma; vma = vma->vm_next) {
 915                if (vma->vm_start < addr)
 916                        continue;
 917                if (vma->vm_start > addr)
 918                        return NULL;
 919                if (vma->vm_end == end) {
 920                        mm->mmap_cache = vma;
 921                        return vma;
 922                }
 923        }
 924
 925        return NULL;
 926}
 927
 928/*
 929 * determine whether a mapping should be permitted and, if so, what sort of
 930 * mapping we're capable of supporting
 931 */
 932static int validate_mmap_request(struct file *file,
 933                                 unsigned long addr,
 934                                 unsigned long len,
 935                                 unsigned long prot,
 936                                 unsigned long flags,
 937                                 unsigned long pgoff,
 938                                 unsigned long *_capabilities)
 939{
 940        unsigned long capabilities, rlen;
 941        int ret;
 942
 943        /* do the simple checks first */
 944        if (flags & MAP_FIXED) {
 945                printk(KERN_DEBUG
 946                       "%d: Can't do fixed-address/overlay mmap of RAM\n",
 947                       current->pid);
 948                return -EINVAL;
 949        }
 950
 951        if ((flags & MAP_TYPE) != MAP_PRIVATE &&
 952            (flags & MAP_TYPE) != MAP_SHARED)
 953                return -EINVAL;
 954
 955        if (!len)
 956                return -EINVAL;
 957
 958        /* Careful about overflows.. */
 959        rlen = PAGE_ALIGN(len);
 960        if (!rlen || rlen > TASK_SIZE)
 961                return -ENOMEM;
 962
 963        /* offset overflow? */
 964        if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
 965                return -EOVERFLOW;
 966
 967        if (file) {
 968                /* validate file mapping requests */
 969                struct address_space *mapping;
 970
 971                /* files must support mmap */
 972                if (!file->f_op || !file->f_op->mmap)
 973                        return -ENODEV;
 974
 975                /* work out if what we've got could possibly be shared
 976                 * - we support chardevs that provide their own "memory"
 977                 * - we support files/blockdevs that are memory backed
 978                 */
 979                mapping = file->f_mapping;
 980                if (!mapping)
 981                        mapping = file_inode(file)->i_mapping;
 982
 983                capabilities = 0;
 984                if (mapping && mapping->backing_dev_info)
 985                        capabilities = mapping->backing_dev_info->capabilities;
 986
 987                if (!capabilities) {
 988                        /* no explicit capabilities set, so assume some
 989                         * defaults */
 990                        switch (file_inode(file)->i_mode & S_IFMT) {
 991                        case S_IFREG:
 992                        case S_IFBLK:
 993                                capabilities = BDI_CAP_MAP_COPY;
 994                                break;
 995
 996                        case S_IFCHR:
 997                                capabilities =
 998                                        BDI_CAP_MAP_DIRECT |
 999                                        BDI_CAP_READ_MAP |
1000                                        BDI_CAP_WRITE_MAP;
1001                                break;
1002
1003                        default:
1004                                return -EINVAL;
1005                        }
1006                }
1007
1008                /* eliminate any capabilities that we can't support on this
1009                 * device */
1010                if (!file->f_op->get_unmapped_area)
1011                        capabilities &= ~BDI_CAP_MAP_DIRECT;
1012                if (!file->f_op->read)
1013                        capabilities &= ~BDI_CAP_MAP_COPY;
1014
1015                /* The file shall have been opened with read permission. */
1016                if (!(file->f_mode & FMODE_READ))
1017                        return -EACCES;
1018
1019                if (flags & MAP_SHARED) {
1020                        /* do checks for writing, appending and locking */
1021                        if ((prot & PROT_WRITE) &&
1022                            !(file->f_mode & FMODE_WRITE))
1023                                return -EACCES;
1024
1025                        if (IS_APPEND(file_inode(file)) &&
1026                            (file->f_mode & FMODE_WRITE))
1027                                return -EACCES;
1028
1029                        if (locks_verify_locked(file))
1030                                return -EAGAIN;
1031
1032                        if (!(capabilities & BDI_CAP_MAP_DIRECT))
1033                                return -ENODEV;
1034
1035                        /* we mustn't privatise shared mappings */
1036                        capabilities &= ~BDI_CAP_MAP_COPY;
1037                }
1038                else {
1039                        /* we're going to read the file into private memory we
1040                         * allocate */
1041                        if (!(capabilities & BDI_CAP_MAP_COPY))
1042                                return -ENODEV;
1043
1044                        /* we don't permit a private writable mapping to be
1045                         * shared with the backing device */
1046                        if (prot & PROT_WRITE)
1047                                capabilities &= ~BDI_CAP_MAP_DIRECT;
1048                }
1049
1050                if (capabilities & BDI_CAP_MAP_DIRECT) {
1051                        if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
1052                            ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
1053                            ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
1054                            ) {
1055                                capabilities &= ~BDI_CAP_MAP_DIRECT;
1056                                if (flags & MAP_SHARED) {
1057                                        printk(KERN_WARNING
1058                                               "MAP_SHARED not completely supported on !MMU\n");
1059                                        return -EINVAL;
1060                                }
1061                        }
1062                }
1063
1064                /* handle executable mappings and implied executable
1065                 * mappings */
1066                if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
1067                        if (prot & PROT_EXEC)
1068                                return -EPERM;
1069                }
1070                else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
1071                        /* handle implication of PROT_EXEC by PROT_READ */
1072                        if (current->personality & READ_IMPLIES_EXEC) {
1073                                if (capabilities & BDI_CAP_EXEC_MAP)
1074                                        prot |= PROT_EXEC;
1075                        }
1076                }
1077                else if ((prot & PROT_READ) &&
1078                         (prot & PROT_EXEC) &&
1079                         !(capabilities & BDI_CAP_EXEC_MAP)
1080                         ) {
1081                        /* backing file is not executable, try to copy */
1082                        capabilities &= ~BDI_CAP_MAP_DIRECT;
1083                }
1084        }
1085        else {
1086                /* anonymous mappings are always memory backed and can be
1087                 * privately mapped
1088                 */
1089                capabilities = BDI_CAP_MAP_COPY;
1090
1091                /* handle PROT_EXEC implication by PROT_READ */
1092                if ((prot & PROT_READ) &&
1093                    (current->personality & READ_IMPLIES_EXEC))
1094                        prot |= PROT_EXEC;
1095        }
1096
1097        /* allow the security API to have its say */
1098        ret = security_mmap_addr(addr);
1099        if (ret < 0)
1100                return ret;
1101
1102        /* looks okay */
1103        *_capabilities = capabilities;
1104        return 0;
1105}
1106
1107/*
1108 * we've determined that we can make the mapping, now translate what we
1109 * now know into VMA flags
1110 */
1111static unsigned long determine_vm_flags(struct file *file,
1112                                        unsigned long prot,
1113                                        unsigned long flags,
1114                                        unsigned long capabilities)
1115{
1116        unsigned long vm_flags;
1117
1118        vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
1119        /* vm_flags |= mm->def_flags; */
1120
1121        if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
1122                /* attempt to share read-only copies of mapped file chunks */
1123                vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1124                if (file && !(prot & PROT_WRITE))
1125                        vm_flags |= VM_MAYSHARE;
1126        } else {
1127                /* overlay a shareable mapping on the backing device or inode
1128                 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
1129                 * romfs/cramfs */
1130                vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
1131                if (flags & MAP_SHARED)
1132                        vm_flags |= VM_SHARED;
1133        }
1134
1135        /* refuse to let anyone share private mappings with this process if
1136         * it's being traced - otherwise breakpoints set in it may interfere
1137         * with another untraced process
1138         */
1139        if ((flags & MAP_PRIVATE) && current->ptrace)
1140                vm_flags &= ~VM_MAYSHARE;
1141
1142        return vm_flags;
1143}
1144
1145/*
1146 * set up a shared mapping on a file (the driver or filesystem provides and
1147 * pins the storage)
1148 */
1149static int do_mmap_shared_file(struct vm_area_struct *vma)
1150{
1151        int ret;
1152
1153        ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
1154        if (ret == 0) {
1155                vma->vm_region->vm_top = vma->vm_region->vm_end;
1156                return 0;
1157        }
1158        if (ret != -ENOSYS)
1159                return ret;
1160
1161        /* getting -ENOSYS indicates that direct mmap isn't possible (as
1162         * opposed to tried but failed) so we can only give a suitable error as
1163         * it's not possible to make a private copy if MAP_SHARED was given */
1164        return -ENODEV;
1165}
1166
1167/*
1168 * set up a private mapping or an anonymous shared mapping
1169 */
1170static int do_mmap_private(struct vm_area_struct *vma,
1171                           struct vm_region *region,
1172                           unsigned long len,
1173                           unsigned long capabilities)
1174{
1175        struct page *pages;
1176        unsigned long total, point, n;
1177        void *base;
1178        int ret, order;
1179
1180        /* invoke the file's mapping function so that it can keep track of
1181         * shared mappings on devices or memory
1182         * - VM_MAYSHARE will be set if it may attempt to share
1183         */
1184        if (capabilities & BDI_CAP_MAP_DIRECT) {
1185                ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
1186                if (ret == 0) {
1187                        /* shouldn't return success if we're not sharing */
1188                        BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
1189                        vma->vm_region->vm_top = vma->vm_region->vm_end;
1190                        return 0;
1191                }
1192                if (ret != -ENOSYS)
1193                        return ret;
1194
1195                /* getting an ENOSYS error indicates that direct mmap isn't
1196                 * possible (as opposed to tried but failed) so we'll try to
1197                 * make a private copy of the data and map that instead */
1198        }
1199
1200
 1201        /* allocate a power-of-2 sized block of pages to hold the mapping;
 1202         * any pages in excess of what the length actually needs may be
 1203         * trimmed off again below
 1204         */
1205        order = get_order(len);
1206        kdebug("alloc order %d for %lx", order, len);
1207
1208        pages = alloc_pages(GFP_KERNEL, order);
1209        if (!pages)
1210                goto enomem;
1211
1212        total = 1 << order;
1213        atomic_long_add(total, &mmap_pages_allocated);
1214
1215        point = len >> PAGE_SHIFT;
1216
1217        /* we allocated a power-of-2 sized page set, so we may want to trim off
1218         * the excess */
1219        if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
1220                while (total > point) {
1221                        order = ilog2(total - point);
1222                        n = 1 << order;
1223                        kdebug("shave %lu/%lu @%lu", n, total - point, total);
1224                        atomic_long_sub(n, &mmap_pages_allocated);
1225                        total -= n;
1226                        set_page_refcounted(pages + total);
1227                        __free_pages(pages + total, order);
1228                }
1229        }
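
        /* Worked example (illustrative only): a five page request rounds up
         * to an order-3 block of eight pages; with a non-zero
         * sysctl_nr_trim_pages of three or less, the loop above first
         * returns an order-1 pair and then a single order-0 page, leaving
         * exactly the five pages that back the mapping.
         */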
1230
1231        for (point = 1; point < total; point++)
1232                set_page_refcounted(&pages[point]);
1233
1234        base = page_address(pages);
1235        region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
1236        region->vm_start = (unsigned long) base;
1237        region->vm_end   = region->vm_start + len;
1238        region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
1239
1240        vma->vm_start = region->vm_start;
1241        vma->vm_end   = region->vm_start + len;
1242
1243        if (vma->vm_file) {
1244                /* read the contents of a file into the copy */
1245                mm_segment_t old_fs;
1246                loff_t fpos;
1247
1248                fpos = vma->vm_pgoff;
1249                fpos <<= PAGE_SHIFT;
1250
1251                old_fs = get_fs();
1252                set_fs(KERNEL_DS);
1253                ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
1254                set_fs(old_fs);
1255
1256                if (ret < 0)
1257                        goto error_free;
1258
1259                /* clear the last little bit */
1260                if (ret < len)
1261                        memset(base + ret, 0, len - ret);
1262
1263        }
1264
1265        return 0;
1266
1267error_free:
1268        free_page_series(region->vm_start, region->vm_top);
1269        region->vm_start = vma->vm_start = 0;
1270        region->vm_end   = vma->vm_end = 0;
1271        region->vm_top   = 0;
1272        return ret;
1273
1274enomem:
 1275        printk(KERN_WARNING "Allocation of length %lu from process %d (%s) failed\n",
1276               len, current->pid, current->comm);
1277        show_free_areas(0);
1278        return -ENOMEM;
1279}
1280
1281/*
1282 * handle mapping creation for uClinux
1283 */
1284unsigned long do_mmap_pgoff(struct file *file,
1285                            unsigned long addr,
1286                            unsigned long len,
1287                            unsigned long prot,
1288                            unsigned long flags,
1289                            unsigned long pgoff,
1290                            unsigned long *populate,
1291                            struct list_head *uf_unused)
1292{
1293        struct vm_area_struct *vma;
1294        struct vm_region *region;
1295        struct rb_node *rb;
1296        unsigned long capabilities, vm_flags, result;
1297        int ret;
1298
1299        kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);
1300
1301        *populate = 0;
1302
1303        /* decide whether we should attempt the mapping, and if so what sort of
1304         * mapping */
1305        ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
1306                                    &capabilities);
1307        if (ret < 0) {
1308                kleave(" = %d [val]", ret);
1309                return ret;
1310        }
1311
1312        /* we ignore the address hint */
1313        addr = 0;
1314        len = PAGE_ALIGN(len);
1315
1316        /* we've determined that we can make the mapping, now translate what we
1317         * now know into VMA flags */
1318        vm_flags = determine_vm_flags(file, prot, flags, capabilities);
1319
1320        /* we're going to need to record the mapping */
1321        region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1322        if (!region)
1323                goto error_getting_region;
1324
1325        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1326        if (!vma)
1327                goto error_getting_vma;
1328
1329        region->vm_usage = 1;
1330        region->vm_flags = vm_flags;
1331        region->vm_pgoff = pgoff;
1332
1333        INIT_LIST_HEAD(&vma->anon_vma_chain);
1334        vma->vm_flags = vm_flags;
1335        vma->vm_pgoff = pgoff;
1336
1337        if (file) {
1338                region->vm_file = get_file(file);
1339                vma->vm_file = get_file(file);
1340        }
1341
1342        down_write(&nommu_region_sem);
1343
1344        /* if we want to share, we need to check for regions created by other
1345         * mmap() calls that overlap with our proposed mapping
1346         * - we can only share with a superset match on most regular files
1347         * - shared mappings on character devices and memory backed files are
 1348 *   permitted to overlap inexactly as far as we are concerned, for in
 1349 *   these cases sharing is handled in the driver or filesystem rather
 1350 *   than here
1351         */
1352        if (vm_flags & VM_MAYSHARE) {
1353                struct vm_region *pregion;
1354                unsigned long pglen, rpglen, pgend, rpgend, start;
1355
1356                pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1357                pgend = pgoff + pglen;
1358
1359                for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
1360                        pregion = rb_entry(rb, struct vm_region, vm_rb);
1361
1362                        if (!(pregion->vm_flags & VM_MAYSHARE))
1363                                continue;
1364
1365                        /* search for overlapping mappings on the same file */
1366                        if (file_inode(pregion->vm_file) !=
1367                            file_inode(file))
1368                                continue;
1369
1370                        if (pregion->vm_pgoff >= pgend)
1371                                continue;
1372
1373                        rpglen = pregion->vm_end - pregion->vm_start;
1374                        rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1375                        rpgend = pregion->vm_pgoff + rpglen;
1376                        if (pgoff >= rpgend)
1377                                continue;
1378
1379                        /* handle inexactly overlapping matches between
1380                         * mappings */
1381                        if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
1382                            !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
1383                                /* new mapping is not a subset of the region */
1384                                if (!(capabilities & BDI_CAP_MAP_DIRECT))
1385                                        goto sharing_violation;
1386                                continue;
1387                        }
1388
1389                        /* we've found a region we can share */
1390                        pregion->vm_usage++;
1391                        vma->vm_region = pregion;
1392                        start = pregion->vm_start;
1393                        start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
1394                        vma->vm_start = start;
1395                        vma->vm_end = start + len;
1396
1397                        if (pregion->vm_flags & VM_MAPPED_COPY) {
1398                                kdebug("share copy");
1399                                vma->vm_flags |= VM_MAPPED_COPY;
1400                        } else {
1401                                kdebug("share mmap");
1402                                ret = do_mmap_shared_file(vma);
1403                                if (ret < 0) {
1404                                        vma->vm_region = NULL;
1405                                        vma->vm_start = 0;
1406                                        vma->vm_end = 0;
1407                                        pregion->vm_usage--;
1408                                        pregion = NULL;
1409                                        goto error_just_free;
1410                                }
1411                        }
1412                        fput(region->vm_file);
1413                        kmem_cache_free(vm_region_jar, region);
1414                        region = pregion;
1415                        result = start;
1416                        goto share;
1417                }
1418
1419                /* obtain the address at which to make a shared mapping
1420                 * - this is the hook for quasi-memory character devices to
1421                 *   tell us the location of a shared mapping
1422                 */
1423                if (capabilities & BDI_CAP_MAP_DIRECT) {
1424                        addr = file->f_op->get_unmapped_area(file, addr, len,
1425                                                             pgoff, flags);
1426                        if (IS_ERR_VALUE(addr)) {
1427                                ret = addr;
1428                                if (ret != -ENOSYS)
1429                                        goto error_just_free;
1430
1431                                /* the driver refused to tell us where to site
1432                                 * the mapping so we'll have to attempt to copy
1433                                 * it */
1434                                ret = -ENODEV;
1435                                if (!(capabilities & BDI_CAP_MAP_COPY))
1436                                        goto error_just_free;
1437
1438                                capabilities &= ~BDI_CAP_MAP_DIRECT;
1439                        } else {
1440                                vma->vm_start = region->vm_start = addr;
1441                                vma->vm_end = region->vm_end = addr + len;
1442                        }
1443                }
1444        }
1445
1446        vma->vm_region = region;
1447
1448        /* set up the mapping
1449         * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
1450         */
1451        if (file && vma->vm_flags & VM_SHARED)
1452                ret = do_mmap_shared_file(vma);
1453        else
1454                ret = do_mmap_private(vma, region, len, capabilities);
1455        if (ret < 0)
1456                goto error_just_free;
1457        add_nommu_region(region);
1458
1459        /* clear anonymous mappings that don't ask for uninitialized data */
1460        if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
1461                memset((void *)region->vm_start, 0,
1462                       region->vm_end - region->vm_start);
1463
1464        /* okay... we have a mapping; now we have to register it */
1465        result = vma->vm_start;
1466
1467        current->mm->total_vm += len >> PAGE_SHIFT;
1468
1469share:
1470        add_vma_to_mm(current->mm, vma);
1471
1472        /* we flush the region from the icache only when the first executable
1473         * mapping of it is made  */
1474        if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1475                flush_icache_range(region->vm_start, region->vm_end);
1476                region->vm_icache_flushed = true;
1477        }
1478
1479        up_write(&nommu_region_sem);
1480
1481        kleave(" = %lx", result);
1482        return result;
1483
1484error_just_free:
1485        up_write(&nommu_region_sem);
1486error:
1487        if (region->vm_file)
1488                fput(region->vm_file);
1489        kmem_cache_free(vm_region_jar, region);
1490        if (vma->vm_file)
1491                fput(vma->vm_file);
1492        kmem_cache_free(vm_area_cachep, vma);
1493        kleave(" = %d", ret);
1494        return ret;
1495
1496sharing_violation:
1497        up_write(&nommu_region_sem);
1498        printk(KERN_WARNING "Attempt to share mismatched mappings\n");
1499        ret = -EINVAL;
1500        goto error;
1501
1502error_getting_vma:
1503        kmem_cache_free(vm_region_jar, region);
1504        printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
1505               " from process %d failed\n",
1506               len, current->pid);
1507        show_free_areas(0);
1508        return -ENOMEM;
1509
1510error_getting_region:
1511        printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
1512               " from process %d failed\n",
1513               len, current->pid);
1514        show_free_areas(0);
1515        return -ENOMEM;
1516}
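
/*
 * Usage sketch (illustrative only, userspace view): the address hint is
 * ignored and MAP_FIXED is rejected above, so portable !MMU callers pass
 * NULL and use whatever address comes back:
 *
 *	p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	if (p == MAP_FAILED)
 *		return -1;
 *
 * Two MAP_SHARED mappings of the same memory-backed file (on ramfs, say)
 * can end up reusing a single vm_region via the sharing logic above.
 */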
1517
1518SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1519                unsigned long, prot, unsigned long, flags,
1520                unsigned long, fd, unsigned long, pgoff)
1521{
1522        struct file *file = NULL;
1523        unsigned long retval = -EBADF;
1524
1525        audit_mmap_fd(fd, flags);
1526        if (!(flags & MAP_ANONYMOUS)) {
1527                file = fget(fd);
1528                if (!file)
1529                        goto out;
1530        }
1531
1532        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1533
1534        retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1535
1536        if (file)
1537                fput(file);
1538out:
1539        return retval;
1540}
1541
1542#ifdef __ARCH_WANT_SYS_OLD_MMAP
1543struct mmap_arg_struct {
1544        unsigned long addr;
1545        unsigned long len;
1546        unsigned long prot;
1547        unsigned long flags;
1548        unsigned long fd;
1549        unsigned long offset;
1550};
1551
1552SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1553{
1554        struct mmap_arg_struct a;
1555
1556        if (copy_from_user(&a, arg, sizeof(a)))
1557                return -EFAULT;
1558        if (a.offset & ~PAGE_MASK)
1559                return -EINVAL;
1560
1561        return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1562                              a.offset >> PAGE_SHIFT);
1563}
1564#endif /* __ARCH_WANT_SYS_OLD_MMAP */
1565
1566/*
1567 * split a vma into two pieces at address 'addr'; a new vma is allocated for
1568 * either the first part or the tail.
1569 */
1570int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
1571              unsigned long addr, int new_below)
1572{
1573        struct vm_area_struct *new;
1574        struct vm_region *region;
1575        unsigned long npages;
1576
1577        kenter("");
1578
1579        /* we're only permitted to split anonymous regions (these should have
1580         * only a single usage on the region) */
1581        if (vma->vm_file)
1582                return -ENOMEM;
1583
1584        if (mm->map_count >= sysctl_max_map_count)
1585                return -ENOMEM;
1586
1587        region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1588        if (!region)
1589                return -ENOMEM;
1590
1591        new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
1592        if (!new) {
1593                kmem_cache_free(vm_region_jar, region);
1594                return -ENOMEM;
1595        }
1596
1597        /* most fields are the same, copy all, and then fixup */
1598        *new = *vma;
1599        *region = *vma->vm_region;
1600        new->vm_region = region;
1601
1602        npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1603
1604        if (new_below) {
1605                region->vm_top = region->vm_end = new->vm_end = addr;
1606        } else {
1607                region->vm_start = new->vm_start = addr;
1608                region->vm_pgoff = new->vm_pgoff += npages;
1609        }
1610
1611        if (new->vm_ops && new->vm_ops->open)
1612                new->vm_ops->open(new);
1613
1614        delete_vma_from_mm(vma);
1615        down_write(&nommu_region_sem);
1616        delete_nommu_region(vma->vm_region);
1617        if (new_below) {
1618                vma->vm_region->vm_start = vma->vm_start = addr;
1619                vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1620        } else {
1621                vma->vm_region->vm_end = vma->vm_end = addr;
1622                vma->vm_region->vm_top = addr;
1623        }
1624        add_nommu_region(vma->vm_region);
1625        add_nommu_region(new->vm_region);
1626        up_write(&nommu_region_sem);
1627        add_vma_to_mm(mm, vma);
1628        add_vma_to_mm(mm, new);
1629        return 0;
1630}
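
/*
 * Worked example (sketch, assuming 4KB pages): splitting an anonymous VMA
 * covering [0x80000, 0x84000) at addr 0x82000:
 *
 *	new_below != 0:	'new' covers [0x80000, 0x82000); 'vma' shrinks to
 *			[0x82000, 0x84000) and its vm_pgoff advances by 2
 *	new_below == 0:	'new' covers [0x82000, 0x84000) with its vm_pgoff
 *			advanced by 2; 'vma' shrinks to [0x80000, 0x82000)
 *
 * In both cases the backing vm_region is duplicated and both halves are
 * reinserted into the region tree and the mm's VMA list.
 */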
1631
1632/*
1633 * shrink a VMA by removing the specified chunk from either the beginning or
1634 * the end
1635 */
1636static int shrink_vma(struct mm_struct *mm,
1637                      struct vm_area_struct *vma,
1638                      unsigned long from, unsigned long to)
1639{
1640        struct vm_region *region;
1641
1642        kenter("");
1643
1644        /* adjust the VMA's pointers, which may reposition it in the MM's tree
1645         * and list */
1646        delete_vma_from_mm(vma);
1647        if (from > vma->vm_start)
1648                vma->vm_end = from;
1649        else
1650                vma->vm_start = to;
1651        add_vma_to_mm(mm, vma);
1652
1653        /* cut the backing region down to size */
1654        region = vma->vm_region;
1655        BUG_ON(region->vm_usage != 1);
1656
1657        down_write(&nommu_region_sem);
1658        delete_nommu_region(region);
1659        if (from > region->vm_start) {
1660                to = region->vm_top;
1661                region->vm_top = region->vm_end = from;
1662        } else {
1663                region->vm_start = to;
1664        }
1665        add_nommu_region(region);
1666        up_write(&nommu_region_sem);
1667
1668        free_page_series(from, to);
1669        return 0;
1670}
1671
1672/*
1673 * release a mapping
1674 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1675 *   VMA, though it need not cover the whole VMA
1676 */
1677int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
1678{
1679        struct vm_area_struct *vma;
1680        unsigned long end;
1681        int ret;
1682
1683        kenter(",%lx,%zx", start, len);
1684
1685        len = PAGE_ALIGN(len);
1686        if (len == 0)
1687                return -EINVAL;
1688
1689        end = start + len;
1690
1691        /* find the first potentially overlapping VMA */
1692        vma = find_vma(mm, start);
1693        if (!vma) {
1694                static int limit = 0;
1695                if (limit < 5) {
1696                        printk(KERN_WARNING
1697                               "munmap of memory not mmapped by process %d"
1698                               " (%s): 0x%lx-0x%lx\n",
1699                               current->pid, current->comm,
1700                               start, start + len - 1);
1701                        limit++;
1702                }
1703                return -EINVAL;
1704        }
1705
1706        /* we're allowed to split an anonymous VMA but not a file-backed one */
1707        if (vma->vm_file) {
1708                do {
1709                        if (start > vma->vm_start) {
1710                                kleave(" = -EINVAL [miss]");
1711                                return -EINVAL;
1712                        }
1713                        if (end == vma->vm_end)
1714                                goto erase_whole_vma;
1715                        vma = vma->vm_next;
1716                } while (vma);
1717                kleave(" = -EINVAL [split file]");
1718                return -EINVAL;
1719        } else {
1720                /* the chunk must be a subset of the VMA found */
1721                if (start == vma->vm_start && end == vma->vm_end)
1722                        goto erase_whole_vma;
1723                if (start < vma->vm_start || end > vma->vm_end) {
1724                        kleave(" = -EINVAL [superset]");
1725                        return -EINVAL;
1726                }
1727                if (start & ~PAGE_MASK) {
1728                        kleave(" = -EINVAL [unaligned start]");
1729                        return -EINVAL;
1730                }
1731                if (end != vma->vm_end && end & ~PAGE_MASK) {
1732                        kleave(" = -EINVAL [unaligned split]");
1733                        return -EINVAL;
1734                }
1735                if (start != vma->vm_start && end != vma->vm_end) {
1736                        ret = split_vma(mm, vma, start, 1);
1737                        if (ret < 0) {
1738                                kleave(" = %d [split]", ret);
1739                                return ret;
1740                        }
1741                }
1742                return shrink_vma(mm, vma, start, end);
1743        }
1744
1745erase_whole_vma:
1746        delete_vma_from_mm(vma);
1747        delete_vma(mm, vma);
1748        kleave(" = 0");
1749        return 0;
1750}
1751EXPORT_SYMBOL(do_munmap);
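
/*
 * Example (sketch, userspace): what the single-VMA rule means for callers,
 * assuming 4KB pages, an 8-page anonymous mapping at 'p' and a file-backed
 * mapping at 'f' (both names illustrative):
 *
 *	munmap(p, 8 * 4096);		// whole VMA: OK
 *	munmap(p + 6 * 4096, 2 * 4096);	// tail of an anonymous VMA: the
 *					// VMA is shrunk, OK
 *	munmap(f + 4096, 4096);		// partial unmap of a file-backed
 *					// VMA: -EINVAL, such VMAs can't be
 *					// split under NOMMU
 */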
1752
1753int vm_munmap(unsigned long addr, size_t len)
1754{
1755        struct mm_struct *mm = current->mm;
1756        int ret;
1757
1758        down_write(&mm->mmap_sem);
1759        ret = do_munmap(mm, addr, len, NULL);
1760        up_write(&mm->mmap_sem);
1761        return ret;
1762}
1763EXPORT_SYMBOL(vm_munmap);
1764
1765SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1766{
1767        return vm_munmap(addr, len);
1768}
1769
1770/*
1771 * release all the mappings made in a process's VM space
1772 */
1773void exit_mmap(struct mm_struct *mm)
1774{
1775        struct vm_area_struct *vma;
1776
1777        if (!mm)
1778                return;
1779
1780        kenter("");
1781
1782        mm->total_vm = 0;
1783
1784        while ((vma = mm->mmap)) {
1785                mm->mmap = vma->vm_next;
1786                delete_vma_from_mm(vma);
1787                delete_vma(mm, vma);
1788                cond_resched();
1789        }
1790
1791        kleave("");
1792}
1793
1794unsigned long vm_brk(unsigned long addr, unsigned long len)
1795{
1796        return -ENOMEM;
1797}
1798
1799/*
1800 * expand (or shrink) an existing mapping, potentially moving it at the same
1801 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1802 *
1803 * under NOMMU conditions, we only permit changing a mapping's size, and only
1804 * as long as it stays within the region allocated by do_mmap_private() and the
1805 * block is not shareable
1806 *
1807 * MREMAP_FIXED is not supported under NOMMU conditions
1808 */
1809static unsigned long do_mremap(unsigned long addr,
1810                        unsigned long old_len, unsigned long new_len,
1811                        unsigned long flags, unsigned long new_addr)
1812{
1813        struct vm_area_struct *vma;
1814
1815        /* insanity checks first */
1816        old_len = PAGE_ALIGN(old_len);
1817        new_len = PAGE_ALIGN(new_len);
1818        if (old_len == 0 || new_len == 0)
1819                return (unsigned long) -EINVAL;
1820
1821        if (addr & ~PAGE_MASK)
1822                return -EINVAL;
1823
1824        if (flags & MREMAP_FIXED && new_addr != addr)
1825                return (unsigned long) -EINVAL;
1826
1827        vma = find_vma_exact(current->mm, addr, old_len);
1828        if (!vma)
1829                return (unsigned long) -EINVAL;
1830
1831        if (vma->vm_end != vma->vm_start + old_len)
1832                return (unsigned long) -EFAULT;
1833
1834        if (vma->vm_flags & VM_MAYSHARE)
1835                return (unsigned long) -EPERM;
1836
1837        if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1838                return (unsigned long) -ENOMEM;
1839
1840        /* all checks complete - do it */
1841        vma->vm_end = vma->vm_start + new_len;
1842        return vma->vm_start;
1843}
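
/*
 * Example (sketch, userspace, assuming 4KB pages): under NOMMU mremap() can
 * only resize a private mapping in place, within whatever slack
 * do_mmap_private() left in the backing region:
 *
 *	void *p = mmap(NULL, 3 * 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mremap(p, 3 * 4096, 2 * 4096, 0);	// shrink in place: OK
 *	mremap(p, 2 * 4096, 4 * 4096, 0);	// grow: only OK if the backing
 *						// region still has room
 *	mremap(p, 2 * 4096, 2 * 4096,		// MREMAP_FIXED to a different
 *	       MREMAP_MAYMOVE | MREMAP_FIXED,	// address ('q' illustrative):
 *	       q);				// -EINVAL
 *
 * Shared (VM_MAYSHARE) mappings cannot be resized at all (-EPERM).
 */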
1844
1845SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1846                unsigned long, new_len, unsigned long, flags,
1847                unsigned long, new_addr)
1848{
1849        unsigned long ret;
1850
1851        down_write(&current->mm->mmap_sem);
1852        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
1853        up_write(&current->mm->mmap_sem);
1854        return ret;
1855}
1856
1857struct page *follow_page_mask(struct vm_area_struct *vma,
1858                              unsigned long address, unsigned int flags,
1859                              unsigned int *page_mask)
1860{
1861        *page_mask = 0;
1862        return NULL;
1863}
1864
1865int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1866                unsigned long pfn, unsigned long size, pgprot_t prot)
1867{
1868        if (addr != (pfn << PAGE_SHIFT))
1869                return -EINVAL;
1870
1871        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1872        return 0;
1873}
1874EXPORT_SYMBOL(remap_pfn_range);
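
/*
 * Example (sketch): a hypothetical character driver's ->mmap handler using
 * remap_pfn_range() in the usual way.  Under NOMMU the call only succeeds
 * for an identity mapping, i.e. when vma->vm_start already equals the
 * physical address being mapped (the addr != pfn << PAGE_SHIFT check above):
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */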
1875
1876int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1877{
1878        unsigned long pfn = start >> PAGE_SHIFT;
1879        unsigned long vm_len = vma->vm_end - vma->vm_start;
1880
1881        pfn += vma->vm_pgoff;
1882        return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1883}
1884EXPORT_SYMBOL(vm_iomap_memory);
1885
1886int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1887                        unsigned long pgoff)
1888{
1889        unsigned int size = vma->vm_end - vma->vm_start;
1890
1891        if (!(vma->vm_flags & VM_USERMAP))
1892                return -EINVAL;
1893
1894        vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1895        vma->vm_end = vma->vm_start + size;
1896
1897        return 0;
1898}
1899EXPORT_SYMBOL(remap_vmalloc_range);
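
/*
 * Example (sketch): a hypothetical driver exposing a vmalloc'd buffer
 * ('buf', allocated elsewhere) to userspace.  The call only succeeds if
 * VM_USERMAP is set on the VMA, per the check above:
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, buf, 0);
 *	}
 */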
1900
1901unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
1902        unsigned long len, unsigned long pgoff, unsigned long flags)
1903{
1904        return -ENOMEM;
1905}
1906
1907void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
1908{
1909}
1910
1911void unmap_mapping_range(struct address_space *mapping,
1912                         loff_t const holebegin, loff_t const holelen,
1913                         int even_cows)
1914{
1915}
1916EXPORT_SYMBOL(unmap_mapping_range);
1917
1918/*
1919 * Check that a process has enough memory to allocate a new virtual
1920 * mapping. 0 means there is enough memory for the allocation to
1921 * succeed and -ENOMEM implies there is not.
1922 *
1923 * We currently support three overcommit policies, which are set via the
1924 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
1925 *
1926 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
1927 * Additional code 2002 Jul 20 by Robert Love.
1928 *
1929 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
1930 *
1931 * Note this is a helper function intended for use by LSMs that wish to
1932 * reuse this logic.
1933 */
1934int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
1935{
1936        unsigned long free, allowed, reserve;
1937
1938        vm_acct_memory(pages);
1939
1940        /*
1941         * Sometimes we want to use more memory than we have
1942         */
1943        if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
1944                return 0;
1945
1946        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
1947                free = global_page_state(NR_FREE_PAGES);
1948                free += global_page_state(NR_FILE_PAGES);
1949
1950                /*
1951                 * shmem pages shouldn't be counted as free in this
1952                 * case, they can't be purged, only swapped out, and
1953                 * that won't affect the overall amount of available
1954                 * memory in the system.
1955                 */
1956                free -= global_page_state(NR_SHMEM);
1957
1958                free += get_nr_swap_pages();
1959
1960                /*
1961                 * Any slabs which are created with the
1962                 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
1963                 * which are reclaimable, under pressure.  The dentry
1964                 * cache and most inode caches should fall into this
1965                 * cache and most inode caches should fall into this category.
1966                free += global_page_state(NR_SLAB_RECLAIMABLE);
1967
1968                /*
1969                 * Leave out the reserved pages; they can't back anonymous mappings.
1970                 */
1971                if (free <= totalreserve_pages)
1972                        goto error;
1973                else
1974                        free -= totalreserve_pages;
1975
1976                /*
1977                 * Reserve some for root
1978                 */
1979                if (!cap_sys_admin)
1980                        free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
1981
1982                if (free > pages)
1983                        return 0;
1984
1985                goto error;
1986        }
1987
1988        allowed = vm_commit_limit();
1989        /*
1990         * Reserve some for root
1991         */
1992        if (!cap_sys_admin)
1993                allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
1994
1995        /*
1996         * Don't let a single process grow so big a user can't recover
1997         */
1998        if (mm) {
1999                reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
2000                allowed -= min(mm->total_vm / 32, reserve);
2001        }
2002
2003        if (percpu_counter_read_positive(&vm_committed_as) < allowed)
2004                return 0;
2005
2006error:
2007        vm_unacct_memory(pages);
2008
2009        return -ENOMEM;
2010}
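
/*
 * Worked example (sketch) of the OVERCOMMIT_GUESS estimate above, with
 * made-up counters and 4KB pages: 20000 free + 30000 file - 5000 shmem
 * + 10000 swap + 3000 reclaimable slab = 58000 pages; subtracting 2000
 * reserved pages and, for a non-root caller, the 8MB admin reserve
 * (2048 pages) leaves 53952, so a request for up to 53951 pages succeeds
 * and anything larger is rejected with -ENOMEM.
 */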
2011
2012int in_gate_area_no_mm(unsigned long addr)
2013{
2014        return 0;
2015}
2016
2017int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2018{
2019        BUG();
2020        return 0;
2021}
2022EXPORT_SYMBOL(filemap_fault);
2023
2024int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
2025                             unsigned long size, pgoff_t pgoff)
2026{
2027        BUG();
2028        return 0;
2029}
2030EXPORT_SYMBOL(generic_file_remap_pages);
2031
2032static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
2033                unsigned long addr, void *buf, int len, int write)
2034{
2035        struct vm_area_struct *vma;
2036
2037        down_read(&mm->mmap_sem);
2038
2039        /* the access must start within one of the target process's mappings */
2040        vma = find_vma(mm, addr);
2041        if (vma) {
2042                /* don't overrun this mapping */
2043                if (addr + len >= vma->vm_end)
2044                        len = vma->vm_end - addr;
2045
2046                /* only read or write mappings where it is permitted */
2047                if (write && vma->vm_flags & VM_MAYWRITE)
2048                        copy_to_user_page(vma, NULL, addr,
2049                                         (void *) addr, buf, len);
2050                else if (!write && vma->vm_flags & VM_MAYREAD)
2051                        copy_from_user_page(vma, NULL, addr,
2052                                            buf, (void *) addr, len);
2053                else
2054                        len = 0;
2055        } else {
2056                len = 0;
2057        }
2058
2059        up_read(&mm->mmap_sem);
2060
2061        return len;
2062}
2063
2064/**
2065 * access_remote_vm - access another process' address space
2066 * @mm:         the mm_struct of the target address space
2067 * @addr:       start address to access
2068 * @buf:        source or destination buffer
2069 * @len:        number of bytes to transfer
2070 * @write:      whether the access is a write
2071 *
2072 * The caller must hold a reference on @mm.
2073 */
2074int access_remote_vm(struct mm_struct *mm, unsigned long addr,
2075                void *buf, int len, int write)
2076{
2077        return __access_remote_vm(NULL, mm, addr, buf, len, write);
2078}
2079
2080/*
2081 * Access another process' address space.
2082 * - source/target buffer must be kernel space
2083 */
2084int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
2085{
2086        struct mm_struct *mm;
2087
2088        if (addr + len < addr)
2089                return 0;
2090
2091        mm = get_task_mm(tsk);
2092        if (!mm)
2093                return 0;
2094
2095        len = __access_remote_vm(tsk, mm, addr, buf, len, write);
2096
2097        mmput(mm);
2098        return len;
2099}
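
/*
 * Example (sketch, kernel side): ptrace and /proc/<pid>/mem style accesses
 * funnel through these helpers.  Reading another task's memory might look
 * like this ('tsk' and 'addr' assumed valid):
 *
 *	char buf[64];
 *	int copied = access_process_vm(tsk, addr, buf, sizeof(buf), 0);
 *
 * Under NOMMU the transfer is a flat copy clamped to the VMA containing
 * 'addr', so 'copied' may be shorter than requested and is 0 if 'addr'
 * isn't mapped or the VMA lacks VM_MAYREAD.
 */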
2100
2101/**
2102 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
2103 * @inode: The inode to check
2104 * @size: The current filesize of the inode
2105 * @newsize: The proposed filesize of the inode
2106 *
2107 * Check the shared mappings on an inode on behalf of a shrinking truncate to
2108 * make sure that that any outstanding VMAs aren't broken and then shrink the
2109 * vm_regions that extend that beyond so that do_mmap_pgoff() doesn't
2110 * automatically grant mappings that are too large.
2111 */
2112int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
2113                                size_t newsize)
2114{
2115        struct vm_area_struct *vma;
2116        struct vm_region *region;
2117        pgoff_t low, high;
2118        size_t r_size, r_top;
2119
2120        low = newsize >> PAGE_SHIFT;
2121        high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2122
2123        down_write(&nommu_region_sem);
2124        mutex_lock(&inode->i_mapping->i_mmap_mutex);
2125
2126        /* search for VMAs that fall within the dead zone */
2127        vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
2128                /* found one - only interested if it's shared out of the page
2129                 * cache */
2130                if (vma->vm_flags & VM_SHARED) {
2131                        mutex_unlock(&inode->i_mapping->i_mmap_mutex);
2132                        up_write(&nommu_region_sem);
2133                        return -ETXTBSY; /* not quite true, but near enough */
2134                }
2135        }
2136
2137        /* reduce any regions that overlap the dead zone - if in existence,
2138         * these will be pointed to by VMAs that don't overlap the dead zone
2139         *
2140         * we don't check for any regions that start beyond the EOF as there
2141         * shouldn't be any
2142         */
2143        vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap,
2144                                  0, ULONG_MAX) {
2145                if (!(vma->vm_flags & VM_SHARED))
2146                        continue;
2147
2148                region = vma->vm_region;
2149                r_size = region->vm_top - region->vm_start;
2150                r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
2151
2152                if (r_top > newsize) {
2153                        region->vm_top -= r_top - newsize;
2154                        if (region->vm_end > region->vm_top)
2155                                region->vm_end = region->vm_top;
2156                }
2157        }
2158
2159        mutex_unlock(&inode->i_mapping->i_mmap_mutex);
2160        up_write(&nommu_region_sem);
2161        return 0;
2162}
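
/*
 * Example (sketch, userspace): the effect of the VM_SHARED check above is
 * that a shrinking truncate fails while the truncated range is still mapped
 * shared (assuming a filesystem, such as ramfs, that supports shared NOMMU
 * mappings):
 *
 *	void *p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	ftruncate(fd, 4096);	// fails with ETXTBSY while the second page
 *				// is still covered by the shared mapping
 */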
2163
2164/*
2165 * Initialise sysctl_user_reserve_kbytes.
2166 *
2167 * This is intended to prevent a user from starting a single memory hogging
2168 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
2169 * mode.
2170 *
2171 * The default value is min(3% of free memory, 128MB).
2172 * 128MB is enough to recover with sshd/login, bash, and top/kill.
2173 */
2174static int __meminit init_user_reserve(void)
2175{
2176        unsigned long free_kbytes;
2177
2178        free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
2179
2180        sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
2181        return 0;
2182}
2183module_init(init_user_reserve)
2184
2185/*
2186 * Initialise sysctl_admin_reserve_kbytes.
2187 *
2188 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
2189 * to log in and kill a memory hogging process.
2190 *
2191 * Systems with more than 256MB will reserve 8MB, enough to recover
2192 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
2193 * only reserve 3% of free pages by default.
2194 */
2195static int __meminit init_admin_reserve(void)
2196{
2197        unsigned long free_kbytes;
2198
2199        free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
2200
2201        sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
2202        return 0;
2203}
2204module_init(init_admin_reserve)
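
/*
 * Worked example (sketch) for the two reserves above, assuming about 8GB
 * free at boot: free_kbytes ~= 8388608, free_kbytes / 32 ~= 262144KB
 * (roughly 3.1%), so sysctl_user_reserve_kbytes is capped at 1UL << 17 =
 * 131072KB (128MB) and sysctl_admin_reserve_kbytes at 1UL << 13 = 8192KB
 * (8MB).  With only 128MB free, both fall back to the 3% figure:
 * 131072 / 32 = 4096KB each.
 */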
2205