linux/mm/nommu.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/nommu.c
   4 *
   5 *  Replacement code for mm functions to support CPUs that don't
   6 *  have any form of memory management unit (thus no virtual memory).
   7 *
   8 *  See Documentation/admin-guide/mm/nommu-mmap.rst
   9 *
  10 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
  11 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
  12 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
  13 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
  14 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
  15 */
  16
  17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  18
  19#include <linux/export.h>
  20#include <linux/mm.h>
  21#include <linux/sched/mm.h>
  22#include <linux/vmacache.h>
  23#include <linux/mman.h>
  24#include <linux/swap.h>
  25#include <linux/file.h>
  26#include <linux/highmem.h>
  27#include <linux/pagemap.h>
  28#include <linux/slab.h>
  29#include <linux/vmalloc.h>
  30#include <linux/blkdev.h>
  31#include <linux/backing-dev.h>
  32#include <linux/compiler.h>
  33#include <linux/mount.h>
  34#include <linux/personality.h>
  35#include <linux/security.h>
  36#include <linux/syscalls.h>
  37#include <linux/audit.h>
  38#include <linux/printk.h>
  39
  40#include <linux/uaccess.h>
  41#include <asm/tlb.h>
  42#include <asm/tlbflush.h>
  43#include <asm/mmu_context.h>
  44#include "internal.h"
  45
  46void *high_memory;
  47EXPORT_SYMBOL(high_memory);
  48struct page *mem_map;
  49unsigned long max_mapnr;
  50EXPORT_SYMBOL(max_mapnr);
  51unsigned long highest_memmap_pfn;
  52int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
  53int heap_stack_gap = 0;
  54
  55atomic_long_t mmap_pages_allocated;
  56
  57EXPORT_SYMBOL(mem_map);
  58
  59/* list of mapped, potentially shareable regions */
  60static struct kmem_cache *vm_region_jar;
  61struct rb_root nommu_region_tree = RB_ROOT;
  62DECLARE_RWSEM(nommu_region_sem);
  63
  64const struct vm_operations_struct generic_file_vm_ops = {
  65};
  66
  67/*
  68 * Return the total memory allocated for this pointer, not
  69 * just what the caller asked for.
  70 *
  71 * Doesn't have to be accurate, i.e. may have races.
  72 */
  73unsigned int kobjsize(const void *objp)
  74{
  75        struct page *page;
  76
  77        /*
  78         * If the object we have should not have ksize performed on it,
  79         * return size of 0
  80         */
  81        if (!objp || !virt_addr_valid(objp))
  82                return 0;
  83
  84        page = virt_to_head_page(objp);
  85
  86        /*
  87         * If the allocator sets PageSlab, we know the pointer came from
  88         * kmalloc().
  89         */
  90        if (PageSlab(page))
  91                return ksize(objp);
  92
  93        /*
  94         * If it's not a compound page, see if we have a matching VMA
  95         * region. This test is intentionally done in reverse order,
  96         * so if there's no VMA, we still fall through and hand back
  97         * PAGE_SIZE for 0-order pages.
  98         */
  99        if (!PageCompound(page)) {
 100                struct vm_area_struct *vma;
 101
 102                vma = find_vma(current->mm, (unsigned long)objp);
 103                if (vma)
 104                        return vma->vm_end - vma->vm_start;
 105        }
 106
 107        /*
 108         * The ksize() function is only guaranteed to work for pointers
 109         * returned by kmalloc(). So handle arbitrary pointers here.
 110         */
 111        return page_size(page);
 112}
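
/*
 * Illustrative sketch (not part of the original file): a caller such as the
 * procfs nommu code can use kobjsize() to account an object's real footprint
 * rather than the size that was asked for.  The "obj" pointer below is
 * hypothetical.
 *
 *        void *obj = kmalloc(100, GFP_KERNEL);
 *
 *        if (obj)
 *                pr_debug("asked for 100 bytes, really using %u\n",
 *                         kobjsize(obj));
 */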
 113
 114/**
 115 * follow_pfn - look up PFN at a user virtual address
 116 * @vma: memory mapping
 117 * @address: user virtual address
 118 * @pfn: location to store found PFN
 119 *
 120 * Only IO mappings and raw PFN mappings are allowed.
 121 *
 122 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 123 */
 124int follow_pfn(struct vm_area_struct *vma, unsigned long address,
 125        unsigned long *pfn)
 126{
 127        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
 128                return -EINVAL;
 129
 130        *pfn = address >> PAGE_SHIFT;
 131        return 0;
 132}
 133EXPORT_SYMBOL(follow_pfn);
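
/*
 * Illustrative sketch (an assumed calling convention, not taken from this
 * file): a typical user looks the VMA up under mmap_lock before asking for
 * the PFN, and only VM_IO/VM_PFNMAP mappings will succeed:
 *
 *        unsigned long pfn;
 *        struct vm_area_struct *vma;
 *
 *        mmap_read_lock(mm);
 *        vma = find_vma(mm, address);
 *        if (vma && !follow_pfn(vma, address, &pfn))
 *                pr_debug("addr %lx maps pfn %lx\n", address, pfn);
 *        mmap_read_unlock(mm);
 */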
 134
 135LIST_HEAD(vmap_area_list);
 136
 137void vfree(const void *addr)
 138{
 139        kfree(addr);
 140}
 141EXPORT_SYMBOL(vfree);
 142
 143void *__vmalloc(unsigned long size, gfp_t gfp_mask)
 144{
 145        /*
 146         *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
 147         * returns only a logical address.
 148         */
 149        return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 150}
 151EXPORT_SYMBOL(__vmalloc);
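
/*
 * Illustrative note (an assumption about the !MMU fallback, not stated
 * above): because __vmalloc() defers to kmalloc(), "vmalloc" memory here is
 * physically contiguous and bounded by the largest allocation kmalloc() can
 * satisfy, so big requests that would succeed with an MMU may fail here:
 *
 *        void *p = vmalloc(16 << 20);
 *
 *        if (!p)
 *                pr_warn("16MiB vmalloc failed\n");
 *        else
 *                vfree(p);
 */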
 152
 153void *__vmalloc_node_range(unsigned long size, unsigned long align,
 154                unsigned long start, unsigned long end, gfp_t gfp_mask,
 155                pgprot_t prot, unsigned long vm_flags, int node,
 156                const void *caller)
 157{
 158        return __vmalloc(size, gfp_mask);
 159}
 160
 161void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
 162                int node, const void *caller)
 163{
 164        return __vmalloc(size, gfp_mask);
 165}
 166
 167static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
 168{
 169        void *ret;
 170
 171        ret = __vmalloc(size, flags);
 172        if (ret) {
 173                struct vm_area_struct *vma;
 174
 175                mmap_write_lock(current->mm);
 176                vma = find_vma(current->mm, (unsigned long)ret);
 177                if (vma)
 178                        vma->vm_flags |= VM_USERMAP;
 179                mmap_write_unlock(current->mm);
 180        }
 181
 182        return ret;
 183}
 184
 185void *vmalloc_user(unsigned long size)
 186{
 187        return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
 188}
 189EXPORT_SYMBOL(vmalloc_user);
 190
 191struct page *vmalloc_to_page(const void *addr)
 192{
 193        return virt_to_page(addr);
 194}
 195EXPORT_SYMBOL(vmalloc_to_page);
 196
 197unsigned long vmalloc_to_pfn(const void *addr)
 198{
 199        return page_to_pfn(virt_to_page(addr));
 200}
 201EXPORT_SYMBOL(vmalloc_to_pfn);
 202
 203long vread(char *buf, char *addr, unsigned long count)
 204{
 205        /* Don't allow overflow */
 206        if ((unsigned long) buf + count < count)
 207                count = -(unsigned long) buf;
 208
 209        memcpy(buf, addr, count);
 210        return count;
 211}
 212
 213/*
 214 *      vmalloc  -  allocate virtually contiguous memory
 215 *
 216 *      @size:          allocation size
 217 *
 218 *      Allocate enough pages to cover @size from the page level
 219 *      allocator and map them into contiguous kernel virtual space.
 220 *
 221 *      For tight control over page level allocator and protection flags
 222 *      use __vmalloc() instead.
 223 */
 224void *vmalloc(unsigned long size)
 225{
 226        return __vmalloc(size, GFP_KERNEL);
 227}
 228EXPORT_SYMBOL(vmalloc);
 229
 230/*
 231 *      vzalloc - allocate virtually contiguous memory with zero fill
 232 *
 233 *      @size:          allocation size
 234 *
 235 *      Allocate enough pages to cover @size from the page level
 236 *      allocator and map them into contiguous kernel virtual space.
 237 *      The memory allocated is set to zero.
 238 *
 239 *      For tight control over page level allocator and protection flags
 240 *      use __vmalloc() instead.
 241 */
 242void *vzalloc(unsigned long size)
 243{
 244        return __vmalloc(size, GFP_KERNEL | __GFP_ZERO);
 245}
 246EXPORT_SYMBOL(vzalloc);
 247
 248/**
 249 * vmalloc_node - allocate memory on a specific node
 250 * @size:       allocation size
 251 * @node:       numa node
 252 *
 253 * Allocate enough pages to cover @size from the page level
 254 * allocator and map them into contiguous kernel virtual space.
 255 *
 256 * For tight control over page level allocator and protection flags
 257 * use __vmalloc() instead.
 258 */
 259void *vmalloc_node(unsigned long size, int node)
 260{
 261        return vmalloc(size);
 262}
 263EXPORT_SYMBOL(vmalloc_node);
 264
 265/**
 266 * vzalloc_node - allocate memory on a specific node with zero fill
 267 * @size:       allocation size
 268 * @node:       numa node
 269 *
 270 * Allocate enough pages to cover @size from the page level
 271 * allocator and map them into contiguous kernel virtual space.
 272 * The memory allocated is set to zero.
 273 *
 274 * For tight control over page level allocator and protection flags
 275 * use __vmalloc() instead.
 276 */
 277void *vzalloc_node(unsigned long size, int node)
 278{
 279        return vzalloc(size);
 280}
 281EXPORT_SYMBOL(vzalloc_node);
 282
 283/**
 284 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 285 *      @size:          allocation size
 286 *
 287 *      Allocate enough 32bit PA addressable pages to cover @size from the
 288 *      page level allocator and map them into contiguous kernel virtual space.
 289 */
 290void *vmalloc_32(unsigned long size)
 291{
 292        return __vmalloc(size, GFP_KERNEL);
 293}
 294EXPORT_SYMBOL(vmalloc_32);
 295
 296/**
 297 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 298 *      @size:          allocation size
 299 *
 300 * The resulting memory area is 32bit addressable and zeroed so it can be
 301 * mapped to userspace without leaking data.
 302 *
 303 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 304 * remap_vmalloc_range() are permissible.
 305 */
 306void *vmalloc_32_user(unsigned long size)
 307{
 308        /*
 309         * We'll have to sort out the ZONE_DMA bits for 64-bit,
 310         * but for now this can simply use vmalloc_user() directly.
 311         */
 312        return vmalloc_user(size);
 313}
 314EXPORT_SYMBOL(vmalloc_32_user);
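
/*
 * Illustrative sketch (hypothetical foo_* driver, not part of this file):
 * memory obtained from vmalloc_user()/vmalloc_32_user() has VM_USERMAP set
 * on its VMA, so a driver's ->mmap() handler may hand it to userspace with
 * remap_vmalloc_range():
 *
 *        static void *foo_buf;
 *
 *        static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *        {
 *                return remap_vmalloc_range(vma, foo_buf, vma->vm_pgoff);
 *        }
 */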
 315
 316void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
 317{
 318        BUG();
 319        return NULL;
 320}
 321EXPORT_SYMBOL(vmap);
 322
 323void vunmap(const void *addr)
 324{
 325        BUG();
 326}
 327EXPORT_SYMBOL(vunmap);
 328
 329void *vm_map_ram(struct page **pages, unsigned int count, int node)
 330{
 331        BUG();
 332        return NULL;
 333}
 334EXPORT_SYMBOL(vm_map_ram);
 335
 336void vm_unmap_ram(const void *mem, unsigned int count)
 337{
 338        BUG();
 339}
 340EXPORT_SYMBOL(vm_unmap_ram);
 341
 342void vm_unmap_aliases(void)
 343{
 344}
 345EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 346
 347void free_vm_area(struct vm_struct *area)
 348{
 349        BUG();
 350}
 351EXPORT_SYMBOL_GPL(free_vm_area);
 352
 353int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 354                   struct page *page)
 355{
 356        return -EINVAL;
 357}
 358EXPORT_SYMBOL(vm_insert_page);
 359
 360int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
 361                        unsigned long num)
 362{
 363        return -EINVAL;
 364}
 365EXPORT_SYMBOL(vm_map_pages);
 366
 367int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
 368                                unsigned long num)
 369{
 370        return -EINVAL;
 371}
 372EXPORT_SYMBOL(vm_map_pages_zero);
 373
 374/*
 375 *  sys_brk() for the most part doesn't need the global kernel
 376 *  lock, except when an application is doing something nasty
 377 *  like trying to un-brk an area that has already been mapped
 378 *  to a regular file.  in this case, the unmapping will need
 379 *  to invoke file system routines that need the global lock.
 380 */
 381SYSCALL_DEFINE1(brk, unsigned long, brk)
 382{
 383        struct mm_struct *mm = current->mm;
 384
 385        if (brk < mm->start_brk || brk > mm->context.end_brk)
 386                return mm->brk;
 387
 388        if (mm->brk == brk)
 389                return mm->brk;
 390
 391        /*
 392         * Always allow shrinking brk
 393         */
 394        if (brk <= mm->brk) {
 395                mm->brk = brk;
 396                return brk;
 397        }
 398
 399        /*
 400         * Ok, looks good - let it rip.
 401         */
 402        flush_icache_user_range(mm->brk, brk);
 403        return mm->brk = brk;
 404}
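
/*
 * Illustrative userspace view (an assumption, not part of this file): on
 * !MMU the brk region is sized when the binary is loaded, so growing the
 * heap only succeeds while the new break stays below mm->context.end_brk:
 *
 *        void *old = sbrk(4096);
 *
 *        if (old == (void *)-1)
 *                perror("sbrk");
 */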
 405
 406/*
 407 * initialise the percpu counter for VM and region record slabs
 408 */
 409void __init mmap_init(void)
 410{
 411        int ret;
 412
 413        ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
 414        VM_BUG_ON(ret);
 415        vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
 416}
 417
 418/*
 419 * validate the region tree
 420 * - the caller must hold the region lock
 421 */
 422#ifdef CONFIG_DEBUG_NOMMU_REGIONS
 423static noinline void validate_nommu_regions(void)
 424{
 425        struct vm_region *region, *last;
 426        struct rb_node *p, *lastp;
 427
 428        lastp = rb_first(&nommu_region_tree);
 429        if (!lastp)
 430                return;
 431
 432        last = rb_entry(lastp, struct vm_region, vm_rb);
 433        BUG_ON(last->vm_end <= last->vm_start);
 434        BUG_ON(last->vm_top < last->vm_end);
 435
 436        while ((p = rb_next(lastp))) {
 437                region = rb_entry(p, struct vm_region, vm_rb);
 438                last = rb_entry(lastp, struct vm_region, vm_rb);
 439
 440                BUG_ON(region->vm_end <= region->vm_start);
 441                BUG_ON(region->vm_top < region->vm_end);
 442                BUG_ON(region->vm_start < last->vm_top);
 443
 444                lastp = p;
 445        }
 446}
 447#else
 448static void validate_nommu_regions(void)
 449{
 450}
 451#endif
 452
 453/*
 454 * add a region into the global tree
 455 */
 456static void add_nommu_region(struct vm_region *region)
 457{
 458        struct vm_region *pregion;
 459        struct rb_node **p, *parent;
 460
 461        validate_nommu_regions();
 462
 463        parent = NULL;
 464        p = &nommu_region_tree.rb_node;
 465        while (*p) {
 466                parent = *p;
 467                pregion = rb_entry(parent, struct vm_region, vm_rb);
 468                if (region->vm_start < pregion->vm_start)
 469                        p = &(*p)->rb_left;
 470                else if (region->vm_start > pregion->vm_start)
 471                        p = &(*p)->rb_right;
 472                else if (pregion == region)
 473                        return;
 474                else
 475                        BUG();
 476        }
 477
 478        rb_link_node(&region->vm_rb, parent, p);
 479        rb_insert_color(&region->vm_rb, &nommu_region_tree);
 480
 481        validate_nommu_regions();
 482}
 483
 484/*
 485 * delete a region from the global tree
 486 */
 487static void delete_nommu_region(struct vm_region *region)
 488{
 489        BUG_ON(!nommu_region_tree.rb_node);
 490
 491        validate_nommu_regions();
 492        rb_erase(&region->vm_rb, &nommu_region_tree);
 493        validate_nommu_regions();
 494}
 495
 496/*
 497 * free a contiguous series of pages
 498 */
 499static void free_page_series(unsigned long from, unsigned long to)
 500{
 501        for (; from < to; from += PAGE_SIZE) {
 502                struct page *page = virt_to_page(from);
 503
 504                atomic_long_dec(&mmap_pages_allocated);
 505                put_page(page);
 506        }
 507}
 508
 509/*
 510 * release a reference to a region
 511 * - the caller must hold the region semaphore for writing, which this releases
 512 * - the region may not have been added to the tree yet, in which case vm_top
 513 *   will equal vm_start
 514 */
 515static void __put_nommu_region(struct vm_region *region)
 516        __releases(nommu_region_sem)
 517{
 518        BUG_ON(!nommu_region_tree.rb_node);
 519
 520        if (--region->vm_usage == 0) {
 521                if (region->vm_top > region->vm_start)
 522                        delete_nommu_region(region);
 523                up_write(&nommu_region_sem);
 524
 525                if (region->vm_file)
 526                        fput(region->vm_file);
 527
 528                /* IO memory and memory shared directly out of the pagecache
 529                 * from ramfs/tmpfs mustn't be released here */
 530                if (region->vm_flags & VM_MAPPED_COPY)
 531                        free_page_series(region->vm_start, region->vm_top);
 532                kmem_cache_free(vm_region_jar, region);
 533        } else {
 534                up_write(&nommu_region_sem);
 535        }
 536}
 537
 538/*
 539 * release a reference to a region
 540 */
 541static void put_nommu_region(struct vm_region *region)
 542{
 543        down_write(&nommu_region_sem);
 544        __put_nommu_region(region);
 545}
 546
 547/*
 548 * add a VMA into a process's mm_struct in the appropriate place in the list
  549 * and tree, and also add it to the address space's i_mmap tree if it is not
  550 * an anonymous mapping
 551 * - should be called with mm->mmap_lock held writelocked
 552 */
 553static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 554{
 555        struct vm_area_struct *pvma, *prev;
 556        struct address_space *mapping;
 557        struct rb_node **p, *parent, *rb_prev;
 558
 559        BUG_ON(!vma->vm_region);
 560
 561        mm->map_count++;
 562        vma->vm_mm = mm;
 563
 564        /* add the VMA to the mapping */
 565        if (vma->vm_file) {
 566                mapping = vma->vm_file->f_mapping;
 567
 568                i_mmap_lock_write(mapping);
 569                flush_dcache_mmap_lock(mapping);
 570                vma_interval_tree_insert(vma, &mapping->i_mmap);
 571                flush_dcache_mmap_unlock(mapping);
 572                i_mmap_unlock_write(mapping);
 573        }
 574
 575        /* add the VMA to the tree */
 576        parent = rb_prev = NULL;
 577        p = &mm->mm_rb.rb_node;
 578        while (*p) {
 579                parent = *p;
 580                pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
 581
 582                /* sort by: start addr, end addr, VMA struct addr in that order
 583                 * (the latter is necessary as we may get identical VMAs) */
 584                if (vma->vm_start < pvma->vm_start)
 585                        p = &(*p)->rb_left;
 586                else if (vma->vm_start > pvma->vm_start) {
 587                        rb_prev = parent;
 588                        p = &(*p)->rb_right;
 589                } else if (vma->vm_end < pvma->vm_end)
 590                        p = &(*p)->rb_left;
 591                else if (vma->vm_end > pvma->vm_end) {
 592                        rb_prev = parent;
 593                        p = &(*p)->rb_right;
 594                } else if (vma < pvma)
 595                        p = &(*p)->rb_left;
 596                else if (vma > pvma) {
 597                        rb_prev = parent;
 598                        p = &(*p)->rb_right;
 599                } else
 600                        BUG();
 601        }
 602
 603        rb_link_node(&vma->vm_rb, parent, p);
 604        rb_insert_color(&vma->vm_rb, &mm->mm_rb);
 605
 606        /* add VMA to the VMA list also */
 607        prev = NULL;
 608        if (rb_prev)
 609                prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
 610
 611        __vma_link_list(mm, vma, prev);
 612}
 613
 614/*
 615 * delete a VMA from its owning mm_struct and address space
 616 */
 617static void delete_vma_from_mm(struct vm_area_struct *vma)
 618{
 619        int i;
 620        struct address_space *mapping;
 621        struct mm_struct *mm = vma->vm_mm;
 622        struct task_struct *curr = current;
 623
 624        mm->map_count--;
 625        for (i = 0; i < VMACACHE_SIZE; i++) {
 626                /* if the vma is cached, invalidate the entire cache */
 627                if (curr->vmacache.vmas[i] == vma) {
 628                        vmacache_invalidate(mm);
 629                        break;
 630                }
 631        }
 632
 633        /* remove the VMA from the mapping */
 634        if (vma->vm_file) {
 635                mapping = vma->vm_file->f_mapping;
 636
 637                i_mmap_lock_write(mapping);
 638                flush_dcache_mmap_lock(mapping);
 639                vma_interval_tree_remove(vma, &mapping->i_mmap);
 640                flush_dcache_mmap_unlock(mapping);
 641                i_mmap_unlock_write(mapping);
 642        }
 643
 644        /* remove from the MM's tree and list */
 645        rb_erase(&vma->vm_rb, &mm->mm_rb);
 646
 647        __vma_unlink_list(mm, vma);
 648}
 649
 650/*
 651 * destroy a VMA record
 652 */
 653static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 654{
 655        if (vma->vm_ops && vma->vm_ops->close)
 656                vma->vm_ops->close(vma);
 657        if (vma->vm_file)
 658                fput(vma->vm_file);
 659        put_nommu_region(vma->vm_region);
 660        vm_area_free(vma);
 661}
 662
 663/*
 664 * look up the first VMA in which addr resides, NULL if none
 665 * - should be called with mm->mmap_lock at least held readlocked
 666 */
 667struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 668{
 669        struct vm_area_struct *vma;
 670
 671        /* check the cache first */
 672        vma = vmacache_find(mm, addr);
 673        if (likely(vma))
 674                return vma;
 675
 676        /* trawl the list (there may be multiple mappings in which addr
 677         * resides) */
 678        for (vma = mm->mmap; vma; vma = vma->vm_next) {
 679                if (vma->vm_start > addr)
 680                        return NULL;
 681                if (vma->vm_end > addr) {
 682                        vmacache_update(addr, vma);
 683                        return vma;
 684                }
 685        }
 686
 687        return NULL;
 688}
 689EXPORT_SYMBOL(find_vma);
 690
 691/*
 692 * find a VMA
 693 * - we don't extend stack VMAs under NOMMU conditions
 694 */
 695struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
 696{
 697        return find_vma(mm, addr);
 698}
 699
 700/*
 701 * expand a stack to a given address
 702 * - not supported under NOMMU conditions
 703 */
 704int expand_stack(struct vm_area_struct *vma, unsigned long address)
 705{
 706        return -ENOMEM;
 707}
 708
 709/*
  710 * look up the first VMA that exactly matches addr
 711 * - should be called with mm->mmap_lock at least held readlocked
 712 */
 713static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
 714                                             unsigned long addr,
 715                                             unsigned long len)
 716{
 717        struct vm_area_struct *vma;
 718        unsigned long end = addr + len;
 719
 720        /* check the cache first */
 721        vma = vmacache_find_exact(mm, addr, end);
 722        if (vma)
 723                return vma;
 724
 725        /* trawl the list (there may be multiple mappings in which addr
 726         * resides) */
 727        for (vma = mm->mmap; vma; vma = vma->vm_next) {
 728                if (vma->vm_start < addr)
 729                        continue;
 730                if (vma->vm_start > addr)
 731                        return NULL;
 732                if (vma->vm_end == end) {
 733                        vmacache_update(addr, vma);
 734                        return vma;
 735                }
 736        }
 737
 738        return NULL;
 739}
 740
 741/*
 742 * determine whether a mapping should be permitted and, if so, what sort of
 743 * mapping we're capable of supporting
 744 */
 745static int validate_mmap_request(struct file *file,
 746                                 unsigned long addr,
 747                                 unsigned long len,
 748                                 unsigned long prot,
 749                                 unsigned long flags,
 750                                 unsigned long pgoff,
 751                                 unsigned long *_capabilities)
 752{
 753        unsigned long capabilities, rlen;
 754        int ret;
 755
 756        /* do the simple checks first */
 757        if (flags & MAP_FIXED)
 758                return -EINVAL;
 759
 760        if ((flags & MAP_TYPE) != MAP_PRIVATE &&
 761            (flags & MAP_TYPE) != MAP_SHARED)
 762                return -EINVAL;
 763
 764        if (!len)
 765                return -EINVAL;
 766
 767        /* Careful about overflows.. */
 768        rlen = PAGE_ALIGN(len);
 769        if (!rlen || rlen > TASK_SIZE)
 770                return -ENOMEM;
 771
 772        /* offset overflow? */
 773        if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
 774                return -EOVERFLOW;
 775
 776        if (file) {
 777                /* files must support mmap */
 778                if (!file->f_op->mmap)
 779                        return -ENODEV;
 780
 781                /* work out if what we've got could possibly be shared
 782                 * - we support chardevs that provide their own "memory"
 783                 * - we support files/blockdevs that are memory backed
 784                 */
 785                if (file->f_op->mmap_capabilities) {
 786                        capabilities = file->f_op->mmap_capabilities(file);
 787                } else {
 788                        /* no explicit capabilities set, so assume some
 789                         * defaults */
 790                        switch (file_inode(file)->i_mode & S_IFMT) {
 791                        case S_IFREG:
 792                        case S_IFBLK:
 793                                capabilities = NOMMU_MAP_COPY;
 794                                break;
 795
 796                        case S_IFCHR:
 797                                capabilities =
 798                                        NOMMU_MAP_DIRECT |
 799                                        NOMMU_MAP_READ |
 800                                        NOMMU_MAP_WRITE;
 801                                break;
 802
 803                        default:
 804                                return -EINVAL;
 805                        }
 806                }
 807
 808                /* eliminate any capabilities that we can't support on this
 809                 * device */
 810                if (!file->f_op->get_unmapped_area)
 811                        capabilities &= ~NOMMU_MAP_DIRECT;
 812                if (!(file->f_mode & FMODE_CAN_READ))
 813                        capabilities &= ~NOMMU_MAP_COPY;
 814
 815                /* The file shall have been opened with read permission. */
 816                if (!(file->f_mode & FMODE_READ))
 817                        return -EACCES;
 818
 819                if (flags & MAP_SHARED) {
 820                        /* do checks for writing, appending and locking */
 821                        if ((prot & PROT_WRITE) &&
 822                            !(file->f_mode & FMODE_WRITE))
 823                                return -EACCES;
 824
 825                        if (IS_APPEND(file_inode(file)) &&
 826                            (file->f_mode & FMODE_WRITE))
 827                                return -EACCES;
 828
 829                        if (!(capabilities & NOMMU_MAP_DIRECT))
 830                                return -ENODEV;
 831
 832                        /* we mustn't privatise shared mappings */
 833                        capabilities &= ~NOMMU_MAP_COPY;
 834                } else {
 835                        /* we're going to read the file into private memory we
 836                         * allocate */
 837                        if (!(capabilities & NOMMU_MAP_COPY))
 838                                return -ENODEV;
 839
 840                        /* we don't permit a private writable mapping to be
 841                         * shared with the backing device */
 842                        if (prot & PROT_WRITE)
 843                                capabilities &= ~NOMMU_MAP_DIRECT;
 844                }
 845
 846                if (capabilities & NOMMU_MAP_DIRECT) {
 847                        if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
 848                            ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
 849                            ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
 850                            ) {
 851                                capabilities &= ~NOMMU_MAP_DIRECT;
 852                                if (flags & MAP_SHARED) {
 853                                        pr_warn("MAP_SHARED not completely supported on !MMU\n");
 854                                        return -EINVAL;
 855                                }
 856                        }
 857                }
 858
 859                /* handle executable mappings and implied executable
 860                 * mappings */
 861                if (path_noexec(&file->f_path)) {
 862                        if (prot & PROT_EXEC)
 863                                return -EPERM;
 864                } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
 865                        /* handle implication of PROT_EXEC by PROT_READ */
 866                        if (current->personality & READ_IMPLIES_EXEC) {
 867                                if (capabilities & NOMMU_MAP_EXEC)
 868                                        prot |= PROT_EXEC;
 869                        }
 870                } else if ((prot & PROT_READ) &&
 871                         (prot & PROT_EXEC) &&
 872                         !(capabilities & NOMMU_MAP_EXEC)
 873                         ) {
 874                        /* backing file is not executable, try to copy */
 875                        capabilities &= ~NOMMU_MAP_DIRECT;
 876                }
 877        } else {
 878                /* anonymous mappings are always memory backed and can be
 879                 * privately mapped
 880                 */
 881                capabilities = NOMMU_MAP_COPY;
 882
 883                /* handle PROT_EXEC implication by PROT_READ */
 884                if ((prot & PROT_READ) &&
 885                    (current->personality & READ_IMPLIES_EXEC))
 886                        prot |= PROT_EXEC;
 887        }
 888
 889        /* allow the security API to have its say */
 890        ret = security_mmap_addr(addr);
 891        if (ret < 0)
 892                return ret;
 893
 894        /* looks okay */
 895        *_capabilities = capabilities;
 896        return 0;
 897}
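
/*
 * Illustrative sketch (hypothetical foo_* driver): a quasi-memory character
 * device can tell validate_mmap_request() exactly what it supports by
 * implementing ->mmap_capabilities() instead of relying on the S_IFCHR
 * defaults above:
 *
 *        static unsigned foo_mmap_capabilities(struct file *file)
 *        {
 *                return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
 *        }
 *
 *        static const struct file_operations foo_fops = {
 *                .mmap_capabilities = foo_mmap_capabilities,
 *                .mmap = foo_mmap,
 *                .get_unmapped_area = foo_get_unmapped_area,
 *        };
 */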
 898
 899/*
  900 * we've determined that we can make the mapping; now translate what we
  901 * know into VMA flags
 902 */
 903static unsigned long determine_vm_flags(struct file *file,
 904                                        unsigned long prot,
 905                                        unsigned long flags,
 906                                        unsigned long capabilities)
 907{
 908        unsigned long vm_flags;
 909
 910        vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);
 911        /* vm_flags |= mm->def_flags; */
 912
 913        if (!(capabilities & NOMMU_MAP_DIRECT)) {
 914                /* attempt to share read-only copies of mapped file chunks */
 915                vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 916                if (file && !(prot & PROT_WRITE))
 917                        vm_flags |= VM_MAYSHARE;
 918        } else {
 919                /* overlay a shareable mapping on the backing device or inode
 920                 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
 921                 * romfs/cramfs */
 922                vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);
 923                if (flags & MAP_SHARED)
 924                        vm_flags |= VM_SHARED;
 925        }
 926
 927        /* refuse to let anyone share private mappings with this process if
 928         * it's being traced - otherwise breakpoints set in it may interfere
 929         * with another untraced process
 930         */
 931        if ((flags & MAP_PRIVATE) && current->ptrace)
 932                vm_flags &= ~VM_MAYSHARE;
 933
 934        return vm_flags;
 935}
 936
 937/*
 938 * set up a shared mapping on a file (the driver or filesystem provides and
 939 * pins the storage)
 940 */
 941static int do_mmap_shared_file(struct vm_area_struct *vma)
 942{
 943        int ret;
 944
 945        ret = call_mmap(vma->vm_file, vma);
 946        if (ret == 0) {
 947                vma->vm_region->vm_top = vma->vm_region->vm_end;
 948                return 0;
 949        }
 950        if (ret != -ENOSYS)
 951                return ret;
 952
 953        /* getting -ENOSYS indicates that direct mmap isn't possible (as
 954         * opposed to tried but failed) so we can only give a suitable error as
 955         * it's not possible to make a private copy if MAP_SHARED was given */
 956        return -ENODEV;
 957}
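
/*
 * Illustrative sketch (hypothetical foo_* driver): returning -ENOSYS from a
 * ->mmap() handler tells the callers here that a direct mapping is not
 * possible at all; a MAP_SHARED request then fails with -ENODEV, whereas
 * do_mmap_private() below falls back to making a private copy instead:
 *
 *        static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *        {
 *                return -ENOSYS;
 *        }
 */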
 958
 959/*
 960 * set up a private mapping or an anonymous shared mapping
 961 */
 962static int do_mmap_private(struct vm_area_struct *vma,
 963                           struct vm_region *region,
 964                           unsigned long len,
 965                           unsigned long capabilities)
 966{
 967        unsigned long total, point;
 968        void *base;
 969        int ret, order;
 970
 971        /* invoke the file's mapping function so that it can keep track of
 972         * shared mappings on devices or memory
 973         * - VM_MAYSHARE will be set if it may attempt to share
 974         */
 975        if (capabilities & NOMMU_MAP_DIRECT) {
 976                ret = call_mmap(vma->vm_file, vma);
 977                if (ret == 0) {
 978                        /* shouldn't return success if we're not sharing */
 979                        BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
 980                        vma->vm_region->vm_top = vma->vm_region->vm_end;
 981                        return 0;
 982                }
 983                if (ret != -ENOSYS)
 984                        return ret;
 985
 986                /* getting an ENOSYS error indicates that direct mmap isn't
 987                 * possible (as opposed to tried but failed) so we'll try to
 988                 * make a private copy of the data and map that instead */
 989        }
 990
 991
 992        /* allocate some memory to hold the mapping
 993         * - note that this may not return a page-aligned address if the object
 994         *   we're allocating is smaller than a page
 995         */
 996        order = get_order(len);
 997        total = 1 << order;
 998        point = len >> PAGE_SHIFT;
 999
1000        /* we don't want to allocate a power-of-2 sized page set */
1001        if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
1002                total = point;
1003
1004        base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
1005        if (!base)
1006                goto enomem;
1007
1008        atomic_long_add(total, &mmap_pages_allocated);
1009
1010        region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
1011        region->vm_start = (unsigned long) base;
1012        region->vm_end   = region->vm_start + len;
1013        region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
1014
1015        vma->vm_start = region->vm_start;
1016        vma->vm_end   = region->vm_start + len;
1017
1018        if (vma->vm_file) {
1019                /* read the contents of a file into the copy */
1020                loff_t fpos;
1021
1022                fpos = vma->vm_pgoff;
1023                fpos <<= PAGE_SHIFT;
1024
1025                ret = kernel_read(vma->vm_file, base, len, &fpos);
1026                if (ret < 0)
1027                        goto error_free;
1028
1029                /* clear the last little bit */
1030                if (ret < len)
1031                        memset(base + ret, 0, len - ret);
1032
1033        } else {
1034                vma_set_anonymous(vma);
1035        }
1036
1037        return 0;
1038
1039error_free:
1040        free_page_series(region->vm_start, region->vm_top);
1041        region->vm_start = vma->vm_start = 0;
1042        region->vm_end   = vma->vm_end = 0;
1043        region->vm_top   = 0;
1044        return ret;
1045
1046enomem:
1047        pr_err("Allocation of length %lu from process %d (%s) failed\n",
1048               len, current->pid, current->comm);
1049        show_free_areas(0, NULL);
1050        return -ENOMEM;
1051}
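
/*
 * Worked example of the allocation and trimming above (illustrative): a
 * 5-page (20KiB with 4KiB pages) request gives order = 3, so total = 8
 * pages while point = 5; with the usual sysctl_nr_trim_pages default of 1,
 * the 3 excess pages exceed the threshold, total is trimmed back to 5 and
 * alloc_pages_exact() hands back exactly the 5 pages that are needed.
 */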
1052
1053/*
1054 * handle mapping creation for uClinux
1055 */
1056unsigned long do_mmap(struct file *file,
1057                        unsigned long addr,
1058                        unsigned long len,
1059                        unsigned long prot,
1060                        unsigned long flags,
1061                        unsigned long pgoff,
1062                        unsigned long *populate,
1063                        struct list_head *uf)
1064{
1065        struct vm_area_struct *vma;
1066        struct vm_region *region;
1067        struct rb_node *rb;
1068        vm_flags_t vm_flags;
1069        unsigned long capabilities, result;
1070        int ret;
1071
1072        *populate = 0;
1073
1074        /* decide whether we should attempt the mapping, and if so what sort of
1075         * mapping */
1076        ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
1077                                    &capabilities);
1078        if (ret < 0)
1079                return ret;
1080
1081        /* we ignore the address hint */
1082        addr = 0;
1083        len = PAGE_ALIGN(len);
1084
 1085        /* we've determined that we can make the mapping; now translate what we
 1086         * know into VMA flags */
1087        vm_flags = determine_vm_flags(file, prot, flags, capabilities);
1088
1089        /* we're going to need to record the mapping */
1090        region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1091        if (!region)
1092                goto error_getting_region;
1093
1094        vma = vm_area_alloc(current->mm);
1095        if (!vma)
1096                goto error_getting_vma;
1097
1098        region->vm_usage = 1;
1099        region->vm_flags = vm_flags;
1100        region->vm_pgoff = pgoff;
1101
1102        vma->vm_flags = vm_flags;
1103        vma->vm_pgoff = pgoff;
1104
1105        if (file) {
1106                region->vm_file = get_file(file);
1107                vma->vm_file = get_file(file);
1108        }
1109
1110        down_write(&nommu_region_sem);
1111
1112        /* if we want to share, we need to check for regions created by other
1113         * mmap() calls that overlap with our proposed mapping
1114         * - we can only share with a superset match on most regular files
1115         * - shared mappings on character devices and memory backed files are
1116         *   permitted to overlap inexactly as far as we are concerned for in
1117         *   these cases, sharing is handled in the driver or filesystem rather
1118         *   than here
1119         */
1120        if (vm_flags & VM_MAYSHARE) {
1121                struct vm_region *pregion;
1122                unsigned long pglen, rpglen, pgend, rpgend, start;
1123
1124                pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1125                pgend = pgoff + pglen;
1126
1127                for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
1128                        pregion = rb_entry(rb, struct vm_region, vm_rb);
1129
1130                        if (!(pregion->vm_flags & VM_MAYSHARE))
1131                                continue;
1132
1133                        /* search for overlapping mappings on the same file */
1134                        if (file_inode(pregion->vm_file) !=
1135                            file_inode(file))
1136                                continue;
1137
1138                        if (pregion->vm_pgoff >= pgend)
1139                                continue;
1140
1141                        rpglen = pregion->vm_end - pregion->vm_start;
1142                        rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1143                        rpgend = pregion->vm_pgoff + rpglen;
1144                        if (pgoff >= rpgend)
1145                                continue;
1146
1147                        /* handle inexactly overlapping matches between
1148                         * mappings */
1149                        if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
1150                            !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
1151                                /* new mapping is not a subset of the region */
1152                                if (!(capabilities & NOMMU_MAP_DIRECT))
1153                                        goto sharing_violation;
1154                                continue;
1155                        }
1156
1157                        /* we've found a region we can share */
1158                        pregion->vm_usage++;
1159                        vma->vm_region = pregion;
1160                        start = pregion->vm_start;
1161                        start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
1162                        vma->vm_start = start;
1163                        vma->vm_end = start + len;
1164
1165                        if (pregion->vm_flags & VM_MAPPED_COPY)
1166                                vma->vm_flags |= VM_MAPPED_COPY;
1167                        else {
1168                                ret = do_mmap_shared_file(vma);
1169                                if (ret < 0) {
1170                                        vma->vm_region = NULL;
1171                                        vma->vm_start = 0;
1172                                        vma->vm_end = 0;
1173                                        pregion->vm_usage--;
1174                                        pregion = NULL;
1175                                        goto error_just_free;
1176                                }
1177                        }
1178                        fput(region->vm_file);
1179                        kmem_cache_free(vm_region_jar, region);
1180                        region = pregion;
1181                        result = start;
1182                        goto share;
1183                }
1184
1185                /* obtain the address at which to make a shared mapping
1186                 * - this is the hook for quasi-memory character devices to
1187                 *   tell us the location of a shared mapping
1188                 */
1189                if (capabilities & NOMMU_MAP_DIRECT) {
1190                        addr = file->f_op->get_unmapped_area(file, addr, len,
1191                                                             pgoff, flags);
1192                        if (IS_ERR_VALUE(addr)) {
1193                                ret = addr;
1194                                if (ret != -ENOSYS)
1195                                        goto error_just_free;
1196
1197                                /* the driver refused to tell us where to site
1198                                 * the mapping so we'll have to attempt to copy
1199                                 * it */
1200                                ret = -ENODEV;
1201                                if (!(capabilities & NOMMU_MAP_COPY))
1202                                        goto error_just_free;
1203
1204                                capabilities &= ~NOMMU_MAP_DIRECT;
1205                        } else {
1206                                vma->vm_start = region->vm_start = addr;
1207                                vma->vm_end = region->vm_end = addr + len;
1208                        }
1209                }
1210        }
1211
1212        vma->vm_region = region;
1213
1214        /* set up the mapping
1215         * - the region is filled in if NOMMU_MAP_DIRECT is still set
1216         */
1217        if (file && vma->vm_flags & VM_SHARED)
1218                ret = do_mmap_shared_file(vma);
1219        else
1220                ret = do_mmap_private(vma, region, len, capabilities);
1221        if (ret < 0)
1222                goto error_just_free;
1223        add_nommu_region(region);
1224
1225        /* clear anonymous mappings that don't ask for uninitialized data */
1226        if (!vma->vm_file &&
1227            (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) ||
1228             !(flags & MAP_UNINITIALIZED)))
1229                memset((void *)region->vm_start, 0,
1230                       region->vm_end - region->vm_start);
1231
1232        /* okay... we have a mapping; now we have to register it */
1233        result = vma->vm_start;
1234
1235        current->mm->total_vm += len >> PAGE_SHIFT;
1236
1237share:
1238        add_vma_to_mm(current->mm, vma);
1239
1240        /* we flush the region from the icache only when the first executable
1241         * mapping of it is made  */
1242        if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1243                flush_icache_user_range(region->vm_start, region->vm_end);
1244                region->vm_icache_flushed = true;
1245        }
1246
1247        up_write(&nommu_region_sem);
1248
1249        return result;
1250
1251error_just_free:
1252        up_write(&nommu_region_sem);
1253error:
1254        if (region->vm_file)
1255                fput(region->vm_file);
1256        kmem_cache_free(vm_region_jar, region);
1257        if (vma->vm_file)
1258                fput(vma->vm_file);
1259        vm_area_free(vma);
1260        return ret;
1261
1262sharing_violation:
1263        up_write(&nommu_region_sem);
1264        pr_warn("Attempt to share mismatched mappings\n");
1265        ret = -EINVAL;
1266        goto error;
1267
1268error_getting_vma:
1269        kmem_cache_free(vm_region_jar, region);
1270        pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
1271                        len, current->pid);
1272        show_free_areas(0, NULL);
1273        return -ENOMEM;
1274
1275error_getting_region:
1276        pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
1277                        len, current->pid);
1278        show_free_areas(0, NULL);
1279        return -ENOMEM;
1280}
1281
1282unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1283                              unsigned long prot, unsigned long flags,
1284                              unsigned long fd, unsigned long pgoff)
1285{
1286        struct file *file = NULL;
1287        unsigned long retval = -EBADF;
1288
1289        audit_mmap_fd(fd, flags);
1290        if (!(flags & MAP_ANONYMOUS)) {
1291                file = fget(fd);
1292                if (!file)
1293                        goto out;
1294        }
1295
1296        retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1297
1298        if (file)
1299                fput(file);
1300out:
1301        return retval;
1302}
1303
1304SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1305                unsigned long, prot, unsigned long, flags,
1306                unsigned long, fd, unsigned long, pgoff)
1307{
1308        return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1309}
1310
1311#ifdef __ARCH_WANT_SYS_OLD_MMAP
1312struct mmap_arg_struct {
1313        unsigned long addr;
1314        unsigned long len;
1315        unsigned long prot;
1316        unsigned long flags;
1317        unsigned long fd;
1318        unsigned long offset;
1319};
1320
1321SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1322{
1323        struct mmap_arg_struct a;
1324
1325        if (copy_from_user(&a, arg, sizeof(a)))
1326                return -EFAULT;
1327        if (offset_in_page(a.offset))
1328                return -EINVAL;
1329
1330        return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1331                               a.offset >> PAGE_SHIFT);
1332}
1333#endif /* __ARCH_WANT_SYS_OLD_MMAP */
1334
1335/*
 1336 * split a VMA into two pieces at address 'addr'; a new VMA is allocated for
 1337 * either the first part or the tail.
1338 */
1339int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
1340              unsigned long addr, int new_below)
1341{
1342        struct vm_area_struct *new;
1343        struct vm_region *region;
1344        unsigned long npages;
1345
1346        /* we're only permitted to split anonymous regions (these should have
1347         * only a single usage on the region) */
1348        if (vma->vm_file)
1349                return -ENOMEM;
1350
1351        if (mm->map_count >= sysctl_max_map_count)
1352                return -ENOMEM;
1353
1354        region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1355        if (!region)
1356                return -ENOMEM;
1357
1358        new = vm_area_dup(vma);
1359        if (!new) {
1360                kmem_cache_free(vm_region_jar, region);
1361                return -ENOMEM;
1362        }
1363
1364        /* most fields are the same, copy all, and then fixup */
1365        *region = *vma->vm_region;
1366        new->vm_region = region;
1367
1368        npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1369
1370        if (new_below) {
1371                region->vm_top = region->vm_end = new->vm_end = addr;
1372        } else {
1373                region->vm_start = new->vm_start = addr;
1374                region->vm_pgoff = new->vm_pgoff += npages;
1375        }
1376
1377        if (new->vm_ops && new->vm_ops->open)
1378                new->vm_ops->open(new);
1379
1380        delete_vma_from_mm(vma);
1381        down_write(&nommu_region_sem);
1382        delete_nommu_region(vma->vm_region);
1383        if (new_below) {
1384                vma->vm_region->vm_start = vma->vm_start = addr;
1385                vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1386        } else {
1387                vma->vm_region->vm_end = vma->vm_end = addr;
1388                vma->vm_region->vm_top = addr;
1389        }
1390        add_nommu_region(vma->vm_region);
1391        add_nommu_region(new->vm_region);
1392        up_write(&nommu_region_sem);
1393        add_vma_to_mm(mm, vma);
1394        add_vma_to_mm(mm, new);
1395        return 0;
1396}
1397
1398/*
1399 * shrink a VMA by removing the specified chunk from either the beginning or
1400 * the end
1401 */
1402static int shrink_vma(struct mm_struct *mm,
1403                      struct vm_area_struct *vma,
1404                      unsigned long from, unsigned long to)
1405{
1406        struct vm_region *region;
1407
1408        /* adjust the VMA's pointers, which may reposition it in the MM's tree
1409         * and list */
1410        delete_vma_from_mm(vma);
1411        if (from > vma->vm_start)
1412                vma->vm_end = from;
1413        else
1414                vma->vm_start = to;
1415        add_vma_to_mm(mm, vma);
1416
1417        /* cut the backing region down to size */
1418        region = vma->vm_region;
1419        BUG_ON(region->vm_usage != 1);
1420
1421        down_write(&nommu_region_sem);
1422        delete_nommu_region(region);
1423        if (from > region->vm_start) {
1424                to = region->vm_top;
1425                region->vm_top = region->vm_end = from;
1426        } else {
1427                region->vm_start = to;
1428        }
1429        add_nommu_region(region);
1430        up_write(&nommu_region_sem);
1431
1432        free_page_series(from, to);
1433        return 0;
1434}
1435
1436/*
1437 * release a mapping
1438 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1439 *   VMA, though it need not cover the whole VMA
1440 */
1441int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
1442{
1443        struct vm_area_struct *vma;
1444        unsigned long end;
1445        int ret;
1446
1447        len = PAGE_ALIGN(len);
1448        if (len == 0)
1449                return -EINVAL;
1450
1451        end = start + len;
1452
1453        /* find the first potentially overlapping VMA */
1454        vma = find_vma(mm, start);
1455        if (!vma) {
1456                static int limit;
1457                if (limit < 5) {
1458                        pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
1459                                        current->pid, current->comm,
1460                                        start, start + len - 1);
1461                        limit++;
1462                }
1463                return -EINVAL;
1464        }
1465
1466        /* we're allowed to split an anonymous VMA but not a file-backed one */
1467        if (vma->vm_file) {
1468                do {
1469                        if (start > vma->vm_start)
1470                                return -EINVAL;
1471                        if (end == vma->vm_end)
1472                                goto erase_whole_vma;
1473                        vma = vma->vm_next;
1474                } while (vma);
1475                return -EINVAL;
1476        } else {
1477                /* the chunk must be a subset of the VMA found */
1478                if (start == vma->vm_start && end == vma->vm_end)
1479                        goto erase_whole_vma;
1480                if (start < vma->vm_start || end > vma->vm_end)
1481                        return -EINVAL;
1482                if (offset_in_page(start))
1483                        return -EINVAL;
1484                if (end != vma->vm_end && offset_in_page(end))
1485                        return -EINVAL;
1486                if (start != vma->vm_start && end != vma->vm_end) {
1487                        ret = split_vma(mm, vma, start, 1);
1488                        if (ret < 0)
1489                                return ret;
1490                }
1491                return shrink_vma(mm, vma, start, end);
1492        }
1493
1494erase_whole_vma:
1495        delete_vma_from_mm(vma);
1496        delete_vma(mm, vma);
1497        return 0;
1498}
1499
1500int vm_munmap(unsigned long addr, size_t len)
1501{
1502        struct mm_struct *mm = current->mm;
1503        int ret;
1504
1505        mmap_write_lock(mm);
1506        ret = do_munmap(mm, addr, len, NULL);
1507        mmap_write_unlock(mm);
1508        return ret;
1509}
1510EXPORT_SYMBOL(vm_munmap);
1511
1512SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1513{
1514        return vm_munmap(addr, len);
1515}
1516
1517/*
1518 * release all the mappings made in a process's VM space
1519 */
1520void exit_mmap(struct mm_struct *mm)
1521{
1522        struct vm_area_struct *vma;
1523
1524        if (!mm)
1525                return;
1526
1527        mm->total_vm = 0;
1528
1529        while ((vma = mm->mmap)) {
1530                mm->mmap = vma->vm_next;
1531                delete_vma_from_mm(vma);
1532                delete_vma(mm, vma);
1533                cond_resched();
1534        }
1535}
1536
1537int vm_brk(unsigned long addr, unsigned long len)
1538{
1539        return -ENOMEM;
1540}
1541
1542/*
1543 * expand (or shrink) an existing mapping, potentially moving it at the same
1544 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1545 *
1546 * under NOMMU conditions, we only permit changing a mapping's size, and only
1547 * as long as it stays within the region allocated by do_mmap_private() and the
1548 * block is not shareable
1549 *
1550 * MREMAP_FIXED is not supported under NOMMU conditions
1551 */
1552static unsigned long do_mremap(unsigned long addr,
1553                        unsigned long old_len, unsigned long new_len,
1554                        unsigned long flags, unsigned long new_addr)
1555{
1556        struct vm_area_struct *vma;
1557
1558        /* insanity checks first */
1559        old_len = PAGE_ALIGN(old_len);
1560        new_len = PAGE_ALIGN(new_len);
1561        if (old_len == 0 || new_len == 0)
1562                return (unsigned long) -EINVAL;
1563
1564        if (offset_in_page(addr))
1565                return -EINVAL;
1566
1567        if (flags & MREMAP_FIXED && new_addr != addr)
1568                return (unsigned long) -EINVAL;
1569
1570        vma = find_vma_exact(current->mm, addr, old_len);
1571        if (!vma)
1572                return (unsigned long) -EINVAL;
1573
1574        if (vma->vm_end != vma->vm_start + old_len)
1575                return (unsigned long) -EFAULT;
1576
1577        if (vma->vm_flags & VM_MAYSHARE)
1578                return (unsigned long) -EPERM;
1579
1580        if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1581                return (unsigned long) -ENOMEM;
1582
1583        /* all checks complete - do it */
1584        vma->vm_end = vma->vm_start + new_len;
1585        return vma->vm_start;
1586}
1587
1588SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1589                unsigned long, new_len, unsigned long, flags,
1590                unsigned long, new_addr)
1591{
1592        unsigned long ret;
1593
1594        mmap_write_lock(current->mm);
1595        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
1596        mmap_write_unlock(current->mm);
1597        return ret;
1598}
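
/*
 * Illustrative userspace sketch (not part of this file): on NOMMU, mremap()
 * can only resize a private mapping in place, and only within the backing
 * region that do_mmap_private() originally allocated; MREMAP_FIXED and any
 * real relocation are rejected by do_mremap() above.
 */
#define _GNU_SOURCE
#include <sys/mman.h>

static void *example_resize_in_place(void *old, size_t old_len, size_t new_len)
{
	/*
	 * Shrinking (or growing back up to the original allocation) stays
	 * within the region, so this can succeed; growing past the region
	 * returns -ENOMEM, and MREMAP_MAYMOVE does not help.
	 */
	return mremap(old, old_len, new_len, 0);
}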
1599
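/* there are no page tables to walk under NOMMU, so there is nothing to follow */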
1600struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1601                         unsigned int foll_flags)
1602{
1603        return NULL;
1604}
1605
1606int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1607                unsigned long pfn, unsigned long size, pgprot_t prot)
1608{
1609        if (addr != (pfn << PAGE_SHIFT))
1610                return -EINVAL;
1611
1612        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1613        return 0;
1614}
1615EXPORT_SYMBOL(remap_pfn_range);
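
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * character device's ->mmap() handler calls remap_pfn_range() just as it
 * would with an MMU; on NOMMU it only succeeds for an identity mapping,
 * i.e. when the user address already equals pfn << PAGE_SHIFT.
 */
static int exampledev_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* identity-map the VMA onto the matching physical pages */
	unsigned long pfn = vma->vm_start >> PAGE_SHIFT;

	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}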
1616
1617int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1618{
1619        unsigned long pfn = start >> PAGE_SHIFT;
1620        unsigned long vm_len = vma->vm_end - vma->vm_start;
1621
1622        pfn += vma->vm_pgoff;
1623        return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1624}
1625EXPORT_SYMBOL(vm_iomap_memory);
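
/*
 * Illustrative sketch (hypothetical driver, not part of this file; the base
 * address and length are made up): vm_iomap_memory() is the convenience
 * wrapper for mapping a single contiguous physical range - it folds vm_pgoff
 * into the pfn and sizes the mapping to the whole VMA.
 */
static int exampledev_mmap_aperture(struct file *file,
				    struct vm_area_struct *vma)
{
	phys_addr_t aperture_base = 0x40000000;		/* assumed */
	unsigned long aperture_len = 0x100000;		/* assumed, 1 MiB */

	return vm_iomap_memory(vma, aperture_base, aperture_len);
}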
1626
1627int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1628                        unsigned long pgoff)
1629{
1630        unsigned int size = vma->vm_end - vma->vm_start;
1631
1632        if (!(vma->vm_flags & VM_USERMAP))
1633                return -EINVAL;
1634
1635        vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1636        vma->vm_end = vma->vm_start + size;
1637
1638        return 0;
1639}
1640EXPORT_SYMBOL(remap_vmalloc_range);
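
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a driver
 * that allocated a buffer with vmalloc_user() exposes it from its ->mmap()
 * handler.  On NOMMU the VMA above is simply re-pointed at the kernel
 * buffer, and the call fails with -EINVAL unless the VMA carries VM_USERMAP.
 */
static void *example_buf;	/* assumed: allocated with vmalloc_user() */

static int exampledev_mmap_buf(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}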
1641
1642unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
1643        unsigned long len, unsigned long pgoff, unsigned long flags)
1644{
1645        return -ENOMEM;
1646}
1647
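/*
 * There is no demand paging under NOMMU, so the filemap fault handlers below
 * should never be reached; hitting them indicates a bug.
 */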
1648vm_fault_t filemap_fault(struct vm_fault *vmf)
1649{
1650        BUG();
1651        return 0;
1652}
1653EXPORT_SYMBOL(filemap_fault);
1654
1655vm_fault_t filemap_map_pages(struct vm_fault *vmf,
1656                pgoff_t start_pgoff, pgoff_t end_pgoff)
1657{
1658        BUG();
1659        return 0;
1660}
1661EXPORT_SYMBOL(filemap_map_pages);
1662
1663int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
1664                       int len, unsigned int gup_flags)
1665{
1666        struct vm_area_struct *vma;
1667        int write = gup_flags & FOLL_WRITE;
1668
1669        if (mmap_read_lock_killable(mm))
1670                return 0;
1671
1672        /* the access must start within one of the target process's mappings */
1673        vma = find_vma(mm, addr);
1674        if (vma) {
1675                /* don't overrun this mapping */
1676                if (addr + len >= vma->vm_end)
1677                        len = vma->vm_end - addr;
1678
1679                /* only read or write mappings where it is permitted */
1680                if (write && vma->vm_flags & VM_MAYWRITE)
1681                        copy_to_user_page(vma, NULL, addr,
1682                                         (void *) addr, buf, len);
1683                else if (!write && vma->vm_flags & VM_MAYREAD)
1684                        copy_from_user_page(vma, NULL, addr,
1685                                            buf, (void *) addr, len);
1686                else
1687                        len = 0;
1688        } else {
1689                len = 0;
1690        }
1691
1692        mmap_read_unlock(mm);
1693
1694        return len;
1695}
1696
1697/**
1698 * access_remote_vm - access another process' address space
1699 * @mm:         the mm_struct of the target address space
1700 * @addr:       start address to access
1701 * @buf:        source or destination buffer
1702 * @len:        number of bytes to transfer
1703 * @gup_flags:  flags modifying lookup behaviour
1704 *
1705 * The caller must hold a reference on @mm.
1706 */
1707int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1708                void *buf, int len, unsigned int gup_flags)
1709{
1710        return __access_remote_vm(mm, addr, buf, len, gup_flags);
1711}
1712
1713/*
1714 * Access another process' address space.
1715 * - source/target buffer must be kernel space
1716 */
1717int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
1718                unsigned int gup_flags)
1719{
1720        struct mm_struct *mm;
1721
1722        if (addr + len < addr)
1723                return 0;
1724
1725        mm = get_task_mm(tsk);
1726        if (!mm)
1727                return 0;
1728
1729        len = __access_remote_vm(mm, addr, buf, len, gup_flags);
1730
1731        mmput(mm);
1732        return len;
1733}
1734EXPORT_SYMBOL_GPL(access_process_vm);
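
/*
 * Illustrative sketch (not part of this file): a ptrace-style reader pulls a
 * word out of another task's address space.  Under NOMMU the copy is bounded
 * to the single VMA containing the address, so a short transfer is possible
 * at a mapping boundary.
 */
static int example_peek_word(struct task_struct *child, unsigned long addr,
			     unsigned long *value)
{
	int copied;

	copied = access_process_vm(child, addr, value, sizeof(*value),
				   FOLL_FORCE);
	return copied == sizeof(*value) ? 0 : -EIO;
}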
1735
1736/**
1737 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
1738 * @inode: The inode to check
1739 * @size: The current filesize of the inode
1740 * @newsize: The proposed filesize of the inode
1741 *
1742 * Check the shared mappings on an inode on behalf of a shrinking truncate to
1743 * make sure that any outstanding VMAs aren't broken and then shrink the
1744 * vm_regions that extend beyond the new size so that do_mmap() doesn't
1745 * automatically grant mappings that are too large.
1746 */
1747int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
1748                                size_t newsize)
1749{
1750        struct vm_area_struct *vma;
1751        struct vm_region *region;
1752        pgoff_t low, high;
1753        size_t r_size, r_top;
1754
1755        low = newsize >> PAGE_SHIFT;
1756        high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1757
1758        down_write(&nommu_region_sem);
1759        i_mmap_lock_read(inode->i_mapping);
1760
1761        /* search for VMAs that fall within the dead zone */
1762        vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
1763                /* found one - only interested if it's shared out of the page
1764                 * cache */
1765                if (vma->vm_flags & VM_SHARED) {
1766                        i_mmap_unlock_read(inode->i_mapping);
1767                        up_write(&nommu_region_sem);
1768                        return -ETXTBSY; /* not quite true, but near enough */
1769                }
1770        }
1771
1772        /* reduce any regions that overlap the dead zone - if any exist,
1773         * they will be pointed to by VMAs that don't overlap the dead zone
1774         *
1775         * we don't check for any regions that start beyond the EOF as there
1776         * shouldn't be any
1777         */
1778        vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
1779                if (!(vma->vm_flags & VM_SHARED))
1780                        continue;
1781
1782                region = vma->vm_region;
1783                r_size = region->vm_top - region->vm_start;
1784                r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
1785
1786                if (r_top > newsize) {
1787                        region->vm_top -= r_top - newsize;
1788                        if (region->vm_end > region->vm_top)
1789                                region->vm_end = region->vm_top;
1790                }
1791        }
1792
1793        i_mmap_unlock_read(inode->i_mapping);
1794        up_write(&nommu_region_sem);
1795        return 0;
1796}
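
/*
 * Illustrative sketch (hypothetical filesystem, not part of this file): a
 * shrinking truncate checks with nommu_shrink_inode_mappings() before
 * committing to the new size, so shared mappings are never left dangling
 * past EOF.
 */
static int examplefs_shrink(struct inode *inode, loff_t newsize)
{
	int ret;

	ret = nommu_shrink_inode_mappings(inode, inode->i_size, newsize);
	if (ret < 0)
		return ret;	/* -ETXTBSY: a shared mapping still covers the dead zone */

	truncate_setsize(inode, newsize);
	return 0;
}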
1797
1798/*
1799 * Initialise sysctl_user_reserve_kbytes.
1800 *
1801 * This is intended to prevent a user from starting a single memory-hogging
1802 * process so large that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
1803 * mode.
1804 *
1805 * The default value is min(3% of free memory, 128MB)
1806 * 128MB is enough to recover with sshd/login, bash, and top/kill.
1807 */
1808static int __meminit init_user_reserve(void)
1809{
1810        unsigned long free_kbytes;
1811
1812        free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
1813
1814        sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
1815        return 0;
1816}
1817subsys_initcall(init_user_reserve);
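
/*
 * Worked example (illustrative figures): with 1 GiB free at this point,
 * free_kbytes is 1048576, so free_kbytes / 32 is 32768 KiB (32 MiB), which
 * is under the 1UL << 17 KiB (128 MiB) cap and becomes the reserve; with
 * 8 GiB free the 3% figure would be 256 MiB, so the 128 MiB cap wins.
 */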
1818
1819/*
1820 * Initialise sysctl_admin_reserve_kbytes.
1821 *
1822 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
1823 * to log in and kill a memory hogging process.
1824 *
1825 * Systems with more than 256MB will reserve 8MB, enough to recover
1826 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
1827 * only reserve 3% of free pages by default.
1828 */
1829static int __meminit init_admin_reserve(void)
1830{
1831        unsigned long free_kbytes;
1832
1833        free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
1834
1835        sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
1836        return 0;
1837}
1838subsys_initcall(init_admin_reserve);
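
/*
 * Worked example (illustrative figures): the 1UL << 13 KiB (8 MiB) cap is
 * reached once free memory passes roughly 256 MiB, since 262144 KiB / 32 is
 * 8192 KiB; below that, the smaller free_kbytes / 32 figure is used.
 */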
1839