linux/mm/nommu.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/nommu.c
   4 *
   5 *  Replacement code for mm functions to support CPUs that don't
   6 *  have any form of memory management unit (thus no virtual memory).
   7 *
   8 *  See Documentation/admin-guide/mm/nommu-mmap.rst
   9 *
  10 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
  11 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
  12 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
  13 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
  14 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
  15 */
  16
  17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  18
  19#include <linux/export.h>
  20#include <linux/mm.h>
  21#include <linux/sched/mm.h>
  22#include <linux/mman.h>
  23#include <linux/swap.h>
  24#include <linux/file.h>
  25#include <linux/highmem.h>
  26#include <linux/pagemap.h>
  27#include <linux/slab.h>
  28#include <linux/vmalloc.h>
  29#include <linux/backing-dev.h>
  30#include <linux/compiler.h>
  31#include <linux/mount.h>
  32#include <linux/personality.h>
  33#include <linux/security.h>
  34#include <linux/syscalls.h>
  35#include <linux/audit.h>
  36#include <linux/printk.h>
  37
  38#include <linux/uaccess.h>
  39#include <linux/uio.h>
  40#include <asm/tlb.h>
  41#include <asm/tlbflush.h>
  42#include <asm/mmu_context.h>
  43#include "internal.h"
  44
  45unsigned long highest_memmap_pfn;
  46int heap_stack_gap = 0;
  47
  48atomic_long_t mmap_pages_allocated;
  49
  50
  51/* list of mapped, potentially shareable regions */
  52static struct kmem_cache *vm_region_jar;
  53struct rb_root nommu_region_tree = RB_ROOT;
  54DECLARE_RWSEM(nommu_region_sem);
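/*
 * Editorial note (not part of the original source): with no MMU a
 * vm_region describes the single chunk of storage that backs one or more
 * VMAs, possibly in different processes, since there is no page table to
 * give each process its own view.  Regions are refcounted via vm_usage,
 * kept in the global nommu_region_tree above and protected by
 * nommu_region_sem.
 */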
  55
  56const struct vm_operations_struct generic_file_vm_ops = {
  57};
  58
  59/*
  60 * Return the total memory allocated for this pointer, not
  61 * just what the caller asked for.
  62 *
  63 * Doesn't have to be accurate, i.e. may have races.
  64 */
  65unsigned int kobjsize(const void *objp)
  66{
  67        struct page *page;
  68
  69        /*
  70         * If the object we have should not have ksize performed on it,
  71         * return size of 0
  72         */
  73        if (!objp || !virt_addr_valid(objp))
  74                return 0;
  75
  76        page = virt_to_head_page(objp);
  77
  78        /*
  79         * If the allocator sets PageSlab, we know the pointer came from
  80         * kmalloc().
  81         */
  82        if (PageSlab(page))
  83                return ksize(objp);
  84
  85        /*
  86         * If it's not a compound page, see if we have a matching VMA
  87         * region. This test is intentionally done in reverse order,
  88         * so if there's no VMA, we still fall through and hand back
  89         * PAGE_SIZE for 0-order pages.
  90         */
  91        if (!PageCompound(page)) {
  92                struct vm_area_struct *vma;
  93
  94                vma = find_vma(current->mm, (unsigned long)objp);
  95                if (vma)
  96                        return vma->vm_end - vma->vm_start;
  97        }
  98
  99        /*
 100         * The ksize() function is only guaranteed to work for pointers
 101         * returned by kmalloc(). So handle arbitrary pointers here.
 102         */
 103        return page_size(page);
 104}
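/*
 * Editorial example (illustrative only, assuming 4 KiB pages): for a
 * kmalloc(100) pointer the slab branch above returns ksize(), e.g. 128;
 * for a pointer landing inside one of the caller's VMAs the VMA branch
 * returns vm_end - vm_start; anything else falls back to page_size() of
 * the backing (possibly compound) page.
 */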
 105
 106void vfree(const void *addr)
 107{
 108        kfree(addr);
 109}
 110EXPORT_SYMBOL(vfree);
 111
 112void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
 113{
 114        /*
 115         *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
 116         * returns only a logical address.
 117         */
 118        return kmalloc_noprof(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 119}
 120EXPORT_SYMBOL(__vmalloc_noprof);
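/*
 * Editorial sketch (not part of the original file): on !MMU kernels the
 * vmalloc() family above is just a kmalloc() wrapper, so the memory is
 * physically contiguous and bounded by what the page allocator can hand
 * out in one piece, e.g.:
 *
 *	void *buf = vmalloc(64 * 1024);		(physically contiguous here)
 *	...
 *	vfree(buf);				(ends up in kfree())
 *
 * A request that a real vmalloc() could satisfy from scattered pages may
 * therefore fail here under fragmentation.
 */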
 121
 122void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 123{
 124        return krealloc_noprof(p, size, (flags | __GFP_COMP) & ~__GFP_HIGHMEM);
 125}
 126
 127void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
 128                unsigned long start, unsigned long end, gfp_t gfp_mask,
 129                pgprot_t prot, unsigned long vm_flags, int node,
 130                const void *caller)
 131{
 132        return __vmalloc_noprof(size, gfp_mask);
 133}
 134
 135void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
 136                int node, const void *caller)
 137{
 138        return __vmalloc_noprof(size, gfp_mask);
 139}
 140
 141static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
 142{
 143        void *ret;
 144
 145        ret = __vmalloc(size, flags);
 146        if (ret) {
 147                struct vm_area_struct *vma;
 148
 149                mmap_write_lock(current->mm);
 150                vma = find_vma(current->mm, (unsigned long)ret);
 151                if (vma)
 152                        vm_flags_set(vma, VM_USERMAP);
 153                mmap_write_unlock(current->mm);
 154        }
 155
 156        return ret;
 157}
 158
 159void *vmalloc_user_noprof(unsigned long size)
 160{
 161        return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
 162}
 163EXPORT_SYMBOL(vmalloc_user_noprof);
 164
 165struct page *vmalloc_to_page(const void *addr)
 166{
 167        return virt_to_page(addr);
 168}
 169EXPORT_SYMBOL(vmalloc_to_page);
 170
 171unsigned long vmalloc_to_pfn(const void *addr)
 172{
 173        return page_to_pfn(virt_to_page(addr));
 174}
 175EXPORT_SYMBOL(vmalloc_to_pfn);
 176
 177long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
 178{
 179        /* Don't allow overflow */
 180        if ((unsigned long) addr + count < count)
 181                count = -(unsigned long) addr;
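        /*
         * Editorial example: on a 32-bit machine, addr = 0xfffff000 with
         * count = 0x2000 would wrap; the clamp above reduces count to
         * -(unsigned long)addr = 0x1000, i.e. just the bytes up to the
         * top of the address space.
         */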
 182
 183        return copy_to_iter(addr, count, iter);
 184}
 185
 186/*
 187 *      vmalloc  -  allocate virtually contiguous memory
 188 *
 189 *      @size:          allocation size
 190 *
 191 *      Allocate enough pages to cover @size from the page level
 192 *      allocator and map them into contiguous kernel virtual space.
 193 *
 194 *      For tight control over page level allocator and protection flags
 195 *      use __vmalloc() instead.
 196 */
 197void *vmalloc_noprof(unsigned long size)
 198{
 199        return __vmalloc_noprof(size, GFP_KERNEL);
 200}
 201EXPORT_SYMBOL(vmalloc_noprof);
 202
 203/*
 204 *      vmalloc_huge_node  -  allocate virtually contiguous memory, on a node
 205 *
 206 *      @size:          allocation size
 207 *      @gfp_mask:      flags for the page level allocator
 208 *      @node:          node to use for allocation or NUMA_NO_NODE
 209 *
 210 *      Allocate enough pages to cover @size from the page level
 211 *      allocator and map them into contiguous kernel virtual space.
 212 *
  213 *      Due to NOMMU implications the node argument and HUGE page attribute are
 214 *      ignored.
 215 */
 216void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node)
 217{
 218        return __vmalloc_noprof(size, gfp_mask);
 219}
 220
 221/*
 222 *      vzalloc - allocate virtually contiguous memory with zero fill
 223 *
 224 *      @size:          allocation size
 225 *
 226 *      Allocate enough pages to cover @size from the page level
 227 *      allocator and map them into contiguous kernel virtual space.
 228 *      The memory allocated is set to zero.
 229 *
 230 *      For tight control over page level allocator and protection flags
 231 *      use __vmalloc() instead.
 232 */
 233void *vzalloc_noprof(unsigned long size)
 234{
 235        return __vmalloc_noprof(size, GFP_KERNEL | __GFP_ZERO);
 236}
 237EXPORT_SYMBOL(vzalloc_noprof);
 238
 239/**
 240 * vmalloc_node - allocate memory on a specific node
 241 * @size:       allocation size
 242 * @node:       numa node
 243 *
 244 * Allocate enough pages to cover @size from the page level
 245 * allocator and map them into contiguous kernel virtual space.
 246 *
 247 * For tight control over page level allocator and protection flags
 248 * use __vmalloc() instead.
 249 */
 250void *vmalloc_node_noprof(unsigned long size, int node)
 251{
 252        return vmalloc_noprof(size);
 253}
 254EXPORT_SYMBOL(vmalloc_node_noprof);
 255
 256/**
 257 * vzalloc_node - allocate memory on a specific node with zero fill
 258 * @size:       allocation size
 259 * @node:       numa node
 260 *
 261 * Allocate enough pages to cover @size from the page level
 262 * allocator and map them into contiguous kernel virtual space.
 263 * The memory allocated is set to zero.
 264 *
 265 * For tight control over page level allocator and protection flags
 266 * use __vmalloc() instead.
 267 */
 268void *vzalloc_node_noprof(unsigned long size, int node)
 269{
 270        return vzalloc_noprof(size);
 271}
 272EXPORT_SYMBOL(vzalloc_node_noprof);
 273
 274/**
 275 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 276 *      @size:          allocation size
 277 *
 278 *      Allocate enough 32bit PA addressable pages to cover @size from the
 279 *      page level allocator and map them into contiguous kernel virtual space.
 280 */
 281void *vmalloc_32_noprof(unsigned long size)
 282{
 283        return __vmalloc_noprof(size, GFP_KERNEL);
 284}
 285EXPORT_SYMBOL(vmalloc_32_noprof);
 286
 287/**
 288 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 289 *      @size:          allocation size
 290 *
 291 * The resulting memory area is 32bit addressable and zeroed so it can be
 292 * mapped to userspace without leaking data.
 293 *
 294 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 295 * remap_vmalloc_range() are permissible.
 296 */
 297void *vmalloc_32_user_noprof(unsigned long size)
 298{
 299        /*
 300         * We'll have to sort out the ZONE_DMA bits for 64-bit,
 301         * but for now this can simply use vmalloc_user() directly.
 302         */
 303        return vmalloc_user_noprof(size);
 304}
 305EXPORT_SYMBOL(vmalloc_32_user_noprof);
 306
 307void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
 308{
 309        BUG();
 310        return NULL;
 311}
 312EXPORT_SYMBOL(vmap);
 313
 314void vunmap(const void *addr)
 315{
 316        BUG();
 317}
 318EXPORT_SYMBOL(vunmap);
 319
 320void *vm_map_ram(struct page **pages, unsigned int count, int node)
 321{
 322        BUG();
 323        return NULL;
 324}
 325EXPORT_SYMBOL(vm_map_ram);
 326
 327void vm_unmap_ram(const void *mem, unsigned int count)
 328{
 329        BUG();
 330}
 331EXPORT_SYMBOL(vm_unmap_ram);
 332
 333void vm_unmap_aliases(void)
 334{
 335}
 336EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 337
 338void free_vm_area(struct vm_struct *area)
 339{
 340        BUG();
 341}
 342EXPORT_SYMBOL_GPL(free_vm_area);
 343
 344int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 345                   struct page *page)
 346{
 347        return -EINVAL;
 348}
 349EXPORT_SYMBOL(vm_insert_page);
 350
 351int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
 352                        struct page **pages, unsigned long *num)
 353{
 354        return -EINVAL;
 355}
 356EXPORT_SYMBOL(vm_insert_pages);
 357
 358int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
 359                        unsigned long num)
 360{
 361        return -EINVAL;
 362}
 363EXPORT_SYMBOL(vm_map_pages);
 364
 365int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
 366                                unsigned long num)
 367{
 368        return -EINVAL;
 369}
 370EXPORT_SYMBOL(vm_map_pages_zero);
 371
 372/*
 373 *  sys_brk() for the most part doesn't need the global kernel
 374 *  lock, except when an application is doing something nasty
 375 *  like trying to un-brk an area that has already been mapped
  376 *  to a regular file.  In this case, the unmapping will need
 377 *  to invoke file system routines that need the global lock.
 378 */
 379SYSCALL_DEFINE1(brk, unsigned long, brk)
 380{
 381        struct mm_struct *mm = current->mm;
 382
 383        if (brk < mm->start_brk || brk > mm->context.end_brk)
 384                return mm->brk;
 385
 386        if (mm->brk == brk)
 387                return mm->brk;
 388
 389        /*
 390         * Always allow shrinking brk
 391         */
 392        if (brk <= mm->brk) {
 393                mm->brk = brk;
 394                return brk;
 395        }
 396
 397        /*
 398         * Ok, looks good - let it rip.
 399         */
 400        flush_icache_user_range(mm->brk, brk);
 401        return mm->brk = brk;
 402}
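/*
 * Editorial note (not part of the original source): under !MMU the heap is
 * a fixed window from mm->start_brk to mm->context.end_brk, typically set
 * up by the binary loader at execve() time, so the syscall above never
 * allocates anything - it only slides mm->brk within that window (and
 * flushes the icache over the newly exposed bytes when growing).  As a
 * hypothetical example, brk(mm->brk + 4096) succeeds as a pure pointer
 * move, while a request beyond end_brk just returns the old break.
 */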
 403
 404static int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
 405
 406static const struct ctl_table nommu_table[] = {
 407        {
 408                .procname       = "nr_trim_pages",
 409                .data           = &sysctl_nr_trim_pages,
 410                .maxlen         = sizeof(sysctl_nr_trim_pages),
 411                .mode           = 0644,
 412                .proc_handler   = proc_dointvec_minmax,
 413                .extra1         = SYSCTL_ZERO,
 414        },
 415};
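/*
 * Editorial note: once mmap_init() below registers the table under "vm",
 * the threshold appears as /proc/sys/vm/nr_trim_pages (mode 0644, minimum
 * 0).  Writing 0 disables trimming of excess pages in do_mmap_private();
 * any non-zero value trims whenever the excess reaches that many pages.
 */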
 416
 417/*
 418 * initialise the percpu counter for VM and region record slabs, initialise VMA
 419 * state.
 420 */
 421void __init mmap_init(void)
 422{
 423        int ret;
 424
 425        ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
 426        VM_BUG_ON(ret);
 427        vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
 428        register_sysctl_init("vm", nommu_table);
 429        vma_state_init();
 430}
 431
 432/*
 433 * validate the region tree
 434 * - the caller must hold the region lock
 435 */
 436#ifdef CONFIG_DEBUG_NOMMU_REGIONS
 437static noinline void validate_nommu_regions(void)
 438{
 439        struct vm_region *region, *last;
 440        struct rb_node *p, *lastp;
 441
 442        lastp = rb_first(&nommu_region_tree);
 443        if (!lastp)
 444                return;
 445
 446        last = rb_entry(lastp, struct vm_region, vm_rb);
 447        BUG_ON(last->vm_end <= last->vm_start);
 448        BUG_ON(last->vm_top < last->vm_end);
 449
 450        while ((p = rb_next(lastp))) {
 451                region = rb_entry(p, struct vm_region, vm_rb);
 452                last = rb_entry(lastp, struct vm_region, vm_rb);
 453
 454                BUG_ON(region->vm_end <= region->vm_start);
 455                BUG_ON(region->vm_top < region->vm_end);
 456                BUG_ON(region->vm_start < last->vm_top);
 457
 458                lastp = p;
 459        }
 460}
 461#else
 462static void validate_nommu_regions(void)
 463{
 464}
 465#endif
 466
 467/*
 468 * add a region into the global tree
 469 */
 470static void add_nommu_region(struct vm_region *region)
 471{
 472        struct vm_region *pregion;
 473        struct rb_node **p, *parent;
 474
 475        validate_nommu_regions();
 476
 477        parent = NULL;
 478        p = &nommu_region_tree.rb_node;
 479        while (*p) {
 480                parent = *p;
 481                pregion = rb_entry(parent, struct vm_region, vm_rb);
 482                if (region->vm_start < pregion->vm_start)
 483                        p = &(*p)->rb_left;
 484                else if (region->vm_start > pregion->vm_start)
 485                        p = &(*p)->rb_right;
 486                else if (pregion == region)
 487                        return;
 488                else
 489                        BUG();
 490        }
 491
 492        rb_link_node(&region->vm_rb, parent, p);
 493        rb_insert_color(&region->vm_rb, &nommu_region_tree);
 494
 495        validate_nommu_regions();
 496}
 497
 498/*
 499 * delete a region from the global tree
 500 */
 501static void delete_nommu_region(struct vm_region *region)
 502{
 503        BUG_ON(!nommu_region_tree.rb_node);
 504
 505        validate_nommu_regions();
 506        rb_erase(&region->vm_rb, &nommu_region_tree);
 507        validate_nommu_regions();
 508}
 509
 510/*
 511 * free a contiguous series of pages
 512 */
 513static void free_page_series(unsigned long from, unsigned long to)
 514{
 515        for (; from < to; from += PAGE_SIZE) {
 516                struct page *page = virt_to_page((void *)from);
 517
 518                atomic_long_dec(&mmap_pages_allocated);
 519                put_page(page);
 520        }
 521}
 522
 523/*
 524 * release a reference to a region
 525 * - the caller must hold the region semaphore for writing, which this releases
 526 * - the region may not have been added to the tree yet, in which case vm_top
 527 *   will equal vm_start
 528 */
 529static void __put_nommu_region(struct vm_region *region)
 530        __releases(nommu_region_sem)
 531{
 532        BUG_ON(!nommu_region_tree.rb_node);
 533
 534        if (--region->vm_usage == 0) {
 535                if (region->vm_top > region->vm_start)
 536                        delete_nommu_region(region);
 537                up_write(&nommu_region_sem);
 538
 539                if (region->vm_file)
 540                        fput(region->vm_file);
 541
 542                /* IO memory and memory shared directly out of the pagecache
 543                 * from ramfs/tmpfs mustn't be released here */
 544                if (region->vm_flags & VM_MAPPED_COPY)
 545                        free_page_series(region->vm_start, region->vm_top);
 546                kmem_cache_free(vm_region_jar, region);
 547        } else {
 548                up_write(&nommu_region_sem);
 549        }
 550}
 551
 552/*
 553 * release a reference to a region
 554 */
 555static void put_nommu_region(struct vm_region *region)
 556{
 557        down_write(&nommu_region_sem);
 558        __put_nommu_region(region);
 559}
 560
 561static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
 562{
 563        vma->vm_mm = mm;
 564
 565        /* add the VMA to the mapping */
 566        if (vma->vm_file) {
 567                struct address_space *mapping = vma->vm_file->f_mapping;
 568
 569                i_mmap_lock_write(mapping);
 570                flush_dcache_mmap_lock(mapping);
 571                vma_interval_tree_insert(vma, &mapping->i_mmap);
 572                flush_dcache_mmap_unlock(mapping);
 573                i_mmap_unlock_write(mapping);
 574        }
 575}
 576
 577static void cleanup_vma_from_mm(struct vm_area_struct *vma)
 578{
 579        vma->vm_mm->map_count--;
 580        /* remove the VMA from the mapping */
 581        if (vma->vm_file) {
 582                struct address_space *mapping;
 583                mapping = vma->vm_file->f_mapping;
 584
 585                i_mmap_lock_write(mapping);
 586                flush_dcache_mmap_lock(mapping);
 587                vma_interval_tree_remove(vma, &mapping->i_mmap);
 588                flush_dcache_mmap_unlock(mapping);
 589                i_mmap_unlock_write(mapping);
 590        }
 591}
 592
 593/*
 594 * delete a VMA from its owning mm_struct and address space
 595 */
 596static int delete_vma_from_mm(struct vm_area_struct *vma)
 597{
 598        VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start);
 599
 600        vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
 601        if (vma_iter_prealloc(&vmi, NULL)) {
 602                pr_warn("Allocation of vma tree for process %d failed\n",
 603                       current->pid);
 604                return -ENOMEM;
 605        }
 606        cleanup_vma_from_mm(vma);
 607
 608        /* remove from the MM's tree and list */
 609        vma_iter_clear(&vmi);
 610        return 0;
 611}
 612/*
 613 * destroy a VMA record
 614 */
 615static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 616{
 617        vma_close(vma);
 618        if (vma->vm_file)
 619                fput(vma->vm_file);
 620        put_nommu_region(vma->vm_region);
 621        vm_area_free(vma);
 622}
 623
 624struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
 625                                             unsigned long start_addr,
 626                                             unsigned long end_addr)
 627{
 628        unsigned long index = start_addr;
 629
 630        mmap_assert_locked(mm);
 631        return mt_find(&mm->mm_mt, &index, end_addr - 1);
 632}
 633EXPORT_SYMBOL(find_vma_intersection);
 634
 635/*
 636 * look up the first VMA in which addr resides, NULL if none
 637 * - should be called with mm->mmap_lock at least held readlocked
 638 */
 639struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 640{
 641        VMA_ITERATOR(vmi, mm, addr);
 642
 643        return vma_iter_load(&vmi);
 644}
 645EXPORT_SYMBOL(find_vma);
 646
 647/*
 648 * expand a stack to a given address
 649 * - not supported under NOMMU conditions
 650 */
 651int expand_stack_locked(struct vm_area_struct *vma, unsigned long addr)
 652{
 653        return -ENOMEM;
 654}
 655
 656struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
 657{
 658        mmap_read_unlock(mm);
 659        return NULL;
 660}
 661
 662/*
  663 * look up the first VMA that exactly matches addr
 664 * - should be called with mm->mmap_lock at least held readlocked
 665 */
 666static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
 667                                             unsigned long addr,
 668                                             unsigned long len)
 669{
 670        struct vm_area_struct *vma;
 671        unsigned long end = addr + len;
 672        VMA_ITERATOR(vmi, mm, addr);
 673
 674        vma = vma_iter_load(&vmi);
 675        if (!vma)
 676                return NULL;
 677        if (vma->vm_start != addr)
 678                return NULL;
 679        if (vma->vm_end != end)
 680                return NULL;
 681
 682        return vma;
 683}
 684
 685/*
 686 * determine whether a mapping should be permitted and, if so, what sort of
 687 * mapping we're capable of supporting
 688 */
 689static int validate_mmap_request(struct file *file,
 690                                 unsigned long addr,
 691                                 unsigned long len,
 692                                 unsigned long prot,
 693                                 unsigned long flags,
 694                                 unsigned long pgoff,
 695                                 unsigned long *_capabilities)
 696{
 697        unsigned long capabilities, rlen;
 698        int ret;
 699
 700        /* do the simple checks first */
 701        if (flags & MAP_FIXED)
 702                return -EINVAL;
 703
 704        if ((flags & MAP_TYPE) != MAP_PRIVATE &&
 705            (flags & MAP_TYPE) != MAP_SHARED)
 706                return -EINVAL;
 707
 708        if (!len)
 709                return -EINVAL;
 710
 711        /* Careful about overflows.. */
 712        rlen = PAGE_ALIGN(len);
 713        if (!rlen || rlen > TASK_SIZE)
 714                return -ENOMEM;
 715
 716        /* offset overflow? */
 717        if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
 718                return -EOVERFLOW;
 719
 720        if (file) {
 721                /* files must support mmap */
 722                if (!can_mmap_file(file))
 723                        return -ENODEV;
 724
 725                /* work out if what we've got could possibly be shared
 726                 * - we support chardevs that provide their own "memory"
 727                 * - we support files/blockdevs that are memory backed
 728                 */
 729                if (file->f_op->mmap_capabilities) {
 730                        capabilities = file->f_op->mmap_capabilities(file);
 731                } else {
 732                        /* no explicit capabilities set, so assume some
 733                         * defaults */
 734                        switch (file_inode(file)->i_mode & S_IFMT) {
 735                        case S_IFREG:
 736                        case S_IFBLK:
 737                                capabilities = NOMMU_MAP_COPY;
 738                                break;
 739
 740                        case S_IFCHR:
 741                                capabilities =
 742                                        NOMMU_MAP_DIRECT |
 743                                        NOMMU_MAP_READ |
 744                                        NOMMU_MAP_WRITE;
 745                                break;
 746
 747                        default:
 748                                return -EINVAL;
 749                        }
 750                }
 751
 752                /* eliminate any capabilities that we can't support on this
 753                 * device */
 754                if (!file->f_op->get_unmapped_area)
 755                        capabilities &= ~NOMMU_MAP_DIRECT;
 756                if (!(file->f_mode & FMODE_CAN_READ))
 757                        capabilities &= ~NOMMU_MAP_COPY;
 758
 759                /* The file shall have been opened with read permission. */
 760                if (!(file->f_mode & FMODE_READ))
 761                        return -EACCES;
 762
 763                if (flags & MAP_SHARED) {
 764                        /* do checks for writing, appending and locking */
 765                        if ((prot & PROT_WRITE) &&
 766                            !(file->f_mode & FMODE_WRITE))
 767                                return -EACCES;
 768
 769                        if (IS_APPEND(file_inode(file)) &&
 770                            (file->f_mode & FMODE_WRITE))
 771                                return -EACCES;
 772
 773                        if (!(capabilities & NOMMU_MAP_DIRECT))
 774                                return -ENODEV;
 775
 776                        /* we mustn't privatise shared mappings */
 777                        capabilities &= ~NOMMU_MAP_COPY;
 778                } else {
 779                        /* we're going to read the file into private memory we
 780                         * allocate */
 781                        if (!(capabilities & NOMMU_MAP_COPY))
 782                                return -ENODEV;
 783
 784                        /* we don't permit a private writable mapping to be
 785                         * shared with the backing device */
 786                        if (prot & PROT_WRITE)
 787                                capabilities &= ~NOMMU_MAP_DIRECT;
 788                }
 789
 790                if (capabilities & NOMMU_MAP_DIRECT) {
 791                        if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
 792                            ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
 793                            ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
 794                            ) {
 795                                capabilities &= ~NOMMU_MAP_DIRECT;
 796                                if (flags & MAP_SHARED) {
 797                                        pr_warn("MAP_SHARED not completely supported on !MMU\n");
 798                                        return -EINVAL;
 799                                }
 800                        }
 801                }
 802
 803                /* handle executable mappings and implied executable
 804                 * mappings */
 805                if (path_noexec(&file->f_path)) {
 806                        if (prot & PROT_EXEC)
 807                                return -EPERM;
 808                } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
 809                        /* handle implication of PROT_EXEC by PROT_READ */
 810                        if (current->personality & READ_IMPLIES_EXEC) {
 811                                if (capabilities & NOMMU_MAP_EXEC)
 812                                        prot |= PROT_EXEC;
 813                        }
 814                } else if ((prot & PROT_READ) &&
 815                         (prot & PROT_EXEC) &&
 816                         !(capabilities & NOMMU_MAP_EXEC)
 817                         ) {
 818                        /* backing file is not executable, try to copy */
 819                        capabilities &= ~NOMMU_MAP_DIRECT;
 820                }
 821        } else {
 822                /* anonymous mappings are always memory backed and can be
 823                 * privately mapped
 824                 */
 825                capabilities = NOMMU_MAP_COPY;
 826
 827                /* handle PROT_EXEC implication by PROT_READ */
 828                if ((prot & PROT_READ) &&
 829                    (current->personality & READ_IMPLIES_EXEC))
 830                        prot |= PROT_EXEC;
 831        }
 832
 833        /* allow the security API to have its say */
 834        ret = security_mmap_addr(addr);
 835        if (ret < 0)
 836                return ret;
 837
 838        /* looks okay */
 839        *_capabilities = capabilities;
 840        return 0;
 841}
 842
 843/*
 844 * we've determined that we can make the mapping, now translate what we
 845 * now know into VMA flags
 846 */
 847static vm_flags_t determine_vm_flags(struct file *file,
 848                unsigned long prot,
 849                unsigned long flags,
 850                unsigned long capabilities)
 851{
 852        vm_flags_t vm_flags;
 853
 854        vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(file, flags);
 855
 856        if (!file) {
 857                /*
 858                 * MAP_ANONYMOUS. MAP_SHARED is mapped to MAP_PRIVATE, because
 859                 * there is no fork().
 860                 */
 861                vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 862        } else if (flags & MAP_PRIVATE) {
 863                /* MAP_PRIVATE file mapping */
 864                if (capabilities & NOMMU_MAP_DIRECT)
 865                        vm_flags |= (capabilities & NOMMU_VMFLAGS);
 866                else
 867                        vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 868
 869                if (!(prot & PROT_WRITE) && !current->ptrace)
 870                        /*
 871                         * R/O private file mapping which cannot be used to
 872                         * modify memory, especially also not via active ptrace
 873                         * (e.g., set breakpoints) or later by upgrading
 874                         * permissions (no mprotect()). We can try overlaying
 875                         * the file mapping, which will work e.g., on chardevs,
 876                         * ramfs/tmpfs/shmfs and romfs/cramf.
  877                         * ramfs/tmpfs/shmfs and romfs/cramfs.
 878                        vm_flags |= VM_MAYOVERLAY;
 879        } else {
 880                /* MAP_SHARED file mapping: NOMMU_MAP_DIRECT is set. */
 881                vm_flags |= VM_SHARED | VM_MAYSHARE |
 882                            (capabilities & NOMMU_VMFLAGS);
 883        }
 884
 885        return vm_flags;
 886}
 887
 888/*
 889 * set up a shared mapping on a file (the driver or filesystem provides and
 890 * pins the storage)
 891 */
 892static int do_mmap_shared_file(struct vm_area_struct *vma)
 893{
 894        int ret;
 895
 896        ret = mmap_file(vma->vm_file, vma);
 897        if (ret == 0) {
 898                vma->vm_region->vm_top = vma->vm_region->vm_end;
 899                return 0;
 900        }
 901        if (ret != -ENOSYS)
 902                return ret;
 903
 904        /* getting -ENOSYS indicates that direct mmap isn't possible (as
 905         * opposed to tried but failed) so we can only give a suitable error as
 906         * it's not possible to make a private copy if MAP_SHARED was given */
 907        return -ENODEV;
 908}
 909
 910/*
 911 * set up a private mapping or an anonymous shared mapping
 912 */
 913static int do_mmap_private(struct vm_area_struct *vma,
 914                           struct vm_region *region,
 915                           unsigned long len,
 916                           unsigned long capabilities)
 917{
 918        unsigned long total, point;
 919        void *base;
 920        int ret, order;
 921
 922        /*
 923         * Invoke the file's mapping function so that it can keep track of
 924         * shared mappings on devices or memory. VM_MAYOVERLAY will be set if
 925         * it may attempt to share, which will make is_nommu_shared_mapping()
 926         * happy.
 927         */
 928        if (capabilities & NOMMU_MAP_DIRECT) {
 929                ret = mmap_file(vma->vm_file, vma);
 930                /* shouldn't return success if we're not sharing */
 931                if (WARN_ON_ONCE(!is_nommu_shared_mapping(vma->vm_flags)))
 932                        ret = -ENOSYS;
 933                if (ret == 0) {
 934                        vma->vm_region->vm_top = vma->vm_region->vm_end;
 935                        return 0;
 936                }
 937                if (ret != -ENOSYS)
 938                        return ret;
 939
 940                /* getting an ENOSYS error indicates that direct mmap isn't
 941                 * possible (as opposed to tried but failed) so we'll try to
 942                 * make a private copy of the data and map that instead */
 943        }
 944
 945
 946        /* allocate some memory to hold the mapping
 947         * - note that this may not return a page-aligned address if the object
 948         *   we're allocating is smaller than a page
 949         */
 950        order = get_order(len);
 951        total = 1 << order;
 952        point = len >> PAGE_SHIFT;
 953
 954        /* we don't want to allocate a power-of-2 sized page set */
 955        if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
 956                total = point;
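        /*
         * Editorial example, assuming 4 KiB pages and a trim threshold of
         * 1 (the usual CONFIG_NOMMU_INITIAL_TRIM_EXCESS default): a len of
         * three pages gives order = 2, so total = 4 and point = 3; the
         * one-page excess meets the threshold, total drops back to 3, and
         * alloc_pages_exact() below hands out exactly three pages instead
         * of a power-of-two block of four.
         */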
 957
 958        base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
 959        if (!base)
 960                goto enomem;
 961
 962        atomic_long_add(total, &mmap_pages_allocated);
 963
 964        vm_flags_set(vma, VM_MAPPED_COPY);
 965        region->vm_flags = vma->vm_flags;
 966        region->vm_start = (unsigned long) base;
 967        region->vm_end   = region->vm_start + len;
 968        region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
 969
 970        vma->vm_start = region->vm_start;
 971        vma->vm_end   = region->vm_start + len;
 972
 973        if (vma->vm_file) {
 974                /* read the contents of a file into the copy */
 975                loff_t fpos;
 976
 977                fpos = vma->vm_pgoff;
 978                fpos <<= PAGE_SHIFT;
 979
 980                ret = kernel_read(vma->vm_file, base, len, &fpos);
 981                if (ret < 0)
 982                        goto error_free;
 983
 984                /* clear the last little bit */
 985                if (ret < len)
 986                        memset(base + ret, 0, len - ret);
 987
 988        } else {
 989                vma_set_anonymous(vma);
 990        }
 991
 992        return 0;
 993
 994error_free:
 995        free_page_series(region->vm_start, region->vm_top);
 996        region->vm_start = vma->vm_start = 0;
 997        region->vm_end   = vma->vm_end = 0;
 998        region->vm_top   = 0;
 999        return ret;
1000
1001enomem:
1002        pr_err("Allocation of length %lu from process %d (%s) failed\n",
1003               len, current->pid, current->comm);
1004        show_mem();
1005        return -ENOMEM;
1006}
1007
1008/*
1009 * handle mapping creation for uClinux
1010 */
1011unsigned long do_mmap(struct file *file,
1012                        unsigned long addr,
1013                        unsigned long len,
1014                        unsigned long prot,
1015                        unsigned long flags,
1016                        vm_flags_t vm_flags,
1017                        unsigned long pgoff,
1018                        unsigned long *populate,
1019                        struct list_head *uf)
1020{
1021        struct vm_area_struct *vma;
1022        struct vm_region *region;
1023        struct rb_node *rb;
1024        unsigned long capabilities, result;
1025        int ret;
1026        VMA_ITERATOR(vmi, current->mm, 0);
1027
1028        *populate = 0;
1029
1030        /* decide whether we should attempt the mapping, and if so what sort of
1031         * mapping */
1032        ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
1033                                    &capabilities);
1034        if (ret < 0)
1035                return ret;
1036
1037        /* we ignore the address hint */
1038        addr = 0;
1039        len = PAGE_ALIGN(len);
1040
1041        /* we've determined that we can make the mapping, now translate what we
1042         * now know into VMA flags */
1043        vm_flags |= determine_vm_flags(file, prot, flags, capabilities);
1044
1045
1046        /* we're going to need to record the mapping */
1047        region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1048        if (!region)
1049                goto error_getting_region;
1050
1051        vma = vm_area_alloc(current->mm);
1052        if (!vma)
1053                goto error_getting_vma;
1054
1055        region->vm_usage = 1;
1056        region->vm_flags = vm_flags;
1057        region->vm_pgoff = pgoff;
1058
1059        vm_flags_init(vma, vm_flags);
1060        vma->vm_pgoff = pgoff;
1061
1062        if (file) {
1063                region->vm_file = get_file(file);
1064                vma->vm_file = get_file(file);
1065        }
1066
1067        down_write(&nommu_region_sem);
1068
1069        /* if we want to share, we need to check for regions created by other
1070         * mmap() calls that overlap with our proposed mapping
1071         * - we can only share with a superset match on most regular files
1072         * - shared mappings on character devices and memory backed files are
 1073         *   permitted to overlap inexactly as far as we are concerned; in
1074         *   these cases, sharing is handled in the driver or filesystem rather
1075         *   than here
1076         */
1077        if (is_nommu_shared_mapping(vm_flags)) {
1078                struct vm_region *pregion;
1079                unsigned long pglen, rpglen, pgend, rpgend, start;
1080
1081                pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1082                pgend = pgoff + pglen;
1083
1084                for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
1085                        pregion = rb_entry(rb, struct vm_region, vm_rb);
1086
1087                        if (!is_nommu_shared_mapping(pregion->vm_flags))
1088                                continue;
1089
1090                        /* search for overlapping mappings on the same file */
1091                        if (file_inode(pregion->vm_file) !=
1092                            file_inode(file))
1093                                continue;
1094
1095                        if (pregion->vm_pgoff >= pgend)
1096                                continue;
1097
1098                        rpglen = pregion->vm_end - pregion->vm_start;
1099                        rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1100                        rpgend = pregion->vm_pgoff + rpglen;
1101                        if (pgoff >= rpgend)
1102                                continue;
1103
1104                        /* handle inexactly overlapping matches between
1105                         * mappings */
1106                        if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
1107                            !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
1108                                /* new mapping is not a subset of the region */
1109                                if (!(capabilities & NOMMU_MAP_DIRECT))
1110                                        goto sharing_violation;
1111                                continue;
1112                        }
1113
1114                        /* we've found a region we can share */
1115                        pregion->vm_usage++;
1116                        vma->vm_region = pregion;
1117                        start = pregion->vm_start;
1118                        start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
1119                        vma->vm_start = start;
1120                        vma->vm_end = start + len;
1121
1122                        if (pregion->vm_flags & VM_MAPPED_COPY)
1123                                vm_flags_set(vma, VM_MAPPED_COPY);
1124                        else {
1125                                ret = do_mmap_shared_file(vma);
1126                                if (ret < 0) {
1127                                        vma->vm_region = NULL;
1128                                        vma->vm_start = 0;
1129                                        vma->vm_end = 0;
1130                                        pregion->vm_usage--;
1131                                        pregion = NULL;
1132                                        goto error_just_free;
1133                                }
1134                        }
1135                        fput(region->vm_file);
1136                        kmem_cache_free(vm_region_jar, region);
1137                        region = pregion;
1138                        result = start;
1139                        goto share;
1140                }
1141
1142                /* obtain the address at which to make a shared mapping
1143                 * - this is the hook for quasi-memory character devices to
1144                 *   tell us the location of a shared mapping
1145                 */
1146                if (capabilities & NOMMU_MAP_DIRECT) {
1147                        addr = file->f_op->get_unmapped_area(file, addr, len,
1148                                                             pgoff, flags);
1149                        if (IS_ERR_VALUE(addr)) {
1150                                ret = addr;
1151                                if (ret != -ENOSYS)
1152                                        goto error_just_free;
1153
1154                                /* the driver refused to tell us where to site
1155                                 * the mapping so we'll have to attempt to copy
1156                                 * it */
1157                                ret = -ENODEV;
1158                                if (!(capabilities & NOMMU_MAP_COPY))
1159                                        goto error_just_free;
1160
1161                                capabilities &= ~NOMMU_MAP_DIRECT;
1162                        } else {
1163                                vma->vm_start = region->vm_start = addr;
1164                                vma->vm_end = region->vm_end = addr + len;
1165                        }
1166                }
1167        }
1168
1169        vma->vm_region = region;
1170
1171        /* set up the mapping
1172         * - the region is filled in if NOMMU_MAP_DIRECT is still set
1173         */
1174        if (file && vma->vm_flags & VM_SHARED)
1175                ret = do_mmap_shared_file(vma);
1176        else
1177                ret = do_mmap_private(vma, region, len, capabilities);
1178        if (ret < 0)
1179                goto error_just_free;
1180        add_nommu_region(region);
1181
1182        /* clear anonymous mappings that don't ask for uninitialized data */
1183        if (!vma->vm_file &&
1184            (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) ||
1185             !(flags & MAP_UNINITIALIZED)))
1186                memset((void *)region->vm_start, 0,
1187                       region->vm_end - region->vm_start);
1188
1189        /* okay... we have a mapping; now we have to register it */
1190        result = vma->vm_start;
1191
1192        current->mm->total_vm += len >> PAGE_SHIFT;
1193
1194share:
1195        BUG_ON(!vma->vm_region);
1196        vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
1197        if (vma_iter_prealloc(&vmi, vma))
1198                goto error_just_free;
1199
1200        setup_vma_to_mm(vma, current->mm);
1201        current->mm->map_count++;
1202        /* add the VMA to the tree */
1203        vma_iter_store_new(&vmi, vma);
1204
1205        /* we flush the region from the icache only when the first executable
1206         * mapping of it is made  */
1207        if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1208                flush_icache_user_range(region->vm_start, region->vm_end);
1209                region->vm_icache_flushed = true;
1210        }
1211
1212        up_write(&nommu_region_sem);
1213
1214        return result;
1215
1216error_just_free:
1217        up_write(&nommu_region_sem);
1218error:
1219        vma_iter_free(&vmi);
1220        if (region->vm_file)
1221                fput(region->vm_file);
1222        kmem_cache_free(vm_region_jar, region);
1223        if (vma->vm_file)
1224                fput(vma->vm_file);
1225        vm_area_free(vma);
1226        return ret;
1227
1228sharing_violation:
1229        up_write(&nommu_region_sem);
1230        pr_warn("Attempt to share mismatched mappings\n");
1231        ret = -EINVAL;
1232        goto error;
1233
1234error_getting_vma:
1235        kmem_cache_free(vm_region_jar, region);
1236        pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
1237                        len, current->pid);
1238        show_mem();
1239        return -ENOMEM;
1240
1241error_getting_region:
1242        pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
1243                        len, current->pid);
1244        show_mem();
1245        return -ENOMEM;
1246}
1247
1248unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1249                              unsigned long prot, unsigned long flags,
1250                              unsigned long fd, unsigned long pgoff)
1251{
1252        struct file *file = NULL;
1253        unsigned long retval = -EBADF;
1254
1255        audit_mmap_fd(fd, flags);
1256        if (!(flags & MAP_ANONYMOUS)) {
1257                file = fget(fd);
1258                if (!file)
1259                        goto out;
1260        }
1261
1262        retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1263
1264        if (file)
1265                fput(file);
1266out:
1267        return retval;
1268}
1269
1270SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1271                unsigned long, prot, unsigned long, flags,
1272                unsigned long, fd, unsigned long, pgoff)
1273{
1274        return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1275}
1276
1277#ifdef __ARCH_WANT_SYS_OLD_MMAP
1278struct mmap_arg_struct {
1279        unsigned long addr;
1280        unsigned long len;
1281        unsigned long prot;
1282        unsigned long flags;
1283        unsigned long fd;
1284        unsigned long offset;
1285};
1286
1287SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1288{
1289        struct mmap_arg_struct a;
1290
1291        if (copy_from_user(&a, arg, sizeof(a)))
1292                return -EFAULT;
1293        if (offset_in_page(a.offset))
1294                return -EINVAL;
1295
1296        return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1297                               a.offset >> PAGE_SHIFT);
1298}
1299#endif /* __ARCH_WANT_SYS_OLD_MMAP */
1300
1301/*
 1302 * split a vma into two pieces at address 'addr'; a new vma is allocated either
1303 * for the first part or the tail.
1304 */
1305static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
1306                     unsigned long addr, int new_below)
1307{
1308        struct vm_area_struct *new;
1309        struct vm_region *region;
1310        unsigned long npages;
1311        struct mm_struct *mm;
1312
1313        /* we're only permitted to split anonymous regions (these should have
1314         * only a single usage on the region) */
1315        if (vma->vm_file)
1316                return -ENOMEM;
1317
1318        mm = vma->vm_mm;
1319        if (mm->map_count >= sysctl_max_map_count)
1320                return -ENOMEM;
1321
1322        region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1323        if (!region)
1324                return -ENOMEM;
1325
1326        new = vm_area_dup(vma);
1327        if (!new)
1328                goto err_vma_dup;
1329
1330        /* most fields are the same, copy all, and then fixup */
1331        *region = *vma->vm_region;
1332        new->vm_region = region;
1333
1334        npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1335
1336        if (new_below) {
1337                region->vm_top = region->vm_end = new->vm_end = addr;
1338        } else {
1339                region->vm_start = new->vm_start = addr;
1340                region->vm_pgoff = new->vm_pgoff += npages;
1341        }
1342
1343        vma_iter_config(vmi, new->vm_start, new->vm_end);
1344        if (vma_iter_prealloc(vmi, vma)) {
1345                pr_warn("Allocation of vma tree for process %d failed\n",
1346                        current->pid);
1347                goto err_vmi_preallocate;
1348        }
1349
1350        if (new->vm_ops && new->vm_ops->open)
1351                new->vm_ops->open(new);
1352
1353        down_write(&nommu_region_sem);
1354        delete_nommu_region(vma->vm_region);
1355        if (new_below) {
1356                vma->vm_region->vm_start = vma->vm_start = addr;
1357                vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1358        } else {
1359                vma->vm_region->vm_end = vma->vm_end = addr;
1360                vma->vm_region->vm_top = addr;
1361        }
1362        add_nommu_region(vma->vm_region);
1363        add_nommu_region(new->vm_region);
1364        up_write(&nommu_region_sem);
1365
1366        setup_vma_to_mm(vma, mm);
1367        setup_vma_to_mm(new, mm);
1368        vma_iter_store_new(vmi, new);
1369        mm->map_count++;
1370        return 0;
1371
1372err_vmi_preallocate:
1373        vm_area_free(new);
1374err_vma_dup:
1375        kmem_cache_free(vm_region_jar, region);
1376        return -ENOMEM;
1377}
1378
1379/*
1380 * shrink a VMA by removing the specified chunk from either the beginning or
1381 * the end
1382 */
1383static int vmi_shrink_vma(struct vma_iterator *vmi,
1384                      struct vm_area_struct *vma,
1385                      unsigned long from, unsigned long to)
1386{
1387        struct vm_region *region;
1388
1389        /* adjust the VMA's pointers, which may reposition it in the MM's tree
1390         * and list */
1391        if (from > vma->vm_start) {
1392                if (vma_iter_clear_gfp(vmi, from, vma->vm_end, GFP_KERNEL))
1393                        return -ENOMEM;
1394                vma->vm_end = from;
1395        } else {
1396                if (vma_iter_clear_gfp(vmi, vma->vm_start, to, GFP_KERNEL))
1397                        return -ENOMEM;
1398                vma->vm_start = to;
1399        }
1400
1401        /* cut the backing region down to size */
1402        region = vma->vm_region;
1403        BUG_ON(region->vm_usage != 1);
1404
1405        down_write(&nommu_region_sem);
1406        delete_nommu_region(region);
1407        if (from > region->vm_start) {
1408                to = region->vm_top;
1409                region->vm_top = region->vm_end = from;
1410        } else {
1411                region->vm_start = to;
1412        }
1413        add_nommu_region(region);
1414        up_write(&nommu_region_sem);
1415
1416        free_page_series(from, to);
1417        return 0;
1418}
1419
1420/*
1421 * release a mapping
1422 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1423 *   VMA, though it need not cover the whole VMA
1424 */
1425int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
1426{
1427        VMA_ITERATOR(vmi, mm, start);
1428        struct vm_area_struct *vma;
1429        unsigned long end;
1430        int ret = 0;
1431
1432        len = PAGE_ALIGN(len);
1433        if (len == 0)
1434                return -EINVAL;
1435
1436        end = start + len;
1437
1438        /* find the first potentially overlapping VMA */
1439        vma = vma_find(&vmi, end);
1440        if (!vma) {
1441                static int limit;
1442                if (limit < 5) {
1443                        pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
1444                                        current->pid, current->comm,
1445                                        start, start + len - 1);
1446                        limit++;
1447                }
1448                return -EINVAL;
1449        }
1450
1451        /* we're allowed to split an anonymous VMA but not a file-backed one */
1452        if (vma->vm_file) {
1453                do {
1454                        if (start > vma->vm_start)
1455                                return -EINVAL;
1456                        if (end == vma->vm_end)
1457                                goto erase_whole_vma;
1458                        vma = vma_find(&vmi, end);
1459                } while (vma);
1460                return -EINVAL;
1461        } else {
1462                /* the chunk must be a subset of the VMA found */
1463                if (start == vma->vm_start && end == vma->vm_end)
1464                        goto erase_whole_vma;
1465                if (start < vma->vm_start || end > vma->vm_end)
1466                        return -EINVAL;
1467                if (offset_in_page(start))
1468                        return -EINVAL;
1469                if (end != vma->vm_end && offset_in_page(end))
1470                        return -EINVAL;
1471                if (start != vma->vm_start && end != vma->vm_end) {
1472                        ret = split_vma(&vmi, vma, start, 1);
1473                        if (ret < 0)
1474                                return ret;
1475                }
1476                return vmi_shrink_vma(&vmi, vma, start, end);
1477        }
1478
1479erase_whole_vma:
1480        if (delete_vma_from_mm(vma))
1481                ret = -ENOMEM;
1482        else
1483                delete_vma(mm, vma);
1484        return ret;
1485}
1486
1487int vm_munmap(unsigned long addr, size_t len)
1488{
1489        struct mm_struct *mm = current->mm;
1490        int ret;
1491
1492        mmap_write_lock(mm);
1493        ret = do_munmap(mm, addr, len, NULL);
1494        mmap_write_unlock(mm);
1495        return ret;
1496}
1497EXPORT_SYMBOL(vm_munmap);
1498
1499SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1500{
1501        return vm_munmap(addr, len);
1502}
1503
1504/*
1505 * release all the mappings made in a process's VM space
1506 */
1507void exit_mmap(struct mm_struct *mm)
1508{
1509        VMA_ITERATOR(vmi, mm, 0);
1510        struct vm_area_struct *vma;
1511
1512        if (!mm)
1513                return;
1514
1515        mm->total_vm = 0;
1516
1517        /*
 1518         * Lock the mm to keep lock assertions from complaining even though this is the only
1519         * user of the mm
1520         */
1521        mmap_write_lock(mm);
1522        for_each_vma(vmi, vma) {
1523                cleanup_vma_from_mm(vma);
1524                delete_vma(mm, vma);
1525                cond_resched();
1526        }
1527        __mt_destroy(&mm->mm_mt);
1528        mmap_write_unlock(mm);
1529}
1530
1531/*
1532 * expand (or shrink) an existing mapping, potentially moving it at the same
1533 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1534 *
1535 * under NOMMU conditions, we only permit changing a mapping's size, and only
1536 * as long as it stays within the region allocated by do_mmap_private() and the
1537 * block is not shareable
1538 *
1539 * MREMAP_FIXED is not supported under NOMMU conditions
1540 */
1541static unsigned long do_mremap(unsigned long addr,
1542                        unsigned long old_len, unsigned long new_len,
1543                        unsigned long flags, unsigned long new_addr)
1544{
1545        struct vm_area_struct *vma;
1546
1547        /* insanity checks first */
1548        old_len = PAGE_ALIGN(old_len);
1549        new_len = PAGE_ALIGN(new_len);
1550        if (old_len == 0 || new_len == 0)
1551                return (unsigned long) -EINVAL;
1552
1553        if (offset_in_page(addr))
1554                return -EINVAL;
1555
1556        if (flags & MREMAP_FIXED && new_addr != addr)
1557                return (unsigned long) -EINVAL;
1558
1559        vma = find_vma_exact(current->mm, addr, old_len);
1560        if (!vma)
1561                return (unsigned long) -EINVAL;
1562
1563        if (vma->vm_end != vma->vm_start + old_len)
1564                return (unsigned long) -EFAULT;
1565
1566        if (is_nommu_shared_mapping(vma->vm_flags))
1567                return (unsigned long) -EPERM;
1568
1569        if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1570                return (unsigned long) -ENOMEM;
1571
1572        /* all checks complete - do it */
1573        vma->vm_end = vma->vm_start + new_len;
1574        return vma->vm_start;
1575}
1576
1577SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1578                unsigned long, new_len, unsigned long, flags,
1579                unsigned long, new_addr)
1580{
1581        unsigned long ret;
1582
1583        mmap_write_lock(current->mm);
1584        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
1585        mmap_write_unlock(current->mm);
1586        return ret;
1587}
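
/*
 * Example (illustrative sketch, hypothetical sizes): the !MMU mremap()
 * rules above from userspace.  Resizing happens strictly in place, within
 * the backing region that do_mmap_private() originally allocated; shared
 * mappings cannot be resized at all (-EPERM) and MREMAP_FIXED to a new
 * address is rejected.
 *
 *	size_t ps = sysconf(_SC_PAGESIZE);
 *	void *p = mmap(NULL, 4 * ps, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	void *q = mremap(p, 4 * ps, 2 * ps, 0);		// shrink in place, q == p
 *	mremap(q, 2 * ps, 4 * ps, 0);			// regrow: the backing
 *							// region is still 4 pages
 *	mremap(q, 4 * ps, 8 * ps, MREMAP_MAYMOVE);	// -ENOMEM; MAYMOVE never
 *							// actually moves anything
 */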
1588
1589int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1590                unsigned long pfn, unsigned long size, pgprot_t prot)
1591{
1592        if (addr != (pfn << PAGE_SHIFT))
1593                return -EINVAL;
1594
1595        vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1596        return 0;
1597}
1598EXPORT_SYMBOL(remap_pfn_range);
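
/*
 * Example (illustrative sketch): a character driver's mmap() handler
 * using remap_pfn_range() on !MMU.  The call only succeeds for an
 * identity mapping, i.e. userspace must already have been handed the
 * physical address (normally arranged by the driver's get_unmapped_area()
 * returning it).  "foo_phys_base" is a hypothetical device address.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       foo_phys_base >> PAGE_SHIFT,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */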
1599
1600int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1601{
1602        unsigned long pfn = start >> PAGE_SHIFT;
1603        unsigned long vm_len = vma->vm_end - vma->vm_start;
1604
1605        pfn += vma->vm_pgoff;
1606        return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1607}
1608EXPORT_SYMBOL(vm_iomap_memory);
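
/*
 * Example (illustrative sketch): vm_iomap_memory() as a driver's mmap()
 * handler would call it; the helper applies vma->vm_pgoff itself and maps
 * the whole VMA.  "foo_phys_base" and "foo_len" are hypothetical.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return vm_iomap_memory(vma, foo_phys_base, foo_len);
 *	}
 */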
1609
1610int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1611                        unsigned long pgoff)
1612{
1613        unsigned int size = vma->vm_end - vma->vm_start;
1614
1615        if (!(vma->vm_flags & VM_USERMAP))
1616                return -EINVAL;
1617
1618        vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1619        vma->vm_end = vma->vm_start + size;
1620
1621        return 0;
1622}
1623EXPORT_SYMBOL(remap_vmalloc_range);
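
/*
 * Example (illustrative sketch): a driver's mmap() handler handing a
 * vmalloc buffer to userspace on !MMU.  No page tables exist, so the VMA
 * is simply pointed at the (directly addressable) buffer; the handler is
 * assumed to set VM_USERMAP itself to satisfy the check above.  "foo_buf"
 * is a hypothetical buffer obtained with vmalloc_user().
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vm_flags_set(vma, VM_USERMAP);
 *		return remap_vmalloc_range(vma, foo_buf, vma->vm_pgoff);
 *	}
 */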
1624
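/*
 * Without an MMU there is no demand paging, so the filemap fault entry
 * points below should never be reached; they exist only so that generic
 * code links, and simply BUG() if called.
 */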
1625vm_fault_t filemap_fault(struct vm_fault *vmf)
1626{
1627        BUG();
1628        return 0;
1629}
1630EXPORT_SYMBOL(filemap_fault);
1631
1632vm_fault_t filemap_map_pages(struct vm_fault *vmf,
1633                pgoff_t start_pgoff, pgoff_t end_pgoff)
1634{
1635        BUG();
1636        return 0;
1637}
1638EXPORT_SYMBOL(filemap_map_pages);
1639
1640static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
1641                              void *buf, int len, unsigned int gup_flags)
1642{
1643        struct vm_area_struct *vma;
1644        int write = gup_flags & FOLL_WRITE;
1645
1646        if (mmap_read_lock_killable(mm))
1647                return 0;
1648
1649        /* the access must start within one of the target process's mappings */
1650        vma = find_vma(mm, addr);
1651        if (vma) {
1652                /* don't overrun this mapping */
1653                if (addr + len >= vma->vm_end)
1654                        len = vma->vm_end - addr;
1655
1656                /* only read or write mappings where it is permitted */
1657                if (write && vma->vm_flags & VM_MAYWRITE)
1658                        copy_to_user_page(vma, NULL, addr,
1659                                         (void *) addr, buf, len);
1660                else if (!write && vma->vm_flags & VM_MAYREAD)
1661                        copy_from_user_page(vma, NULL, addr,
1662                                            buf, (void *) addr, len);
1663                else
1664                        len = 0;
1665        } else {
1666                len = 0;
1667        }
1668
1669        mmap_read_unlock(mm);
1670
1671        return len;
1672}
1673
1674/**
1675 * access_remote_vm - access another process' address space
1676 * @mm:         the mm_struct of the target address space
1677 * @addr:       start address to access
1678 * @buf:        source or destination buffer
1679 * @len:        number of bytes to transfer
1680 * @gup_flags:  flags modifying lookup behaviour
1681 *
1682 * The caller must hold a reference on @mm.
1683 */
1684int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1685                void *buf, int len, unsigned int gup_flags)
1686{
1687        return __access_remote_vm(mm, addr, buf, len, gup_flags);
1688}
1689
1690/*
1691 * Access another process' address space.
1692 * - source/target buffer must be kernel space
1693 */
1694int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
1695                unsigned int gup_flags)
1696{
1697        struct mm_struct *mm;
1698
1699        if (addr + len < addr)
1700                return 0;
1701
1702        mm = get_task_mm(tsk);
1703        if (!mm)
1704                return 0;
1705
1706        len = __access_remote_vm(mm, addr, buf, len, gup_flags);
1707
1708        mmput(mm);
1709        return len;
1710}
1711EXPORT_SYMBOL_GPL(access_process_vm);
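
/*
 * Example (illustrative sketch): since all !MMU tasks share one address
 * space, access_process_vm() amounts to a bounds- and permission-checked
 * memcpy.  A ptrace-style peek of another task's memory might look like
 * this ("child" and "addr" are hypothetical); only FOLL_WRITE in
 * gup_flags is actually examined here.
 *
 *	unsigned long word;
 *	int copied;
 *
 *	copied = access_process_vm(child, addr, &word, sizeof(word),
 *				   FOLL_FORCE);
 *	if (copied != sizeof(word))
 *		return -EIO;
 */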
1712
1713#ifdef CONFIG_BPF_SYSCALL
1714/*
1715 * Copy a string from another process's address space as given in mm.
1716 * If there is any error, return -EFAULT.
1717 */
1718static int __copy_remote_vm_str(struct mm_struct *mm, unsigned long addr,
1719                                void *buf, int len)
1720{
1721        unsigned long addr_end;
1722        struct vm_area_struct *vma;
1723        int ret = -EFAULT;
1724
1725        *(char *)buf = '\0';
1726
1727        if (mmap_read_lock_killable(mm))
1728                return ret;
1729
1730        /* the access must start within one of the target process's mappings */
1731        vma = find_vma(mm, addr);
1732        if (!vma)
1733                goto out;
1734
1735        if (check_add_overflow(addr, len, &addr_end))
1736                goto out;
1737
1738        /* don't overrun this mapping */
1739        if (addr_end > vma->vm_end)
1740                len = vma->vm_end - addr;
1741
1742        /* only read mappings where it is permitted */
1743        if (vma->vm_flags & VM_MAYREAD) {
1744                ret = strscpy(buf, (char *)addr, len);
1745                if (ret < 0)
1746                        ret = len - 1;
1747        }
1748
1749out:
1750        mmap_read_unlock(mm);
1751        return ret;
1752}
1753
1754/**
1755 * copy_remote_vm_str - copy a string from another process's address space.
1756 * @tsk:        the task of the target address space
1757 * @addr:       start address to read from
1758 * @buf:        destination buffer
1759 * @len:        number of bytes to copy
1760 * @gup_flags:  flags modifying lookup behaviour (unused)
1761 *
1762 * The caller must hold a reference on @tsk.
1763 *
1764 * Return: number of bytes copied from @addr (source) to @buf (destination);
1765 * not including the trailing NUL. The destination buffer is always left
1766 * NUL-terminated. On any error, return -EFAULT.
1767 */
1768int copy_remote_vm_str(struct task_struct *tsk, unsigned long addr,
1769                       void *buf, int len, unsigned int gup_flags)
1770{
1771        struct mm_struct *mm;
1772        int ret;
1773
1774        if (unlikely(len == 0))
1775                return 0;
1776
1777        mm = get_task_mm(tsk);
1778        if (!mm) {
1779                *(char *)buf = '\0';
1780                return -EFAULT;
1781        }
1782
1783        ret = __copy_remote_vm_str(mm, addr, buf, len);
1784
1785        mmput(mm);
1786
1787        return ret;
1788}
1789EXPORT_SYMBOL_GPL(copy_remote_vm_str);
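
/*
 * Example (illustrative sketch, hypothetical values): the return
 * convention of copy_remote_vm_str().  With an 8-byte buffer and the
 * remote string "hello":
 *
 *	char buf[8];
 *	int n = copy_remote_vm_str(tsk, addr, buf, sizeof(buf), 0);
 *
 * n == 5 and buf holds "hello\0".  Had the remote string been longer than
 * seven characters, the copy would be truncated, buf still NUL-terminated
 * and n == 7.  On any failure (no mapping, no VM_MAYREAD) n == -EFAULT
 * and buf[0] == '\0'.
 */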
1790#endif /* CONFIG_BPF_SYSCALL */
1791
1792/**
1793 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
1794 * @inode: The inode to check
1795 * @size: The current filesize of the inode
1796 * @newsize: The proposed filesize of the inode
1797 *
1798 * Check the shared mappings on an inode on behalf of a shrinking truncate to
1799 * make sure that any outstanding VMAs aren't broken and then shrink the
1800 * vm_regions that extend beyond the new size so that do_mmap() doesn't
1801 * automatically grant mappings that are too large.
1802 */
1803int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
1804                                size_t newsize)
1805{
1806        struct vm_area_struct *vma;
1807        struct vm_region *region;
1808        pgoff_t low, high;
1809        size_t r_size, r_top;
1810
1811        low = newsize >> PAGE_SHIFT;
1812        high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1813
1814        down_write(&nommu_region_sem);
1815        i_mmap_lock_read(inode->i_mapping);
1816
1817        /* search for VMAs that fall within the dead zone */
1818        vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
1819                /* found one - only interested if it's shared out of the page
1820                 * cache */
1821                if (vma->vm_flags & VM_SHARED) {
1822                        i_mmap_unlock_read(inode->i_mapping);
1823                        up_write(&nommu_region_sem);
1824                        return -ETXTBSY; /* not quite true, but near enough */
1825                }
1826        }
1827
1828        /* reduce any regions that overlap the dead zone - if any exist,
1829         * these will be pointed to by VMAs that don't overlap the dead zone
1830         *
1831         * we don't check for any regions that start beyond the EOF as there
1832         * shouldn't be any
1833         */
1834        vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
1835                if (!(vma->vm_flags & VM_SHARED))
1836                        continue;
1837
1838                region = vma->vm_region;
1839                r_size = region->vm_top - region->vm_start;
1840                r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
1841
1842                if (r_top > newsize) {
1843                        region->vm_top -= r_top - newsize;
1844                        if (region->vm_end > region->vm_top)
1845                                region->vm_end = region->vm_top;
1846                }
1847        }
1848
1849        i_mmap_unlock_read(inode->i_mapping);
1850        up_write(&nommu_region_sem);
1851        return 0;
1852}
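
/*
 * Illustrative sketch of the arithmetic above, with made-up numbers.
 * A file is truncated from size == 64KB down to newsize == 16KB, and a
 * shared mapping of its first 8KB exists whose backing vm_region spans
 * 32KB (vm_pgoff == 0, vm_top - vm_start == 32KB).  The VMA itself stays
 * clear of the dead zone [16KB, 64KB), so there is no -ETXTBSY, but:
 *
 *	r_size = 32KB;
 *	r_top  = (0 << PAGE_SHIFT) + 32KB;	// region ends at file offset 32KB
 *	// r_top > newsize, so trim the 16KB overhang off the region:
 *	region->vm_top -= 32KB - 16KB;
 *
 * leaving the region ending at the new EOF, so a later do_mmap() cannot
 * be satisfied out of the stale tail.
 */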
1853
1854/*
1855 * Initialise sysctl_user_reserve_kbytes.
1856 *
1857 * This is intended to prevent a user in OVERCOMMIT_NEVER mode from starting
1858 * a single memory hogging process and then being unable to recover (kill the
1859 * hog).
1860 *
1861 * The default value is min(3% of free memory, 128MB).
1862 * 128MB is enough to recover with sshd/login, bash, and top/kill.
1863 */
1864static int __meminit init_user_reserve(void)
1865{
1866        unsigned long free_kbytes;
1867
1868        free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1869
1870        sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
1871        return 0;
1872}
1873subsys_initcall(init_user_reserve);
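
/*
 * Worked example (hypothetical figures): free_kbytes / 32 is roughly 3%
 * of free memory and 1UL << 17 kilobytes is 128MB, so with 1GB free:
 *
 *	sysctl_user_reserve_kbytes = min(1048576 / 32, 131072)
 *				   = min(32768, 131072) = 32768;  // 32MB
 */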
1874
1875/*
1876 * Initialise sysctl_admin_reserve_kbytes.
1877 *
1878 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
1879 * to log in and kill a memory hogging process.
1880 *
1881 * Systems with more than 256MB will reserve 8MB, enough to recover
1882 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
1883 * only reserve 3% of free pages by default.
1884 */
1885static int __meminit init_admin_reserve(void)
1886{
1887        unsigned long free_kbytes;
1888
1889        free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1890
1891        sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
1892        return 0;
1893}
1894subsys_initcall(init_admin_reserve);
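
/*
 * Worked example (hypothetical figures): the cap here is 1UL << 13
 * kilobytes == 8MB, so a machine with 128MB free reserves
 * min(131072 / 32, 8192) = 4096KB == 4MB, while anything with more than
 * about 256MB free hits the 8MB cap.
 */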
1895
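/*
 * Without an MMU there is no copy-on-write, so a new mm does not inherit
 * the parent's VMAs at all; the only thing carried across here is the
 * reference to the executable file.
 */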
1896int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
1897{
1898        mmap_write_lock(oldmm);
1899        dup_mm_exe_file(mm, oldmm);
1900        mmap_write_unlock(oldmm);
1901        return 0;
1902}
1903