linux/arch/arm/mm/mmap.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

#define COLOUR_ALIGN(addr,pgoff)                \
        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +      \
         (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
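
/*
 * Worked example (assuming ARM's usual SHMLBA of 4 * PAGE_SIZE, i.e.
 * 16KiB with 4KiB pages): COLOUR_ALIGN(0x11000, 2) rounds 0x11000 up
 * to the next 16KiB boundary, 0x14000, then adds the colour of the
 * file offset, (2 << PAGE_SHIFT) & (SHMLBA - 1) = 0x2000, giving
 * 0x16000.  The returned address therefore matches the file offset
 * in the address bits below SHMLBA.
 */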

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches: a given page of an object
 * must always be mapped at the same offset modulo SHMLBA, so that
 * all of its virtual aliases fall in the same cache colour.
 *
 * We provide this function for all cases; in the VIVT case the
 * alignment requirement is optimised away.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();
        struct vm_unmapped_area_info info;

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
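        /*
         * Note that file-backed mappings are aligned even when mapped
         * MAP_PRIVATE: another task may map the same page of the file
         * MAP_SHARED, and all mappings of a page-cache page must share
         * one cache colour.
         */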
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);

        /*
         * We enforce the MAP_FIXED case.
         */
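        /*
         * Worked example (hypothetical values, 16KiB SHMLBA): a
         * MAP_FIXED|MAP_SHARED request at addr 0x40001000 with pgoff 2
         * fails on an aliasing cache, because (0x40001000 - 0x2000) has
         * colour bits 0x3000 set; addr 0x40002000 would be accepted,
         * since it preserves the file offset's colour.
         */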
        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        if (len > TASK_SIZE)
                return -ENOMEM;

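        /*
         * Honour an address hint if the (colour-adjusted) region fits
         * below TASK_SIZE and does not collide with an existing
         * mapping; vm_start_gap() includes the stack guard gap.
         */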
        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

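        /*
         * No usable hint: search bottom-up from mmap_base.  When
         * do_align is set, align_mask reserves the intra-SHMLBA page
         * bits (PAGE_MASK & (SHMLBA - 1), i.e. 0x3000 for 16KiB SHMLBA
         * and 4KiB pages) so vm_unmapped_area() returns an address
         * whose colour matches align_offset, the file offset's colour.
         */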
        info.flags = 0;
        info.length = len;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        const unsigned long len, const unsigned long pgoff,
                        const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();
        struct vm_unmapped_area_info info;

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        /* requesting a specific address */
        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                                (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

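        /*
         * Search top-down from mmap_base towards FIRST_USER_ADDRESS,
         * with the same colour constraints as the bottom-up case.
         */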
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = FIRST_USER_ADDRESS;
        info.high_limit = mm->mmap_base;
        info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
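        /*
         * vm_unmapped_area() returns either a page-aligned address or
         * a negative errno, so a set low-order bit signals failure.
         */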
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = mm->mmap_base;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
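/*
 * The check below limits read()/write() to the kernel's direct-mapped
 * RAM: nothing below PHYS_OFFSET is RAM, and __pa() is only meaningful
 * up to high_memory, the top of lowmem.
 */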
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
        if (addr < PHYS_OFFSET)
                return 0;
        if (addr + size > __pa(high_memory - 1) + 1)
                return 0;

        return 1;
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
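        /*
         * The mapping's last pfn must not pass the highest pfn that
         * the CPU can address, PHYS_MASK >> PAGE_SHIFT (e.g. 40-bit
         * physical addresses with LPAE).
         */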
        return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}