linux/arch/x86/kernel/sys_x86_64.c
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>

#include <asm/ia32.h>
#include <asm/syscalls.h>

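/*
 * 64-bit mmap() entry point. The file offset arrives in bytes, so it is
 * rejected unless it is page-aligned, then converted to a page offset
 * for the common sys_mmap_pgoff() helper.
 */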
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, off)
{
        long error;
        error = -EINVAL;
        if (off & ~PAGE_MASK)
                goto out;

        error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
        return error;
}

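/*
 * Pick the address range the unmapped-area search may use. 64-bit tasks
 * asking for MAP_32BIT are confined to a 1GB window below 2GB (with the
 * start optionally randomized); everyone else gets the full range from
 * TASK_UNMAPPED_BASE to TASK_SIZE.
 */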
static void find_start_end(unsigned long flags, unsigned long *begin,
                           unsigned long *end)
{
        if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
                unsigned long new_begin;
                /* This is usually used to map code in the small code
                   model, so it needs to be in the first 31 bits. Limit
                   it to that. This means we need to move the unmapped
                   base down for this case. This can give conflicts
                   with the heap, but we assume that glibc malloc knows
                   how to fall back to mmap. Give it 1GB of playground
                   for now. -AK */
                *begin = 0x40000000;
                *end = 0x80000000;
                if (current->flags & PF_RANDOMIZE) {
                        new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
                        if (new_begin)
                                *begin = new_begin;
                }
        } else {
                *begin = TASK_UNMAPPED_BASE;
                *end = TASK_SIZE;
        }
}

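/*
 * Bottom-up allocation: walk the VMA list upwards from the cached hint
 * (mm->free_area_cache) and return the first hole large enough for the
 * request, tracking the largest hole skipped so far in
 * mm->cached_hole_size to speed up later searches.
 */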
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;
        unsigned long begin, end;

        if (flags & MAP_FIXED)
                return addr;

        find_start_end(flags, &begin, &end);

        if (len > end)
                return -ENOMEM;

        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (end - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
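        /*
         * For 32-bit mappings, restart the scan from the low base when
         * the largest hole recorded so far would already fit this
         * request; otherwise continue from the cached address.
         */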
        if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
            && len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = begin;
        }
        addr = mm->free_area_cache;
        if (addr < begin)
                addr = begin;
        start_addr = addr;

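        /*
         * First-fit scan: walk the VMAs upwards from addr until a gap of
         * at least len bytes is found, restarting once from 'begin' if
         * the pass that started at the cached address runs out of room.
         */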
full_search:
        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (end - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != begin) {
                                start_addr = addr = begin;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
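                /* remember the largest hole we saw so far */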
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                addr = vma->vm_end;
        }
}

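/*
 * Top-down counterpart of arch_get_unmapped_area(): search for a hole
 * just below mm->mmap_base, walking from VMA to VMA downwards.
 * MAP_32BIT requests and requests too large for the top-down area fall
 * back to the bottom-up allocator above.
 */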
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED)
                return addr;

        /* for MAP_32BIT mappings we force the legacy mmap base */
        if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
                goto bottomup;

        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                                (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        /* check if free_area_cache is useful for us */
        if (len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = mm->mmap_base;
        }

        /* either no address requested or can't fit in requested address hole */
        addr = mm->free_area_cache;

        /* make sure it can fit in the remaining address space */
        if (addr > len) {
                vma = find_vma(mm, addr-len);
                if (!vma || addr <= vma->vm_start)
                        /* remember the address as a hint for next time */
                        return mm->free_area_cache = addr-len;
        }

        if (mm->mmap_base < len)
                goto bottomup;

        addr = mm->mmap_base-len;

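        /*
         * Walk downwards, trying to place the mapping just below each
         * VMA in turn. The loop either returns a hole that fits or
         * exits to the bottom-up fallback once vm_start is too low to
         * leave room for len bytes below it.
         */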
        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * else if new region fits below vma->vm_start,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
                if (!vma || addr+len <= vma->vm_start)
                        /* remember the address as a hint for next time */
                        return mm->free_area_cache = addr;

                /* remember the largest hole we saw so far */
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = vma->vm_start-len;
        } while (len < vma->vm_start);

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->cached_hole_size = ~0UL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = mm->mmap_base;
        mm->cached_hole_size = ~0UL;

        return addr;
}