linux/arch/x86/kernel/sys_x86_64.c
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>

#include <asm/uaccess.h>
#include <asm/ia32.h>

/*
 * sys_pipe() is the normal C calling convention for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
asmlinkage long sys_pipe(int __user *fildes)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		if (copy_to_user(fildes, fd, 2*sizeof(int)))
			error = -EFAULT;
	}
	return error;
}

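/*
 * Unlike 32-bit mmap2(), the 64-bit mmap() syscall takes its file
 * offset in bytes, so it must be page aligned here before being
 * converted to a page offset for do_mmap_pgoff().
 */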
asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long off)
{
	long error;
	struct file *file;

	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = -EBADF;
	file = NULL;
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}
	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}

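/*
 * Pick the range of the address space that the unmapped-area search
 * should cover.  MAP_32BIT from a 64-bit task restricts the search to
 * the low 2GB.  For example, a caller wanting such a mapping might do
 * (illustrative userspace call, not part of this file):
 *
 *	mmap(NULL, len, PROT_READ | PROT_EXEC,
 *	     MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);
 */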
static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
		/* This is usually needed to map code in the small code
		   model, so it needs to be in the first 31 bits of the
		   address space.  Limit it to that.  This means we need
		   to move the unmapped base down for this case.  This
		   can give conflicts with the heap, but we assume that
		   glibc malloc knows how to fall back to mmap.  Give it
		   1GB of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
	} else {
		*begin = TASK_UNMAPPED_BASE;
		*end = TASK_SIZE;
	}
}

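/*
 * Legacy bottom-up unmapped area search.  free_area_cache remembers
 * where the last search ended and cached_hole_size the largest gap
 * skipped, so repeated allocations avoid rescanning the whole VMA
 * list from the bottom each time.
 */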
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
	    && len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = begin;
	}
	addr = mm->free_area_cache;
	if (addr < begin)
		addr = begin;
	start_addr = addr;

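/*
 * Walk the VMA list from addr upward looking for a gap of at least
 * len bytes, remembering the largest hole skipped along the way so
 * later searches can short-circuit via cached_hole_size.
 */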
full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (end - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != begin) {
				start_addr = addr = begin;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
	}
}

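/*
 * Copy the kernel's utsname to userspace.  Tasks running with the
 * PER_LINUX32 personality get the machine field overridden with
 * "i686" (5 bytes, including the trailing NUL) so 32-bit userspace
 * sees an i386-class architecture string.
 */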
asmlinkage long sys_uname(struct new_utsname __user *name)
{
	int err;
	down_read(&uts_sem);
	err = copy_to_user(name, utsname(), sizeof(*name));
	up_read(&uts_sem);
	if (personality(current->personality) == PER_LINUX32)
		err |= copy_to_user(&name->machine, "i686", 5);
	return err ? -EFAULT : 0;
}