linux/arch/sh/kernel/vsyscall/vsyscall.c
/*
 * arch/sh/kernel/vsyscall/vsyscall.c
 *
 *  Copyright (C) 2006 Paul Mundt
 *
 * vDSO randomization
 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/elf.h>
#include <linux/sched.h>
#include <linux/err.h>

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;
EXPORT_SYMBOL_GPL(vdso_enabled);

static int __init vdso_setup(char *s)
{
        vdso_enabled = simple_strtoul(s, NULL, 0);
        return 1;
}
__setup("vdso=", vdso_setup);
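
/*
 * Note: "vdso=" is a kernel command-line parameter, so booting with
 * "vdso=0" clears vdso_enabled.  Nothing in this file tests the flag;
 * it is consumed elsewhere (most likely the ELF loader glue that
 * decides whether to pass the vDSO address down to userspace).
 */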

/*
 * These symbols are defined by vsyscall.o to mark the bounds
 * of the ELF DSO images included therein.
 */
extern const char vsyscall_trapa_start, vsyscall_trapa_end;
static struct page *syscall_pages[1];

int __init vsyscall_init(void)
{
        void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
        syscall_pages[0] = virt_to_page(syscall_page);

        /*
         * XXX: Map this page to a fixmap entry if we get around
         * to adding the page to ELF core dumps
         */

        memcpy(syscall_page,
               &vsyscall_trapa_start,
               &vsyscall_trapa_end - &vsyscall_trapa_start);

        return 0;
}
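
/*
 * Note: vsyscall_trapa_start/vsyscall_trapa_end bound the prebuilt DSO
 * image linked into the kernel (see the comment above the externs), so
 * vsyscall_init() only needs to copy that image into a freshly zeroed
 * page; arch_setup_additional_pages() below then maps this same page
 * into each new process.
 */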

/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret;

        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;

        addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }

        ret = install_special_mapping(mm, addr, PAGE_SIZE,
                                      VM_READ | VM_EXEC |
                                      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
                                      syscall_pages);
        if (unlikely(ret))
                goto up_fail;

        current->mm->context.vdso = (void *)addr;

up_fail:
        up_write(&mm->mmap_sem);
        return ret;
}
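
/*
 * How userspace finds this mapping: the address stored in
 * mm->context.vdso above is typically advertised to the new program
 * via the ELF auxiliary vector (AT_SYSINFO_EHDR); the arch elf.h glue
 * that emits that aux entry lives outside this file.  A hypothetical
 * userspace sketch, assuming glibc's getauxval() is available:
 *
 *      #include <sys/auxv.h>
 *
 *      void *vdso = (void *)getauxval(AT_SYSINFO_EHDR);
 *      if (vdso)
 *              ... parse the ELF image mapped at that address ...
 */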

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";

        return NULL;
}
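
/*
 * Because arch_vma_name() above returns "[vdso]" for this mapping, it
 * shows up under that name in /proc/<pid>/maps, e.g. (illustrative
 * only; the address will differ per process):
 *
 *      29554000-29555000 r-xp 00000000 00:00 0          [vdso]
 */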