/* Source: linux/arch/sh/kernel/vsyscall/vsyscall.c */
/*
 * arch/sh/kernel/vsyscall/vsyscall.c
 *
 *  Copyright (C) 2006 Paul Mundt
 *
 * vDSO randomization
 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/elf.h>
#include <linux/sched.h>
#include <linux/err.h>

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 *
 * Tunable at boot via the "vdso=" command line option (see vdso_setup
 * below); defaults to enabled.
 */
unsigned int __read_mostly vdso_enabled = 1;
EXPORT_SYMBOL_GPL(vdso_enabled);
  28
  29static int __init vdso_setup(char *s)
  30{
  31        vdso_enabled = simple_strtoul(s, NULL, 0);
  32        return 1;
  33}
  34__setup("vdso=", vdso_setup);
  35
/*
 * These symbols are defined by vsyscall.o to mark the bounds
 * of the ELF DSO images included therein.
 */
extern const char vsyscall_trapa_start, vsyscall_trapa_end;

/* Backing page for the vDSO, shared read-only by all processes. */
static struct page *syscall_pages[1];
  42
  43int __init vsyscall_init(void)
  44{
  45        void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
  46        syscall_pages[0] = virt_to_page(syscall_page);
  47
  48        /*
  49         * XXX: Map this page to a fixmap entry if we get around
  50         * to adding the page to ELF core dumps
  51         */
  52
  53        memcpy(syscall_page,
  54               &vsyscall_trapa_start,
  55               &vsyscall_trapa_end - &vsyscall_trapa_start);
  56
  57        return 0;
  58}
  59
  60/* Setup a VMA at program startup for the vsyscall page */
  61int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
  62{
  63        struct mm_struct *mm = current->mm;
  64        unsigned long addr;
  65        int ret;
  66
  67        down_write(&mm->mmap_sem);
  68        addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
  69        if (IS_ERR_VALUE(addr)) {
  70                ret = addr;
  71                goto up_fail;
  72        }
  73
  74        ret = install_special_mapping(mm, addr, PAGE_SIZE,
  75                                      VM_READ | VM_EXEC |
  76                                      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
  77                                      syscall_pages);
  78        if (unlikely(ret))
  79                goto up_fail;
  80
  81        current->mm->context.vdso = (void *)addr;
  82
  83up_fail:
  84        up_write(&mm->mmap_sem);
  85        return ret;
  86}
  87
  88const char *arch_vma_name(struct vm_area_struct *vma)
  89{
  90        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
  91                return "[vdso]";
  92
  93        return NULL;
  94}
  95
  96struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
  97{
  98        return NULL;
  99}
 100
 101int in_gate_area(struct mm_struct *mm, unsigned long address)
 102{
 103        return 0;
 104}
 105
 106int in_gate_area_no_mm(unsigned long address)
 107{
 108        return 0;
 109}
 110