/* linux/arch/sh/kernel/vsyscall/vsyscall.c */
   1/*
   2 * arch/sh/kernel/vsyscall/vsyscall.c
   3 *
   4 *  Copyright (C) 2006 Paul Mundt
   5 *
   6 * vDSO randomization
   7 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
   8 *
   9 * This file is subject to the terms and conditions of the GNU General Public
  10 * License.  See the file "COPYING" in the main directory of this archive
  11 * for more details.
  12 */
  13#include <linux/mm.h>
  14#include <linux/slab.h>
  15#include <linux/kernel.h>
  16#include <linux/init.h>
  17#include <linux/gfp.h>
  18#include <linux/module.h>
  19#include <linux/elf.h>
  20#include <linux/sched.h>
  21#include <linux/err.h>
  22
/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 *
 * Boot-time tunable via the "vdso=" command line option (parsed by
 * vdso_setup() below); non-zero enables the mapping.
 */
unsigned int __read_mostly vdso_enabled = 1;
EXPORT_SYMBOL_GPL(vdso_enabled);
  29
  30static int __init vdso_setup(char *s)
  31{
  32        vdso_enabled = simple_strtoul(s, NULL, 0);
  33        return 1;
  34}
  35__setup("vdso=", vdso_setup);
  36
/*
 * These symbols are defined by vsyscall.o to mark the bounds
 * of the ELF DSO images included therein.
 */
extern const char vsyscall_trapa_start, vsyscall_trapa_end;
/* Single backing page for the vDSO, shared read-only by all processes. */
static struct page *syscall_pages[1];
  43
  44int __init vsyscall_init(void)
  45{
  46        void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
  47        syscall_pages[0] = virt_to_page(syscall_page);
  48
  49        /*
  50         * XXX: Map this page to a fixmap entry if we get around
  51         * to adding the page to ELF core dumps
  52         */
  53
  54        memcpy(syscall_page,
  55               &vsyscall_trapa_start,
  56               &vsyscall_trapa_end - &vsyscall_trapa_start);
  57
  58        return 0;
  59}
  60
  61/* Setup a VMA at program startup for the vsyscall page */
  62int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
  63{
  64        struct mm_struct *mm = current->mm;
  65        unsigned long addr;
  66        int ret;
  67
  68        down_write(&mm->mmap_sem);
  69        addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
  70        if (IS_ERR_VALUE(addr)) {
  71                ret = addr;
  72                goto up_fail;
  73        }
  74
  75        ret = install_special_mapping(mm, addr, PAGE_SIZE,
  76                                      VM_READ | VM_EXEC |
  77                                      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |
  78                                      VM_ALWAYSDUMP,
  79                                      syscall_pages);
  80        if (unlikely(ret))
  81                goto up_fail;
  82
  83        current->mm->context.vdso = (void *)addr;
  84
  85up_fail:
  86        up_write(&mm->mmap_sem);
  87        return ret;
  88}
  89
  90const char *arch_vma_name(struct vm_area_struct *vma)
  91{
  92        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
  93                return "[vdso]";
  94
  95        return NULL;
  96}
  97
  98struct vm_area_struct *get_gate_vma(struct task_struct *task)
  99{
 100        return NULL;
 101}
 102
 103int in_gate_area(struct task_struct *task, unsigned long address)
 104{
 105        return 0;
 106}
 107
 108int in_gate_area_no_task(unsigned long address)
 109{
 110        return 0;
 111}
 112